Add stop button
app.py CHANGED
@@ -22,6 +22,7 @@ pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_con
 def extract_response_pairs(text):
     turns = re.split(r'(USER:|ASSISTANT:)', text)[1:]
     turns = [turn.strip() for turn in turns if turn.strip()]
+    print(f"conv turns are {turns[1::2]}")
     conv_list = []
     for i in range(0, len(turns[1::2]), 2):
         if i + 1 < len(turns[1::2]):
@@ -63,7 +64,7 @@ def bot(history_chat, text_input, image,
         min_length,
         top_p):
     chat_history = " ".join(history_chat) # history as a str to be passed to model
-    chat_history = chat_history + f"USER: <image>\n{text_input}\nASSISTANT:" #
+    chat_history = chat_history + f"USER: <image>\n{text_input}\nASSISTANT:" # add text input for prompting
 
 
     inference_result = infer(image, chat_history,
@@ -76,16 +77,16 @@ def bot(history_chat, text_input, image,
     # return inference and parse for new history
     chat_val = extract_response_pairs(inference_result)
 
-    #
+    # create history list for yielding the last inference response
     chat_state_list = copy.deepcopy(chat_val)
-    chat_state_list[-1][1] = "" #
+    chat_state_list[-1][1] = "" # empty last response
 
     # add characters iteratively
 
     for character in chat_val[-1][1]:
         chat_state_list[-1][1] += character
         time.sleep(0.05)
-        #
+        # yield history but with last response being streamed
         print(chat_state_list)
         yield chat_state_list
 
@@ -178,9 +179,10 @@ with gr.Blocks(css="style.css") as demo:
     ]
     with gr.Row():
         clear_chat_button = gr.Button("Clear")
+        cancel_btn = gr.Button("Stop Generation")
         chat_button = gr.Button("Submit", variant="primary")
 
-    chat_button.click(add_text, [chatbot, text_input], [chatbot, text_input]).then(bot, [chatbot, text_input,
+    chat_event1 = chat_button.click(add_text, [chatbot, text_input], [chatbot, text_input]).then(bot, [chatbot, text_input,
                                                 image, temperature,
                                                 length_penalty,
                                                 repetition_penalty,
@@ -188,7 +190,7 @@ with gr.Blocks(css="style.css") as demo:
                                                 min_length,
                                                 top_p], chatbot)
 
-    text_input.submit(
+    chat_event2 = text_input.submit(
         add_text,
         [chatbot, text_input],
         [chatbot, text_input]
@@ -220,6 +222,10 @@ with gr.Blocks(css="style.css") as demo:
             history_chat
         ],
         queue=False)
+    cancel_btn.click(
+        None, [], [],
+        cancels=[chat_event1, chat_event2]
+    )
     examples = [["./examples/baklava.png", "How to make this pastry?"],["./examples/bee.png","Describe this image."]]
     gr.Examples(examples=examples, inputs=[image, text_input, chat_inputs])
 
@@ -227,4 +233,4 @@ with gr.Blocks(css="style.css") as demo:
 
 
 if __name__ == "__main__":
-    demo.queue(max_size=10).launch(debug=True)
+    demo.queue(max_size=10).launch(debug=True)
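
The core of this commit is the Gradio event-cancellation pattern: keep handles to the chat events (chat_event1, chat_event2) and pass them to a stop button's click listener via cancels=. Below is a minimal, self-contained sketch of that pattern, assuming a recent Gradio Blocks API with tuple-style chat history; slow_echo, stop_btn, and the widget layout are illustrative stand-ins, not the Space's actual code.

import time
import gradio as gr

def slow_echo(message, history):
    # Illustrative generator that streams a reply character by character,
    # standing in for the Space's `bot` function.
    response = ""
    for ch in f"You said: {message}":
        response += ch
        time.sleep(0.05)
        yield history + [[message, response]]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    text_input = gr.Textbox()
    stop_btn = gr.Button("Stop Generation")

    # Keep a handle to the streaming event so the stop button can cancel it.
    submit_event = text_input.submit(slow_echo, [text_input, chatbot], chatbot)
    # fn=None runs nothing itself; `cancels=` aborts the listed in-flight
    # events, which is what stops the streamed generation mid-reply.
    stop_btn.click(None, [], [], cancels=[submit_event])

if __name__ == "__main__":
    # Generator outputs and `cancels=` both rely on the queue being enabled.
    demo.queue(max_size=10).launch()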