Update app.py
app.py
CHANGED
@@ -38,7 +38,8 @@ def gpt3(question,vqa_answer,caption):
         temperature=0.7,
     )
     answer = response.choices[0].text.strip()
-    return "input_text:\n"+prompt+"\n\n output_answer:\n"+answer
+    # return "input_text:\n"+prompt+"\n\n output_answer:\n"+answer
+    return answer
 
 
 def inference_chat(input_image,input_text):
@@ -51,7 +52,8 @@ def inference_chat(input_image,input_text):
     out=processor.batch_decode(out, skip_special_tokens=True)
     vqa="\n".join(out)
     gpt3_out=gpt3(input_text,vqa,cap)
-
+    gpt3_out1=gpt3(input_text,'',cap)
+    return out[0], gpt3_out,gpt3_out1
 
 with gr.Blocks(
     css="""
@@ -86,8 +88,8 @@ with gr.Blocks(
     '''
         with gr.Column():
             caption_output = gr.Textbox(lines=0, label="VQA ")
-            #caption_output_v1 = gr.Textbox(lines=0, label="Caption Output(模型caption输出)")
             gpt3_output_v1 = gr.Textbox(lines=0, label="VQA+LLM")
+            caption_output_v1 = gr.Textbox(lines=0, label="CAP+LLM")
 
     image_input.change(
         lambda: ("", "", []),
@@ -106,7 +108,7 @@ with gr.Blocks(
     clear_button.click(
         lambda: ("", [], []),
         [],
-        [chat_input, state,caption_output,gpt3_output_v1],
+        [chat_input, state,caption_output,gpt3_output_v1,caption_output_v1],
         queue=False,
     )
     submit_button.click(
@@ -115,7 +117,7 @@ with gr.Blocks(
             image_input,
             chat_input,
         ],
-        [caption_output,gpt3_output_v1],
+        [caption_output,gpt3_output_v1,caption_output_v1],
     )
     '''
     cap_submit_button.click(