Update app.py
app.py CHANGED
@@ -47,10 +47,6 @@ def chat(image, text, temperature, length_penalty,
 
     output = postprocess_output(outputs[0]["generated_text"])
     history_chat.append(output)
-    print(f"history_chat is {history_chat}")
-    print(f"user response {user_response}")
-    print(f"assistant response {assistant_response}")
-
 
     chat_val = extract_response_pairs(" ".join(history_chat))
     return chat_val, history_chat
@@ -65,7 +61,7 @@ css = """
 """
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.Markdown("LLaVA is now available in transformers with 4-bit quantization!")
+    gr.Markdown("## LLaVA, one of the greatest multimodal chat models is now available in transformers with 4-bit quantization!")
    chatbot = gr.Chatbot(label="Chat", show_label=False)
    with gr.Row():
        image = gr.Image(type="pil")
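For context, the diff touches a chat function that returns a pair of values: a list of (user, assistant) tuples for the Chatbot component and the raw history list kept in state. Below is a minimal sketch of that wiring, not the Space's actual app.py; the echo_chat function, the Textbox/Button/State components, and the echoed reply are hypothetical stand-ins for the real LLaVA pipeline call and helpers such as postprocess_output and extract_response_pairs.

import gradio as gr

def echo_chat(image, text, history_chat):
    # Hypothetical stand-in for the Space's chat(): append the user turn and a
    # placeholder model reply to the raw history, then pair turns up for the
    # Chatbot, mirroring extract_response_pairs(" ".join(history_chat)).
    history_chat = history_chat + [text, f"(echo) {text}"]
    chat_val = [(history_chat[i], history_chat[i + 1])
                for i in range(0, len(history_chat) - 1, 2)]
    return chat_val, history_chat

with gr.Blocks() as demo:
    gr.Markdown("## LLaVA demo layout sketch")
    chatbot = gr.Chatbot(label="Chat", show_label=False)
    with gr.Row():
        image = gr.Image(type="pil")
        text = gr.Textbox(label="Message")
    state = gr.State([])  # plays the role of history_chat
    send = gr.Button("Send")
    send.click(echo_chat, inputs=[image, text, state], outputs=[chatbot, state])

if __name__ == "__main__":
    demo.launch()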