File size: 2,791 Bytes
7a1e0cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import gradio as gr
import os
import time
import GPTSimple as ai
import random
import vision

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
# Markdown/HTML banner rendered at the top of the page by gr.Markdown below.
DESC = "# LLaMA 3.1 Vision\n<p>LLaMA 3.1 Vision uses LLaMA 3.1 405B and Florence 2 to give vision to LLaMA</p>"

def print_like_dislike(x: gr.LikeData):
    """Debug hook for the chatbot's like/dislike button: log which message
    (index), its content (value), and whether it was liked."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)

def add_message(history, message):
    """Push the user's turn into the chat history.

    Each uploaded file becomes its own history entry (a one-tuple of the
    file path, with no bot reply yet); the typed text, if any, becomes a
    final entry.  Returns the updated history plus a cleared, disabled
    input box (re-enabled once the bot finishes streaming).
    """
    files = message["files"]
    text = message["text"]
    for path in files:
        history.append(((path,), None))
    if text is not None:
        history.append((text, None))
    # Disable the textbox while the bot responds; value=None clears it.
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
    """Stream the assistant's reply for the latest user turn.

    Converts the Gradio chatbot history (alternating user/assistant entries,
    where a tuple first element marks an uploaded file) into an OpenAI-style
    message list, prepending each image's Florence-generated description
    (wrapped in [IMG]...[/IMG]) to the next text message.  Yields the history
    repeatedly as tokens stream in so the UI updates live.
    """
    # System prompt instructing the model to treat [IMG]...[/IMG] descriptions
    # as if it could actually see the images.
    his = [{"role": "system", "content": "you are a helpful assistant. you can\"see\" image that the user sends by the description being in [IMG][/IMG]. don't reference how you can only see a description"}]
    # Accumulates [IMG] descriptions for files sent before the next text message.
    nextone = ""
    for i in history:
        if isinstance(i[0], tuple):
            # File entry: i[0][0] is the uploaded file's path; vision.see_file
            # presumably returns a text description of the image — TODO confirm.
            nextone += "[IMG]" + vision.see_file(i[0][0]) + "[/IMG]\n"
        else:
            # Text entry: flush any pending image descriptions ahead of it.
            his.append({"role": "user", "content": nextone + i[0]})
            nextone = ""
        if i[1] is not None:
            his.append({"role": "assistant", "content": i[1]})
    chat = ai.conversation(base_url="deepinfra", model="meta-llama/Meta-Llama-3.1-405B-Instruct", history=his)
    print(his)
    stre = chat.generate(stream=True)
    # NOTE(review): assumes Gradio delivered history entries as mutable lists
    # (it round-trips tuples into lists); item assignment would fail on a tuple.
    history[-1][1] = ""
    for character in stre:
        if character.token is not None:
            history[-1][1] += character.token
            # Yield after every token so the Chatbot component streams the reply.
            yield history

def clear_history():
    """Reset the UI: empty chat history and a blank multimodal input value."""
    blank_input = {"text": "", "files": []}
    return [], blank_input

def retry_last(history):
    """Drop the last assistant reply and re-stream a fresh one via bot()."""
    history[-1][1] = None
    yield from bot(history)

# --- UI layout and event wiring -------------------------------------------
with gr.Blocks(fill_height=True, theme=gr.themes.Soft(), css="style.css") as demo:
    gr.Markdown(DESC)
    # Main conversation display; styled via #chatbot in style.css.
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        bubble_full_width=False,
        scale=1,
        show_label=False
    )

    # Input row: delete (clear) button, multimodal textbox, retry button.
    with gr.Row():
        dl = gr.Button("", icon="delete.svg")
        chat_input = gr.MultimodalTextbox(interactive=True,
                                          file_count="multiple",
                                          placeholder="Enter message or upload file...", show_label=False)

        re = gr.Button("", icon="retry.svg")

    # Submit: append the user turn (and disable the box), then stream the
    # bot reply, then re-enable the input box.
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    dl.click(clear_history, None, [chatbot, chat_input])
    re.click(retry_last, [chatbot], chatbot)

# Queueing is required for generator (streaming) event handlers.
demo.queue()
demo.launch()