File size: 2,908 Bytes
58974f8
 
 
 
 
 
62bb2b9
 
 
 
 
 
 
58974f8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62bb2b9
 
58974f8
62bb2b9
2123fe3
62bb2b9
 
2123fe3
62bb2b9
 
 
2123fe3
58974f8
 
 
 
 
2123fe3
62bb2b9
 
 
 
 
 
 
2123fe3
 
 
 
3e525a7
2123fe3
 
 
 
62bb2b9
58974f8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import core
import openai
import models
import time
import gradio as gr
import os
import asyncio
import time

# NOTE(review): a live-looking OpenAI API key was committed in the lines
# below (even commented out, the secret is leaked in version history —
# rotate the key and keep credentials out of source control).
# openai.api_base = "https://<proxy-host>/v1"
# openai.api_key = "sk-***REDACTED***"
# os.environ["OPENAI_API_KEY"] = "sk-***REDACTED***"
# os.environ["OPENAI_API_BASE"] = "https://<proxy-host>/v1"

# Read OpenAI credentials from the environment; os.environ[...] raises
# KeyError at import time if either variable is unset (fail-fast).
# NOTE(review): neither name is referenced elsewhere in this file —
# presumably the `core`/`models` modules read them; verify.
api_key = os.environ["OPENAI_API_KEY"]
api_base = os.environ["OPENAI_API_BASE"]

# def embed(texts: list):
#         return openai.Embedding.create(input=texts, model="text-embedding-ada-002")["data"]["embedding"]

def chatbot_initialize():
    """Build and return a RetrievalChatbot backed by a Chroma retriever.

    The retriever targets the "langchain" collection with an (empty)
    pdf_dir, splits text into 2048-character chunks with a 10-character
    overlap, and embeds chunks with the biomedical embedding model.
    """
    split_config = {"size": 2048, "overlap": 10}
    embedder = models.BiomedModel()
    retriever = core.retriever.ChromaRetriever(
        pdf_dir="",
        collection_name="langchain",
        split_args=split_config,  # embedding_model="text-embedding-ada-002"
        embed_model=embedder,
    )
    return core.chatbot.RetrievalChatbot(retriever=retriever)

async def respond(query, chat_history, img_path_list, chat_history_string):
    """Handle one chat turn: query the chatbot and update all UI state.

    Returns a 4-tuple matching the Gradio outputs: cleared textbox value,
    updated chat history pairs, retrieval logs, and the appended plain-text
    transcript.
    """
    started = time.time()
    global Chatbot
    response, logs = await Chatbot.response(
        query, image_paths=img_path_list, return_logs=True
    )
    chat_history.append((query, response))

    if img_path_list is None:
        image_part = "Image: None"
    else:
        # NOTE(review): with gr.File(type="filepath") the entries may be
        # plain str paths without a `.name` attribute — confirm Gradio version.
        image_part = "Images: " + "\n".join(path.name for path in img_path_list)
    chat_history_string += f"Query: {query}\n{image_part}\nResponse: {response}\n\n\n"

    elapsed = time.time() - started
    print(f"Total: {elapsed}")
    return "", chat_history, logs, chat_history_string

if __name__ == "__main__":
    # Build the chatbot once at startup; respond() reads this module-level
    # name. (The original `global Chatbot` statement here was a no-op:
    # `global` only has an effect inside a function body, so it is removed.)
    Chatbot = chatbot_initialize()

    with gr.Blocks() as demo:
        with gr.Row():
            # Left column: chat transcript, query box, multi-image upload,
            # and a button that clears the query and the transcript.
            with gr.Column(scale=2):
                chatbot = gr.Chatbot()
                msg = gr.Textbox(label="Query", show_label=True)
                imgs = gr.File(file_count='multiple', file_types=['image'], type="filepath", label='Upload Images')
                clear = gr.ClearButton([msg, chatbot])
            # Right column: read-only panes showing retrieval subquestions
            # and a copyable plain-text chat history.
            with gr.Column(scale=1):
                sidebar = gr.Textbox(label="Subquestions", show_label=True, show_copy_button=True, interactive=False, max_lines=30)
                history = gr.Textbox(label="Copy Chat History", show_label=True, show_copy_button=True, interactive=False, max_lines=5)
            # Submitting the textbox routes through respond(); outputs clear
            # the box and refresh transcript, logs, and history panes.
            msg.submit(respond, inputs=[msg, chatbot, imgs, history], outputs=[msg, chatbot, sidebar, history])
    # queue() enables the async respond() handler; launch() starts the server.
    demo.queue().launch()