add toggle
app.py
CHANGED
@@ -43,7 +43,7 @@ def add_text(history, text):
 
 
 def bot(history, api_kind,
-        num_docs, model_kind, sub_vector_size, chunk_size, splitter_type):
+        num_docs, model_kind, sub_vector_size, chunk_size, splitter_type, all_at_once):
     query = history[-1][0]
 
     if not query:
@@ -72,22 +72,23 @@ def bot(history, api_kind,
 
 
     history[-1][1] = ""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if all_at_once:
+        for model_name, doc, size, sub_vector in combinations:
+            documents_i = retrieve(query, int(num_docs), model_name, sub_vector, size, doc)
+            prompt_i = template.render(documents=documents_i, query=query)
+            prompt_html = template_html.render(documents=documents, query=query)
+
+            hist_chunk = ""
+            prev_hist = history[-1][1] + f"\nmodel {model_name}, splitter {doc}, size {size}, sub vector {sub_vector}\n"
+            for character in generate_fn(prompt_i, history[:-1]):
+                hist_chunk = character
+                history[-1][1] = prev_hist + hist_chunk
+                yield history, prompt_html
+    else:
+        for character in generate_fn(prompt, history[:-1]):
+            history[-1][1] = character
     yield history, prompt_html
-
+
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(
@@ -114,6 +115,7 @@ with gr.Blocks() as demo:
         num_docs = gr.Slider(1, 20, label="number of docs", step=1, value=4)
         model_kind = gr.Radio(choices=emb_models, value="bge", label="embedding model")
         sub_vector_size = gr.Radio(choices=sub_vectors, value="32", label="sub-vector size")
+        all_at_once = gr.Checkbox(value=False, label="Run all at once")
     with gr.Row():
         api_kind = gr.Radio(choices=["HuggingFace", "OpenAI"], value="HuggingFace", label="Chat model engine")
         chunk_size = gr.Radio(choices=chunk_sizes, value="2000", label="chunk size")
@@ -123,7 +125,7 @@ with gr.Blocks() as demo:
     # Turn off interactivity while generating if you click
     txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
         bot, [chatbot, api_kind,
-             num_docs, model_kind, sub_vector_size, chunk_size, splitter_type
+             num_docs, model_kind, sub_vector_size, chunk_size, splitter_type, all_at_once
        ], [chatbot, prompt_html])
 
     # Turn it back on
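The all_at_once branch iterates over a combinations iterable that this diff does not define; it presumably enumerates every (embedding model, splitter, chunk size, sub-vector size) configuration exposed by the radio buttons. A minimal sketch of how such an iterable could be built, assuming the choice lists emb_models, sub_vectors, and chunk_sizes already exist in app.py and using a hypothetical splitter_types list (the concrete values below are placeholders, not the real choices):

    from itertools import product

    # Placeholder choice lists; the real ones live elsewhere in app.py.
    emb_models = ["bge", "minilm"]
    splitter_types = ["recursive", "character"]
    chunk_sizes = ["500", "2000"]
    sub_vectors = ["32", "64"]

    # Tuple order matches the unpacking in bot():
    # (model_name, doc, size, sub_vector) = (embedding model, splitter, chunk size, sub-vector size)
    combinations = list(product(emb_models, splitter_types, chunk_sizes, sub_vectors))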
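The wiring relies on a standard Gradio Blocks pattern: bot is a generator, so each yield streams an updated chat history to the Chatbot component, and the Checkbox value arrives in the callback as a plain bool. A stripped-down, self-contained sketch of that pattern, assuming the tuple-style [user, bot] chat history used in app.py (component names and the dummy tokens are illustrative only):

    import gradio as gr

    def add_text(history, text):
        # Append the user turn and clear the textbox.
        return history + [[text, None]], ""

    def respond(history, run_all):
        # Generator callback: every `yield` pushes an updated history to the Chatbot.
        variants = ["config A", "config B"] if run_all else ["single config"]
        history[-1][1] = ""
        for variant in variants:
            prefix = history[-1][1] + f"\n[{variant}]\n"
            answer = ""
            for token in ["partial ", "answer ", "text"]:
                answer += token
                history[-1][1] = prefix + answer
                yield history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()  # tuple-style [user, bot] history
        txt = gr.Textbox(placeholder="Ask something")
        run_all = gr.Checkbox(value=False, label="Run all at once")

        # Same chaining pattern as app.py: add the user message, then stream the reply.
        txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
            respond, [chatbot, run_all], [chatbot]
        )

    demo.queue().launch()

When the checkbox is ticked, the same streaming loop simply runs once per configuration and keeps appending to the previous history text, which is what the committed change does with prev_hist + hist_chunk.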