Update app.py
app.py CHANGED

@@ -22,10 +22,6 @@ from llama_index.core import PromptTemplate
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core.chat_engine import CondenseQuestionChatEngine
 
-# Helps asyncio run within Jupyter
-import nest_asyncio
-import asyncio
-nest_asyncio.apply()
 
 # load env file
 load_dotenv()
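Dropping nest_asyncio makes sense here: it is only needed when an event loop is already running (as in a Jupyter kernel), whereas a Space runs app.py as a plain script. A minimal sketch of the difference, assuming no loop is running at import time:

    import asyncio

    async def main():
        return "ok"

    # In a plain script no loop is running, so asyncio.run() succeeds without
    # nest_asyncio.apply(); inside Jupyter the same call raises RuntimeError
    # because the kernel's event loop is already running.
    print(asyncio.run(main()))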
@@ -74,7 +70,7 @@ else:
     )
     documents = reader.load_data()
     print("index creating with `%d` documents", len(documents))
-    index = VectorStoreIndex.from_documents(documents, embed_model=embed_model, transformations=[splitter]
+    index = VectorStoreIndex.from_documents(documents, embed_model=embed_model, transformations=[splitter])
    index.storage_context.persist(persist_dir="./vectordb")
 
 """
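This hunk is a real fix: the old from_documents line was missing its closing parenthesis, so the previous file could not even parse. (Separately, the print call above uses logging-style %d formatting, which print() does not interpolate; it prints the literal format string followed by the count.) Once the index is persisted to ./vectordb, the matching load path uses llama_index.core's storage helpers; a minimal sketch, assuming embed_model is the same embedding model used at build time:

    from llama_index.core import StorageContext, load_index_from_storage

    # Rebuild the index from the persisted directory instead of re-embedding.
    storage_context = StorageContext.from_defaults(persist_dir="./vectordb")
    index = load_index_from_storage(storage_context, embed_model=embed_model)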
@@ -118,7 +114,6 @@ retriever = DocumentSummaryIndexEmbeddingRetriever(
 retriever = VectorIndexRetriever(
     index = index,
     similarity_top_k = 10,
-    use_async=True
     #vector_store_query_mode="mmr",
     #vector_store_kwargs={"mmr_threshold": 0.4}
 )
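Removing use_async=True is consistent with dropping the asyncio setup above: the retriever now fetches synchronously. A minimal sketch of how such a retriever is typically wired into a query engine (RetrieverQueryEngine is llama_index.core API; the llm variable is an assumption, standing in for whatever LLM app.py configures):

    from llama_index.core.query_engine import RetrieverQueryEngine

    # Combine the top-k retriever with an LLM-backed response synthesizer.
    query_engine = RetrieverQueryEngine.from_args(retriever=retriever, llm=llm)
    response = query_engine.query("What does the corpus cover?")
    print(response.response)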
@@ -197,13 +192,13 @@ with gr.Blocks() as demo:
         placeholder="Ask me something",)
     clear = gr.Button("Delete")
 
-
+    def user(user_message, history):
         return "", history + [[user_message, None]]
 
-
+    def bot(history):
         user_message = history[-1][0]
         #bot_message = chat_engine.chat(user_message)
-        bot_message =
+        bot_message = query_engine.query(user_message + "Let's think step by step to get the correct answer. If you cannot provide an answer, say you don't know.")
         history[-1][1] = ""
         for character in bot_message.response:
             history[-1][1] += character
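The content of the three removed lines was not captured in this view; given the rest of the commit, they were presumably the async variants of these handlers and an awaited query call. Note that bot builds the reply one character at a time, which only streams to the UI if the function yields history inside the loop (the lines just past this hunk are not shown). A minimal sketch of the usual Blocks wiring for this handler pair; the component names msg and chatbot are assumptions, since the submit call sits outside the hunk:

    # Append the user turn immediately, then hand off to the streaming bot.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot  # bot is a generator, so it needs the queue (see below)
    )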
@@ -215,4 +210,4 @@ with gr.Blocks() as demo:
     )
     clear.click(lambda: None, None, chatbot, queue=False)
 # demo.queue()
-
+demo.launch(share=False)
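One caveat: on Gradio 3.x, generator handlers like bot require the queue to be enabled, yet demo.queue() is left commented out above; Gradio 4.x enables queuing by default. A minimal sketch of the explicit form, assuming an older Gradio:

    demo.queue()  # required for streaming/generator handlers on Gradio 3.x
    demo.launch(share=False)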