hymai committed on
Commit
f300490
·
verified ·
1 Parent(s): ee96da3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -22,7 +22,12 @@ from llama_index.core import PromptTemplate
22
  from llama_index.core.llms import ChatMessage, MessageRole
23
  from llama_index.core.chat_engine import CondenseQuestionChatEngine
24
 
 
 
 
 
25
 
 
26
  load_dotenv()
27
  GROQ_API_KEY = os.getenv('GROQ_API_KEY')
28
  LLAMAINDEX_API_KEY = os.getenv('LLAMAINDEX_API_KEY')
@@ -69,7 +74,7 @@ else:
69
  )
70
  documents = reader.load_data()
71
  print("index creating with `%d` documents", len(documents))
72
- index = VectorStoreIndex.from_documents(documents, embed_model=embed_model, transformations=[splitter])
73
  index.storage_context.persist(persist_dir="./vectordb")
74
 
75
  """
@@ -113,6 +118,7 @@ retriever = DocumentSummaryIndexEmbeddingRetriever(
113
  retriever = VectorIndexRetriever(
114
  index = index,
115
  similarity_top_k = 10,
 
116
  #vector_store_query_mode="mmr",
117
  #vector_store_kwargs={"mmr_threshold": 0.4}
118
  )
@@ -197,7 +203,7 @@ with gr.Blocks() as demo:
197
  def bot(history):
198
  user_message = history[-1][0]
199
  #bot_message = chat_engine.chat(user_message)
200
- bot_message = query_engine.query(user_message + "Let's think step by step to get the correct answer. If you cannot provide an answer, say you don't know.")
201
  history[-1][1] = ""
202
  for character in bot_message.response:
203
  history[-1][1] += character
 
22
  from llama_index.core.llms import ChatMessage, MessageRole
23
  from llama_index.core.chat_engine import CondenseQuestionChatEngine
24
 
25
+ # Helps asyncio run within Jupyter
26
+ import nest_asyncio
27
+ import asyncio
28
+ nest_asyncio.apply()
29
 
30
+ # load env file
31
  load_dotenv()
32
  GROQ_API_KEY = os.getenv('GROQ_API_KEY')
33
  LLAMAINDEX_API_KEY = os.getenv('LLAMAINDEX_API_KEY')
 
74
  )
75
  documents = reader.load_data()
76
  print("index creating with `%d` documents", len(documents))
77
+ index = VectorStoreIndex.from_documents(documents, embed_model=embed_model, transformations=[splitter], use_async=True)
78
  index.storage_context.persist(persist_dir="./vectordb")
79
 
80
  """
 
118
  retriever = VectorIndexRetriever(
119
  index = index,
120
  similarity_top_k = 10,
121
+ use_async=True
122
  #vector_store_query_mode="mmr",
123
  #vector_store_kwargs={"mmr_threshold": 0.4}
124
  )
 
203
  def bot(history):
204
  user_message = history[-1][0]
205
  #bot_message = chat_engine.chat(user_message)
206
+ bot_message = query_engine.aquery(user_message + "Let's think step by step to get the correct answer. If you cannot provide an answer, say you don't know.")
207
  history[-1][1] = ""
208
  for character in bot_message.response:
209
  history[-1][1] += character