fdehlinger committed
Commit
9f2a34c
·
1 Parent(s): 3145d7c
Files changed (1)
  1. app.py +16 -1
app.py CHANGED
@@ -6,15 +6,30 @@ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.core import Settings
 
+import logging
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,  # Set the logging level
+    format='%(asctime)s - %(levelname)s - %(message)s',  # Define the log format
+    handlers=[
+        logging.StreamHandler()  # Output logs to the console
+    ]
+)
+
+
 openai.api_key = os.environ['OpenAI_ApiKey']
 Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+logging.info("Start load document.")
 
 documents = SimpleDirectoryReader("data").load_data()
 index = VectorStoreIndex.from_documents(documents)
 query_engine = index.as_query_engine()
 
 def greet(question):
-    return query_engine.query(question)
+    return question
+    # return query_engine.query(question)
+
 
 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 demo.launch()
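
For reference, below is a minimal sketch of the full app.py as it would read after this commit. The lines above the hunk (the imports of os, openai, gradio, and the llama_index core classes) are not shown in this diff, so that import block is an assumption inferred from the names used in the visible code; everything else follows the hunk.

# Sketch of app.py after commit 9f2a34c; the import block is assumed, not shown in the diff
import os
import openai
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings

import logging

# Configure logging: INFO level, timestamped messages, written to the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)

# API key read from the OpenAI_ApiKey environment variable
openai.api_key = os.environ['OpenAI_ApiKey']
# Use a local HuggingFace model for embeddings instead of the OpenAI default
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
logging.info("Start load document.")

# Build an in-memory vector index over the files in ./data
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

def greet(question):
    # This commit echoes the input instead of querying the index,
    # which looks like a temporary debugging change.
    return question
    # return query_engine.query(question)

demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()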