import gradio as gr
import os
import openai

from llama_index.core import VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings


# OpenAI API key for the LLM; embeddings are computed locally with a HuggingFace model.
openai.api_key = os.environ["OpenAI_ApiKey"]
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")


# To (re)build the index from raw documents instead of loading it from disk:
# documents = SimpleDirectoryReader("data").load_data()
# index = VectorStoreIndex.from_documents(documents)
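
# A minimal sketch (assumption: documents live in a local "data/" folder) of how
# the persisted index loaded below could have been created, run once offline:
# index.storage_context.persist(persist_dir="index")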

# Load the prebuilt vector index persisted under ./index
persist_dir = "index"
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)


# Query engine: retrieves relevant chunks and synthesizes an answer with the OpenAI LLM.
query_engine = index.as_query_engine()

def greet(question):
    # Run the question through the RAG pipeline and return the answer text.
    response = query_engine.query(question)
    return str(response)

# Gradio UI: a question textbox in, an answer textbox out.
question_textbox = gr.Textbox(label="Your question")
answer_textbox = gr.Textbox(label="Answer")

demo = gr.Interface(fn=greet, inputs=question_textbox, outputs=answer_textbox)
demo.launch()
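
# Example of running this app locally (assumption: the persisted index already
# exists under ./index and the API key is exported in the environment):
#   export OpenAI_ApiKey=<your-openai-key>
#   python app.py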