# Hugging Face Space (scraped page header: "Spaces: Sleeping")
import gradio as gr
import os

import openai
from llama_index.core import (
    Settings,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# API key is read from the Space's secret store; raises KeyError if the
# secret is not configured, which fails fast at startup rather than at
# query time.
openai.api_key = os.environ['OpenAI_ApiKey']

# Use a local HuggingFace embedding model instead of the default
# (OpenAI) embedder, so embedding lookups need no API calls.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# One-time index build, kept for reference:
# documents = SimpleDirectoryReader("data").load_data()
# index = VectorStoreIndex.from_documents(documents)

# Load the prebuilt vector index persisted under ./index and expose a
# query engine over it for the UI callback below.
persist_dir = "index"
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
def greet(question):
    """Answer *question* against the loaded vector index.

    Parameters
    ----------
    question : str
        Free-text question typed into the Gradio textbox.

    Returns
    -------
    str
        The generated answer text.
    """
    # str() unwraps the llama_index Response object so the Gradio
    # Textbox output always receives plain text, regardless of how
    # strictly the installed Gradio version postprocesses values.
    return str(query_engine.query(question))
# Minimal Gradio UI: one question box in, one answer box out, wired to
# the RAG callback. launch() blocks and serves the app (the standard
# top-level pattern for a Hugging Face Space).
question_textbox = gr.Textbox(label="Your question")
answer_textbox = gr.Textbox(label="Answer")
demo = gr.Interface(fn=greet, inputs=question_textbox, outputs=answer_textbox)
demo.launch()