import os
import tempfile

import streamlit as st
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq
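# Assumed dependencies (the original does not pin them): streamlit, llama-index,
# llama-index-embeddings-huggingface, llama-index-llms-groq, plus pypdf and
# docx2txt so SimpleDirectoryReader can parse PDF and DOCX uploads.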
# Streamlit app
st.set_page_config(page_title="Document Q&A", page_icon="📄")
st.title("Upload a Document for Question Answering")

uploaded_file = st.file_uploader("Choose a file", type=["txt", "pdf", "docx"])
if uploaded_file is not None:
    # Decoding the raw bytes as UTF-8 breaks for PDF and DOCX uploads, so write
    # the upload to a temporary file and let SimpleDirectoryReader pick a
    # parser based on the file extension.
    suffix = os.path.splitext(uploaded_file.name)[1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(uploaded_file.read())
        tmp_path = tmp.name
    documents = SimpleDirectoryReader(input_files=[tmp_path]).load_data()
    os.remove(tmp_path)
    # bge-base embedding model for vectorizing the document
    Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
    # Groq-hosted LLM; read the API key from the environment instead of
    # hardcoding a secret in the source
    Settings.llm = Groq(model="mixtral-8x7b-32768", api_key=os.environ["GROQ_API_KEY"])
    # Create the index
    index = VectorStoreIndex.from_documents(documents)
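    # Note: Streamlit reruns this whole script on every interaction, so the
    # index is rebuilt (and the document re-embedded) each time. For larger
    # files, wrapping the index construction in a function decorated with
    # @st.cache_resource would avoid the repeated work.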
    # Create query engine
    query_engine = index.as_query_engine()
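    # The query engine embeds each question, retrieves the most similar chunks
    # from the vector index, and asks the LLM to synthesize an answer from them.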
    # Ask a question
    question = st.text_input("Ask a question about the document:")
    if st.button("Get Answer") and question:
        response = query_engine.query(question)
        st.write("Answer:", str(response))
else:
    st.write("Please upload a file to proceed.")