File size: 1,537 Bytes
885c166
986568d
885c166
 
 
 
 
 
 
 
986568d
885c166
 
986568d
 
885c166
986568d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
885c166
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os

import streamlit as st
from llama_index.core import Document, Settings, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq

# Streamlit app: upload a plain-text document and ask questions about it
# through a small RAG pipeline (HuggingFace bge-base embeddings for the
# vector index, a Groq-hosted Mixtral model for answering).
st.set_page_config(page_title="Document Q&A", page_icon="📄")

st.title("Upload a Document for Question Answering")

uploaded_file = st.file_uploader("Choose a file", type=["txt", "pdf", "docx"])

if uploaded_file is not None:
    # Only plain text decodes directly; PDF/DOCX are binary containers and
    # .decode("utf-8") would raise UnicodeDecodeError on them, so fail with
    # a clear message instead of a traceback.
    try:
        content = uploaded_file.read().decode("utf-8")
    except UnicodeDecodeError:
        st.error(
            "Could not read the file as UTF-8 text. "
            "PDF and DOCX files need a dedicated parser before indexing."
        )
        st.stop()
    documents = [Document(text=content)]

    # bge-base embedding model used to vectorize the document chunks.
    Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")

    # Groq-hosted LLM. SECURITY: the API key is read from the environment —
    # never hardcode credentials in source control.
    groq_api_key = os.environ.get("GROQ_API_KEY")
    if not groq_api_key:
        st.error("GROQ_API_KEY environment variable is not set.")
        st.stop()
    Settings.llm = Groq(model="mixtral-8x7b-32768", api_key=groq_api_key)

    # Build an in-memory vector index over the uploaded document and expose
    # it through a query engine.
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()

    # Ask a question about the indexed document.
    question = st.text_input("Ask a question about the document:")
    if st.button("Get Answer"):
        if question.strip():
            response = query_engine.query(question)
            # Render the response's text form rather than the raw object.
            st.write("Answer:", str(response))
        else:
            st.warning("Please enter a question first.")
else:
    st.write("Please upload a file to proceed.")