samsonleegh committed
Commit f130ba6 · verified · 1 Parent(s): 073f90e

Update app.py

Files changed (1)
  1. app.py +139 -0
app.py CHANGED
@@ -0,0 +1,139 @@
+ import os
+ import shutil
+ from dotenv import load_dotenv
+ from llama_index.llms.groq import Groq  # kept for the commented-out Groq option below
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core import (
+     Settings,
+     VectorStoreIndex,
+     SimpleDirectoryReader,
+     get_response_synthesizer,
+ )
+ from llama_index.embeddings.openai import OpenAIEmbedding
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_parse import LlamaParse
+ from llama_index.core.retrievers import VectorIndexRetriever
+ from llama_index.core.query_engine import RetrieverQueryEngine
+ from llama_index.core.postprocessor import SimilarityPostprocessor
+ # from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ import gradio as gr
+
+ load_dotenv()  # expects a local .env file; a sketch is given after the diff
+
+ OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
+ # GROQ_API_KEY = os.getenv('GROQ_API_KEY')
+ LLAMAINDEX_API_KEY = os.getenv('LLAMAINDEX_API_KEY')
+
+ # set up LLM (Groq alternative kept commented out)
+ # llm = Groq(model="llama-3.1-70b-versatile", api_key=GROQ_API_KEY)
+ llm = OpenAI(model="gpt-4o-mini", api_key=OPENAI_API_KEY)
+ # quick smoke test: llm.complete("Explain the importance of low latency LLMs").text
+ Settings.llm = llm
+
+ # set up embedding model
+ # embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
+ embed_model = OpenAIEmbedding()
+ Settings.embed_model = embed_model
+
+ # create splitter (NB: 10,000-token chunks can exceed the embedding model's context window)
+ splitter = SentenceSplitter(chunk_size=10000, chunk_overlap=100)
+ Settings.transformations = [splitter]
+
+ def upload_file(file_ls):
+     # clear out any previously uploaded files, then copy the new ones in
+     shutil.rmtree('./data', ignore_errors=True)
+     UPLOAD_FOLDER = './data'
+     os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+     for file in file_ls:
+         shutil.copy(file, UPLOAD_FOLDER)
+     gr.Info("File uploaded")
+
+ def process_documents():
+     # create parser (LlamaParse handles PDFs; .docx falls back to the default reader)
+     parser = LlamaParse(
+         api_key=LLAMAINDEX_API_KEY,
+         result_type="markdown",  # "markdown" and "text" are available
+         verbose=True,
+     )
+
+     filename_fn = lambda filename: {"file_name": filename}
+     required_exts = [".pdf", ".docx"]
+     file_extractor = {".pdf": parser}
+     reader = SimpleDirectoryReader(
+         input_dir="./data",
+         file_extractor=file_extractor,
+         required_exts=required_exts,
+         recursive=True,
+         file_metadata=filename_fn,
+     )
+     documents = reader.load_data()
+     len_docs = len(documents)
+     print(f"Creating index with {len_docs} documents")
+     global index
+     index = VectorStoreIndex.from_documents(documents, embed_model=embed_model, transformations=[splitter])
+     # persisted here but never reloaded on restart; see the reload sketch after the diff
+     index.storage_context.persist(persist_dir="./vectordb")
+     return f"Processed {len_docs} documents successfully."
+
+ def query_index(query_input):
+     # set up retriever
+     retriever = VectorIndexRetriever(
+         index=index,
+         similarity_top_k=15,
+         # vector_store_query_mode="mmr",
+         # vector_store_kwargs={"mmr_threshold": 0.4},
+     )
+
+     # set up query engine: drop low-similarity nodes, then tree-summarize the rest
+     query_engine = RetrieverQueryEngine(
+         retriever=retriever,
+         node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.53)],
+         response_synthesizer=get_response_synthesizer(response_mode="tree_summarize", verbose=True),
+     )
+     # print(query_engine.get_prompts())
+
+     output = query_engine.query(query_input)
+     # source nodes are available via output.source_nodes if needed
+     return output.response
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# RAG with LlamaIndex")
+
+     # File upload interface
+     upload_button = gr.UploadButton("Click to upload a file", file_count="multiple")
+     upload_button.upload(upload_file, upload_button)
+
+     # Process button and its output
+     process_button = gr.Button("Process Documents")
+     process_output = gr.Textbox(label="Processing Output")
+
+     # Query interface
+     query_input = gr.Textbox(label="Enter your query")
+     query_button = gr.Button("Submit Query")
+     query_output = gr.Textbox(label="Response")
+
+     # Link the processing function
+     process_button.click(fn=process_documents, inputs=None, outputs=process_output)
+
+     # Link the query function
+     query_button.click(fn=query_index, inputs=query_input, outputs=query_output)
+
+ # Run the interface
+ if __name__ == "__main__":
+     demo.launch()
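
A note on configuration: the app reads its credentials with load_dotenv(), so it expects a .env file next to app.py. A minimal sketch with placeholder values (only the variable names come from the code; GROQ_API_KEY is needed only if the commented-out Groq path is enabled):

    OPENAI_API_KEY=sk-...
    LLAMAINDEX_API_KEY=llx-...
    # GROQ_API_KEY=gsk_...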
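
A note on persistence: process_documents() persists the index to ./vectordb, but queries only ever hit the in-memory global, so the persisted copy is never reloaded after a restart. A minimal sketch of reloading it with llama_index's storage utilities (assuming Settings.llm and Settings.embed_model are configured first, as in app.py):

    from llama_index.core import StorageContext, load_index_from_storage

    # rebuild the index from the persisted vector store instead of re-parsing the documents
    storage_context = StorageContext.from_defaults(persist_dir="./vectordb")
    index = load_index_from_storage(storage_context)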