added rag project core files
- CONTRIBUTION.md +28 -0
- core-langchain-rag.py +261 -0
- devcontainer.json +32 -0
CONTRIBUTION.md
ADDED
@@ -0,0 +1,28 @@
+# Pull Request Template
+
+## Description
+Please include a brief description of the changes introduced by this PR.
+
+## Related Issue(s)
+- If this PR addresses a particular issue, please reference it here using GitHub's linking syntax, e.g., "Fixes #123".
+- If there's no related issue, briefly explain the motivation behind these changes.
+
+## Changes Made
+Please provide a list of the changes made in this PR.
+
+## Screenshots (if applicable)
+If the changes include UI updates or visual changes, please attach relevant screenshots here.
+
+## Checklist
+- [ ] I have tested my changes locally and ensured that they work as expected.
+- [ ] I have updated the documentation (if applicable).
+- [ ] My code follows the project's coding conventions and style guidelines.
+- [ ] I have added appropriate test cases (if applicable).
+- [ ] I have reviewed my own code to ensure its quality.
+
+## Additional Notes
+Add any additional notes or context about this PR here.
+
+## Reviewer(s)
+- @reviewer1
+- @reviewer2
core-langchain-rag.py
ADDED
@@ -0,0 +1,261 @@
+# Importing necessary libraries
+import sys
+import os
+import time
+
+# # Importing RecursiveUrlLoader for web scraping and BeautifulSoup for HTML parsing
+# from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader
+# from bs4 import BeautifulSoup as Soup
+# import mimetypes
+
+# # List of URLs to scrape
+# urls = ["https://langchain-doc.readthedocs.io/en/latest"]
+
+# # Initialize an empty list to store the documents
+# docs = []
+
+# # Looping through each URL in the list - this could take some time!
+# stf = time.time()  # Start time for the whole download
+# for url in urls:
+#     try:
+#         st = time.time()  # Start time for this URL
+#         # Create a RecursiveUrlLoader instance with a specified URL and depth
+#         # The extractor function uses BeautifulSoup to parse the HTML content and extract text
+#         loader = RecursiveUrlLoader(url=url, max_depth=5, extractor=lambda x: Soup(x, "html.parser").text)
+
+#         # Load the documents from the URL and extend the docs list
+#         docs.extend(loader.load())
+
+#         et = time.time() - st  # Calculate time taken for this URL's download
+#         print(f'Time taken for downloading documents from {url}: {et} seconds.')
+#     except Exception as e:
+#         # Print an error message if there is an issue with loading or parsing the URL
+#         print(f"Failed to load or parse the URL {url}. Error: {e}", file=sys.stderr)
+# etf = time.time() - stf  # Calculate total download time
+# print(f'Total time taken for downloading {len(docs)} documents: {etf} seconds.')
+
+# # Import necessary modules for text splitting and vectorization
+# from langchain.text_splitter import RecursiveCharacterTextSplitter
+# import time
+# from langchain_community.vectorstores import FAISS
+# from langchain.vectorstores.utils import filter_complex_metadata
+# from langchain_community.embeddings import HuggingFaceEmbeddings
+
+# # Configure the text splitter
+# text_splitter = RecursiveCharacterTextSplitter(
+#     separators=["\n\n", "\n", "(?<=\. )", " ", ""],  # Define the separators for splitting text
+#     chunk_size=500,       # The size of each text chunk
+#     chunk_overlap=50,     # Overlap between chunks to ensure continuity
+#     length_function=len,  # Function to determine the length of each chunk
+# )
+
+# try:
+#     # Stage one: splitting the documents into chunks for vectorization
+#     st = time.time()  # Start time for performance measurement
+#     print('Loading documents and creating chunks ...')
+#     # Split each document into chunks using the configured text splitter
+#     chunks = text_splitter.create_documents([doc.page_content for doc in docs], metadatas=[doc.metadata for doc in docs])
+#     et = time.time() - st  # Calculate time taken for splitting
+#     print(f"Created {len(chunks)} chunks.")
+#     print(f'Time taken for document chunking: {et} seconds.')
+# except Exception as e:
+#     print(f"Error during document chunking: {e}", file=sys.stderr)
+
+# # Path for saving the FAISS index
+# FAISS_INDEX_PATH = "./vectorstore/lc-faiss-multi-mpnet-500"
+
+# try:
+#     # Stage two: vectorization of the document chunks
+#     model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1"  # Model used for embedding
+
+#     # Initialize HuggingFace embeddings with the specified model
+#     embeddings = HuggingFaceEmbeddings(model_name=model_name)
+
+#     print('Loading chunks into vector store ...')
+#     st = time.time()  # Start time for performance measurement
+#     # Create a FAISS vector store from the document chunks and save it locally
+#     db = FAISS.from_documents(filter_complex_metadata(chunks), embeddings)
+#     db.save_local(FAISS_INDEX_PATH)
+#     et = time.time() - st  # Calculate time taken for vectorization
+#     print(f'Time taken for vectorization and saving: {et} seconds.')
+# except Exception as e:
+#     print(f"Error during vectorization or FAISS index saving: {e}", file=sys.stderr)
+
+# Alternatively, download a prepared vectorized index from S3 and load it into the vector store.
+# Import necessary libraries for AWS S3 interaction, file handling, and FAISS vector stores
+import boto3
+from botocore import UNSIGNED
+from botocore.client import Config
+import zipfile
+from langchain_community.vectorstores import FAISS
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from dotenv import load_dotenv
+
+# Load environment variables from a .env file
+config = load_dotenv(".env")
+
+# Retrieve the Hugging Face API token and S3 bucket name from environment variables
+HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
+S3_LOCATION = os.getenv("S3_LOCATION")
+
+try:
+    # Initialize an S3 client with unsigned configuration for public access
+    s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
+
+    # Define the FAISS index path and the destination for the downloaded file
+    FAISS_INDEX_PATH = './vectorstore/lc-faiss-multi-mpnet-500-markdown'
+    VS_DESTINATION = FAISS_INDEX_PATH + ".zip"
+
+    # Download the pre-prepared vectorized index from the S3 bucket
+    print("Downloading the pre-prepared vectorized index from S3...")
+    s3.download_file(S3_LOCATION, 'vectorstores/lc-faiss-multi-mpnet-500-markdown.zip', VS_DESTINATION)
+
+    # Extract the downloaded zip file
+    with zipfile.ZipFile(VS_DESTINATION, 'r') as zip_ref:
+        zip_ref.extractall('./vectorstore/')
+    print("Download and extraction completed.")
+
+except Exception as e:
+    print(f"Error during downloading or extracting from S3: {e}", file=sys.stderr)
+
+# Define the model name for embeddings
+model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
+
+try:
+    # Initialize HuggingFace embeddings with the specified model
+    embeddings = HuggingFaceEmbeddings(model_name=model_name)
+
+    # Load the local FAISS index with the specified embeddings
+    db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
+    print("FAISS index loaded successfully.")
+except Exception as e:
+    print(f"Error during FAISS index loading: {e}", file=sys.stderr)
+
+# Import the necessary module for Hugging Face Hub integration
+from langchain_community.llms import HuggingFaceHub
+
+# Initialize the vector store as a retriever for the RAG pipeline
+retriever = db.as_retriever()
+
+try:
+    # Load the model from the Hugging Face Hub
+    model_id = HuggingFaceHub(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1", model_kwargs={
+        "temperature": 0.1,         # Controls randomness in response generation (lower value means less random)
+        "max_new_tokens": 1024,     # Maximum number of new tokens to generate in responses
+        "repetition_penalty": 1.2,  # Penalty for repeating the same words (higher value increases penalty)
+        "return_full_text": False   # If False, only the newly generated text is returned; if True, the input is included as well
+    })
+    print("Model loaded successfully from Hugging Face Hub.")
+except Exception as e:
+    print(f"Error loading model from Hugging Face Hub: {e}", file=sys.stderr)
+
+
+# Importing necessary modules for retrieval-based question answering and prompt handling
+from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
+from langchain.memory import ConversationBufferMemory
+
+# Declare a global variable 'qa' for the retrieval-based question answering system
+global qa
+
+# Define a prompt template for guiding the model's responses
+template = """
+You are the friendly documentation buddy Arti. If you don't know the answer, say 'I don't know' and don't make things up.\
+Use the following context (delimited by <ctx></ctx>) and the chat history (delimited by <hs></hs>) to answer the question:
+------
+<ctx>
+{context}
+</ctx>
+------
+<hs>
+{history}
+</hs>
+------
+{question}
+Answer:
+"""
+
+# Create a PromptTemplate object from the defined template
+prompt = PromptTemplate.from_template(
+    # input_variables=["history", "context", "question"],  # Inferred automatically by from_template
+    template=template,  # The prompt template as defined above
+)
+# Sanity-check that the template formats with the expected variables
+prompt.format(context="context", history="history", question="question")
+
+# Create a memory buffer to manage conversation history
+memory = ConversationBufferMemory(
+    memory_key="history",  # Key for storing the conversation history
+    input_key="question"   # Key for the input question
+)
+
+# Initialize the RetrievalQA object with the specified model, retriever, and additional configurations
+qa = RetrievalQA.from_chain_type(
+    llm=model_id,                  # Language model loaded from the Hugging Face Hub
+    retriever=retriever,           # The vector store retriever initialized earlier
+    return_source_documents=True,  # Option to return source documents along with responses
+    chain_type_kwargs={
+        "verbose": True,   # Enables verbose output for debugging and analysis
+        "memory": memory,  # Memory buffer for managing conversation history
+        "prompt": prompt   # Prompt template for guiding the model's responses
+    }
+)
+
+# Import Gradio for the UI, along with other necessary libraries
+import gradio as gr
+import random
+import time
+
+# Function to add a new input to the chat history
+def add_text(history, text):
+    # Append the new text to the history with a placeholder for the response
+    history = history + [(text, None)]
+    return history, ""
+
+# Function representing the bot's response mechanism
+def bot(history):
+    # Obtain the response from the 'infer' function using the latest input
+    response = infer(history[-1][0], history)
+    # Update the history with the bot's response
+    history[-1][1] = response['result']
+    return history
+
+# Function to infer the response using the RAG model
+def infer(question, history):
+    # Use the question and history to query the RAG model
+    result = qa({"query": question, "history": history, "question": question})
+    return result
+
+# CSS styling for the Gradio interface
+css = """
+#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
+"""
+
+# HTML content for the Gradio interface title
+title = """
+<div style="text-align: center;max-width: 700px;">
+    <h1>Chat with your Documentation</h1>
+    <p style="text-align: center;">Chat with the LangChain documentation, <br />
+    You can ask questions about the LangChain docs ;)</p>
+</div>
+"""
+
+# Building the Gradio interface
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.HTML(title)  # Add the HTML title to the interface
+        chatbot = gr.Chatbot([], elem_id="chatbot")  # Initialize the chatbot component
+        clear = gr.Button("Clear")  # Add a button to clear the chat
+
+        # Create a row for the question input
+        with gr.Row():
+            question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
+
+        # Define the action when the question is submitted
+        question.submit(add_text, [chatbot, question], [chatbot, question], queue=False).then(
+            bot, chatbot, chatbot
+        )
+        # Define the action for the clear button
+        clear.click(lambda: None, None, chatbot, queue=False)
+
+# Launch the Gradio demo interface
+demo.launch(share=False)
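Note: the script reads HUGGINGFACEHUB_API_TOKEN and S3_LOCATION from a .env file that is not part of this commit. The retrieval side can also be smoke-tested on its own before wiring up the Hub model; a minimal sketch (the index path and embedding model mirror the script above, the query string is just an example):

import sys
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Same index path and embedding model as core-langchain-rag.py
FAISS_INDEX_PATH = "./vectorstore/lc-faiss-multi-mpnet-500-markdown"
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")

try:
    db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
except Exception as e:
    sys.exit(f"Could not load FAISS index: {e}")

# Print the four chunks most similar to a test query, with their sources,
# to eyeball retrieval quality before adding the LLM on top.
for doc in db.similarity_search("How do I use a RecursiveUrlLoader?", k=4):
    print(doc.metadata.get("source"), "->", doc.page_content[:80])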
devcontainer.json
ADDED
@@ -0,0 +1,32 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/python
+{
+    "name": "Python 3.10",
+    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+    //"image": "mcr.microsoft.com/devcontainers/python:1-3.10-bookworm"
+
+
+    // build config for the docker image instead:
+    "build": { "dockerfile": "Dockerfile" },
+
+    // Features to add to the dev container. More info: https://containers.dev/features.
+    // "features": {},
+
+    // Use 'forwardPorts' to make a list of ports inside the container available locally.
+    // "forwardPorts": [],
+
+    // Use 'postCreateCommand' to run commands after the container is created.
+    // "postCreateCommand": "pip3 install --user -r requirements.txt",
+
+    // Configure tool-specific properties.
+    "customizations": {
+        // Configure properties specific to VS Code.
+        "vscode": {
+            // Add the IDs of extensions you want installed when the container is created.
+            "extensions": ["ms-azuretools.vscode-docker", "ms-python.python", "qwtel.sqlite-viewer"]
+        }
+    }//,
+
+    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+    // "remoteUser": "root"
+}