UPDATE: follow ups
functions.py CHANGED (+34 -9)
@@ -6,9 +6,9 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain_qdrant import QdrantVectorStore
 from langchain_qdrant import RetrievalMode
 from langchain_core.prompts.chat import ChatPromptTemplate
-from
-from langchain_core.
-from
+from langchain_core.prompts import PromptTemplate
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
 from langchain_core.runnables.history import RunnableWithMessageHistory
 from langchain.memory import ChatMessageHistory
 from pandasai import SmartDataframe
@@ -16,9 +16,7 @@ from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_community.document_loaders import YoutubeLoader
 from langchain.docstore.document import Document
 from langchain_huggingface import HuggingFaceEmbeddings
-from langchain.retrievers import ContextualCompressionRetriever
 from langchain_qdrant import FastEmbedSparse
-from langchain.retrievers.document_compressors import FlashrankRerank
 from supabase.client import create_client
 from qdrant_client import QdrantClient
 from langchain_groq import ChatGroq
@@ -73,6 +71,24 @@ NOTE: Generate responses directly without using phrases like "Response:" or "Answer:"
 prompt = ChatPromptTemplate.from_template(prompt)
 chatHistoryStore = dict()
 
+class FollowUps(BaseModel):
+    q1: str = Field(description="First Follow-up Question")
+    q2: str = Field(description="Second Follow-up Question")
+    q3: str = Field(description="Third Follow-up Question")
+
+followUpPrompt = """
+You are an expert chatbot at framing follow-up questions from some given text, such that their answers can be found in the text itself, and you have been given the task of doing the same. Frame appropriate and meaningful questions out of it. Also, if the given text says NO CONTEXT FOUND, please return an empty string for each question asked.
+\n{format_instructions}
+\n{context}
+"""
+jsonParser = JsonOutputParser(pydantic_object=FollowUps)
+followUpPrompt = PromptTemplate(
+    template=followUpPrompt,
+    input_variables=["context"],
+    partial_variables={"format_instructions": jsonParser.get_format_instructions()},
+)
+
+
 
 def createUser(user_id: str, username: str, email: str) -> dict:
     userData = client.table("ConversAI_UserInfo").select("*").execute().data
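Review note: the FollowUps model, JsonOutputParser, and PromptTemplate added above compose into a small structured-output chain. A minimal self-contained sketch of that pattern, outside the diff (the template text and sample context are illustrative only; ChatGroq reads GROQ_API_KEY from the environment):

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_groq import ChatGroq

class FollowUps(BaseModel):
    q1: str = Field(description="First Follow-up Question")
    q2: str = Field(description="Second Follow-up Question")
    q3: str = Field(description="Third Follow-up Question")

jsonParser = JsonOutputParser(pydantic_object=FollowUps)

followUpPrompt = PromptTemplate(
    # get_format_instructions() injects the JSON schema derived from FollowUps,
    # so the model is told to reply as {"q1": ..., "q2": ..., "q3": ...}.
    template="Frame three follow-up questions answerable from the text below.\n{format_instructions}\n{context}",
    input_variables=["context"],
    partial_variables={"format_instructions": jsonParser.get_format_instructions()},
)

followUpChain = followUpPrompt | ChatGroq(model_name="gemma2-9b-it", temperature=0) | jsonParser

# jsonParser parses the model reply into a plain dict shaped like FollowUps.
followUpQuestions = followUpChain.invoke({"context": "Some retrieved context..."})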
@@ -157,6 +173,7 @@ def addDocuments(texts: list[tuple[str]], vectorstore: str):
 
 def format_docs(docs: str):
     global sources
+    global tempContext
     sources = []
     context = ""
     for doc in docs:
@@ -169,6 +186,7 @@ def format_docs(docs: str):
         else:
             pass
     sources = list(set(sources))
+    tempContext = context
     return context
 
 
@@ -194,6 +212,9 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
     global prompt
     global client
     global sources
+    global jsonParser
+    global tempContext
+    global followUpPrompt
     global vectorEmbeddings
     global sparseEmbeddings
     vectorStoreName = vectorstore
@@ -211,7 +232,7 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
         "question": RunnableLambda(lambda x: x["question"]),
         "chatHistory": RunnableLambda(lambda x: x["chatHistory"])}
        | prompt
-       | ChatGroq(
+       | ChatGroq(model_name=llmModel, temperature=0.75, max_tokens=512)
        | StrOutputParser()
     )
     messageChain = RunnableWithMessageHistory(
@@ -221,11 +242,15 @@ def answerQuery(query: str, vectorstore: str, llmModel: str = "llama3-70b-8192")
         history_messages_key="chatHistory"
     )
     chain = RunnablePassthrough.assign(messages_trimmed=trimMessages) | messageChain
-    return {
-        "output": chain.invoke(
+    followUpChain = prompt | ChatGroq(model_name="gemma2-9b-it", temperature=0) | jsonParser
+    output = chain.invoke(
         {"question": query},
         {"configurable": {"session_id": vectorStoreName}}
-    ),
+    )
+    followUpQuestions = followUpChain.invoke({"context": tempContext})
+    return {
+        "output": output,
+        "followUpQuestions": followUpQuestions,
         "sources": sources
     }
 
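Review note: two details in the answerQuery hunks stand out. The two-argument chain.invoke(...) is the standard RunnableWithMessageHistory calling pattern, where the second dict selects the per-session history. Also, followUpChain is piped from prompt (the main chat template) rather than the followUpPrompt defined above, even though followUpPrompt is pulled in via global; since only followUpPrompt carries the parser's format instructions, this looks unintended. A minimal sketch of the history wiring (the getSessionHistory helper is assumed here; functions.py presumably defines something similar around the chatHistoryStore dict):

from langchain.memory import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq

chatHistoryStore = dict()

def getSessionHistory(session_id: str) -> BaseChatMessageHistory:
    # Assumed helper: one in-memory history per session id (here, the vectorstore name).
    if session_id not in chatHistoryStore:
        chatHistoryStore[session_id] = ChatMessageHistory()
    return chatHistoryStore[session_id]

chatPrompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="chatHistory"),
    ("human", "{question}"),
])

messageChain = RunnableWithMessageHistory(
    chatPrompt | ChatGroq(model_name="llama3-70b-8192", temperature=0.75, max_tokens=512),
    getSessionHistory,
    input_messages_key="question",
    history_messages_key="chatHistory",
)

# The "configurable" dict picks which ChatMessageHistory the wrapper loads
# before, and appends to after, the underlying chain runs.
answer = messageChain.invoke(
    {"question": "What is the video about?"},
    {"configurable": {"session_id": "user123-demo"}},
)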
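With this change, callers of answerQuery receive the follow-up questions alongside the answer and sources. A hypothetical call (question and collection name invented for illustration):

result = answerQuery("What is the talk about?", vectorstore="user123-demo")
print(result["output"])             # answer text from the RAG chain
print(result["sources"])            # deduplicated source list collected in format_docs
print(result["followUpQuestions"])  # dict like {"q1": "...", "q2": "...", "q3": "..."}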