Update QnA.py
QnA.py CHANGED
@@ -5,6 +5,7 @@ from langchain.chains.summarize.chain import load_summarize_chain
 from langchain_community.llms.huggingface_hub import HuggingFaceHub
 from langchain.retrievers.document_compressors import LLMChainExtractor
 from langchain.retrievers import ContextualCompressionRetriever
+from langchain.chains.question_answering import load_qa_chain
 
 #from Api_Key import google_plam
 from langchain_groq import ChatGroq
@@ -91,7 +92,7 @@ def Q_A(vectorstore,question,API_KEY,compressor=False):
 
     else:
         prompt = prompt_template_to_analyze_resume()
-
+    # question_answer_chain = load_qa_chain(chat_llm, chain_type="stuff", prompt=prompt)
     question_answer_chain = create_stuff_documents_chain(chat_llm, prompt)
 
     chain = create_retrieval_chain(retriever, question_answer_chain)
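For context, below is a minimal sketch of how the changed lines presumably sit inside Q_A: a ChatGroq model, the create_stuff_documents_chain / create_retrieval_chain pair from the unchanged lines, and the legacy load_qa_chain call that this commit imports but keeps commented out. Only those calls appear in the diff; the model name, the prompt text, the retriever setup, and the return value are assumptions for illustration, not the actual contents of QnA.py.

# Minimal sketch of the surrounding Q_A function; model name, prompt text,
# and retriever settings are assumptions, not taken from the diff.
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq


def Q_A(vectorstore, question, API_KEY, compressor=False):
    # Assumed model name; the diff only shows that ChatGroq is imported.
    chat_llm = ChatGroq(api_key=API_KEY, model="llama3-8b-8192", temperature=0)

    # Stand-in for prompt_template_to_analyze_resume(), which lives elsewhere
    # in QnA.py. create_stuff_documents_chain expects a {context} variable
    # that receives the retrieved documents.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Use the resume excerpts below to answer.\n\n{context}"),
        ("human", "{input}"),
    ])

    retriever = vectorstore.as_retriever()

    # LCEL-style chain, as on the unchanged line in the diff:
    question_answer_chain = create_stuff_documents_chain(chat_llm, prompt)
    chain = create_retrieval_chain(retriever, question_answer_chain)

    # Legacy alternative the commit adds only as a comment:
    # question_answer_chain = load_qa_chain(chat_llm, chain_type="stuff", prompt=prompt)

    result = chain.invoke({"input": question})
    return result["answer"]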