Spaces:
Sleeping
Sleeping
from langchain_core.runnables import RunnableLambda
from langchain.schema.runnable import RunnablePassthrough
from data.retriever import Retriever
from langchain_google_genai import ChatGoogleGenerativeAI
from llm.gemini import Gemini
class GenerateQuestionsService:
    """Generate study questions for a user query via a RAG chain over Gemini."""

    # Shared across all instances; built once at class-definition time.
    # NOTE(review): this runs I/O-adjacent constructors at import time — confirm intended.
    _retrieve = Retriever()
    _model = Gemini()

    # Number of attempts against the LLM before giving up.
    _MAX_RETRIES = 3

    def handle(self, query: str) -> dict:
        """Run the RAG pipeline for *query*.

        Builds a chain that retrieves context, formats it, and asks the model
        for questions; also invokes the plain document retriever separately.

        Returns:
            dict with ``rag_result`` (parsed questions, or a fallback message
            string when generation fails) and ``retriever_result`` (raw
            retriever output).
        """
        rag_chain = {
            "context": self._retrieve.retriever | RunnableLambda(self._format_docs),
            "question": RunnablePassthrough(),
        } | RunnableLambda(self._get_questions)

        rag_result = rag_chain.invoke(query)
        retriever_result = self._retrieve.docs_retriever.invoke(query)

        print("RAG result:", rag_result)
        print("Retriever result:", retriever_result)
        return {"rag_result": rag_result, "retriever_result": retriever_result}

    def _get_questions(self, _dict: dict):
        """Format the prompt from ``_dict['context']``/``_dict['question']`` and query the model.

        Retries up to ``_MAX_RETRIES`` times on any error; returns the parsed
        questions on success, or a fallback message string when every attempt
        fails.
        """
        messages = self._model.template.format_messages(
            context=_dict["context"],
            question=_dict["question"],
            format_questions_instructions=self._model._format_questions_instructions,
        )
        # Build the client once, not once per retry attempt.
        chat = ChatGoogleGenerativeAI(model="gemini-pro")
        for _attempt in range(self._MAX_RETRIES):
            try:
                response = chat.invoke(messages)
                return self._model.parser.parse(response.content)
            except Exception as e:  # broad on purpose: any SDK/parse failure triggers a retry
                print(e)
        return "Não foi possível gerar as questões."

    def _format_docs(self, docs) -> str:
        """Join retrieved documents' page contents into one blank-line-separated string."""
        return "\n\n".join(doc.page_content for doc in docs)