Spaces:
Running
Running
luanpoppe
committed on
Commit
·
1a93363
1
Parent(s):
fbd71a3
feat
Browse files
- endpoint_teste/views.py +2 -1
- environment.py +1 -0
- langchain_backend/main.py +6 -5
- langchain_backend/utils.py +3 -6
endpoint_teste/views.py
CHANGED
@@ -3,6 +3,7 @@ from rest_framework import viewsets, filters
|
|
3 |
from django_filters.rest_framework import DjangoFilterBackend
|
4 |
from endpoint_teste.models import EndpointTesteModel
|
5 |
from endpoint_teste.serializer import EndpointTesteSerializer, PDFUploadSerializer
|
|
|
6 |
|
7 |
from rest_framework.decorators import api_view
|
8 |
from rest_framework.response import Response
|
@@ -87,7 +88,7 @@ def getPDF(request):
|
|
87 |
try:
|
88 |
resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], temp_file_path, model=serializer.validated_data['model'])
|
89 |
except:
|
90 |
-
resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], temp_file_path)
|
91 |
|
92 |
os.remove(temp_file_path)
|
93 |
|
|
|
3 |
from django_filters.rest_framework import DjangoFilterBackend
|
4 |
from endpoint_teste.models import EndpointTesteModel
|
5 |
from endpoint_teste.serializer import EndpointTesteSerializer, PDFUploadSerializer
|
6 |
+
from ..environment import default_model
|
7 |
|
8 |
from rest_framework.decorators import api_view
|
9 |
from rest_framework.response import Response
|
|
|
88 |
try:
|
89 |
resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], temp_file_path, model=serializer.validated_data['model'])
|
90 |
except:
|
91 |
+
resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], temp_file_path, model=default_model)
|
92 |
|
93 |
os.remove(temp_file_path)
|
94 |
|
environment.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
default_model = "gpt-4o-mini"
|
langchain_backend/main.py
CHANGED
@@ -12,10 +12,11 @@ def get_llm_answer(system_prompt, user_prompt, pdf_url, model):
|
|
12 |
else:
|
13 |
pages = getPDF()
|
14 |
retriever = create_retriever(pages)
|
15 |
-
rag_chain = None
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
|
|
20 |
results = rag_chain.invoke({"input": user_prompt})
|
21 |
return results
|
|
|
12 |
else:
|
13 |
pages = getPDF()
|
14 |
retriever = create_retriever(pages)
|
15 |
+
# rag_chain = None
|
16 |
+
rag_chain = create_retrieval_chain(retriever, create_prompt_llm_chain(system_prompt, model))
|
17 |
+
# if model:
|
18 |
+
# rag_chain = create_retrieval_chain(retriever, create_prompt_llm_chain(system_prompt, model))
|
19 |
+
# else:
|
20 |
+
# rag_chain = create_retrieval_chain(retriever, create_prompt_llm_chain(system_prompt))
|
21 |
results = rag_chain.invoke({"input": user_prompt})
|
22 |
return results
|
langchain_backend/utils.py
CHANGED
@@ -7,6 +7,7 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
|
|
7 |
from langchain.chains.combine_documents import create_stuff_documents_chain
|
8 |
from langchain_core.prompts import ChatPromptTemplate
|
9 |
from langchain_huggingface import HuggingFaceEndpoint
|
|
|
10 |
|
11 |
os.environ.get("OPENAI_API_KEY")
|
12 |
os.environ.get("HUGGINGFACEHUB_API_TOKEN")
|
@@ -30,14 +31,10 @@ def create_retriever(documents):
|
|
30 |
|
31 |
return retriever
|
32 |
|
33 |
-
def create_prompt_llm_chain(system_prompt, modelParam):
|
34 |
-
if modelParam == "gpt-4o-mini":
|
35 |
model = ChatOpenAI(model=modelParam)
|
36 |
else:
|
37 |
-
|
38 |
-
print('\n\n\n')
|
39 |
-
print(os.environ.get("HUGGINGFACEHUB_API_TOKEN"))
|
40 |
-
|
41 |
model = HuggingFaceEndpoint(
|
42 |
repo_id=modelParam,
|
43 |
task="text-generation",
|
|
|
7 |
from langchain.chains.combine_documents import create_stuff_documents_chain
|
8 |
from langchain_core.prompts import ChatPromptTemplate
|
9 |
from langchain_huggingface import HuggingFaceEndpoint
|
10 |
+
from ..environment import default_model
|
11 |
|
12 |
os.environ.get("OPENAI_API_KEY")
|
13 |
os.environ.get("HUGGINGFACEHUB_API_TOKEN")
|
|
|
31 |
|
32 |
return retriever
|
33 |
|
34 |
+
def create_prompt_llm_chain(system_prompt, modelParam):
|
35 |
+
if modelParam == default_model:
|
36 |
model = ChatOpenAI(model=modelParam)
|
37 |
else:
|
|
|
|
|
|
|
|
|
38 |
model = HuggingFaceEndpoint(
|
39 |
repo_id=modelParam,
|
40 |
task="text-generation",
|