import os
import tempfile

from rest_framework import viewsets, filters
from django_filters.rest_framework import DjangoFilterBackend
from endpoint_teste.models import EndpointTesteModel
from endpoint_teste.serializer import EndpointTesteSerializer, PDFUploadSerializer
from setup.environment import default_model
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import api_view, parser_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from langchain_backend.main import get_llm_answer
from .serializer import TesteSerializer
from langchain_huggingface import HuggingFaceEndpoint


class EndpointTesteViewSet(viewsets.ModelViewSet):
    """Shows all tasks."""

    queryset = EndpointTesteModel.objects.order_by("id").all()
    serializer_class = EndpointTesteSerializer
    filter_backends = [DjangoFilterBackend, filters.SearchFilter]
    search_fields = ["id"]
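
# Registration sketch (assumption: the real route names live in the project's
# urls.py, which is not shown here):
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register("endpoint-teste", EndpointTesteViewSet)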


@api_view(["GET", "POST"])
def getTeste(request):
    if request.method == "POST":
        serializer = TesteSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            data = request.data
            # pdf_url is optional; use .get() so a missing key does not raise KeyError
            pdf_url = data.get("pdf_url") or None
            resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], pdf_url)
            return Response({"Resposta": resposta_llm})
    if request.method == "GET":
        # Earlier experiment, kept for reference: calling the HF Inference API directly.
        # hugging_face_token = os.environ.get("hugging_face_token")
        # API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B"
        # headers = {"Authorization": "Bearer " + hugging_face_token}
        # def query(payload):
        #     response = requests.post(API_URL, headers=headers, json=payload)
        #     return response.json()
        # output = query({"inputs": "Can you please let us know more details about your something I don't know"})

        # Current approach: LangChain's HuggingFaceEndpoint wrapper
        llm = HuggingFaceEndpoint(
            repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
            task="text-generation",
            max_new_tokens=100,
            do_sample=False,
            huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
        )
        result = llm.invoke("Hugging Face is")
        return Response(result)
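
# Usage sketch (assumption: "/teste" is a placeholder path; the real route is
# defined in the project's urls.py, which is not shown here):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/teste",
#       json={"system_prompt": "...", "user_message": "...", "pdf_url": None},
#   )
#   print(resp.json())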


@extend_schema(
    request=PDFUploadSerializer,
)
@api_view(["POST"])
@parser_classes([MultiPartParser])
def getPDF(request):
    if request.method == "POST":
        serializer = PDFUploadSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            data = request.data
            embedding = serializer.validated_data.get("embedding", "gpt")
            model = serializer.validated_data.get("model", default_model)

            # Write each uploaded PDF to a temporary file so the LLM pipeline
            # can read it from a filesystem path
            listaPDFs = []
            for file in serializer.validated_data["files"]:
                file.seek(0)
                with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
                    for chunk in file.chunks():
                        temp_file.write(chunk)
                    listaPDFs.append(temp_file.name)

            try:
                resposta_llm = get_llm_answer(
                    data["system_prompt"],
                    data["user_message"],
                    listaPDFs,
                    model=model,
                    embedding=embedding,
                )
            finally:
                # Remove the temporary files even if the LLM call fails
                for temp_file_path in listaPDFs:
                    os.remove(temp_file_path)

            return Response({"Resposta": resposta_llm})
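
# Usage sketch (assumption: "/pdf" is a placeholder path; the multipart field
# names match PDFUploadSerializer and the request.data keys used above):
#
#   import requests
#   files = [("files", open("doc1.pdf", "rb")), ("files", open("doc2.pdf", "rb"))]
#   data = {"system_prompt": "...", "user_message": "..."}
#   resp = requests.post("http://localhost:8000/pdf", files=files, data=data)
#   print(resp.json())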