import os
import tempfile

from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from langchain_huggingface import HuggingFaceEndpoint

from endpoint_teste.models import EndpointTesteModel
from endpoint_teste.serializer import EndpointTesteSerializer, PDFUploadSerializer
from langchain_backend.main import get_llm_answer
from .serializer import TesteSerializer
from ..environment import default_model
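
# PDFUploadSerializer is not defined in this file. Based on the fields read
# below (validated_data['file'], validated_data['model'], plus system_prompt
# and user_message from the request body), a minimal sketch — the field names
# and types here are assumptions, not this project's actual code:
#
#   class PDFUploadSerializer(serializers.Serializer):
#       file = serializers.FileField()
#       model = serializers.CharField(required=False)
#       system_prompt = serializers.CharField()
#       user_message = serializers.CharField()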

class EndpointTesteViewSet(viewsets.ModelViewSet):
    """Shows all tasks (EndpointTesteModel records), ordered and searchable by id."""
    queryset = EndpointTesteModel.objects.order_by("id").all()
    serializer_class = EndpointTesteSerializer
    filter_backends = [DjangoFilterBackend, filters.SearchFilter]
    search_fields = ["id"]
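
# Wiring this viewset into a router — a hedged sketch; the urls module and the
# route prefix are assumptions, not confirmed by this file:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r"endpoint-teste", EndpointTesteViewSet)
#   urlpatterns = router.urls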

@api_view(["GET", "POST"])
def getTeste(request):
    if request.method == "POST":
        serializer = TesteSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            data = request.data
            # pdf_url is optional; .get() avoids a KeyError when it is absent,
            # and an empty string is normalized to None.
            pdf_url = data.get("pdf_url") or None
            resposta_llm = get_llm_answer(data["system_prompt"], data["user_message"], pdf_url)
            return Response({
                "Resposta": resposta_llm
            })
    if request.method == "GET":
        # Smoke test: query a hosted Llama 3 model through LangChain's
        # HuggingFaceEndpoint.
        llm = HuggingFaceEndpoint(
            repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
            task="text-generation",
            max_new_tokens=100,
            do_sample=False,
            huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
        )
        result = llm.invoke("Hugging Face is")
        return Response(result)
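
# Example POST to getTeste — a sketch; the /teste/ path and host are
# assumptions, and pdf_url may be omitted or null:
#
#   import requests
#   resp = requests.post("http://localhost:8000/teste/", json={
#       "system_prompt": "You are a helpful assistant.",
#       "user_message": "Summarize the attached document.",
#       "pdf_url": None,
#   })
#   print(resp.json()["Resposta"])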

@api_view(["POST"])
def getPDF(request):
    if request.method == "POST":
        serializer = PDFUploadSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            # Access the uploaded file and rewind it, since validation may
            # already have consumed the stream.
            data = request.data
            pdf_file = serializer.validated_data['file']
            pdf_file.seek(0)

            # Persist the upload to a named temporary file so downstream
            # loaders can read it from a filesystem path.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
                for chunk in pdf_file.chunks():
                    temp_file.write(chunk)
                temp_file_path = temp_file.name

            try:
                try:
                    resposta_llm = get_llm_answer(
                        data["system_prompt"],
                        data["user_message"],
                        temp_file_path,
                        model=serializer.validated_data['model'],
                    )
                except Exception:
                    # If the requested model fails (or none was supplied),
                    # retry once with the configured default model.
                    resposta_llm = get_llm_answer(
                        data["system_prompt"],
                        data["user_message"],
                        temp_file_path,
                        model=default_model,
                    )
            finally:
                # Remove the temporary file even if both LLM calls raise.
                os.remove(temp_file_path)

            return Response({
                "Resposta": resposta_llm
            })
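
# Example multipart upload against getPDF — a sketch; the /pdf/ path, host,
# and model name are assumptions:
#
#   import requests
#   with open("sample.pdf", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/pdf/",
#           files={"file": f},
#           data={
#               "system_prompt": "You are a helpful assistant.",
#               "user_message": "Summarize this PDF.",
#               "model": "meta-llama/Meta-Llama-3-8B-Instruct",
#           },
#       )
#   print(resp.json()["Resposta"])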