import requests
import os

# OpenAI credentials
openai_api_key = os.environ['API_KEY_OPENAI']

# Azure OpenAI credentials and endpoint configuration
azure_api_key = os.environ['API_KEY_AZURE']
azure_api_base = "https://openai-619.openai.azure.com/"  # your endpoint looks like https://YOUR_RESOURCE_NAME.openai.azure.com/
azure_api_type = 'azure'
azure_api_version = '2022-12-01'  # this may change in the future
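
# Both environment variables above must be set before running this script; a
# minimal sketch (POSIX shell, placeholder values):
#   export API_KEY_OPENAI="sk-..."
#   export API_KEY_AZURE="<your-azure-openai-key>"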

def gpt3(prompt, model, service, max_tokens=400):
    """Send `prompt` to the chosen service ('openai' or 'azure') using either
    'gpt-3' (text-davinci-003 completions) or 'gpt-3.5-turbo' (chat completions),
    and return the generated text."""

    if service == 'openai':
        if model == 'gpt-3.5-turbo':
            api_endpoint = "https://api.openai.com/v1/chat/completions"
            data = {
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": prompt}]
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {openai_api_key}"
            }
            response = requests.post(api_endpoint, headers=headers, json=data)
            return response.json()['choices'][0]['message']['content']

        elif model == 'gpt-3':
            api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
            data = {
                "prompt": prompt,
                "max_tokens": max_tokens,
                "temperature": 0.5
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {openai_api_key}"
            }
            response = requests.post(api_endpoint, headers=headers, json=data)
            return response.json()["choices"][0]["text"]
                
    elif service == 'azure':
        
        if model == 'gpt-3':
            azure_deployment_name = 'gpt3'  # name of the completions deployment created in Azure

            api_endpoint = f"{azure_api_base}openai/deployments/{azure_deployment_name}/completions?api-version={azure_api_version}"

            headers = {
                "Content-Type": "application/json",
                "api-key": azure_api_key
            }

            data = {
                "prompt": prompt,
                "max_tokens": max_tokens
            }
            response = requests.post(api_endpoint, headers=headers, json=data)

            generated_text = response.json()["choices"][0]["text"]
            return generated_text

        elif model == 'gpt-3.5-turbo':
            azure_deployment_name = 'gpt-35-turbo'  # must match the chat deployment created in Azure (the Azure model is 'gpt-35-turbo', without the dot)
            headers = {
                "Content-Type": "application/json",
                "api-key": azure_api_key
            }
            json_data = {
                'messages': [
                    {
                        'role': 'user',
                        'content': prompt,
                    },
                ],
            }
            # the chat endpoint requires a newer api-version than the completions endpoint above
            api_endpoint = f"{azure_api_base}openai/deployments/{azure_deployment_name}/chat/completions?api-version=2023-03-15-preview"
            response = requests.post(api_endpoint, headers=headers, json=json_data)
            return response.json()['choices'][0]['message']['content']

    # any other service/model combination is not supported
    raise ValueError(f"Unsupported combination: service={service!r}, model={model!r}")

# Azure is much more sensitive to max_tokens
print(gpt3('how are you?', model='gpt-3.5-turbo', service='azure'))
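
# A couple more usage sketches (hypothetical prompts; they assume valid keys
# and, on the OpenAI side, access to the listed models):
print(gpt3('Summarise the plot of Hamlet in one sentence.', model='gpt-3.5-turbo', service='openai'))
print(gpt3('Write a haiku about the sea.', model='gpt-3', service='openai', max_tokens=60))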