import os

import requests

# OpenAI key (only needed when service == 'openai'); .get() avoids a KeyError at import time
openai_api_key = os.environ.get('GPT3_API_KEY_OPENAI')

# Azure OpenAI configuration
azure_api_key = os.environ['GPT3_API_KEY_AZURE']
azure_api_base = "https://openai-619.openai.azure.com/"  # your endpoint should look like https://YOUR_RESOURCE_NAME.openai.azure.com/
azure_api_type = 'azure'
azure_api_version = '2022-12-01'  # this may change in the future
def gpt3(prompt, model, service, max_tokens=400):
    """Send `prompt` to the OpenAI or Azure OpenAI REST API and return the generated text."""
    if service == 'openai':
        if model == 'gpt-3.5-turbo':
            # Chat completions endpoint
            api_endpoint = "https://api.openai.com/v1/chat/completions"
            data = {
                "model": "gpt-3.5-turbo",
                "messages": [{"role": "user", "content": prompt}]
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {openai_api_key}"
            }
            response = requests.post(api_endpoint, headers=headers, json=data)
            return response.json()['choices'][0]['message']['content']
        elif model == 'gpt-3':
            # Legacy completions endpoint (text-davinci-003)
            api_endpoint = "https://api.openai.com/v1/engines/text-davinci-003/completions"
            data = {
                "prompt": prompt,
                "max_tokens": max_tokens,
                "temperature": 0.5
            }
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {openai_api_key}"
            }
            response = requests.post(api_endpoint, headers=headers, json=data)
            return response.json()["choices"][0]["text"]
    elif service == 'azure':
        if model == 'gpt-3':
            azure_deployment_name = 'gpt3'
            api_endpoint = f"{azure_api_base}openai/deployments/{azure_deployment_name}/completions?api-version={azure_api_version}"
            headers = {
                "Content-Type": "application/json",
                "api-key": azure_api_key
            }
            data = {
                "prompt": prompt,
                "max_tokens": max_tokens
            }
            response = requests.post(api_endpoint, headers=headers, json=data)
            return response.json()["choices"][0]["text"]
        elif model == 'gpt-3.5-turbo':
            azure_deployment_name = 'gpt-35-turbo'  # must match the deployment name chosen in the Azure portal
            headers = {
                "Content-Type": "application/json",
                "api-key": azure_api_key
            }
            json_data = {
                'messages': [
                    {
                        'role': 'user',
                        'content': prompt,
                    },
                ],
            }
            api_endpoint = f"{azure_api_base}openai/deployments/{azure_deployment_name}/chat/completions?api-version=2023-03-15-preview"
            response = requests.post(api_endpoint, headers=headers, json=json_data)
            return response.json()['choices'][0]['message']['content']
# Azure is much more sensitive to max_tokens than the OpenAI endpoint
print(gpt3('how are you?', model='gpt-3.5-turbo', service='azure'))
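
# Usage sketch (illustrative, not part of the original script): compare the two
# Azure deployments configured above. It assumes GPT3_API_KEY_AZURE is set and
# that the resource really has deployments named 'gpt3' and 'gpt-35-turbo'.
for model_name in ('gpt-3', 'gpt-3.5-turbo'):
    print(model_name, '->', gpt3('Say hello in five words.', model=model_name, service='azure', max_tokens=20))

# The OpenAI endpoints work the same way but additionally need GPT3_API_KEY_OPENAI:
# print(gpt3('Say hello in five words.', model='gpt-3.5-turbo', service='openai'))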