# OpenAI API key checker — probes model access, rate-limit headers and key type.
import os
from datetime import datetime

import openai
import requests
from dateutil.relativedelta import relativedelta
# Base OpenAI REST endpoint used for direct HTTP probes.
baseUrl = 'https://api.openai.com/v1'

# Models probed, ordered from least to most capable.
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]

# Requests-per-minute thresholds that separate paid keys from trial keys.
rate_limit_per_model = {
    "gpt-3.5-turbo": 2000,  # new pay turbo will have 2000 RPM for the first 48 hours then become 3500
    "gpt-4": 200,
    "gpt-4-32k": 1000,
}

# Minimal one-token chat payload per model — the cheapest request that still
# returns rate-limit headers.
body_gpt = {
    name: {"model": name, "max_tokens": 1, "messages": [{'role': 'user', 'content': ''}]}
    for name in GPT_TYPES
}
def get_headers(key):
    """Build the Authorization header dict for an OpenAI API key."""
    return {'Authorization': f'Bearer {key}'}
def get_subscription(key):
    """Probe an OpenAI API key and summarize its capabilities.

    Determines the highest chat model the key can access, sends a minimal
    one-token completion against it, and reads the rate-limit headers from
    the response to classify the key.

    Args:
        key: OpenAI API key (``sk-...``).

    Returns:
        dict with keys ``has_gpt4_32k``, ``has_gpt4``, ``organization``,
        ``rpm``, ``tpm`` and ``quota``. When the key cannot list models at
        all, the booleans are False and the string fields are empty.
    """
    headers = get_headers(key)
    # NOTE(review): check_key_availability() uses the openai module's globally
    # configured credentials (e.g. the OPENAI_API_KEY env var), not the `key`
    # argument — confirm both refer to the same key.
    # Bug fix: the original referenced has_gpt4_32k/has_gpt4 in this failure
    # branch before they were ever assigned, raising NameError.
    if not check_key_availability():
        return {"has_gpt4_32k": False,
                "has_gpt4": False,
                "organization": "",
                "rpm": "",
                "tpm": "",
                "quota": ""}

    rpm = "0"
    tpm = "0"
    org = ""
    quota = ""
    has_gpt4_32k = False
    has_gpt4 = False
    # "root" is the base model name (collapses fine-tuned variants).
    available_models = [model["root"] for model in openai.Model.list()["data"]]
    if check_gpt4_32k_availability(available_models):
        key_highest_model = GPT_TYPES[2]
        has_gpt4_32k = True
        has_gpt4 = True
    elif check_gpt4_availability(available_models):
        key_highest_model = GPT_TYPES[1]
        has_gpt4 = True
    else:
        key_highest_model = GPT_TYPES[0]

    # One-token completion: cheapest call that still returns rate-limit headers.
    r = requests.post(f"{baseUrl}/chat/completions",
                      headers=headers,
                      json=body_gpt[key_highest_model])
    result = r.json()
    if "id" in result:  # a successful completion always carries an "id"
        rpm = r.headers.get("x-ratelimit-limit-requests", "0")
        tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
        org = r.headers.get('openai-organization', "")
        quota = check_key_type(key_highest_model, int(rpm))
    else:
        e = result.get("error", {}).get("code", "")
        quota = f"Error: {e}"
        org = get_org_name(key)
    return {"has_gpt4_32k": has_gpt4_32k,
            "has_gpt4": has_gpt4,
            "organization": org,
            "rpm": f"{rpm} ({key_highest_model})",
            "tpm": f"{tpm}",
            "quota": quota}
def get_org_name(key):
    """Return the organization name reported in the API response headers for *key*."""
    response = requests.post(f"{baseUrl}/images/generations",
                             headers=get_headers(key))
    return response.headers['openai-organization']
def check_key_type(model, rpm):
    """Classify a key as paid or trial by comparing its RPM to the model's threshold."""
    return "yes | pay" if rpm >= rate_limit_per_model[model] else "yes | trial"
def check_gpt4_availability(available_models):
    """Return True if the 'gpt-4' base model appears in *available_models*."""
    # Idiom fix: return the membership test directly instead of
    # `if cond: return True else: return False`.
    return 'gpt-4' in available_models
def check_gpt4_32k_availability(available_models):
    """Return True if the 'gpt-4-32k' base model appears in *available_models*."""
    # Idiom fix: return the membership test directly instead of
    # `if cond: return True else: return False`.
    return 'gpt-4-32k' in available_models
def check_key_availability():
    """Return True if the globally configured openai credentials can list models.

    Uses the openai module's global key (not a parameter) — a successful
    Model.list() call means the key is at least minimally usable.
    """
    try:
        openai.Model.list()
        return True
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit). Any API/auth/network failure
        # means the key is unusable.
        return False
if __name__ == "__main__":
    # Probe the key from the environment and print one "field: value" line
    # per entry of the subscription summary.
    api_key = os.getenv("OPENAI_API_KEY")
    for field, value in get_subscription(api_key).items():
        print(f"{field}: {value}")