# Hugging Face Spaces status banner ("Spaces: Running") — non-code extraction residue.
import requests | |
import os | |
from datetime import datetime | |
from dateutil.relativedelta import relativedelta | |
import openai | |
# Chat Completions endpoint used to probe a key with a minimal 1-token request.
queryUrl = 'https://api.openai.com/v1/chat/completions'
# Model families this checker knows about.
# NOTE(review): not referenced elsewhere in this file — possibly consumed by an
# external caller; confirm before removing.
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
# Observed requests-per-minute limits per model/tier, used by check_key_type()
# to infer whether a key is a trial key or a paid key.
rate_limit_per_model = {
    "gpt-3.5-turbo-new": 2000,  # newer paid-tier RPM limit
    "gpt-3.5-turbo-old": 3500,  # older paid-tier RPM limit
    "gpt-4": 200,
    "gpt-4-32k": 1000  # No actual clue, rare enough
}
# Cheapest possible probe bodies (max_tokens=1, empty message) used only to
# elicit the rate-limit response headers.
body_turbo = {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
body_gpt4 = {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
def get_headers(key):
    """Build the Authorization header dict for OpenAI REST calls."""
    return {'Authorization': f'Bearer {key}'}
def get_subscription(key):
    """Probe an OpenAI API key and report its organization, RPM limit and quota type.

    Sends a minimal 1-token chat-completion request and reads the rate-limit
    response headers to classify the key via check_key_type().

    Args:
        key: the OpenAI API key to check.

    Returns:
        dict with keys "organization", "rpm" and "quota"; all values are empty
        strings when the key is unusable, and "quota" carries an error code
        string when the probe request is rejected.
    """
    # Bug fix: the availability helpers below call openai.Model.list(), which
    # reads the module-global openai.api_key — it was never set from `key`,
    # so those checks ran against whatever key (if any) was configured before.
    openai.api_key = key
    headers = get_headers(key)
    if not check_key_availability():
        return {"organization": "",
                "rpm": "",
                "quota": ""}
    # Only probe GPT-4 availability once we know the key works at all
    # (avoids a wasted network round-trip on dead keys).
    gpt4_avai = check_gpt4_availability()
    rpm = ""
    org = ""
    quota = ""
    r = requests.post(queryUrl, headers=headers, json=body_gpt4 if gpt4_avai else body_turbo)
    result = r.json()
    if "id" in result:
        # Successful completion responses carry the rate-limit headers.
        rpm = r.headers['x-ratelimit-limit-requests']
        org = r.headers['openai-organization']
        quota = check_key_type("gpt-4" if gpt4_avai else "gpt-3.5-turbo", int(rpm))
    else:
        e = result["error"]["code"]
        quota = f"Error: {e}"
    return {"organization": org,
            "rpm": rpm,
            "quota": quota}
#def get_usage(key): | |
# if check_key_availability(): | |
# start_date = datetime.now().strftime('%Y-%m-01') | |
# end_date = (datetime.now() + relativedelta(months=1)).strftime('%Y-%m-01') | |
# queryUrl = f'https://api.openai.com/dashboard/billing/usage?start_date={start_date}&end_date={end_date}' | |
# headers = get_headers(key) | |
# r = requests.get(queryUrl, headers=headers) | |
# return r.json() | |
# else: | |
# return "" | |
def check_key_type(model, rpm):
    """Classify a key as trial / pay / "big" from its requests-per-minute limit.

    Args:
        model: "gpt-3.5-turbo", "gpt-4" or "gpt-4-32k".
        rpm: the x-ratelimit-limit-requests value observed for the key.

    Returns:
        A human-readable "yes | ..." classification string.
    """
    if model == "gpt-3.5-turbo":
        # Two known paid tiers exist for turbo keys.
        old_limit = rate_limit_per_model['gpt-3.5-turbo-old']
        new_limit = rate_limit_per_model['gpt-3.5-turbo-new']
        if rpm > old_limit:
            return "yes | pay, possibly big key"
        if rpm in (new_limit, old_limit):
            return "yes | pay"
        return "yes | trial"
    known_limit = rate_limit_per_model[model]
    if rpm < known_limit:
        return "yes | trial"
    if rpm == known_limit:
        return "yes | pay"
    return "yes | pay, possibly big key"
def check_gpt4_availability():
    """Return True when the configured key's model list includes 'gpt-4'."""
    if not check_key_availability():
        return False
    roots = (model["root"] for model in openai.Model.list()["data"])
    return 'gpt-4' in roots
def check_gpt4_32k_availability():
    """Return True when the configured key's model list includes 'gpt-4-32k'."""
    if not check_key_availability():
        return False
    roots = (model["root"] for model in openai.Model.list()["data"])
    return 'gpt-4-32k' in roots
def check_key_availability():
    """Return True if the globally configured OpenAI key can list models.

    Relies on openai.api_key having been set by the caller.
    """
    try:
        openai.Model.list()
        return True
    # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; any API/auth/network failure still reports False.
    except Exception:
        return False
if __name__ == "__main__":
    # Read the key to probe from the environment and print the report.
    api_key = os.getenv("OPENAI_API_KEY")
    report = get_subscription(api_key)
    for field, value in report.items():
        print(f"{field}: {value}")