Spaces:
Running
Running
Commit
·
e0ae8d4
1
Parent(s):
f85e3ed
Update api_usage.py
Browse files- api_usage.py +54 -69
api_usage.py
CHANGED
@@ -4,13 +4,12 @@ from datetime import datetime
|
|
4 |
from dateutil.relativedelta import relativedelta
|
5 |
import openai
|
6 |
|
7 |
-
|
8 |
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
|
9 |
rate_limit_per_model = {
|
10 |
-
"gpt-3.5-turbo
|
11 |
-
"gpt-3.5-turbo-pay": 3500,
|
12 |
"gpt-4": 200,
|
13 |
-
"gpt-4-32k": 1000
|
14 |
}
|
15 |
body_gpt = {
|
16 |
"gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
|
@@ -18,95 +17,83 @@ body_gpt = {
|
|
18 |
"gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
19 |
}
|
20 |
|
21 |
-
|
22 |
def get_headers(key):
    """Return the HTTP auth headers for the given OpenAI API key."""
    auth = {'Authorization': f'Bearer {key}'}
    return auth
|
25 |
|
26 |
def get_subscription(key):
|
27 |
-
headers = get_headers(key)
|
28 |
-
|
29 |
-
|
30 |
-
key_highest_model = ""
|
31 |
-
if check_gpt4_32k_availability():
|
32 |
-
key_highest_model = GPT_TYPES[2]
|
33 |
-
elif check_gpt4_availability():
|
34 |
-
key_highest_model = GPT_TYPES[1]
|
35 |
-
else:
|
36 |
-
key_highest_model = GPT_TYPES[0]
|
37 |
-
|
38 |
if check_key_availability():
|
39 |
-
rpm = ""
|
|
|
40 |
org = ""
|
41 |
quota = ""
|
42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
result = r.json()
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
|
|
|
|
48 |
else:
|
49 |
-
e = result
|
50 |
quota = f"Error: {e}"
|
|
|
51 |
|
52 |
-
return {"
|
53 |
-
"
|
|
|
|
|
|
|
54 |
"quota": quota}
|
55 |
else:
|
56 |
-
return {"
|
|
|
|
|
57 |
"rpm": "",
|
|
|
58 |
"quota": ""}
|
59 |
-
#"has_payment_method": False,
|
60 |
-
#"hard_limit_usd": "",
|
61 |
-
#"plan": ""}
|
62 |
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
# queryUrl = f'https://api.openai.com/dashboard/billing/usage?start_date={start_date}&end_date={end_date}'
|
68 |
-
# headers = get_headers(key)
|
69 |
-
# r = requests.get(queryUrl, headers=headers)
|
70 |
-
# return r.json()
|
71 |
-
# else:
|
72 |
-
# return ""
|
73 |
|
74 |
def check_key_type(model, rpm):
|
75 |
-
if
|
76 |
-
|
77 |
-
return "yes | pay, possibly big key"
|
78 |
-
elif rpm > rate_limit_per_model['gpt-3.5-turbo-trial'] and rpm <= rate_limit_per_model['gpt-3.5-turbo-pay']:
|
79 |
-
return "yes | pay"
|
80 |
-
else:
|
81 |
-
return "yes | trial"
|
82 |
else:
|
83 |
-
|
84 |
-
return "yes | trial"
|
85 |
-
elif rpm == rate_limit_per_model[model]:
|
86 |
-
return "yes | pay"
|
87 |
-
else:
|
88 |
-
return "yes | pay, possibly big key"
|
89 |
|
90 |
-
def check_gpt4_availability():
|
91 |
-
if
|
92 |
-
|
93 |
-
if 'gpt-4' in available_models:
|
94 |
-
return True
|
95 |
-
else:
|
96 |
-
return False
|
97 |
else:
|
98 |
return False
|
99 |
|
100 |
-
def check_gpt4_32k_availability():
|
101 |
-
if
|
102 |
-
|
103 |
-
if 'gpt-4-32k' in available_models:
|
104 |
-
return True
|
105 |
-
else:
|
106 |
-
return False
|
107 |
else:
|
108 |
return False
|
109 |
-
|
110 |
def check_key_availability():
|
111 |
try:
|
112 |
openai.Model.list()
|
@@ -116,8 +103,6 @@ def check_key_availability():
|
|
116 |
|
117 |
if __name__ == "__main__":
|
118 |
key = os.getenv("OPENAI_API_KEY")
|
119 |
-
# results = get_usage(key)
|
120 |
-
# print(results)
|
121 |
|
122 |
results = get_subscription(key)
|
123 |
for k, v in results.items():
|
|
|
4 |
from dateutil.relativedelta import relativedelta
|
5 |
import openai
|
6 |
|
7 |
+
baseUrl = 'https://api.openai.com/v1'
|
8 |
GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
|
9 |
rate_limit_per_model = {
|
10 |
+
"gpt-3.5-turbo": 2000, # new pay turbo will have 2000 RPM for the first 48 hours then become 3500
|
|
|
11 |
"gpt-4": 200,
|
12 |
+
"gpt-4-32k": 1000
|
13 |
}
|
14 |
body_gpt = {
|
15 |
"gpt-3.5-turbo": {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]},
|
|
|
17 |
"gpt-4-32k": {"model": "gpt-4-32k", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
|
18 |
}
|
19 |
|
|
|
20 |
def get_headers(key):
    """Build the Authorization header dict for an OpenAI API key."""
    return {'Authorization': f'Bearer {key}'}
|
23 |
|
24 |
def get_subscription(key):
    """Probe an OpenAI API key and summarize its capabilities.

    Issues a minimal (max_tokens=1) chat completion against the highest
    model the key can access, then reads the rate-limit response headers.

    Returns a dict with:
        has_gpt4_32k / has_gpt4: bool model-access flags
        organization: org name from response headers (or fallback lookup)
        rpm / tpm:    reported requests/tokens-per-minute limits
        quota:        key classification string, or "Error: <code>" on failure
    """
    headers = get_headers(key)
    if check_key_availability():
        rpm = "0"
        tpm = "0"
        org = ""
        quota = ""
        has_gpt4_32k = False
        has_gpt4 = False
        available_models = [model["root"] for model in openai.Model.list()["data"]]
        key_highest_model = ""

        if check_gpt4_32k_availability(available_models):
            key_highest_model = GPT_TYPES[2]
            has_gpt4_32k = True
            has_gpt4 = True
        elif check_gpt4_availability(available_models):
            key_highest_model = GPT_TYPES[1]
            has_gpt4 = True
        else:
            key_highest_model = GPT_TYPES[0]

        # Cheapest possible completion — we only want the rate-limit headers.
        r = requests.post(f"{baseUrl}/chat/completions", headers=headers, json=body_gpt[key_highest_model])
        result = r.json()

        if "id" in result:  # a successful completion always carries an "id"
            rpm = r.headers.get("x-ratelimit-limit-requests", "0")
            tpm = r.headers.get("x-ratelimit-limit-tokens", "0")
            org = r.headers.get('openai-organization', "")
            quota = check_key_type(key_highest_model, int(rpm))
        else:
            e = result.get("error", {}).get("code", "")
            quota = f"Error: {e}"
            # Headers from a failed completion lack the org; look it up separately.
            org = get_org_name(key)

        return {"has_gpt4_32k": has_gpt4_32k,
                "has_gpt4": has_gpt4,
                "organization": org,
                "rpm": f"{rpm} ({key_highest_model})",
                "tpm": f"{tpm}",
                "quota": quota}
    else:
        # BUG FIX: the original returned has_gpt4_32k / has_gpt4 here, but both
        # names are only assigned inside the branch above — an unavailable key
        # raised NameError instead of returning this empty summary.
        return {"has_gpt4_32k": False,
                "has_gpt4": False,
                "organization": "",
                "rpm": "",
                "tpm": "",
                "quota": ""}
|
|
|
|
|
|
|
73 |
|
74 |
+
def get_org_name(key):
    """Fetch the key's organization name via a throwaway API call.

    Fallback used when the chat-completions probe in get_subscription
    fails and its response headers are unavailable.
    """
    headers = get_headers(key)
    # Any authenticated endpoint echoes the org in its response headers.
    r = requests.post(f"{baseUrl}/images/generations", headers=headers)
    # BUG FIX: .get avoids a KeyError when the header is missing (this runs
    # on the error path, e.g. for an invalid key) — consistent with the
    # .get-based header reads in get_subscription.
    return r.headers.get('openai-organization', "")
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
|
79 |
def check_key_type(model, rpm):
    """Classify a key as pay or trial from its requests-per-minute limit.

    A key whose RPM meets or exceeds the published limit for *model*
    is treated as a paid key; anything lower is a trial key.
    """
    paid = rpm >= rate_limit_per_model[model]
    return "yes | pay" if paid else "yes | trial"
|
|
|
|
|
|
|
|
|
|
|
84 |
|
85 |
+
def check_gpt4_availability(available_models):
    """Return True if the key's model list includes 'gpt-4'.

    Idiom fix: the original `if x: return True else: return False`
    collapses to returning the membership test directly.
    """
    return 'gpt-4' in available_models
|
90 |
|
91 |
+
def check_gpt4_32k_availability(available_models):
    """Return True if the key's model list includes 'gpt-4-32k'.

    Idiom fix: the original `if x: return True else: return False`
    collapses to returning the membership test directly.
    """
    return 'gpt-4-32k' in available_models
|
96 |
+
|
97 |
def check_key_availability():
|
98 |
try:
|
99 |
openai.Model.list()
|
|
|
103 |
|
104 |
if __name__ == "__main__":
|
105 |
key = os.getenv("OPENAI_API_KEY")
|
|
|
|
|
106 |
|
107 |
results = get_subscription(key)
|
108 |
for k, v in results.items():
|