远兮 committed · Commit 42bad84 · 1 Parent(s): 5ce9c8c
Commit message: chat接口改为支持stream (change the chat endpoint to support streaming)
chatgpt-next-web/chat_proxy.py
ADDED
@@ -0,0 +1,55 @@
import websocket
import json
import os

MY_OPENAI_API_KEY = os.environ.get('MY_OPENAI_API_KEY')


def on_message(ws, message):
    response = json.loads(message)
    # Handle the response from ChatGPT
    print(response['choices'][0]['text'])

    # Decide whether to close the connection based on your own condition
    if '完成对话的条件':  # placeholder: "condition for ending the conversation"
        ws.close()


def on_error(ws, error):
    print(error)


def on_close(ws):
    print("Connection closed")


def on_open(ws):
    # Send the initial message
    ws.send(json.dumps({
        'type': 'input',
        'message': '你好,ChatGPT!'  # "Hello, ChatGPT!"
    }))

    # Send a follow-up message (to finish the exchange)
    ws.send(json.dumps({
        'type': 'input',
        'message': '技术问题是...'  # "The technical question is ..."
    }))


if __name__ == '__main__':
    # Create the WebSocket connection and other initialization code
    ws = websocket.WebSocketApp(
        'wss://api.openai.com/v1/streaming_chat_completion',
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
        header={
            'Authorization': f'Bearer {MY_OPENAI_API_KEY}'
        }
    )

    ws.on_open = on_open

    # Run the WebSocket connection
    ws.run_forever()
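Note on chat_proxy.py: the public Chat Completions endpoint streams its responses as HTTP server-sent events rather than over a WebSocket URL, so the file above is best read as a sketch. A minimal SSE-based equivalent using requests might look like the following; the model, prompt, and chunk layout are assumptions taken from the other examples in this commit.

import json
import os

import requests

MY_OPENAI_API_KEY = os.environ.get('MY_OPENAI_API_KEY')

headers = {
    'Authorization': f'Bearer {MY_OPENAI_API_KEY}',
    'Content-Type': 'application/json',
}
data = {
    'model': 'gpt-3.5-turbo',  # model assumed from the other examples in this commit
    'messages': [{'role': 'user', 'content': '你好,ChatGPT!'}],
    'stream': True,
}

# Stream the HTTP response and parse the server-sent events line by line
with requests.post('https://api.openai.com/v1/chat/completions',
                   json=data, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if not line:
            continue
        payload = line.decode('utf-8')
        if payload.startswith('data: '):
            payload = payload[len('data: '):]
        if payload == '[DONE]':
            break
        chunk = json.loads(payload)
        # Each chunk carries an incremental delta for the assistant message
        print(chunk['choices'][0]['delta'].get('content', ''), end='', flush=True)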
redis/test_user_redis.py → chatgpt-next-web/service.py
RENAMED
@@ -7,15 +7,15 @@ import jwt
 import datetime
 import requests
 import os
-from flask import Flask, request, jsonify, Request
+from flask import Flask, request, jsonify, Request, Response
 from redis import Redis

 SECERT_KEY = "8U2LL1"
 MY_OPENAI_API_KEY = os.environ.get('MY_OPENAI_API_KEY')

 app = Flask(__name__)
-redis = Redis(host='192.168.3.229', port=6379, password='lizhen-redis')
-
+# redis = Redis(host='192.168.3.229', port=6379, password='lizhen-redis')
+redis = Redis(host='10.254.13.87', port=6379)
 # redis = Redis(host='localhost', port=6379)


@@ -219,7 +219,7 @@ def validate():
     return jsonify({'code': 0, 'message': 'Chat limit not exceeded'})


-@app.route('/chat/completions', methods=['POST'])
+@app.route('/v1/chat/completions', methods=['POST'])
 def proxy_chat_completions():
     token = parse_token(request)
     model = request.json.get('model')
@@ -248,23 +248,40 @@ def proxy_chat_completions():
     # Get the request data
     data = request.get_json()

+    stream = request.json.get('stream')
+
     # Set the request headers
     headers = {
         'Authorization': f'Bearer {MY_OPENAI_API_KEY}',
         'Content-Type': 'application/json'
     }
-    # Forward the request to the OpenAI API
-    response = requests.post(
-        'https://api.openai.com/v1/chat/completions', json=data, headers=headers)
-
-    # Get the OpenAI API response data
-    result = response.json()
-
-    user_package_key = f'user:{user_id}:package'
-    redis.hincrby(user_package_key, 'basic_chat_limit', -1)
-
-    # Return the OpenAI API response to the client
-    return result, response.status_code
+
+    if stream:
+        # Forward the request to the OpenAI API
+        response = requests.post(
+            'https://api.openai.com/v1/chat/completions', json=data, headers=headers, stream=True, verify=False)
+
+        # Get the OpenAI API response data
+        result = response.iter_content(chunk_size=8192)
+
+        user_package_key = f'user:{user_id}:package'
+        redis.hincrby(user_package_key, 'basic_chat_limit', -1)
+
+        # Return the OpenAI API response to the client
+        return Response(result, content_type=response.headers['content-type'])
+    else:
+        # Forward the request to the OpenAI API
+        response = requests.post(
+            'https://api.openai.com/v1/chat/completions', json=data, headers=headers)
+
+        # Get the OpenAI API response data
+        result = response.json()
+
+        user_package_key = f'user:{user_id}:package'
+        redis.hincrby(user_package_key, 'basic_chat_limit', -1)
+
+        # Return the OpenAI API response to the client
+        return result, response.status_code


 def parse_token(request: Request):
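For reference, a rough sketch of how a client might call the new streaming branch of /v1/chat/completions. The service address and the way the token is passed are assumptions (parse_token is not shown in this hunk); since the proxy relays the upstream bytes unchanged via iter_content, the client sees the same data: lines as in the SSE sketch after chat_proxy.py.

import requests

PROXY_URL = 'http://localhost:5000/v1/chat/completions'  # placeholder address for the Flask service
TOKEN = '<jwt issued by the service>'                     # placeholder token

payload = {
    'model': 'gpt-3.5-turbo',
    'messages': [{'role': 'user', 'content': '写一个50字左右的小故事'}],
    'stream': True,
}

with requests.post(PROXY_URL, json=payload,
                   headers={'Authorization': f'Bearer {TOKEN}'},  # assumption: parse_token reads a bearer token
                   stream=True) as resp:
    # The proxy forwards raw chunks, so just read and print the stream incrementally
    for raw in resp.iter_lines():
        if raw:
            print(raw.decode('utf-8'))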
openai-example/stream_chat_completion.py
ADDED
@@ -0,0 +1,17 @@
import openai  # for OpenAI API calls; assumes OPENAI_API_KEY is set in the environment

# Example of an OpenAI ChatCompletion request with stream=True
# https://platform.openai.com/docs/guides/chat

# a ChatCompletion request
response = openai.ChatCompletion.create(
    model='gpt-3.5-turbo',
    messages=[
        {'role': 'user', 'content': "写一个50字左右的小故事"}  # "Write a short story of about 50 characters"
    ],
    temperature=0,
    stream=True  # this time, we set stream=True
)

for chunk in response:
    print(chunk)
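If only the generated text is wanted, the print(chunk) loop above could be replaced with something along these lines; the chunk layout (choices[0].delta with an optional content field) follows the standard streaming response format.

collected = []
for chunk in response:
    delta = chunk['choices'][0]['delta']  # incremental piece of the assistant message
    piece = delta.get('content', '')      # the first chunk carries only the role, so default to ''
    collected.append(piece)
    print(piece, end='', flush=True)

full_reply = ''.join(collected)
print()
print(f"Full reply: {full_reply}")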
openai-example/typical_chat_completion.py
ADDED
@@ -0,0 +1,24 @@
import openai  # for OpenAI API calls
import time  # for measuring time duration of API calls

# Example of an OpenAI ChatCompletion request
# https://platform.openai.com/docs/guides/chat

# record the time before the request is sent
start_time = time.time()

# send a ChatCompletion request to count to 100
response = openai.ChatCompletion.create(
    model='gpt-3.5-turbo',
    messages=[
        {'role': 'user', 'content': 'Count to 100, with a comma between each number and no newlines. E.g., 1, 2, 3, ...'}
    ],
    temperature=0,
)

# calculate the time it took to receive the response
response_time = time.time() - start_time

# print the time delay and text received
print(f"Full response received {response_time:.2f} seconds after request")
print(f"Full response received:\n{response}")