JunchuanYu committed · Commit 0c34dbf · Parent(s): 701ee6e

Upload 2 files

- app.py +123 -0
- requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,123 @@
```python
import gradio as gr
import os
import openai
import requests
import json

# Use the key from the environment by default; a user-supplied key entered in the UI overrides it.
openai.api_key = os.environ.get("OPENAI_API_KEY")
# openai.api_key = 'sk-...'  # hardcoded key redacted; never commit API keys

prompt_templates = {"Default ChatGPT": ""}

def get_empty_state():
    return {"total_tokens": 0, "messages": []}

def download_prompt_templates():
    # Fetch the role/prompt pairs and use them to populate the template dropdown.
    url = "https://raw.githubusercontent.com/JunchuanYu/Sydney/main/prompts.csv"
    response = requests.get(url)

    for line in response.text.splitlines()[1:]:
        act, prompt = line.split('","')
        prompt_templates[act.replace('"', '')] = prompt.replace('"', '')

    choices = list(prompt_templates.keys())
    return gr.update(value=choices[0], choices=choices)

def on_token_change(user_token):
    openai.api_key = user_token or os.environ.get("OPENAI_API_KEY")

def on_prompt_template_change(prompt_template):
    if not isinstance(prompt_template, str):
        return
    return prompt_templates[prompt_template]

def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):

    history = state['messages']

    if not prompt:
        # Empty input: re-render the current conversation without calling the API.
        # (Note: this branch checks a 1_000-token threshold while the main path checks 3000.)
        return gr.update(value='', visible=state['total_tokens'] < 1_000), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']} / 3000", state

    prompt_template = prompt_templates[prompt_template]

    system_prompt = []
    if prompt_template:
        system_prompt = [{"role": "system", "content": prompt_template}]

    prompt_msg = {"role": "user", "content": prompt}

    try:
        completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history + [prompt_msg], temperature=temperature, max_tokens=max_tokens)

        history.append(prompt_msg)
        history.append(completion.choices[0].message.to_dict())

        state['total_tokens'] += completion['usage']['total_tokens']

    except Exception as e:
        # Show API errors in the chat window instead of crashing the app.
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else ""
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    input_visibility = user_token or state['total_tokens'] < 3000

    return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state

def clear_conversation():
    return gr.update(value=None, visible=True), None, "", get_empty_state()

css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 400px;}
#header {text-align: center;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
.message { font-size: 1.2em; }
"""

with gr.Blocks(css=css) as demo:

    state = gr.State(get_empty_state())

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
# Sydney-AI

<p align="left">This app is an intelligent online chat app built on the newly released OpenAI "gpt-3.5-turbo" API. The app's operating costs are sponsored by "45度科研人". Token usage is currently limited to 3000; if you want to remove this restriction, you can enter your own OpenAI API key.</p>
<p align="left">The app's default role is the original ChatGPT assistant, but you can also choose from the provided roles.</p>
<p align="left">Two adjustable parameters are provided to tune the model: temperature, where a larger value leads to more creative replies, and max tokens, where a larger value lets the model reply with more content.</p>
APP link: [https://junchuanyu-sydney.hf.space](https://junchuanyu-sydney.hf.space)""", elem_id="header")

        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox").style(color_map=("blue", "green"))
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press submit", visible=True).style(container=False)
                btn_submit = gr.Button("Submit")
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("Restart Conversation")
            with gr.Column():
                gr.Markdown("Enter your own OpenAI API key to remove the 3000-token limit. You can get one by following the instructions [here](https://blog.pangao.vip/%E8%B6%85%E8%AF%A6%E7%BB%86%E6%B3%A8%E5%86%8COpenAI%E6%8E%A5%E5%8F%A3%E8%B4%A6%E5%8F%B7%E7%9A%84%E6%95%99%E7%A8%8B/).", elem_id="label")
                user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False)
                prompt_template = gr.Dropdown(label="Set a custom instruction for the chatbot:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)")
                    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")
        # gr.Markdown("![](https://dunazo.oss-cn-beijing.aliyuncs.com/blog/wechat-simple.png)", elem_id="header")

        gr.Markdown("""
You can follow the WeChat public account [45度科研人] and leave me a message!
<div align=center><img width = '200' height ='200' src ="https://dunazo.oss-cn-beijing.aliyuncs.com/blog/wechat-simple.png"/></div>""", elem_id="header")

    # Wire UI events to the handlers defined above.
    btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
    input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
    user_token.change(on_token_change, inputs=[user_token], outputs=[])

    # Populate the prompt-template dropdown when the app loads.
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])

demo.launch(debug=True, height='800px')
```
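For reference, the core of `submit_message` is a single `openai.ChatCompletion.create` call built from a system prompt, the prior turns, and the new user message. The sketch below is a minimal, standalone illustration of that request/response shape, assuming `openai==0.27.0` and an `OPENAI_API_KEY` in the environment; the message content is made up for the example.

```python
import os
import openai

openai.api_key = os.environ.get("OPENAI_API_KEY")

# Same message layout the app builds: optional system prompt, then history, then the new user turn.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
    temperature=0.7,   # the app's default Temperature slider value
    max_tokens=1000,   # the app's default Max tokens slider value
)

print(completion.choices[0].message.content)   # assistant reply shown in the chatbox
print(completion["usage"]["total_tokens"])     # token count the app accumulates in state
```

Larger `temperature` values make replies more varied, and `max_tokens` caps the length of each reply, which is exactly what the two sliders in the "Advanced parameters" accordion control.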
requirements.txt ADDED
@@ -0,0 +1 @@
```
openai==0.27.0
```
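The pin on `openai==0.27.0` matches the `openai.ChatCompletion` interface used in app.py. Note that `gradio` and `requests` are imported by app.py but not listed here; on Hugging Face Spaces they are presumably supplied by the Gradio SDK runtime. For a local run, a fuller requirements file would likely look something like the following (an assumption, not part of this commit):

```
gradio
requests
openai==0.27.0
```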