Update app.py
app.py CHANGED
@@ -67,67 +67,54 @@ def calculate_eou(chat_ctx, session):
     eou_token_id = tokenizer.encode("<|im_end|>")[-1]
     return probs[eou_token_id]
 
-
-# Respond function
+# Respond function with default parameters
 def respond(
     message,
     history: list[tuple[str, str]],
-    max_tokens,
-    temperature,
-    top_p,
+    max_tokens=256,
+    temperature=0.7,
+    top_p=0.95,
 ):
-
-    messages = [{"role": "system", "content": os.environ.get("CHARACTER_DESC")}]
+    messages = [{"role": "system", "content": os.environ.get("CHARACTER_DESC", "You are a helpful assistant.")}]
 
-    for val in history[-
+    for val in history[-MAX_HISTORY:]:  # Use last 4 pairs
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    # Add the new user message to the context
     messages.append({"role": "user", "content": message})
 
-    # Calculate EOU probability
     eou_prob = calculate_eou(messages, onnx_session)
-    print(f"EOU Probability: {eou_prob}")
+    print(f"EOU Probability: {eou_prob}")
 
-    # If EOU is below the threshold, ask for more input
     if eou_prob < EOU_THRESHOLD:
         yield "[Waiting for user to continue input...]"
         return
 
-    # Generate response with Qwen
     response = ""
-    for
+    for chunk in qwen_client.chat_completion(
         messages,
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
         top_p=top_p,
     ):
-        token =
+        token = chunk.choices[0].delta.content or ""
         response += token
         yield response
-
-    print(f"Generated response: {response}")
 
+    print(f"Generated response: {response}")
 
-    # Gradio interface
+# Gradio interface with additional inputs
 demo = gr.ChatInterface(
     respond,
-
-
-
-
-
-    # gr.Slider(
-    # minimum=0.1,
-    # maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
-    # ),
-    # ],
+    additional_inputs=[
+        gr.Slider(minimum=1, maximum=4096, value=256, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+    ],
 )
 
 if __name__ == "__main__":
-    demo.launch()
-
+    demo.launch()
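
For reference, the hunk leans on several names defined earlier in app.py that it does not show: tokenizer, onnx_session, qwen_client, MAX_HISTORY, and EOU_THRESHOLD. The sketch below shows one plausible version of that setup plus a full calculate_eou helper, assuming the end-of-utterance (EOU) model is a causal LM exported to ONNX and the reply model is served through huggingface_hub's InferenceClient, whose streaming chat_completion output matches the chunk.choices[0].delta.content access in the diff. All model IDs, file paths, input/output names, and constant values here are illustrative guesses, not taken from the repository.

import os

import gradio as gr
import numpy as np
import onnxruntime
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

MAX_HISTORY = 4      # (user, assistant) pairs kept as context; value assumed
EOU_THRESHOLD = 0.2  # end-of-utterance probability cutoff; value assumed

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")  # hypothetical model ID
onnx_session = onnxruntime.InferenceSession("eou_model.onnx")            # hypothetical path
qwen_client = InferenceClient("Qwen/Qwen2.5-7B-Instruct")                # hypothetical model ID


def calculate_eou(chat_ctx, session):
    # Render the conversation without a trailing assistant prompt, so the
    # next-token distribution reflects whether the user is done speaking.
    text = tokenizer.apply_chat_template(
        chat_ctx, add_generation_prompt=False, tokenize=False
    )
    # Drop the final end-of-turn marker so the model predicts whether the
    # user's message would end here.
    text = text.rstrip("\n")
    if text.endswith("<|im_end|>"):
        text = text[: -len("<|im_end|>")]
    input_ids = np.array([tokenizer.encode(text)], dtype=np.int64)
    # Assumes the exported model takes "input_ids" and returns logits of
    # shape (batch, seq_len, vocab_size) as its first output.
    logits = session.run(None, {"input_ids": input_ids})[0]
    last = logits[0, -1]
    probs = np.exp(last - last.max())
    probs /= probs.sum()  # softmax over the vocabulary
    # The two lines below are the ones visible at the top of the hunk.
    eou_token_id = tokenizer.encode("<|im_end|>")[-1]
    return probs[eou_token_id]

Under these assumptions, respond gates generation on the probability that the user's turn ends with <|im_end|> right now: below EOU_THRESHOLD it yields a placeholder and returns without calling the chat model; otherwise it streams the Qwen completion chunk by chunk, yielding the accumulated response so Gradio can render it incrementally.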