Update app.py

This commit removes the sidebar's dictionary of per-model gr.load handles: the selectbox now offers a plain list of model names, and generation runs through the demo object imported from models, with the prompt and the sampling parameters (max_new_tokens, temperature, top_p) passed as direct arguments instead of a parameters dict.
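Both the old and new versions import demo from models, which this commit does not touch. For context, here is a minimal sketch of what models.py presumably contains, assuming it wraps gr.load the same way the removed sidebar code did (the model name shown is one of the three offered in the selectbox):

# models.py -- not part of this commit; a minimal sketch assuming it
# wraps gr.load just like the gr.load calls removed from the sidebar.
import gradio as gr

# Load a hosted DeepSeek model as a Gradio demo; app.py calls its
# prediction function via demo.fn(...).
demo = gr.load(
    name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    src="huggingface"
)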
app.py CHANGED

@@ -1,7 +1,7 @@
 # app.py
 import streamlit as st
 from models import demo
-
+
 # Page configuration
 st.set_page_config(
     page_title="DeepSeek Chatbot - ruslanmv.com",
@@ -16,40 +16,34 @@ if "messages" not in st.session_state:
 # Sidebar for model selection and parameters
 with st.sidebar:
     st.header("Model Configuration")
-
+
     # Model selection
-    model_options = {
-        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B": gr.load(
-            name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-            src="huggingface"
-        ),
-        "deepseek-ai/DeepSeek-R1": gr.load(name="deepseek-ai/DeepSeek-R1", src="huggingface"),
-        "deepseek-ai/DeepSeek-R1-Zero": gr.load(name="deepseek-ai/DeepSeek-R1-Zero", src="huggingface")
-    }
-
-    selected_model_name = st.selectbox(
+    selected_model = st.selectbox(
         "Choose Model",
-        list(model_options.keys()),
+        [
+            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            "deepseek-ai/DeepSeek-R1",
+            "deepseek-ai/DeepSeek-R1-Zero"
+        ],
         index=0
     )
-
-
+
     # System message
     system_message = st.text_area(
         "System Message",
         value="You are a friendly Chatbot created by ruslanmv.com",
         height=100
     )
-
+
     # Generation parameters
-    max_new_tokens = st.slider(
+    max_tokens = st.slider(
         "Max New Tokens",
         min_value=1,
         max_value=4000,
         value=512,
         step=10
     )
-
+
     temperature = st.slider(
         "Temperature",
         min_value=0.1,
@@ -57,7 +51,7 @@ with st.sidebar:
         value=0.7,
         step=0.1
     )
-
+
     top_p = st.slider(
         "Top-p (nucleus sampling)",
         min_value=0.1,
@@ -79,34 +73,28 @@ for message in st.session_state.messages:
 if prompt := st.chat_input("Type your message..."):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
-
+
     # Display user message
     with st.chat_message("user"):
         st.markdown(prompt)
-
-    # Prepare conversation history in the required format
-    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
-
+
     try:
         # Generate response using selected model
        with st.spinner("Generating response..."):
-            # The model expects
-            response = model_options[selected_model_name].fn(
-                full_prompt,
-                parameters={
-                    "temperature": temperature,
-                    "top_p": top_p,
-                    "max_new_tokens": max_new_tokens,
-                    "repetition_penalty": 1.0
-                }
+            # The model expects parameters as separate arguments
+            response = demo.fn(
+                f"{system_message}\n\nUser: {prompt}\nAssistant:",  # Full prompt
+                max_new_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p
             )
-
+
         # Display assistant response
         with st.chat_message("assistant"):
             st.markdown(response)
-
+
         # Add assistant response to chat history
         st.session_state.messages.append({"role": "assistant", "content": response})
-
+
     except Exception as e:
-        st.error(f"Error generating response: {str(e)}")
+        st.error(f"Error generating response: {str(e)}")
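The hunk headers reference chat-history code that sits outside the changed ranges (if "messages" not in st.session_state: near line 16, and for message in st.session_state.messages: just before line 79/73). A sketch of the standard Streamlit pattern those headers imply, assuming the undiffed lines follow it:

import streamlit as st

# Initialize the chat history once per session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay earlier turns on each rerun so the conversation persists.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])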
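One detail worth flagging in review: selected_model is still read from the sidebar, but after this change it is never consulted at inference time; every request goes through the single demo imported from models. If per-model routing is intended, models.py would have to expose one handle per model, along the lines of the hypothetical sketch below (demos is an assumed name, not in the commit):

# Hypothetical routing -- `demos` is an assumed mapping from model name
# to a loaded Gradio demo; the committed code always calls demo.fn.
from models import demos

demo = demos[selected_model]
response = demo.fn(
    f"{system_message}\n\nUser: {prompt}\nAssistant:",
    max_new_tokens=max_tokens,
    temperature=temperature,
    top_p=top_p
)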