Pinkstack committed on
Commit
4bd3834
·
verified ·
1 Parent(s): 07b627d

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +131 -16
  2. requirements.txt +3 -1
app.py CHANGED
@@ -1,6 +1,38 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from typing import Iterator
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  client = InferenceClient("Pinkstack/Superthoughts-lite-v1")
6
 
@@ -11,9 +43,17 @@ def respond(
11
  max_tokens: int,
12
  temperature: float,
13
  top_p: float,
 
14
  ) -> Iterator[str]:
15
  messages = [{"role": "system", "content": system_message}]
16
 
 
 
 
 
 
 
 
17
  # Add history to messages
18
  for user_msg, assistant_msg in history:
19
  if user_msg:
@@ -21,8 +61,9 @@ def respond(
21
  if assistant_msg:
22
  messages.append({"role": "assistant", "content": assistant_msg})
23
 
24
- # Add current message
25
- messages.append({"role": "user", "content": message})
 
26
 
27
  # Initialize response
28
  response = ""
@@ -77,17 +118,65 @@ details summary:after {
77
  details[open] summary:after {
78
  content: " ▼";
79
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  """
81
 
 
82
  # Create Gradio interface
83
  with gr.Blocks(css=css) as demo:
84
  gr.Markdown("# Chat with Superthoughts lite! (1.7B)")
85
  gr.Markdown("**Warning:** The first output from the AI may take a few moments. After the first message, it should work at a decent speed, keep in mind that this chat is only meant for testing and experimenting.")
86
-
87
  chatbot = gr.Chatbot(height=600)
88
- msg = gr.Textbox(label="Your message", placeholder="Type your message here...")
89
-
 
 
 
 
 
 
 
 
90
  with gr.Accordion("Advanced Settings", open=False):
 
 
 
 
 
91
  system_message = gr.Textbox(
92
  value="You must act in a conversational matter and always include at the start <think> ... </think> <output> ... </output> tokens.",
93
  label="System message"
@@ -102,9 +191,9 @@ with gr.Blocks(css=css) as demo:
102
  temperature = gr.Slider(
103
  minimum=0.1,
104
  maximum=2.0,
105
- value=0.85,
106
  step=0.1,
107
- label="Temperature"
108
  )
109
  top_p = gr.Slider(
110
  minimum=0.1,
@@ -118,31 +207,58 @@ with gr.Blocks(css=css) as demo:
118
  """Add user message to history"""
119
  return "", history + [[user_message, None]]
120
 
121
- def bot(history: list, system_message: str, max_tokens: int, temperature: float, top_p: float) -> Iterator[list]:
 
 
 
 
 
 
 
122
  """Generate and stream bot responses"""
123
  user_message, _ = history[-1]
124
  history[-1][1] = "" # Initialize bot's response
125
-
126
- for partial_response in respond(user_message, history[:-1], system_message, max_tokens, temperature, top_p):
 
 
 
 
 
 
 
 
127
  history[-1][1] = partial_response
128
  yield history
129
 
130
- # Set up chat message handling
131
- msg.submit(
 
 
 
 
 
 
 
 
 
 
132
  user,
133
  [msg, chatbot],
134
  [msg, chatbot],
135
  queue=False
136
  ).then(
137
  bot,
138
- [chatbot, system_message, max_tokens, temperature, top_p],
139
  chatbot
140
  )
141
 
 
 
142
  # Add a clear button
143
  clear = gr.Button("Clear Conversation")
144
  clear.click(lambda: None, None, chatbot, queue=False)
145
-
146
  # Add disclaimer
147
  gr.Markdown(
148
  """
@@ -152,7 +268,6 @@ with gr.Blocks(css=css) as demo:
152
  """
153
  )
154
 
155
- # Launch the interface
156
  if __name__ == "__main__":
157
  demo.queue()
158
- demo.launch(share=True)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from typing import Iterator
4
+ import requests
5
+ from bs4 import BeautifulSoup
6
+ from urllib.parse import quote_plus
7
+
8
def search_web(query: str, num_results: int = 3) -> list[str]:
    """Search DuckDuckGo's HTML endpoint and return formatted result snippets.

    Args:
        query: Free-text search query.
        num_results: Maximum number of results to return (default 3).

    Returns:
        A list of "Title: ...\nExcerpt: ..." strings, one per result.
        On any failure (network error, HTTP error, parse error) returns a
        single-element list describing the error, so callers never raise.
    """
    try:
        # Encode the search query for safe inclusion in the URL.
        encoded_query = quote_plus(query)

        # Make request to DuckDuckGo's no-JS HTML endpoint.
        url = f"https://html.duckduckgo.com/html/?q={encoded_query}"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
        }
        # timeout is essential: without it a slow/unreachable host hangs the
        # Gradio worker indefinitely (the original call had no timeout).
        response = requests.get(url, headers=headers, timeout=10)
        # Surface HTTP errors (rate limiting, 5xx) instead of silently
        # parsing an error page into zero results.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract up to num_results (title, snippet) pairs from the results.
        results = []
        for result in soup.find_all('div', class_='result')[:num_results]:
            title = result.find('a', class_='result__a')
            snippet = result.find('a', class_='result__snippet')
            if title and snippet:
                results.append(
                    f"Title: {title.text.strip()}\nExcerpt: {snippet.text.strip()}\n"
                )

        return results
    except Exception as e:
        # Deliberate best-effort: search is an optional enhancement, so any
        # failure degrades to an explanatory message rather than crashing
        # the chat handler.
        return [f"Search error: {str(e)}"]
36
 
37
  client = InferenceClient("Pinkstack/Superthoughts-lite-v1")
38
 
 
43
  max_tokens: int,
44
  temperature: float,
45
  top_p: float,
46
+ enable_search: bool,
47
  ) -> Iterator[str]:
48
  messages = [{"role": "system", "content": system_message}]
49
 
50
+ # If search is enabled, get search results and add to context
51
+ search_context = ""
52
+ if enable_search:
53
+ search_results = search_web(message)
54
+ if search_results:
55
+ search_context = "Search results:\n" + "\n".join(search_results) + "\n\nBased on these results, "
56
+
57
  # Add history to messages
58
  for user_msg, assistant_msg in history:
59
  if user_msg:
 
61
  if assistant_msg:
62
  messages.append({"role": "assistant", "content": assistant_msg})
63
 
64
+ # Add current message with search context if enabled
65
+ full_message = search_context + message if search_context else message
66
+ messages.append({"role": "user", "content": full_message})
67
 
68
  # Initialize response
69
  response = ""
 
118
  details[open] summary:after {
119
  content: " ▼";
120
  }
121
+ /* ChatGPT-like UI */
122
+ .gradio-container {
123
+ font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
124
+ }
125
+ .chat-container {
126
+ max-width: 800px;
127
+ margin: auto;
128
+ }
129
+
130
+ .chat-message {
131
+ padding: 10px;
132
+ border-radius: 8px;
133
+ margin-bottom: 10px;
134
+ }
135
+
136
+ .user-message {
137
+ background-color: #f0f0f0;
138
+ text-align: right;
139
+ }
140
+
141
+ .bot-message {
142
+ background-color: #ffffff;
143
+ text-align: left;
144
+ }
145
+
146
+ .message-text {
147
+ white-space: pre-wrap;
148
+ }
149
+ .button-container {
150
+ display: flex;
151
+ justify-content: flex-end;
152
+ gap: 10px; /* Space between buttons */
153
+ margin-top: 5px;
154
+ }
155
  """
156
 
157
+
158
  # Create Gradio interface
159
  with gr.Blocks(css=css) as demo:
160
  gr.Markdown("# Chat with Superthoughts lite! (1.7B)")
161
  gr.Markdown("**Warning:** The first output from the AI may take a few moments. After the first message, it should work at a decent speed, keep in mind that this chat is only meant for testing and experimenting.")
162
+
163
  chatbot = gr.Chatbot(height=600)
164
+ with gr.Row():
165
+ msg = gr.Textbox(
166
+ label="Your message",
167
+ placeholder="Type your message here...",
168
+ scale=7,
169
+ container=False
170
+ )
171
+ submit_btn = gr.Button("Send", variant="primary", scale=1)
172
+ stop_btn = gr.Button("Stop", variant="stop", scale=1)
173
+
174
  with gr.Accordion("Advanced Settings", open=False):
175
+ enable_search = gr.Checkbox(
176
+ label="Enable web search [Beta]",
177
+ value=False,
178
+ info="When enabled, the AI will search the web for relevant information before responding, powered by duckduckgo."
179
+ )
180
  system_message = gr.Textbox(
181
  value="You must act in a conversational matter and always include at the start <think> ... </think> <output> ... </output> tokens.",
182
  label="System message"
 
191
  temperature = gr.Slider(
192
  minimum=0.1,
193
  maximum=2.0,
194
+ value=0.6,
195
  step=0.1,
196
+ label="Temperature/Creativeness"
197
  )
198
  top_p = gr.Slider(
199
  minimum=0.1,
 
207
  """Add user message to history"""
208
  return "", history + [[user_message, None]]
209
 
210
def bot(
    history: list,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    enable_search: bool
) -> Iterator[list]:
    """Stream the assistant's reply into the last history entry.

    Yields the full history list after each partial chunk so the Gradio
    Chatbot component re-renders the growing response.
    """
    latest_user_message = history[-1][0]
    # Start the assistant slot as an empty string so streaming appends
    # replace a placeholder rather than None.
    history[-1][1] = ""

    token_stream = respond(
        latest_user_message,
        history[:-1],
        system_message,
        max_tokens,
        temperature,
        top_p,
        enable_search
    )
    for partial in token_stream:
        history[-1][1] = partial
        yield history
233
 
234
# Wire both the Enter-key submit and the Send button through the same
# user -> bot pipeline so the two triggers behave identically.
chat_inputs = [chatbot, system_message, max_tokens, temperature, top_p, enable_search]

submit_event = msg.submit(
    user, [msg, chatbot], [msg, chatbot], queue=False
).then(
    bot, chat_inputs, chatbot
)

submit_click_event = submit_btn.click(
    user, [msg, chatbot], [msg, chatbot], queue=False
).then(
    bot, chat_inputs, chatbot
)

# Stop cancels any in-flight generation from either trigger.
stop_btn.click(None, [], [], cancels=[submit_event, submit_click_event])
257
+
258
# Clear button: resets the chat display (history) without touching any of
# the advanced settings.
clear = gr.Button("Clear Conversation")
clear.click(lambda: None, None, chatbot, queue=False)
261
+
262
  # Add disclaimer
263
  gr.Markdown(
264
  """
 
268
  """
269
  )
270
 
 
271
# Launch the interface.
if __name__ == "__main__":
    demo.queue()  # enable request queueing (needed for the streaming generator handlers)
    demo.launch()
requirements.txt CHANGED
@@ -1 +1,3 @@
1
- huggingface_hub==0.25.2
 
 
 
1
+ huggingface_hub==0.25.2
2
  + beautifulsoup4>=4.12.0
3
  + requests>=2.28.0
4
  + tiktoken