jsr90 committed
Commit 348ae5d · 1 Parent(s): 227ac6a

Update app.py


remove unnecessary elements

Files changed (1)
  1. app.py +14 -109
app.py CHANGED
@@ -1,23 +1,16 @@
-import datetime
 import os
 import re
-from io import StringIO
 
 import gradio as gr
-import pandas as pd
-from huggingface_hub import upload_file
 from text_generation import Client
 
 from dialogues import DialogueTemplate
-from share_btn import (community_icon_html, loading_icon_html, share_btn_css,
-                       share_js)
 
 model2endpoint = {
     "starchat-beta": os.environ.get("API_URL", None),
 }
 model_names = list(model2endpoint.keys())
 
-
 def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep):
     past = []
     for data in chatbot:
@@ -52,19 +45,19 @@ def has_no_history(chatbot, history):
 
 
 def generate(
-    model_name,
-    system_message,
     user_message,
     chatbot,
     history,
-    temperature,
-    top_k,
-    top_p,
-    max_new_tokens,
-    repetition_penalty,
 ):
+    system_message = "Below is a conversation between a human user and a helpful AI coding assistant."
+    temperature = 0.2
+    top_k = 50
+    top_p = 0.95
+    max_new_tokens = 1024
+    repetition_penalty = 1.2
+
     client = Client(
-        model2endpoint[model_name]
+        model2endpoint["starchat-beta"]
     )
     # Don't return meaningless message when the input is empty
     if not user_message:
@@ -163,14 +156,16 @@ def process_example(args):
     return [x, y]
 
 
-title = """<h1 align="center">⭐ StarChat Saturdays 💬</h1>"""
+title = """<h1 align="center">⭐ StarChat Saturdays 💬</h1>
+<h2 align="center">Asistente de IA para estudiantes de Inteligencia Artificial</h2>
+<h3 align="center">¡Tu privacidad es nuestra prioridad! Toda la información compartida en esta conversación se elimina automáticamente una vez que salgas del chat.</h3>
+"""
 custom_css = """
 #banner-image {
     display: block;
     margin-left: auto;
     margin-right: auto;
 }
-
 #chat-message {
     font-size: 14px;
     min-height: 300px;
@@ -180,15 +175,6 @@ custom_css = """
 with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     gr.HTML(title)
 
-    with gr.Row():
-        selected_model = gr.Radio(choices=model_names, value=model_names[0], label="Select a model")
-
-    with gr.Accordion(label="System Prompt", open=False, elem_id="parameters-accordion"):
-        system_message = gr.Textbox(
-            elem_id="system-message",
-            placeholder="Below is a conversation between a human user and a helpful AI coding assistant.",
-            show_label=False,
-        )
     with gr.Row():
         with gr.Box():
             output = gr.Markdown()
@@ -200,86 +186,16 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     with gr.Row():
         send_button = gr.Button("Send", elem_id="send-btn", visible=True)
 
-        # regenerate_button = gr.Button("Regenerate", elem_id="send-btn", visible=True)
-
-        clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True)
-
-    with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
-        temperature = gr.Slider(
-            label="Temperature",
-            value=0.2,
-            minimum=0.0,
-            maximum=1.0,
-            step=0.1,
-            interactive=True,
-            info="Higher values produce more diverse outputs",
-        )
-        top_k = gr.Slider(
-            label="Top-k",
-            value=50,
-            minimum=0.0,
-            maximum=100,
-            step=1,
-            interactive=True,
-            info="Sample from a shortlist of top-k tokens",
-        )
-        top_p = gr.Slider(
-            label="Top-p (nucleus sampling)",
-            value=0.95,
-            minimum=0.0,
-            maximum=1,
-            step=0.05,
-            interactive=True,
-            info="Higher values sample more low-probability tokens",
-        )
-        max_new_tokens = gr.Slider(
-            label="Max new tokens",
-            value=1024,
-            minimum=0,
-            maximum=2048,
-            step=4,
-            interactive=True,
-            info="The maximum numbers of new tokens",
-        )
-        repetition_penalty = gr.Slider(
-            label="Repetition Penalty",
-            value=1.2,
-            minimum=0.0,
-            maximum=10,
-            step=0.1,
-            interactive=True,
-            info="The parameter for repetition penalty. 1.0 means no penalty.",
-        )
-    # with gr.Group(elem_id="share-btn-container"):
-    #     community_icon = gr.HTML(community_icon_html, visible=True)
-    #     loading_icon = gr.HTML(loading_icon_html, visible=True)
-    #     share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
-    with gr.Row():
-        gr.Examples(
-            examples=examples,
-            inputs=[user_message],
-            cache_examples=False,
-            fn=process_example,
-            outputs=[output],
-        )
-
     history = gr.State([])
-    # To clear out "message" input textbox and use this to regenerate message
+
     last_user_message = gr.State("")
 
     user_message.submit(
        generate,
         inputs=[
-            selected_model,
-            system_message,
             user_message,
             chatbot,
             history,
-            temperature,
-            top_k,
-            top_p,
-            max_new_tokens,
-            repetition_penalty,
         ],
         outputs=[chatbot, history, last_user_message, user_message],
     )
@@ -287,22 +203,11 @@ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
     send_button.click(
         generate,
         inputs=[
-            selected_model,
-            system_message,
             user_message,
             chatbot,
             history,
-            temperature,
-            top_k,
-            top_p,
-            max_new_tokens,
-            repetition_penalty,
         ],
         outputs=[chatbot, history, last_user_message, user_message],
    )
 
-    clear_chat_button.click(clear_chat, outputs=[chatbot, history])
-    selected_model.change(clear_chat, outputs=[chatbot, history])
-    # share_button.click(None, [], [], _js=share_js)
-
-    demo.queue(concurrency_count=16).launch()
+demo.queue(concurrency_count=16).launch()
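After this change, generate() no longer receives the model choice, system prompt, or sampling settings from the UI; it fixes them in code and always queries the "starchat-beta" endpoint. The diff does not show the body of generate() past the Client construction, so the following is only a minimal sketch of how such hard-coded values are typically passed to the text_generation streaming client; the prompt string and the <|end|> stop sequence are illustrative assumptions (in the real app the prompt is built by DialogueTemplate), not something taken from this commit.

# Sketch only: hard-coded sampling parameters passed to a text_generation Client.
# The prompt format and stop sequence below are assumptions for illustration.
import os

from text_generation import Client

client = Client(os.environ.get("API_URL", None))

# StarChat-style chat prompt; the real app builds this with DialogueTemplate.
prompt = (
    "<|system|>\nBelow is a conversation between a human user and a helpful AI coding assistant.<|end|>\n"
    "<|user|>\nHow do I reverse a list in Python?<|end|>\n"
    "<|assistant|>\n"
)

text = ""
for response in client.generate_stream(
    prompt,
    do_sample=True,
    temperature=0.2,             # values fixed inside generate() by this commit
    top_k=50,
    top_p=0.95,
    max_new_tokens=1024,
    repetition_penalty=1.2,
    stop_sequences=["<|end|>"],  # assumed stop token
):
    # Skip special tokens and accumulate the streamed text.
    if not response.token.special:
        text += response.token.text

print(text)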