vicky4s4s committed on
Commit 187cef3 · verified · 1 Parent(s): da1bd03

Update app.py

Files changed (1)
  1. app.py +12 -50
app.py CHANGED
@@ -1,10 +1,7 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 
-client = InferenceClient(
-    "mistralai/Mistral-7B-Instruct-v0.1"
-)
-
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 def format_prompt(message, history):
     prompt = "<s>"
@@ -15,7 +12,7 @@ def format_prompt(message, history):
     return prompt
 
 def generate(
-    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+    prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -41,50 +38,15 @@ def generate(
         yield output
     return output
 
+
+mychatbot = gr.Chatbot(
+    avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
 
-additional_inputs=[
-    gr.Slider(
-        label="Temperature",
-        value=0.9,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),
-    gr.Slider(
-        label="Max new tokens",
-        value=256,
-        minimum=0,
-        maximum=1049,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.90,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1.2,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    )
-]
-
+demo = gr.ChatInterface(fn=generate,
+                        chatbot=mychatbot,
+                        title=" Mixtral 8x7b Chat",
+                        retry_btn=None,
+                        undo_btn=None
+                        )
 
-gr.ChatInterface(
-    fn=generate,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    additional_inputs=additional_inputs,
-    title="""Mistral 7B"""
-).launch(show_api=False)
+demo.queue().launch(show_api=False)
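
The only functional change outside the UI is the one-line client swap at the top of the file: the Space now points the serverless Inference API client at Mixtral-8x7B-Instruct-v0.1 instead of Mistral-7B-Instruct-v0.1. A quick smoke test of that line (the print call is hypothetical and not part of the commit):

    from huggingface_hub import InferenceClient

    # The committed one-liner: a serverless Inference API client for Mixtral.
    client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

    # Hypothetical check, not in the commit: request one short completion.
    print(client.text_generation("<s>[INST] Say hello. [/INST]", max_new_tokens=32))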
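
The hunk headers show that the body of format_prompt is unchanged and mostly hidden; only its first and last lines appear as context. In the stock Mistral/Mixtral chat Spaces this helper wraps the conversation in the model's [INST] ... [/INST] template, so the hidden lines conventionally look like the sketch below (an assumption; the diff does not show them):

    # Hypothetical reconstruction of the elided body.
    def format_prompt(message, history):
        prompt = "<s>"
        # Replay prior turns in the Mistral instruction format.
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
        # Append the new user message as the final instruction.
        prompt += f"[INST] {message} [/INST]"
        return prompt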
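
The middle of generate is likewise elided; the diff shows only that the default temperature drops from 0.9 to 0.2 (more deterministic replies) and that the function ends by yielding partial output. A minimal sketch of the streaming body these Spaces typically use, assuming client.text_generation with stream=True (a hypothetical reconstruction of the hidden lines, not the committed code):

    def generate(
        prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
    ):
        # Visible context lines: clamp temperature away from zero.
        temperature = float(temperature)
        if temperature < 1e-2:
            temperature = 1e-2
        top_p = float(top_p)

        generate_kwargs = dict(
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            do_sample=True,
        )

        formatted_prompt = format_prompt(prompt, history)

        # Stream tokens from the Inference API, yielding the growing string
        # so gr.ChatInterface can render the reply incrementally.
        stream = client.text_generation(
            formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
        )
        output = ""
        for response in stream:
            output += response.token.text
            yield output
        return output

Note that demo.queue().launch(show_api=False) at the bottom of the new file is what lets a generator handler like this stream: Gradio routes generator-based events through its queue. Also, the retry_btn/undo_btn parameters and the Chatbot's likeable flag belong to the Gradio 3.x/4.x ChatInterface API and are gone in Gradio 5, so the committed code assumes a pre-5 Gradio.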