Elijahbodden committed
Commit 88cc46f · verified · 1 parent: c53e6cf

Update app.py

Files changed (1): app.py (+23 −22)
app.py CHANGED
@@ -86,34 +86,35 @@ def respond(
         response += token
         yield response
 
+ci = gr.ChatInterface(
+    respond,
+    additional_inputs_accordion=gr.Accordion(label="Options", open=True),
+    additional_inputs=[
+        gr.Radio(presets.keys(), label="Personality preset", info="VERY SLIGHTLY influence the model's personality [WARNING: IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW]", value="Default"),
+        # ("The model will become slow" is because changing the preset uncaches the prompt, and prompt processing is a big part of the generation time)
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
+        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
+        gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'t repeat yourself"'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
+        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many tokens can the model generate at most?"),
+    ],
+)
+
 
 with gr.Blocks(css=".bubble-gap {gap: 6px !important}", theme="shivi/calm_seafoam") as demo:
     gr.Markdown("# EliGPT v1.3")
     gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)")
     with gr.Accordion("Q&A:", open=False):
-        gr.Markdown("""Q Why is the model so fucking slow
-A The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single a very slow cpu). You can duplicate the space to get your own (free) instance with no wait times.
-Q Why is the model so dumb
-A Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart
-Q Either it just made something up or I don't know you at all
-A Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt
+        gr.Markdown("""Q: Why is the model so fucking slow
+A: The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single, very slow CPU). You can duplicate the space to get your own (free) instance with no wait times.
+Q: Why is the model so dumb
+A: Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart.
+Q: Either it just made something up or I don't know you at all
+A: Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt.
     """)
-    gr.ChatInterface(
-        respond,
-        additional_inputs_accordion=gr.Accordion(label="Options", open=True),
-        # additional_inputs=[
-        #     gr.Radio(presets.keys(), label="Personality preset", info="VERY SLIGHTLY influence the model's personality [WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT THE MODEL WILL BECOME VERY SLOW]", value="Default"),
-        #     # ("The model will become slow" is bc this uncaches the prompt and prompt processing is a big part of the generation time)
-        #     gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
-        #     gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="How chaotic should the model be?"),
-        #     gr.Slider(minimum=0, maximum=512, value=5, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
-        #     gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
-        #     gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'repeat yourself"'),
-        #     gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
-        #     gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
-        # ],
-    )
-
+    ci.render()
 
 if __name__ == "__main__":
     demo.launch()
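
For context, the pattern this commit switches to: gr.ChatInterface is a gr.Blocks subclass, so it can be built once at module level and then spliced into a surrounding Blocks layout with .render() wherever it should appear. A minimal runnable sketch of that define-then-render pattern, with a stand-in echo generator in place of the Space's llama.cpp-backed respond:

import gradio as gr

# Stand-in for the real llama.cpp-backed generator; it streams the reply
# incrementally, mirroring the app's `response += token; yield response` loop.
def respond(message, history):
    response = ""
    for token in message.split():
        response += token + " "
        yield response

ci = gr.ChatInterface(respond)  # built outside any Blocks context

with gr.Blocks() as demo:
    gr.Markdown("# Header laid out above the chat")
    ci.render()  # splice the pre-built interface in at this point

if __name__ == "__main__":
    demo.launch()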
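The Min_p slider presumably maps to llama.cpp's min-p sampler (the parameter name matches, but that mapping is my assumption). As a rough illustration of why lower values mean more "personality": min-p keeps every token whose probability is at least min_p times the top token's probability, so a lower cutoff leaves more unlikely, characterful tokens in play. A toy version over a made-up distribution:

import math, random

def min_p_filter(logprobs, min_p=0.1):
    # Keep tokens with prob >= min_p * (top token's prob), then sample
    # from the surviving tokens in proportion to their probabilities.
    # `logprobs` is a hypothetical token -> log-probability mapping,
    # not this app's actual API.
    probs = {tok: math.exp(lp) for tok, lp in logprobs.items()}
    cutoff = min_p * max(probs.values())
    kept = {tok: p for tok, p in probs.items() if p >= cutoff}
    r = random.uniform(0, sum(kept.values()))
    for tok, p in kept.items():
        r -= p
        if r <= 0:
            return tok
    return next(iter(kept))

dist = {"yeah": math.log(0.5), "ok": math.log(0.3), "perchance": math.log(0.2)}
print(min_p_filter(dist, min_p=0.1))  # any of the three can appear
print(min_p_filter(dist, min_p=0.8))  # only "yeah" survives the cutoff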