Elijahbodden committed: Update app.py
app.py CHANGED
@@ -93,30 +93,30 @@ ci = gr.ChatInterface(
     respond,
     additional_inputs_accordion=gr.Accordion(label="Options", open=True),
     additional_inputs=[
-        gr.Radio(presets.keys(), label="Personality preset", info="Slightly influence the model's personality
-        #
+        gr.Radio(presets.keys(), label="Personality preset", info="Slightly influence the model's personality with the power of gaslighting TM", value="Default"),
+        # presets uncache the prompt, and prompt processing is a big part of the generation time. Do not switch presets in the middle of a long convo if you want a response this millennium
         gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Min_p", info="Lower values give it more \"personality\""),
-        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="
-        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info='
-        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='
-        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='
-        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='
-        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="
+        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature", info="Higher values are less coherent and more random"),
+        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info='Lower values make the model give shorter messages'),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.015, step=0.001, label="Length penalty decay factor", info='Higher values give less variance in max message length'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='Increase if the model repeats itself too much'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='Increase to make the model more creative with what words it uses'),
+        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="Cut off the model if its response is longer than this"),
     ],
 )
 
 
 with gr.Blocks(css=".bubble-gap {gap: 6px !important}", theme="shivi/calm_seafoam") as demo:
     gr.Markdown("# EliGPT v1.3")
-    gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)\
-    with gr.Accordion("Q&A:", open=False):
-
-
-
-
-
-
-
+    gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)\n(The model can be slow when multiple people are using it. Duplicate the space to get your own free, faster instance)")
+    # with gr.Accordion("Q&A:", open=False):
+    #     gr.Markdown("""Q: Why is the model so fucking slow
+    #     A: The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single, very slow CPU). You can duplicate the space to get your own (free) instance with no wait times.
+    #     Q: Why is the model so dumb
+    #     A: Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart
+    #     Q: Either it just made something up or I don't know you at all
+    #     A: Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt
+    #     """)
     ci.render()
 
 if __name__ == "__main__":
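For context on what this hunk wires up at runtime: gr.ChatInterface passes the live value of each component in additional_inputs, in order, as extra positional arguments to the fn after (message, history), so the Radio and the seven Sliders above map one-to-one onto the trailing parameters of respond. The sketch below illustrates that wiring, plus one plausible reading of the two length-penalty sliders as a geometrically growing end-of-sequence bias; the respond body and the eos_bias helper are hypothetical stand-ins, not the Space's actual implementation.

# Minimal sketch, assuming the argument order mirrors additional_inputs above.
# eos_bias is a hypothetical helper, not code from this Space.
def eos_bias(tokens_generated: int, start: int, decay: float) -> float:
    """No bias for the first `start` tokens of a reply, then a bias on the
    end-of-sequence logit that grows geometrically with `decay`, nudging
    long replies toward stopping sooner."""
    if tokens_generated < start:
        return 0.0
    return decay ** (tokens_generated - start) - 1.0

def respond(message, history, preset, min_p, temperature,
            length_penalty_start, length_penalty_decay,
            frequency_penalty, presence_penalty, max_new_tokens):
    # A real implementation would forward these settings to the llama.cpp
    # sampler and stream tokens back; this stub just echoes them.
    return (f"preset={preset}, min_p={min_p}, temperature={temperature}, "
            f"freq={frequency_penalty}, pres={presence_penalty}, "
            f"max_new_tokens={max_new_tokens}")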