Elijahbodden
committed on
Update app.py
app.py CHANGED
@@ -85,28 +85,24 @@ def respond(
 
         response += token
         yield response
-
-
-
-
-
-
-
-demo = gr.ChatInterface(
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# EliGPT v1.3")
+    gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)")
+    with gr.Accordion("Q&A:"):
+        gr.Markdown("""Q Why is the model so fucking slow
+A The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single very slow cpu). You can duplicate the space to get your own (free) instance with no wait times.
+Q Why is the model so dumb
+A Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart
+Q Either it just made something up or I don't know you at all
+A Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt
+""")
+    gr.ChatInterface(
     respond,
     additional_inputs_accordion=gr.Accordion(label="Options", open=True),
     css=".bubble-gap {gap: 6px !important}",
     theme="shivi/calm_seafoam",
-    description="""Llama 3 8b finetuned on 2.5k of my discord messages. [Train your own clone!](https://gist.github.com/Elijah-Bodden/1964bd02fcd19efef65f6e0cd92881c4)
-Q&A:
-Q Why is the model so fucking slow
-A The model might be slow if it hasn't run recently or a lot of people are using it (it's running on llama.cpp on a single very slow cpu). You can duplicate the space to get your own (free) instance with no wait times.
-Q Why is the model so dumb
-A Llama 3 8b is impressive, but it's still tiny. This model is basically what you'd get if you shoved my brain into a toddler's head - it's just too small to be smart
-Q Either it just made something up or I don't know you at all
-A Probably the former. It's prone to hallucinating facts and opinions I don't hold. Take everything it says with a big grain of salt
-""",
-    title="EliGPT v1.3",
     additional_inputs=[
         gr.Radio(presets.keys(), label="Personality preset", info="VERY SLIGHTLY influence the model's personality [WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT THE MODEL WILL BECOME VERY SLOW]", value="Default"),
         # ("The model will become slow" is bc this uncaches the prompt and prompt processing is a big part of the generation time)
@@ -118,7 +114,7 @@ demo = gr.ChatInterface(
         gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
         gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
     ],
-)
+    )
 
 
 if __name__ == "__main__":
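In short, the commit moves the title, description, and Q&A out of gr.ChatInterface's kwargs and into an explicit gr.Blocks layout. Below is a minimal runnable sketch of that pattern, assuming Gradio 4.x; the echo-style respond stub and the launch() call are illustrative stand-ins, not the Space's actual llama.cpp-backed generator:

```python
import gradio as gr

def respond(message, history):
    # Stand-in generator: the real Space streams tokens from a llama.cpp
    # model. This stub just echoes so the sketch runs on its own.
    yield f"You said: {message}"

with gr.Blocks() as demo:
    # The old title= and description= kwargs become plain Markdown components...
    gr.Markdown("# EliGPT v1.3")
    gr.Markdown("Llama 3 8b finetuned on 2.5k of my discord messages.")
    # ...and the Q&A copy moves into an accordion instead of crowding
    # the chat header.
    with gr.Accordion("Q&A:", open=False):
        gr.Markdown("Q Why is the model slow\n\nA See the Space's notes.")
    # The chat itself is now just one component inside the layout.
    gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()
```

The upshot is layout flexibility: anything gr.Blocks supports (accordions, extra Markdown, columns) can now sit around the chat, at the cost of slightly more boilerplate than the old description=/title= kwargs.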