Elijahbodden committed
Update app.py
app.py
CHANGED
@@ -122,7 +122,7 @@ def respond(
     messages.append({"role": "assistant", "content": response})
 
     # Yes we make a new file every completion because fuck my life
-    upload_json_to_hub(messages, uuid4())
+    upload_json_to_hub(messages, str(uuid4()) + ".json")
 
 
 demo = gr.ChatInterface(
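upload_json_to_hub is defined earlier in app.py and is not shown in this diff; the fix in the hunk above matters because the old call passed a raw UUID object where a filename string is expected. A minimal sketch of what the helper plausibly does, assuming huggingface_hub's HfApi.upload_file and an invented dataset repo id:

import json
from io import BytesIO
from huggingface_hub import HfApi

api = HfApi()

def upload_json_to_hub(messages, filename):
    # upload_file accepts a file-like object, so no temp file is needed.
    # repo_id below is a placeholder, not the Space's real logging repo.
    api.upload_file(
        path_or_fileobj=BytesIO(json.dumps(messages).encode("utf-8")),
        path_in_repo=filename,  # needs a str with an extension, hence str(uuid4()) + ".json"
        repo_id="Elijahbodden/eligpt-logs",
        repo_type="dataset",
    )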
@@ -131,58 +131,16 @@ demo = gr.ChatInterface(
     css=".bubble-gap {gap: 6px !important}",
     theme="shivi/calm_seafoam",
     description="The model may take a while if it hasn't run recently or a lot of people are using it",
-    title="EliGPT v1.
+    title="EliGPT v1.3",
     additional_inputs=[
         gr.Radio(presets.keys(), label="Preset", info="Gaslight the model into acting a certain way", value="none"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", info="How chaotic should the model be?"),
-        gr.Slider(
-            minimum=0.0,
-            maximum=10.0,
-            value=3.0,
-            step=0.5,
-            label="Mirostat tau",
-            info="Basically, how many drugs should the model be on?"
-        ),
-        gr.Slider(
-            minimum=0.0,
-            maximum=1.0,
-            value=0.1,
-            step=0.01,
-            label="Mirostat eta",
-            info="I don't even know man"
-        ),
-        gr.Slider(
-            minimum=0.0,
-            maximum=1.0,
-            value=0.1,
-            step=0.01,
-            label="Frequency penalty",
-            info='"Don\'repeat yourself"'
-        ),
-        gr.Slider(
-            minimum=0.0,
-            maximum=1.0,
-            value=0.0,
-            step=0.01,
-            label="Presence penalty",
-            info='"Use lots of diverse words"'
-        ),
-        gr.Slider(
-            minimum=0,
-            maximum=512,
-            value=10,
-            step=1,
-            label="Length penalty start",
-            info='When should the model start being more likely to shut up?'
-        ),
-        gr.Slider(
-            minimum=0.5,
-            maximum=1.5,
-            value=1.02,
-            step=0.01,
-            label="Length penalty decay factor",
-            info='How fast should the stop likelihood increase?'
-        ),
+        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau", info="Basically, how many drugs should the model be on?"),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta", info="I don't even know man"),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'repeat yourself"'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
+        gr.Slider(minimum=0, maximum=512, value=10, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.02, step=0.01, label="Length penalty decay factor", info='How fast should the stop likelihood increase?'),
         gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens", info="How many words can the model generate?"),
     ],
 )
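The two "Length penalty" sliders describe a custom stopping scheme whose implementation lives elsewhere in app.py. One plausible reading, offered purely as an assumption: nothing happens until "Length penalty start" tokens have been generated, after which the end-of-sequence token's relative likelihood is multiplied by the decay factor once per additional token.

import math

def eos_logit_bias(tokens_generated, start=10, decay_factor=1.02):
    # Additive logit bias: zero until `start` tokens, then growing so the
    # softmax weight of EOS is multiplied by decay_factor per extra token.
    if tokens_generated <= start:
        return 0.0
    return (tokens_generated - start) * math.log(decay_factor)

# With the defaults, 100 tokens past the start the EOS weight is
# scaled by 1.02 ** 100, roughly 7.2x.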
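For readers unfamiliar with how the collapsed sliders reach the handler: gr.ChatInterface passes each component in additional_inputs as an extra positional argument to fn, in order, after (message, history). A trimmed, runnable sketch with only two of the inputs; the stub respond function stands in for the Space's real streaming handler:

import gradio as gr

def respond(message, history, temperature, mirostat_tau):
    # The real app streams llama.cpp completions; this stub just echoes settings.
    return f"{message} (temperature={temperature}, tau={mirostat_tau})"

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau"),
    ],
)

if __name__ == "__main__":
    demo.launch()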