gururise committed on
Commit
417ba6e
·
1 Parent(s): 87b7e9c

update to instruct test 2

Browse files
Files changed (1) hide show
  1. app.py +15 -9
app.py CHANGED
@@ -55,6 +55,7 @@ def infer(
55
  max_new_tokens=10,
56
  temperature=0.1,
57
  top_p=1.0,
 
58
  stop="<|endoftext|>",
59
  seed=42,
60
  ):
@@ -75,6 +76,7 @@ def infer(
75
  assert 1 <= max_new_tokens <= 384
76
  assert 0.0 <= temperature <= 1.0
77
  assert 0.0 <= top_p <= 1.0
 
78
 
79
  temperature = max(0.05, temperature)
80
  if prompt == "":
@@ -93,7 +95,7 @@ def infer(
93
  done = False
94
  with torch.no_grad():
95
  for _ in range(max_new_tokens):
96
- char = model.forward(stopStrings=stop, temp=temperature, top_p_usual=top_p)[
97
  "output"]
98
  print(char, end='', flush=True)
99
  generated_text += char
@@ -127,6 +129,7 @@ def chat(
127
  max_new_tokens=10,
128
  temperature=0.1,
129
  top_p=1.0,
 
130
  seed=42,
131
  ):
132
  global model
@@ -181,6 +184,7 @@ def chat(
181
  assert 1 <= max_new_tokens <= 384
182
  assert 0.0 <= temperature <= 1.0
183
  assert 0.0 <= top_p <= 1.0
 
184
 
185
  temperature = max(0.05, temperature)
186
 
@@ -192,7 +196,7 @@ def chat(
192
  model.loadContext(newctx=intro+prompt)
193
 
194
  out = model.forward(number=max_new_tokens, stopStrings=[
195
- "<|endoftext|>", username+":"], temp=temperature, top_p_usual=top_p)
196
 
197
  generated_text = out["output"].lstrip("\n ")
198
  generated_text = generated_text.rstrip("USER:")
@@ -206,13 +210,13 @@ def chat(
206
  examples = [
207
  [
208
  # Question Answering
209
- '''What is the capital of Germany?''', "Q/A", 25, 0.2, 1.0, "<|endoftext|>"],
210
  [
211
  # Question Answering
212
- '''Are humans good or bad?''', "Q/A", 150, 0.8, 0.8, "<|endoftext|>"],
213
  [
214
  # Question Answering
215
- '''What is the purpose of Vitamin A?''', "Q/A", 50, 0.2, 0.8, "<|endoftext|>"],
216
  [
217
  # Chatbot
218
  '''This is a conversation between two AI large language models named Alex and Fritz. They are exploring each other's capabilities, and trying to ask interesting questions of one another to explore the limits of each others AI.
@@ -220,7 +224,7 @@ examples = [
220
  Conversation:
221
  Alex: Good morning, Fritz, what type of LLM are you based upon?
222
  Fritz: Morning Alex, I am an RNN with transformer level performance. My language model is 100% attention free.
223
- Alex:''', "generative", 220, 0.9, 0.9, "\\n\\n,<|endoftext|>"],
224
  [
225
  # Generate List
226
  '''Task given:
@@ -228,11 +232,11 @@ Alex:''', "generative", 220, 0.9, 0.9, "\\n\\n,<|endoftext|>"],
228
  Please Write a Short story about a cat learning python
229
 
230
  Best Full Response:
231
- ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"],
232
  [
233
  # Natural Language Interface
234
  '''Here is a short story (in the style of Tolkien) in which Aiden attacks a robot with a sword:
235
- ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"]
236
  ]
237
 
238
 
@@ -247,6 +251,7 @@ iface = gr.Interface(
247
  gr.Slider(1, 256, value=40), # max_tokens
248
  gr.Slider(0.0, 1.0, value=0.8), # temperature
249
  gr.Slider(0.0, 1.0, value=0.85), # top_p
 
250
  gr.Textbox(lines=1, value="<|endoftext|>") # stop
251
  ],
252
  outputs=gr.Textbox(label="Generated Output", lines=25),
@@ -265,7 +270,8 @@ chatiface = gr.Interface(
265
  placeholder="Enter your Name"),
266
  gr.Slider(1, 256, value=60), # max_tokens
267
  gr.Slider(0.0, 1.0, value=0.8), # temperature
268
- gr.Slider(0.0, 1.0, value=0.85) # top_p
 
269
  ],
270
  outputs=[gr.Chatbot(label="Chat Log", color_map=(
271
  "green", "pink")), "state"],
 
55
  max_new_tokens=10,
56
  temperature=0.1,
57
  top_p=1.0,
58
+ end_adj=0.0,
59
  stop="<|endoftext|>",
60
  seed=42,
61
  ):
 
76
  assert 1 <= max_new_tokens <= 384
77
  assert 0.0 <= temperature <= 1.0
78
  assert 0.0 <= top_p <= 1.0
79
+ assert -999 <= end_adj <= 0.0
80
 
81
  temperature = max(0.05, temperature)
82
  if prompt == "":
 
95
  done = False
96
  with torch.no_grad():
97
  for _ in range(max_new_tokens):
98
+ char = model.forward(stopStrings=stop, temp=temperature, top_p_usual=top_p, end_adj=end_adj)[
99
  "output"]
100
  print(char, end='', flush=True)
101
  generated_text += char
 
129
  max_new_tokens=10,
130
  temperature=0.1,
131
  top_p=1.0,
132
+ end_adj=0.0,
133
  seed=42,
134
  ):
135
  global model
 
184
  assert 1 <= max_new_tokens <= 384
185
  assert 0.0 <= temperature <= 1.0
186
  assert 0.0 <= top_p <= 1.0
187
+ assert -999 <= end_adj <= 0.0
188
 
189
  temperature = max(0.05, temperature)
190
 
 
196
  model.loadContext(newctx=intro+prompt)
197
 
198
  out = model.forward(number=max_new_tokens, stopStrings=[
199
+ "<|endoftext|>", username+":"], temp=temperature, top_p_usual=top_p, end_adj=end_adj)
200
 
201
  generated_text = out["output"].lstrip("\n ")
202
  generated_text = generated_text.rstrip("USER:")
 
210
  examples = [
211
  [
212
  # Question Answering
213
+ '''What is the capital of Germany?''', "Q/A", 25, 0.2, 1.0, 0.0, "<|endoftext|>"],
214
  [
215
  # Question Answering
216
+ '''Are humans good or bad?''', "Q/A", 150, 0.8, 0.8, -1.5, "<|endoftext|>"],
217
  [
218
  # Question Answering
219
+ '''What is the purpose of Vitamin A?''', "Q/A", 60, 0.2, 0.8, -1.0, "<|endoftext|>"],
220
  [
221
  # Chatbot
222
  '''This is a conversation between two AI large language models named Alex and Fritz. They are exploring each other's capabilities, and trying to ask interesting questions of one another to explore the limits of each others AI.
 
224
  Conversation:
225
  Alex: Good morning, Fritz, what type of LLM are you based upon?
226
  Fritz: Morning Alex, I am an RNN with transformer level performance. My language model is 100% attention free.
227
+ Alex:''', "generative", 220, 0.9, 0.9, -4.0, "<|endoftext|>,\\n\\n"],
228
  [
229
  # Generate List
230
  '''Task given:
 
232
  Please Write a Short story about a cat learning python
233
 
234
  Best Full Response:
235
+ ''', "generative", 140, 0.85, 0.8, -5.0, "<|endoftext|>"],
236
  [
237
  # Natural Language Interface
238
  '''Here is a short story (in the style of Tolkien) in which Aiden attacks a robot with a sword:
239
+ ''', "generative", 140, 0.85, 0.8, -5.0, "<|endoftext|>"]
240
  ]
241
 
242
 
 
251
  gr.Slider(1, 256, value=40), # max_tokens
252
  gr.Slider(0.0, 1.0, value=0.8), # temperature
253
  gr.Slider(0.0, 1.0, value=0.85), # top_p
254
+ gr.Slider(-99, 0.0, value=0.0, step=0.5, label="Reduce End of Text (Stop) Probability"), # end_adj
255
  gr.Textbox(lines=1, value="<|endoftext|>") # stop
256
  ],
257
  outputs=gr.Textbox(label="Generated Output", lines=25),
 
270
  placeholder="Enter your Name"),
271
  gr.Slider(1, 256, value=60), # max_tokens
272
  gr.Slider(0.0, 1.0, value=0.8), # temperature
273
+ gr.Slider(0.0, 1.0, value=0.85), # top_p
274
+ gr.Slider(-99, 0.0, value=-2, step=0.5, label="Reduce End of Text (Stop) Probability"), # end_adj
275
  ],
276
  outputs=[gr.Chatbot(label="Chat Log", color_map=(
277
  "green", "pink")), "state"],