Hjgugugjhuhjggg committed
Commit f7e7ec1 · verified · 1 parent: eeba3ac

Update app.py

Files changed (1):
  1. app.py +13 -9
app.py CHANGED
@@ -175,6 +175,7 @@ async def generate(request: GenerateRequest):
             repetition_penalty=repetition_penalty,
             do_sample=do_sample,
             num_return_sequences=num_return_sequences,
+            eos_token_id = tokenizer.eos_token_id
         )
         if stream:
             return StreamingResponse(
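
The only functional change in this hunk is the new eos_token_id keyword. As a hedged sketch of what it buys (assuming these keyword arguments ultimately reach transformers' GenerationConfig/generate; the "gpt2" model below is only a stand-in, not the repository's model), generation then stops as soon as the end-of-sequence token is produced instead of always running to the token limit:

# Illustrative only; not the repository's code.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, world", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=50,
    do_sample=True,
    repetition_penalty=1.1,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,  # stop as soon as EOS is sampled
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; avoids a warning
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))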
@@ -245,27 +246,29 @@ async def stream_text(model, tokenizer, input_text,
             outputs.sequences[0][len(encoded_input["input_ids"][0]):],
             skip_special_tokens=True
         )
+
+        if len(new_text) == 0:
+            if not stop_criteria(outputs.sequences, None):
+                for text in output_text.split():
+                    yield json.dumps({"text": text, "is_end": False}) + "\n"
+            yield json.dumps({"text": "", "is_end": True}) + "\n"
+            break
 
         output_text += new_text
-
+
         for text in new_text.split():
             yield json.dumps({"text": text, "is_end": False}) + "\n"
-
+
         if stop_criteria(outputs.sequences, None):
             yield json.dumps({"text": "", "is_end": True}) + "\n"
             break
-
-        if len(new_text) == 0:
-            for text in output_text.split():
-                yield json.dumps({"text": text, "is_end": False}) + "\n"
-            yield json.dumps({"text": "", "is_end": True}) + "\n"
-            break
-
+
         encoded_input = tokenizer(
             output_text, return_tensors="pt",
             truncation=True
         ).to(device)
         output_text = ""
+
 
 async def generate_text(model, tokenizer, input_text,
                         generation_config, stop_sequences,
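
A standalone sketch of the reordered control flow in this hunk (the names and inputs are simplified stand-ins for app.py's tokenizer output and stopping criteria, not the actual code): an empty decode now ends the stream up front, and the final flush is skipped when a stop criterion has already fired:

import asyncio
import json

async def stream_chunks(decoded_chunks, stop_criteria=lambda text: False):
    # decoded_chunks stands in for successive tokenizer.decode(...) results.
    output_text = ""
    for new_text in decoded_chunks:
        if len(new_text) == 0:
            # Nothing new was generated: flush whatever is still buffered
            # (unless a stop criterion already fired) and end the stream.
            if not stop_criteria(output_text):
                for text in output_text.split():
                    yield json.dumps({"text": text, "is_end": False}) + "\n"
            yield json.dumps({"text": "", "is_end": True}) + "\n"
            break

        output_text += new_text
        for text in new_text.split():
            yield json.dumps({"text": text, "is_end": False}) + "\n"

        if stop_criteria(output_text):
            yield json.dumps({"text": "", "is_end": True}) + "\n"
            break

        # In app.py the buffered text is re-encoded as the next prompt here;
        # this sketch only clears the buffer.
        output_text = ""

async def main():
    async for line in stream_chunks(["Hello world", ", streaming", ""]):
        print(line, end="")

asyncio.run(main())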
@@ -299,6 +302,7 @@ async def generate_text(model, tokenizer, input_text,
 
     return generated_text
 
+
 @app.post("/generate-image")
 async def generate_image(request: GenerateRequest):
     try:
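
For context on the streaming branch touched by the first hunk (return StreamingResponse(...), a minimal hedged sketch of the pattern, not app.py's actual handler; the path, payload, and generator below are made up for illustration:

import json
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

@app.post("/generate-demo")  # hypothetical path, for illustration only
async def generate_demo():
    async def token_stream():
        # Emit newline-delimited JSON chunks, then an end-of-stream marker.
        for word in ["hello", "world"]:
            yield json.dumps({"text": word, "is_end": False}) + "\n"
        yield json.dumps({"text": "", "is_end": True}) + "\n"
    return StreamingResponse(token_stream(), media_type="application/x-ndjson")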
 
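
And a hedged client-side sketch for consuming that stream; the /generate path and request fields are assumptions, only the {"text": ..., "is_end": ...} line format comes from the diff:

import json
import requests

resp = requests.post(
    "http://localhost:8000/generate",              # assumed endpoint
    json={"input_text": "Hello", "stream": True},  # assumed request schema
    stream=True,
)
for line in resp.iter_lines():
    if not line:
        continue
    chunk = json.loads(line)
    if chunk["is_end"]:
        break
    print(chunk["text"], end=" ")
print()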