1inkusFace committed on
Commit
be8af01
·
verified ·
1 Parent(s): 02e3b46

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -8
app.py CHANGED
@@ -245,13 +245,13 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
245
 
246
  def captioning(img):
247
  prompts_array = [
248
- "Adjectives describing this scene are:",
249
  "The color scheme of this image is",
250
- "This scene could be described in detail as",
251
- "The characters in this scene are",
252
  "The larger details in this scene include",
253
  "The smaller details in this scene include",
254
- "The feeling this scene seems like",
255
  "The setting of this scene must be located",
256
  # Add more prompts here
257
  ]
@@ -264,8 +264,8 @@ def captioning(img):
264
  **inputsa,
265
  do_sample=False,
266
  num_beams=5,
267
- max_length=128,
268
- min_length=1,
269
  top_p=0.9,
270
  repetition_penalty=1.5,
271
  length_penalty=1.0,
@@ -282,8 +282,8 @@ def captioning(img):
282
  **inputs,
283
  do_sample=False,
284
  num_beams=5,
285
- max_length=128,
286
- min_length=1,
287
  top_p=0.9,
288
  repetition_penalty=1.5,
289
  length_penalty=1.0,
@@ -293,6 +293,14 @@ def captioning(img):
293
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
294
  output_prompt.append(response_text)
295
  print(f"{response_text}\n") # Print only the response text
 
 
 
 
 
 
 
 
296
  print(output_prompt)
297
  return output_prompt
298
 
@@ -386,6 +394,7 @@ def generate_30(
386
  samples=1,
387
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
388
  ):
 
389
  seed = random.randint(0, MAX_SEED)
390
  generator = torch.Generator(device='cuda').manual_seed(seed)
391
  if latent_file is not None: # Check if a latent file is provided
@@ -511,6 +520,7 @@ def generate_60(
511
  samples=1,
512
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
513
  ):
 
514
  seed = random.randint(0, MAX_SEED)
515
  generator = torch.Generator(device='cuda').manual_seed(seed)
516
  if latent_file is not None: # Check if a latent file is provided
@@ -636,6 +646,7 @@ def generate_90(
636
  samples=1,
637
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
638
  ):
 
639
  seed = random.randint(0, MAX_SEED)
640
  generator = torch.Generator(device='cuda').manual_seed(seed)
641
  if latent_file is not None: # Check if a latent file is provided
 
245
 
246
  def captioning(img):
247
  prompts_array = [
248
+ # "Adjectives describing this scene are:",
249
  "The color scheme of this image is",
250
+ # "This scene could be described in detail as",
251
+ # "The characters in this scene are",
252
  "The larger details in this scene include",
253
  "The smaller details in this scene include",
254
+ # "The feeling this scene seems like",
255
  "The setting of this scene must be located",
256
  # Add more prompts here
257
  ]
 
264
  **inputsa,
265
  do_sample=False,
266
  num_beams=5,
267
+ max_length=96,
268
+ #min_length=1,
269
  top_p=0.9,
270
  repetition_penalty=1.5,
271
  length_penalty=1.0,
 
282
  **inputs,
283
  do_sample=False,
284
  num_beams=5,
285
+ max_length=96,
286
+ #min_length=1,
287
  top_p=0.9,
288
  repetition_penalty=1.5,
289
  length_penalty=1.0,
 
293
  response_text = generated_text.replace(prompt, "").strip() #Or could try .split(prompt, 1)[-1].strip()
294
  output_prompt.append(response_text)
295
  print(f"{response_text}\n") # Print only the response text
296
+
296
+ # Continue conversation: feed the first caption back in as context
297
+ continuation = generated_text + 'So therefore'
298
+ inputf = processor5(images=img, text=continuation, return_tensors="pt").to('cuda')
299
+ generated_ids = model5.generate(**inputf, max_length=96)
300
+ generated_text = processor5.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
301
+ # Strip the prompt that was fed in (replacing generated_text with itself always yielded "")
302
+ response_text = generated_text.replace(continuation, "").strip()
303
+ print(response_text)
304
+ output_prompt.append(response_text)
304
  print(output_prompt)
305
  return output_prompt
306
 
 
394
  samples=1,
395
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
396
  ):
397
+ image_paths=[]
398
  seed = random.randint(0, MAX_SEED)
399
  generator = torch.Generator(device='cuda').manual_seed(seed)
400
  if latent_file is not None: # Check if a latent file is provided
 
520
  samples=1,
521
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
522
  ):
523
+ image_paths=[]
524
  seed = random.randint(0, MAX_SEED)
525
  generator = torch.Generator(device='cuda').manual_seed(seed)
526
  if latent_file is not None: # Check if a latent file is provided
 
646
  samples=1,
647
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
648
  ):
649
+ image_paths=[]
650
  seed = random.randint(0, MAX_SEED)
651
  generator = torch.Generator(device='cuda').manual_seed(seed)
652
  if latent_file is not None: # Check if a latent file is provided