ford442 committed
Commit 56daa8c · verified · 1 Parent(s): 9e59bb0

Update app.py

Files changed (1)
  1. app.py +7 -22
app.py CHANGED
@@ -274,7 +274,7 @@ def generate_30(
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
     randomize_seed: bool = False,
-    latent_file = None, # Add latents file input
+    latent_file = gr.File(), # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = False
@@ -286,12 +286,7 @@ def generate_30(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
-        if not isinstance(latent_file, Image.Image):
-            image = Image.fromarray(latent_file)
-        image = image.resize((width, height)) # Example resize, adjust as needed
-        image = image.convert("RGB") # Ensure it's a 3-channel image
-        image = np.array(image)
-        sd_image_a = torch.from_numpy(image).permute(2, 0, 1) / 127.5 - 1.0 # Normalize
+        sd_image_a = Image.open(latent_file.name)
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.txt'
         print("-- using image file --")
@@ -332,7 +327,7 @@ def generate_60(
     guidance_scale: float = 4,
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
-    latent_file = None, # Add latents file input
+    latent_file = gr.File(), # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = True
@@ -344,12 +339,7 @@ def generate_60(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
-        if not isinstance(latent_file, Image.Image):
-            image = Image.fromarray(latent_file)
-        image = image.resize((width, height)) # Example resize, adjust as needed
-        image = image.convert("RGB") # Ensure it's a 3-channel image
-        image = np.array(image)
-        sd_image_a = torch.from_numpy(image).permute(2, 0, 1) / 127.5 - 1.0 # Normalize
+        sd_image_a = Image.open(latent_file.name)
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.txt'
         print("-- using image file --")
@@ -390,7 +380,7 @@ def generate_90(
     guidance_scale: float = 4,
     num_inference_steps: int = 250,
     randomize_seed: bool = False,
-    latent_file = None, # Add latents file input
+    latent_file = gr.File(), # Add latents file input
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = True
@@ -402,12 +392,7 @@ def generate_90(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
-        if not isinstance(latent_file, Image.Image):
-            image = Image.fromarray(latent_file)
-        image = image.resize((width, height)) # Example resize, adjust as needed
-        image = image.convert("RGB") # Ensure it's a 3-channel image
-        image = np.array(image)
-        sd_image_a = torch.from_numpy(image).permute(2, 0, 1) / 127.5 - 1.0 # Normalize
+        sd_image_a = Image.open(latent_file.name)
         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
         filename= f'rv_IP_{timestamp}.txt'
         print("-- using image file --")
@@ -480,7 +465,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         result = gr.Gallery(label="Result", columns=1, show_label=False)
 
         with gr.Row():
-            latent_file = gr.Image(label="Image Prompt (Required)", interactive=True)
+            latent_file = gr.File(label="Image Prompt (Required)")
             style_selection = gr.Radio(
                 show_label=True,
                 container=True,
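
Note on the new image-prompt path: gr.File hands the handler the uploaded file itself rather than a decoded pixel array, and the handler opens it with PIL via latent_file.name. A minimal, self-contained sketch of that flow follows. It assumes a Gradio version whose gr.File value exposes a .name path, which is what the committed code expects (newer Gradio releases may instead pass a plain filepath string); the Button wiring and the echo of the opened image back to the gallery are illustrative stand-ins for the app's real pipeline call.

import gradio as gr
from PIL import Image

def generate(prompt: str, latent_file=None):
    # gr.File passes a file-like value; .name is the uploaded file's path on disk.
    if latent_file is not None:
        sd_image_a = Image.open(latent_file.name)
        print("-- using image file --")
        # The real app feeds sd_image_a to its diffusion pipeline; this sketch
        # simply returns the opened image so the gallery shows something.
        return [sd_image_a]
    return []

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    latent_file = gr.File(label="Image Prompt (Required)")
    result = gr.Gallery(label="Result", columns=1, show_label=False)
    run_button = gr.Button("Run")
    run_button.click(fn=generate, inputs=[prompt, latent_file], outputs=result)

if __name__ == "__main__":
    demo.launch()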
 
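
For reference, the deleted lines had converted the old gr.Image value (a NumPy array) into a normalized CHW tensor before use. Reconstructed as a standalone helper, that preprocessing looks like the sketch below; the isinstance branch is tightened so a PIL input also works. It is only needed if the downstream pipeline expects a tensor in [-1, 1] rather than a PIL image, which diffusers-style img2img pipelines generally accept directly.

import numpy as np
import torch
from PIL import Image

def preprocess_image_prompt(image_input, width: int, height: int) -> torch.Tensor:
    # Accept either a PIL image or the NumPy array that gr.Image used to supply.
    if not isinstance(image_input, Image.Image):
        image_input = Image.fromarray(image_input)
    image = image_input.resize((width, height))  # example resize, adjust as needed
    image = image.convert("RGB")                 # ensure a 3-channel image
    image = np.array(image)
    # HWC uint8 -> CHW float in [-1, 1]
    return torch.from_numpy(image).permute(2, 0, 1) / 127.5 - 1.0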