guardiancc committed on
Commit
8129d6a
·
verified ·
1 Parent(s): e61a9de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -1423,7 +1423,7 @@ base_model = "black-forest-labs/FLUX.1-dev"
1423
  #TAEF1 is a very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. TAEF1 is useful for real-time previewing of the FLUX.1 generation process.#
1424
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
1425
  good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
1426
- pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
1427
  pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
1428
  vae=good_vae,
1429
  transformer=pipe.transformer,
@@ -1440,7 +1440,7 @@ controlnet_model = "InstantX/FLUX.1-dev-controlnet-canny"
1440
  controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=dtype)
1441
  pipe_canny = FluxControlNetPipeline.from_pretrained(
1442
  base_model, controlnet=controlnet, torch_dtype=dtype
1443
- ).to(device)
1444
 
1445
  MAX_SEED = 2**32-1
1446
 
@@ -1507,6 +1507,7 @@ def generate_canny(image, type="canny"):
1507
  @spaces.GPU(duration=100)
1508
  def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
1509
  generator = torch.Generator(device="cuda").manual_seed(seed)
 
1510
  with calculateDuration("Generating image"):
1511
  # Generate image
1512
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
@@ -1524,6 +1525,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
1524
 
1525
  def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
1526
  generator = torch.Generator(device="cuda").manual_seed(seed)
 
1527
  image_input = load_image(image_input_path)
1528
  final_image = pipe_i2i(
1529
  prompt=prompt_mash,
@@ -1541,8 +1543,9 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
1541
 
1542
  def generate_image_canny(prompt_mash, canny, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
1543
  control_image = load_image(canny)
 
1544
 
1545
- image = pipe(
1546
  prompt=prompt_mash,
1547
  control_image=control_image,
1548
  controlnet_conditioning_scale=0.6,
 
1423
  #TAEF1 is a very tiny autoencoder which uses the same "latent API" as FLUX.1's VAE. TAEF1 is useful for real-time previewing of the FLUX.1 generation process.#
1424
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
1425
  good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
1426
+ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1)
1427
  pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
1428
  vae=good_vae,
1429
  transformer=pipe.transformer,
 
1440
  controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=dtype)
1441
  pipe_canny = FluxControlNetPipeline.from_pretrained(
1442
  base_model, controlnet=controlnet, torch_dtype=dtype
1443
+ )
1444
 
1445
  MAX_SEED = 2**32-1
1446
 
 
1507
  @spaces.GPU(duration=100)
1508
  def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
1509
  generator = torch.Generator(device="cuda").manual_seed(seed)
1510
+ pipe.to('cuda')
1511
  with calculateDuration("Generating image"):
1512
  # Generate image
1513
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
 
1525
 
1526
  def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
1527
  generator = torch.Generator(device="cuda").manual_seed(seed)
1528
+ pipe_i2i.to('cuda')
1529
  image_input = load_image(image_input_path)
1530
  final_image = pipe_i2i(
1531
  prompt=prompt_mash,
 
1543
 
1544
  def generate_image_canny(prompt_mash, canny, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
1545
  control_image = load_image(canny)
1546
+ pipe_canny.to('cuda')
1547
 
1548
+ image = pipe_canny(
1549
  prompt=prompt_mash,
1550
  control_image=control_image,
1551
  controlnet_conditioning_scale=0.6,