Himanshu-AT committed on
Commit
b0b7bea
·
1 Parent(s): f9694e5

optimizations

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -16,6 +16,8 @@ MAX_IMAGE_SIZE = 2048
16
  pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
17
  pipe.load_lora_weights("alvdansen/flux-koda")
18
  pipe.enable_sequential_cpu_offload()
 
 
19
 
20
  def calculate_optimal_dimensions(image: Image.Image):
21
  # Extract the original dimensions
@@ -54,7 +56,7 @@ def calculate_optimal_dimensions(image: Image.Image):
54
 
55
  return width, height
56
 
57
- @spaces.GPU
58
  def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
59
  image = edit_images["background"]
60
  width, height = calculate_optimal_dimensions(image)
@@ -69,7 +71,7 @@ def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height
69
  width=width,
70
  guidance_scale=guidance_scale,
71
  num_inference_steps=num_inference_steps,
72
- generator=torch.Generator("cpu").manual_seed(seed),
73
  # lora_scale=0.75 // not supported in this version
74
  ).images[0]
75
 
 
16
  pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
17
  pipe.load_lora_weights("alvdansen/flux-koda")
18
  pipe.enable_sequential_cpu_offload()
19
+ pipe.enable_xformers_memory_efficient_attention()
20
+ pipe.enable_fp16()
21
 
22
  def calculate_optimal_dimensions(image: Image.Image):
23
  # Extract the original dimensions
 
56
 
57
  return width, height
58
 
59
+ @spaces.GPU(durations=300)
60
  def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
61
  image = edit_images["background"]
62
  width, height = calculate_optimal_dimensions(image)
 
71
  width=width,
72
  guidance_scale=guidance_scale,
73
  num_inference_steps=num_inference_steps,
74
+ generator=torch.Generator(device='cuda').manual_seed(seed),
75
  # lora_scale=0.75 // not supported in this version
76
  ).images[0]
77