Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -28,9 +28,7 @@ dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
 
-
-good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
 
 MAX_SEED = 2**32-1
 
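Note: this first hunk drops the two-VAE setup (a standalone good_vae built with AutoencoderKL, plus a taef1 VAE passed into the pipeline) and instead loads the pipeline with the VAE bundled in the checkpoint. A minimal sketch of the resulting loader, assuming only that torch and diffusers are installed and the FLUX.1-dev weights are reachable via the Hugging Face Hub:

import torch
from diffusers import DiffusionPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"

# The checkpoint's own VAE is used; no separate AutoencoderKL or taef1 VAE
# needs to be constructed or passed in.
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)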
@@ -88,10 +86,10 @@ def generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, pr
         width=width,
         height=height,
         generator=generator,
-        joint_attention_kwargs={"scale": lora_scale}
-
-
-
+        joint_attention_kwargs={"scale": lora_scale}
+    ).images[0]
+
+    progress(99, "Generate success!")
     return generate_image
 
 
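Note: this hunk is the substantive fix. The previous revision never closed the pipe(...) call after joint_attention_kwargs, so generate_image fell through to its return without producing anything; the new lines close the call with ).images[0] and emit a progress update. A hedged sketch of the repaired call site, continuing from the loader sketch above. Only the tail of the call is visible in the diff, so the prompt, num_inference_steps, and guidance_scale argument names and the example input values are assumptions based on the usual FluxPipeline signature, and the image variable is illustrative (the visible return generate_image suggests the file binds the result to that same name):

prompt, steps, cfg_scale = "a photo of an astronaut", 28, 3.5
width, height, lora_scale = 1024, 1024, 0.9
generator = torch.Generator(device=device).manual_seed(42)

image = pipe(
    prompt=prompt,                                 # assumed argument name
    num_inference_steps=steps,                     # assumed argument name
    guidance_scale=cfg_scale,                      # assumed argument name
    width=width,
    height=height,
    generator=generator,
    joint_attention_kwargs={"scale": lora_scale},  # scales any loaded LoRA weights
).images[0]                                        # the fix: close the call, take the image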
@@ -117,6 +115,8 @@ def run_lora(prompt, cfg_scale, steps, lora_repo, lora_name, randomize_seed, see
     else:
         result = {"status": "success", "message": "Image generated but not uploaded"}
 
+    progress(100, "Completed!")
+
     yield final_image, seed, gr.update(value=progress_bar, visible=False), json.dumps(result)
 
 
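Note: the last hunk adds a final progress update in run_lora before the results are yielded back to the Gradio UI. An illustrative sketch of how a gr.Progress callback is threaded through such a handler; only the progress(100, "Completed!") update comes from this commit, and everything else is assumed wiring. Gradio documents the float argument of gr.Progress as a 0-1 fraction, so the literal 99 and 100 in this file read as percentages rather than fractions:

import gradio as gr

# Illustrative handler shape, not the Space's actual run_lora signature.
def run_lora(prompt, progress=gr.Progress()):
    progress(0, desc="Starting")
    final_image = f"(image for {prompt!r})"  # stand-in for the real generation
    progress(100, desc="Completed!")         # the update added in this commit
    return final_image

demo = gr.Interface(fn=run_lora, inputs="text", outputs="text")
demo.launch()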