Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -34,7 +34,7 @@ pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef

 MAX_SEED = 2**32-1

-pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+# pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)

 class calculateDuration:
     def __init__(self, activity_name=""):
@@ -75,13 +75,13 @@ def upload_image_to_r2(image, account_id, access_key, secret_key, bucket_name):
     return image_file


-@spaces.GPU
+@spaces.GPU
 def generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
     with calculateDuration("Generating image"):
         # Generate image
-
+        generate_image = pipe(
             prompt=prompt,
             num_inference_steps=steps,
             guidance_scale=cfg_scale,
@@ -91,8 +91,8 @@ def generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, pr
             joint_attention_kwargs={"scale": lora_scale},
             output_type="pil",
             good_vae=good_vae,
-        )
-
+        )
+        return generate_image


 def run_lora(prompt, cfg_scale, steps, lora_repo, lora_name, randomize_seed, seed, width, height, lora_scale, upload_to_r2, account_id, access_key, secret_key, bucket, progress=gr.Progress(track_tqdm=True)):
@@ -106,21 +106,10 @@ def run_lora(prompt, cfg_scale, steps, lora_repo, lora_name, randomize_seed, see
     pipe.load_lora_weights(lora_repo, weight_name=lora_name)

     # Set random seed for reproducibility
-
-
-    seed = random.randint(0, MAX_SEED)
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)

-
-
-    # Consume the generator to get the final image
-    final_image = None
-    step_counter = 0
-    final_image = None
-    for image in image_generator:
-        step_counter+=1
-        final_image = image
-        progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
-        yield image, seed, gr.update(value=progress_bar, visible=True), json.dumps({"status": "processing"})
+    final_image = generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, progress)

     if upload_to_r2:
         url = upload_image_to_r2(final_image, account_id, access_key, secret_key, bucket)
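For readability, here is a consolidated sketch of how the two touched functions read after this commit. It is a best-effort reconstruction, not part of the diff: pipe, good_vae, MAX_SEED, calculateDuration and upload_image_to_r2 come from unchanged parts of app.py; the width, height and generator keyword arguments sit on unchanged lines that this diff view does not show, so they are assumptions; and the local variable is named result here only to avoid shadowing the generate_image function name.

    import random

    import gradio as gr
    import spaces
    import torch


    @spaces.GPU
    def generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, progress):
        # Move the pipeline onto the GPU granted by the @spaces.GPU decorator.
        pipe.to("cuda")
        generator = torch.Generator(device="cuda").manual_seed(seed)
        with calculateDuration("Generating image"):
            # One blocking call; the streaming helper
            # flux_pipe_call_that_returns_an_iterable_of_images is no longer attached.
            result = pipe(
                prompt=prompt,
                num_inference_steps=steps,
                guidance_scale=cfg_scale,
                width=width,            # assumed: on unchanged lines not shown in the diff
                height=height,          # assumed
                generator=generator,    # assumed
                joint_attention_kwargs={"scale": lora_scale},
                output_type="pil",
                good_vae=good_vae,      # forwarded exactly as in the commit
            )
        return result


    def run_lora(prompt, cfg_scale, steps, lora_repo, lora_name, randomize_seed, seed,
                 width, height, lora_scale, upload_to_r2, account_id, access_key,
                 secret_key, bucket, progress=gr.Progress(track_tqdm=True)):
        pipe.load_lora_weights(lora_repo, weight_name=lora_name)

        # Set random seed for reproducibility
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

        # A single call returns the finished image; the per-step preview loop is gone.
        final_image = generate_image(prompt, steps, seed, cfg_scale, width, height, lora_scale, progress)

        if upload_to_r2:
            url = upload_image_to_r2(final_image, account_id, access_key, secret_key, bucket)
        # (the rest of run_lora is unchanged and not shown in this diff)

The net design change is that run_lora no longer acts as a generator yielding intermediate previews and an HTML progress bar; it makes one blocking call and, if requested, uploads the result. Note that a stock diffusers pipeline called with output_type="pil" returns an output object whose .images list holds the PIL images, so what upload_image_to_r2 expects determines whether the raw return value or its first image should be passed on; the commit itself returns the raw call result.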