Update app.py
app.py CHANGED
@@ -278,7 +278,7 @@ def generate_30(
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    # generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -287,7 +287,7 @@ def generate_30(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        # "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": pyx.scheduler_swap_callback
     }
@@ -329,7 +329,7 @@ def generate_60(
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    # generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -338,7 +338,7 @@ def generate_60(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        # "generator": generator,
         "output_type": "pil",
         "callback_on_step_end": scheduler_swap_callback
     }
@@ -370,7 +370,7 @@ def generate_90(
     progress=gr.Progress(track_tqdm=True)  # Add progress as a keyword argument
 ):
     seed = random.randint(0, MAX_SEED)
-
+    # generator = torch.Generator(device='cuda').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -379,7 +379,7 @@ def generate_90(
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
-
+        # "generator": generator,
        "output_type": "pil",
        "callback_on_step_end": scheduler_swap_callback
    }
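For context, the commit adds commented-out scaffolding for a seeded `torch.Generator` in each of the three generation functions, so the pipelines keep drawing fresh noise on every call. The sketch below shows how those lines would behave if re-enabled. It is a minimal, hypothetical reconstruction: `pipe`, the SDXL checkpoint, and the body of `scheduler_swap_callback` are assumptions standing in for whatever app.py actually defines; only the `options` dict mirrors the diff.

```python
import random

import torch
from diffusers import StableDiffusionXLPipeline

MAX_SEED = 2**32 - 1  # assumed; app.py defines its own MAX_SEED

# Assumed pipeline; the Space's actual model is not shown in the diff.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")


def scheduler_swap_callback(pipeline, step, timestep, callback_kwargs):
    # diffusers invokes callback_on_step_end after every denoising step
    # with this signature; it must return the callback_kwargs dict.
    # The real callback presumably swaps schedulers here (body unknown).
    return callback_kwargs


def generate(prompt, negative_prompt, width, height,
             guidance_scale, num_inference_steps):
    seed = random.randint(0, MAX_SEED)
    # Re-enabling the commented lines would pin the noise to `seed`,
    # making runs reproducible; omitting the generator (as the commit
    # does) leaves each call non-deterministic.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    options = {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
        "callback_on_step_end": scheduler_swap_callback,
    }
    return pipe(**options).images
```

Keeping the generator lines commented is a deliberate trade-off: the functions still draw a random `seed`, but nothing consumes it, so every invocation of `generate_30`, `generate_60`, or `generate_90` samples fresh latents rather than a reproducible trajectory.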