Spaces:
Paused
Paused
guardiancc
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1541,6 +1541,28 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
|
|
1541 |
).images[0]
|
1542 |
return final_image
|
1543 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1544 |
@spaces.GPU(duration=100)
|
1545 |
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, useCanny, useSobel, progress=gr.Progress(track_tqdm=True)):
|
1546 |
if selected_index is None:
|
@@ -1581,13 +1603,15 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
|
|
1581 |
if(image_input is not None):
|
1582 |
if(useCanny):
|
1583 |
final_image = generate_canny(image_input, "canny")
|
1584 |
-
|
|
|
1585 |
elif(useSobel):
|
1586 |
final_image = generate_canny(image_input, "sobel")
|
1587 |
-
|
|
|
1588 |
else:
|
1589 |
final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
|
1590 |
-
yield final_image, seed, gr.update(visible=False)
|
1591 |
else:
|
1592 |
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
|
1593 |
|
@@ -1597,9 +1621,9 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
|
|
1597 |
step_counter+=1
|
1598 |
final_image = image
|
1599 |
progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
|
1600 |
-
yield image, seed, gr.update(value=progress_bar, visible=True)
|
1601 |
|
1602 |
-
yield final_image, seed, gr.update(value=progress_bar, visible=False)
|
1603 |
|
1604 |
def get_huggingface_safetensors(link):
|
1605 |
split_link = link.split("/")
|
@@ -1732,6 +1756,7 @@ with gr.Blocks(theme="prithivMLmods/Minecraft-Theme", css=css, delete_cache=(60,
|
|
1732 |
with gr.Column():
|
1733 |
progress_bar = gr.Markdown(elem_id="progress",visible=False)
|
1734 |
result = gr.Image(label="Generated Image")
|
|
|
1735 |
|
1736 |
with gr.Row():
|
1737 |
with gr.Accordion("Advanced Settings", open=False):
|
@@ -1772,7 +1797,7 @@ with gr.Blocks(theme="prithivMLmods/Minecraft-Theme", css=css, delete_cache=(60,
|
|
1772 |
triggers=[generate_button.click, prompt.submit],
|
1773 |
fn=run_lora,
|
1774 |
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, useCanny, useSobel],
|
1775 |
-
outputs=[result, seed, progress_bar]
|
1776 |
)
|
1777 |
|
1778 |
app.queue()
|
|
|
1541 |
).images[0]
|
1542 |
return final_image
|
1543 |
|
1544 |
+
def generate_image_canny(prompt_mash, canny, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
    """Generate an image conditioned on a canny/edge control image.

    Args:
        prompt_mash: Full text prompt (base prompt merged with the LoRA trigger word).
        canny: Path or image accepted by `load_image`, used as the ControlNet condition.
        image_strength: Denoising strength applied to the control image.
        steps: Number of inference steps.
        cfg_scale: Classifier-free guidance scale.
        width: Output image width in pixels.
        height: Output image height in pixels.
        lora_scale: Scale applied to the LoRA weights via joint_attention_kwargs.
        seed: RNG seed for reproducible generation.

    Returns:
        A single PIL image.
    """
    pipe_canny.to("cuda")
    # Seed a CUDA generator so `seed` actually controls the output
    # (original code referenced an undefined `generator` and ignored `seed`).
    generator = torch.Generator(device="cuda").manual_seed(seed)
    control_image = load_image(canny)

    # NOTE(review): original called `pipe(` here despite moving `pipe_canny`
    # to CUDA above — using the ControlNet pipeline, which appears intended.
    # Duplicate `num_inference_steps`/`guidance_scale` kwargs (a SyntaxError)
    # were removed in favor of the caller-supplied values.
    image = pipe_canny(
        prompt=prompt_mash,
        control_image=control_image,
        controlnet_conditioning_scale=0.6,
        strength=image_strength,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
        output_type="pil",
    ).images[0]

    return image
|
1565 |
+
|
1566 |
@spaces.GPU(duration=100)
|
1567 |
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, useCanny, useSobel, progress=gr.Progress(track_tqdm=True)):
|
1568 |
if selected_index is None:
|
|
|
1603 |
if(image_input is not None):
|
1604 |
if(useCanny):
|
1605 |
final_image = generate_canny(image_input, "canny")
|
1606 |
+
img = generate_image_canny(prompt_mash, final_image, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
|
1607 |
+
yield img, final_image, seed, gr.update(visible=False)
|
1608 |
elif(useSobel):
|
1609 |
final_image = generate_canny(image_input, "sobel")
|
1610 |
+
img = generate_image_canny(prompt_mash, final_image, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
|
1611 |
+
yield img, final_image, seed, gr.update(visible=False)
|
1612 |
else:
|
1613 |
final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
|
1614 |
+
yield final_image, final_image, seed, gr.update(visible=False)
|
1615 |
else:
|
1616 |
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
|
1617 |
|
|
|
1621 |
step_counter+=1
|
1622 |
final_image = image
|
1623 |
progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
|
1624 |
+
yield image, image, seed, gr.update(value=progress_bar, visible=True)
|
1625 |
|
1626 |
+
yield final_image,final_image, seed, gr.update(value=progress_bar, visible=False)
|
1627 |
|
1628 |
def get_huggingface_safetensors(link):
|
1629 |
split_link = link.split("/")
|
|
|
1756 |
with gr.Column():
|
1757 |
progress_bar = gr.Markdown(elem_id="progress",visible=False)
|
1758 |
result = gr.Image(label="Generated Image")
|
1759 |
+
canny = gr.Image(label="Generated Canny")
|
1760 |
|
1761 |
with gr.Row():
|
1762 |
with gr.Accordion("Advanced Settings", open=False):
|
|
|
1797 |
triggers=[generate_button.click, prompt.submit],
|
1798 |
fn=run_lora,
|
1799 |
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, useCanny, useSobel],
|
1800 |
+
outputs=[result, canny, seed, progress_bar]
|
1801 |
)
|
1802 |
|
1803 |
app.queue()
|