import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline, LCMScheduler
# Load InstructPix2Pix and replace its default scheduler with the LCM scheduler
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
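# The LCM scheduler enables few-step sampling (typically 2-8 steps) once the
# matching LCM-LoRA weights below are loaded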
# Load the LCM-LoRA adapter parameters into the pipeline
adapter_id = "latent-consistency/lcm-lora-sdv1-5"
pipe.load_lora_weights(adapter_id)
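
# On ZeroGPU Spaces, @spaces.GPU allocates a GPU for up to `duration` seconds per call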
@spaces.GPU(duration=30)
def infer(image, edit_instruction, guidance_scale, n_steps):
    # gr.Image passes the input as a numpy array; resize to the 512x512 working resolution
    image = Image.fromarray(image).resize((512, 512))
    image = pipe(
        prompt=edit_instruction,
        image=image,
        num_inference_steps=int(n_steps),  # ensure an integer step count for the scheduler
        guidance_scale=guidance_scale,
        image_guidance_scale=1.0,
    ).images[0]
    return image
css="""
#col-container {
margin: 0 auto;
max-width: 1024px;
}
"""
power_device = "GPU" if torch.cuda.is_available() else "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            f"""
            # ⚡ InstructPix2Pix with Consistency Distillation ⚡
            Currently running on {power_device}
            """
        )
        gr.Markdown(
            "If you enjoy the space, feel free to give a ⭐ to the <a href='https://github.com/yandex-research/invertible-cd' target='_blank'>GitHub repo</a>. [![GitHub Stars](https://img.shields.io/github/stars/quickjkee/instruct-pix2pix-distill?style=social)](https://github.com/quickjkee/instruct-pix2pix-distill)"
        )
        with gr.Row():
            edit_instruction = gr.Text(
                label="Edit instruction",
                max_lines=1,
                placeholder="Enter your prompt",
            )
        with gr.Row():
            with gr.Column():
                image = gr.Image(label="Input image", height=512, width=512, show_label=False)
            with gr.Column():
                result = gr.Image(label="Result", height=512, width=512, show_label=False)
        with gr.Accordion("Advanced Settings", open=True):
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1.0,
                    maximum=5.0,
                    step=1.0,
                    value=2.0,
                )
                n_steps = gr.Slider(
                    label="Inference steps",
                    minimum=1,
                    maximum=10,
                    step=1,
                    value=4,
                )
        with gr.Row():
            run_button = gr.Button("Edit", scale=0)
        with gr.Row():
            examples = [
                [
                    "examples/orig_3.jpg",       # input image
                    "turn apples into oranges",  # edit instruction
                    2,                           # guidance scale
                    4,                           # inference steps
                ],
                [
                    "examples/orig_1.jpg",
                    "Make it a Modigliani painting",
                    2,
                    4,
                ],
                [
                    "examples/orig_2.jpg",
                    "Turn a teddy bear into panda",
                    2,
                    4,
                ],
            ]
            gr.Examples(
                examples=examples,
                inputs=[image, edit_instruction, guidance_scale, n_steps],
                outputs=[result],
                fn=infer,
                cache_examples=True,
            )
    run_button.click(
        fn=infer,
        inputs=[image, edit_instruction, guidance_scale, n_steps],
        outputs=[result],
    )

demo.queue().launch()
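
# Local usage (a sketch, assuming a machine with a CUDA GPU):
#   pip install torch diffusers transformers accelerate peft gradio spaces
#   python app.py
# Outside Hugging Face Spaces, the @spaces.GPU decorator is a no-op.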