import gradio as gr
from PIL import Image
import torch
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionImg2ImgPipeline,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Anime-Chinese-v0.1"
pipe_text2img = StableDiffusionPipeline.from_pretrained(model_id)
model_path = "souljoy/sd-pokemon-model-lora-zh"
pipe_text2img.unet.load_attn_procs(model_path)
pipe_text2img.to(device)
pipe_text2img.safety_checker = lambda images, clip_input: (images, False)
pipe_img2img = StableDiffusionImg2ImgPipeline(**pipe_text2img.components).to(device)


def infer_text2img(prompt, guide, steps, width, height, image_in, strength):
    if image_in is not None:
        # A reference image was given: run img2img. The output size follows the
        # resized init image, so width/height are not passed to this pipeline.
        init_image = image_in.convert("RGB").resize((width, height))
        output = pipe_img2img(prompt, image=init_image, strength=strength,
                              guidance_scale=guide, num_inference_steps=steps)
    else:
        output = pipe_text2img(prompt, width=width, height=height,
                               guidance_scale=guide, num_inference_steps=steps)
    image = output.images[0]
    return image


with gr.Blocks() as demo:
    # Example prompts stay in Chinese because the base model expects Chinese text:
    # "pink butterfly, sprite, cartoon" / "cute dog, sprite, cartoon" / "pretty cat, sprite, cartoon"
    examples = [
        ["粉色的蝴蝶,小精灵,卡通"],
        ["可爱的狗,小精灵,卡通"],
        ["漂亮的猫,小精灵,卡通"],
    ]
    with gr.Row():
        with gr.Column(scale=1):
            image_out = gr.Image(label='输出(output)')
        with gr.Column(scale=1):
            image_in = gr.Image(source='upload', elem_id="image_upload", type="pil",
                                label="参考图(非必须)(ref)")
            prompt = gr.Textbox(label='提示词(prompt)')
            submit_btn = gr.Button("生成图像(Generate)")
            with gr.Row():
                guide = gr.Slider(2, 15, value=7, step=0.1, label='文本引导强度(guidance scale)')
                steps = gr.Slider(10, 30, value=20, step=1, label='迭代次数(inference steps)')
                width = gr.Slider(384, 640, value=512, step=64, label='宽度(width)')
                height = gr.Slider(384, 640, value=512, step=64, label='高度(height)')
                strength = gr.Slider(0, 1.0, value=0.8, step=0.02, label='参考图改变程度(strength)')
            # Clicking an example only fills the prompt box; the sliders keep their current values
            ex = gr.Examples(examples, fn=infer_text2img, inputs=[prompt], outputs=image_out)
    submit_btn.click(fn=infer_text2img, inputs=[prompt, guide, steps, width, height, image_in, strength], outputs=image_out)

demo.queue(concurrency_count=1, max_size=8).launch()
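
# Minimal usage sketch (an assumption, not part of the Space's UI): infer_text2img
# can also be called directly for a text-to-image run with the same defaults as the
# sliders above; "sample.png" is a hypothetical output path.
#
#   img = infer_text2img("粉色的蝴蝶,小精灵,卡通", guide=7, steps=20,
#                        width=512, height=512, image_in=None, strength=0.8)
#   img.save("sample.png")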