# scribble-sdxl / app.py
import random

import cv2  # used by nms() and the Canny branch; missing from the original imports
import gradio as gr
import numpy as np
import torch
from PIL import Image
from controlnet_aux import HEDdetector
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers import EulerAncestralDiscreteScheduler
from gradio_imageslider import ImageSlider
# Assumed to be the standard Spaces snippet that reloads the page with the dark
# theme forced via the __theme query parameter; the original body was truncated
# after building the URL.
js_func = """
function refresh() {
    const url = new URL(window.location);
    if (url.searchParams.get('__theme') !== 'dark') {
        url.searchParams.set('__theme', 'dark');
        window.location.href = url.href;
    }
}
"""
def nms(x, t, s):
    x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

    f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
    f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
    f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
    f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)

    y = np.zeros_like(x)
    for f in [f1, f2, f3, f4]:
        np.putmask(y, cv2.dilate(x, kernel=f) == x, x)

    z = np.zeros_like(y, dtype=np.uint8)
    z[y > t] = 255
    return z
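# Normalize any uint8 image to 3-channel HWC: grayscale is tiled to three channels
# and an alpha channel is composited over a white background.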
def HWC3(x):
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C == 1 or C == 3 or C == 4
    if C == 3:
        return x
    if C == 1:
        return np.concatenate([x, x, x], axis=2)
    if C == 4:
        color = x[:, :, 0:3].astype(np.float32)
        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
        y = color * alpha + 255.0 * (1.0 - alpha)
        y = y.clip(0, 255).astype(np.uint8)
        return y
DESCRIPTION = ""
if not torch.cuda.is_available():
    DESCRIPTION += "GPU not available. Using CPU."
style_list = [
    {
        "name": "(No style)",
        "prompt": "{prompt}",
        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
        "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
    },
    {
        "name": "3D Model",
        "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
    },
    {
        "name": "Anime",
        "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
        "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
    },
    {
        "name": "Digital Art",
        "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
        "negative_prompt": "photo, photorealistic, realism, ugly",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
    },
    {
        "name": "Pixel art",
        "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
        "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
    },
    {
        "name": "Fantasy art",
        "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
        "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
    },
    {
        "name": "Neonpunk",
        "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
    },
    {
        "name": "Manga",
        "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
    },
]
styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "(No style)"
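# Substitute the user's prompt into the chosen style template and prepend the
# style's negative prompt to the user's.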
def apply_style(style_name, positive, negative=""):
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + negative
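# Model setup: the scribble and Canny ControlNets share the SDXL base model,
# the Euler-a scheduler, and the fp16-safe VAE.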
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

eulera_scheduler = EulerAncestralDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler")
controlnet = ControlNetModel.from_pretrained("xinsir/controlnet-scribble-sdxl-1.0", torch_dtype=torch.float16)
controlnet_canny = ControlNetModel.from_pretrained("xinsir/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
    scheduler=eulera_scheduler,
)
pipe.to(device)
pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet_canny,
    vae=vae,
    torch_dtype=torch.float16,
    scheduler=eulera_scheduler,
)
pipe_canny.to(device)
MAX_SEED = np.iinfo(np.int32).max
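# HED edge detector (from lllyasviel's annotator weights), used to turn uploaded
# photos into scribble-like control images.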
processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
def randomize_seed_fn(seed, randomize_seed):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
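# Main generation handler: fit the editor's composite into 1024 px, build the
# control image (raw scribble, HED sketch, or Canny edges), then run the pipeline.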
def run(
    image: dict,
    prompt: str,
    negative_prompt: str,
    style_name: str = DEFAULT_STYLE_NAME,
    num_steps: int = 25,
    guidance_scale: float = 5.0,
    controlnet_conditioning_scale: float = 1.0,
    seed: int = 0,
    use_hed: bool = False,
    use_canny: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    # The editor's composite can be RGBA; downstream code expects RGB.
    composite_image = image['composite'].convert("RGB")
    width, height = composite_image.size

    # Fit within 1024 px on the long side; SDXL requires dimensions divisible by 8,
    # so round the scaled size down to the nearest multiple of 8.
    max_size = 1024
    ratio = min(max_size / width, max_size / height)
    new_width = int(width * ratio) // 8 * 8
    new_height = int(height * ratio) // 8 * 8
    resized_image = composite_image.resize((new_width, new_height), Image.LANCZOS)

    if use_canny:
        # Canny edges as the control image.
        controlnet_img = np.array(resized_image)
        controlnet_img = cv2.Canny(controlnet_img, 100, 200)
        controlnet_img = HWC3(controlnet_img)
        image = Image.fromarray(controlnet_img)
    elif not use_hed:
        # Use the sketch exactly as drawn.
        controlnet_img = resized_image
        image = resized_image
    else:
        # HED branch: detect edges, thin them with NMS, and binarize at a randomized
        # threshold so line weight varies slightly between runs.
        controlnet_img = processor(resized_image, scribble=False)
        controlnet_img = np.array(controlnet_img)
        controlnet_img = nms(controlnet_img, 127, 3)
        controlnet_img = cv2.GaussianBlur(controlnet_img, (0, 0), 3)
        random_val = int(round(random.uniform(0.01, 0.10), 2) * 255)
        controlnet_img[controlnet_img > random_val] = 255
        controlnet_img[controlnet_img < 255] = 0
        image = Image.fromarray(controlnet_img)

    prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
    generator = torch.Generator(device=device).manual_seed(seed)

    # The scribble and Canny pipelines take identical arguments; pick one.
    pipeline = pipe_canny if use_canny else pipe
    out = pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image,
        num_inference_steps=num_steps,
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
        width=new_width,
        height=new_height,
    ).images[0]

    # Return (control image, generated image) for the before/after slider.
    return (controlnet_img, out)
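# --- UI: sketch editor and controls on the left; an ImageSlider comparing the
# control image with the generated result on the right. ---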
with gr.Blocks(css="style.css", js=js_func) as demo:
    gr.Markdown(DESCRIPTION, elem_id="description")
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",  # assumed id; the original kwarg was garbled
    )
    with gr.Row():
        with gr.Column():
            with gr.Group():
                image = gr.ImageEditor(type="pil", label="Sketch your image or upload one", width=512, height=512)
                prompt = gr.Textbox(label="Prompt")
                style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
                use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it into a sketch")
                use_canny = gr.Checkbox(label="use Canny", value=False, info="check this to use ControlNet Canny instead of scribble")
                run_button = gr.Button("Run")
            with gr.Accordion("Advanced options", open=False):
                negative_prompt = gr.Textbox(
                    label="Negative prompt",
                    value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                )
                num_steps = gr.Slider(
                    label="Number of steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=25,
                )
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.1,
                    maximum=10.0,
                    step=0.1,
                    value=5,
                )
                controlnet_conditioning_scale = gr.Slider(
                    label="ControlNet conditioning scale",
                    minimum=0.5,
                    maximum=5.0,
                    step=0.1,
                    value=0.9,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Column():
            with gr.Group():
                image_slider = ImageSlider(position=0.5)
    inputs = [
        image,
        prompt,
        negative_prompt,
        style,
        num_steps,
        guidance_scale,
        controlnet_conditioning_scale,
        seed,
        use_hed,
        use_canny,
    ]
    outputs = [image_slider]
    run_button.click(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=lambda: None,  # clear the slider first; must take no args since inputs=None
        inputs=None,
        outputs=image_slider,
    ).then(
        fn=run,
        inputs=inputs,
        outputs=outputs,
    )

demo.queue().launch(share=True, show_error=True)