from diffusers import StableDiffusionPipeline
import gradio as gr
import torch
models = [
"nitrosocke/Arcane-Diffusion",
"nitrosocke/archer-diffusion",
"nitrosocke/elden-ring-diffusion",
"nitrosocke/spider-verse-diffusion"
]
prompt_prefixes = {
models[0]: "arcane style ",
models[1]: "archer style ",
models[2]: "elden ring style ",
models[3]: "spiderverse style "
}
current_model = models[0]
# Load the default pipeline in half precision and move it to the GPU when one is available.
pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
def on_model_change(model):
    """Reload the pipeline when a different model is selected in the dropdown."""
    global current_model
    global pipe
    if model != current_model:
        current_model = model
        pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")

def inference(prompt, guidance, steps):
    # Prepend the style token the selected model was fine-tuned on.
    prompt = prompt_prefixes[current_model] + prompt
    image = pipe(prompt, num_inference_steps=int(steps), guidance_scale=guidance, width=512, height=512).images[0]
    return image
with gr.Blocks() as demo:
    # Header and description
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
          <div
            style="
              display: inline-flex;
              align-items: center;
              gap: 0.8rem;
              font-size: 1.75rem;
            "
          >
            <h1 style="font-weight: 900; margin-bottom: 7px;">
              Finetuned Diffusion
            </h1>
          </div>
          <p style="margin-bottom: 10px; font-size: 94%">
            Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: Arcane, Archer, Elden Ring, Spiderverse.
          </p>
        </div>
        """
    )

    # Controls on the left, generated image on the right
    with gr.Row():
        with gr.Column():
            model = gr.Dropdown(label="Model", choices=models, value=models[0])
            prompt = gr.Textbox(label="Prompt", placeholder="{} is added automatically".format(prompt_prefixes[current_model]))
            guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
            steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2)
            run = gr.Button(value="Run")
            gr.Markdown(f"Running on: {device}")
        with gr.Column():
            image_out = gr.Image(height=512)

    # Event wiring
    model.change(on_model_change, inputs=model, outputs=[])
    run.click(inference, inputs=[prompt, guidance, steps], outputs=image_out)

    gr.Examples([
        ["jason bateman disassembling the demon core", 7.5, 50],
        ["portrait of dwayne johnson", 7.0, 75],
        ["portrait of a beautiful alyx vance half life", 7, 50],
        ["Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7, 50],
        ["fantasy portrait painting, digital art", 4, 30],
    ], [prompt, guidance, steps], image_out, inference, cache_examples=torch.cuda.is_available())

    gr.HTML('''
        <div>
          <p>Model by <a href="https://huggingface.co/nitrosocke" style="text-decoration: underline;" target="_blank">@nitrosocke</a> ❤️</p>
        </div>
        <div>Space by
          <a href="https://twitter.com/hahahahohohe">
            <img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social">
          </a>
        </div>
    ''')
demo.queue()
demo.launch()