import gradio as gr
import torch
from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
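# Hugging Face Hub repo with an OpenVINO-exported Stable Diffusion checkpoint.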
model_id = "hsuwill000/SpiritForeseerMix-openvino"
HEIGHT = 512
WIDTH = 512
batch_size = -1  # -1 keeps the batch dimension dynamic; set a positive integer to fix it
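# Load the pipeline uncompiled so it can first be reshaped to static input
# shapes; OpenVINO then compiles kernels optimized for the fixed resolution.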
pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    compile=False,  # defer compilation until after reshape()
    ov_config={"CACHE_DIR": ""},
    torch_dtype=torch.bfloat16,  # bfloat16 can speed up inference on CPUs that support it
    safety_checker=None,
    use_safetensors=False,
)
print(pipe.scheduler.compatibles)
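# The print above lists scheduler classes this pipeline accepts. As a hedged
# sketch (assuming EulerAncestralDiscreteScheduler appears in that list, as it
# does for standard Stable Diffusion pipelines), a different scheduler could
# be swapped in before compiling:
#
#   from diffusers import EulerAncestralDiscreteScheduler
#   pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)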
pipe.reshape(batch_size=batch_size, height=HEIGHT, width=WIDTH, num_images_per_prompt=1)
pipe.compile()
negative_prompt = "Easy Negative, worst quality, low quality, normal quality, lowres, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, 6 more fingers on one hand, deformity, bad legs, error legs, bad feet, malformed limbs, extra limbs, ugly, poorly drawn hands, poorly drawn feet, poorly drawn face, text, mutilated, extra fingers, mutated hands, mutation, bad anatomy, cloned face, disfigured, fused fingers"
def infer(prompt, negative_prompt=negative_prompt, num_inference_steps=30):
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HEIGHT,
        # guidance_scale <= 1.0 turns off classifier-free guidance, so the
        # negative prompt is effectively ignored at this setting
        guidance_scale=1.0,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
    ).images[0]
    return image
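# Quick smoke test outside the UI (hypothetical prompt; negative_prompt and
# step count fall back to the defaults above):
#
#   infer("a watercolor fox in a misty forest").save("sample.png")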
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
power_device = "CPU"
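# Minimal Gradio UI: a prompt box and Run button in one row, with the
# generated image displayed underneath.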
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # {model_id.split('/')[1]} {WIDTH}x{HEIGHT}
        Currently running on {power_device}.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=1)
        result = gr.Image(label="Result", show_label=False)
        run_button.click(
            fn=infer,
            inputs=[prompt],  # negative_prompt and steps fall back to infer's defaults
            outputs=[result],
        )
demo.queue().launch()