import gradio as gr
import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline

# Load the fine-tuned Stable Diffusion checkpoint on CPU.
device = "cpu"
model_id = "krishi/tartan2"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to(device)

# Reuse the same components for the img2img pipeline so the weights are only loaded once.
pipe2 = StableDiffusionImg2ImgPipeline(**pipe.components).to(device)
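# Note: everything runs on CPU here, so each generation can take several minutes.
# On a GPU machine one could instead load the checkpoint in half precision, e.g.:
#   pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")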
def generate_txt2img(prompt):
    # Text-to-image: run the full diffusion process from the prompt alone.
    return pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
def generate_img2img(img, prompt):
    # Image-to-image: condition generation on the uploaded image plus the prompt.
    image = Image.fromarray(img)
    image = image.resize((512, 512))
    return pipe2(prompt=prompt, image=image, strength=0.75, guidance_scale=7.5).images[0]
# Two-tab Gradio UI: one tab for text-to-image, one for image-to-image.
with gr.Blocks() as demo:
    with gr.Tab("Text2Image"):
        inp_txt = gr.Text(show_label=False, placeholder="Enter your prompt here...")
        btn = gr.Button("Generate")
        out_img = gr.Image()
        btn.click(fn=generate_txt2img, inputs=[inp_txt], outputs=[out_img])
    with gr.Tab("Image2Image"):
        inp_img = gr.Image()
        inp_txt2 = gr.Text(show_label=False, placeholder="Enter your prompt here...")
        btn2 = gr.Button("Generate")
        out_img2 = gr.Image()
        btn2.click(fn=generate_img2img, inputs=[inp_img, inp_txt2], outputs=[out_img2])
demo.launch()