import random

import gradio as gr

# Load the three hosted text-to-image Spaces as callable interfaces.
latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
rudalle = gr.Interface.load("spaces/multimodalart/rudalle")
guided = gr.Interface.load("spaces/EleutherAI/clip-guided-diffusion")
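
# Each loaded Space behaves like a plain Python function: it is called with the
# Space's inputs positionally and returns its outputs; the wrappers below take
# the first returned item ([0]) so one shared output image can serve all tabs.
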
def text2image_latent(text, steps, width, height, images, diversity):
    image = latent(text, steps, width, height, images, diversity)[0]
    return image


def text2image_rudalle(text, aspect, model):
    image = rudalle(text, aspect, model)[0]
    return image


def text2image_guided(text):
    # The remaining positional arguments follow the Space's input order; the
    # seed is drawn from the full 32-bit signed integer range.
    image = guided(text, None, 10, 600, 0, 0, 0, random.randint(0, 2147483647), None, 50, 32)[0]
    return image
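
# Build the UI with Blocks: a shared prompt textbox, one tab of controls per
# model, and a single output image that every "Generate Image" button renders into.
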
block = gr.Blocks()

with block:
    text = gr.inputs.Textbox(placeholder="Try writing something..")

    with gr.Tab("Latent Diffusion"):
        steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate", default=45, maximum=50, minimum=1, step=1)
        width = gr.inputs.Radio(label="Width", choices=[32, 64, 128, 256], default=256)
        height = gr.inputs.Radio(label="Height", choices=[32, 64, 128, 256], default=256)
        images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
        diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be", default=5.0, minimum=1.0, maximum=15.0)
        get_image_latent = gr.Button("Generate Image")

    with gr.Tab("ruDALLE"):
        aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"], default="Square")
        model = gr.inputs.Dropdown(label="Model", choices=["Surrealism", "Realism", "Emoji"], default="Surrealism")
        get_image_rudalle = gr.Button("Generate Image")

    with gr.Tab("Guided Diffusion"):
        get_image_guided = gr.Button("Generate Image")

    with gr.Column():
        with gr.Row():
            image = gr.outputs.Image()
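
    # Wire each tab's button to its wrapper function; the .click() handlers are
    # registered inside the `with block:` context, and all three write to the
    # same output image defined above.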
    get_image_latent.click(text2image_latent, inputs=[text, steps, width, height, images, diversity], outputs=image)
    get_image_rudalle.click(text2image_rudalle, inputs=[text, aspect, model], outputs=image)
    get_image_guided.click(text2image_guided, inputs=text, outputs=image)

block.launch()
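
# launch() starts the local Gradio server (http://127.0.0.1:7860 by default) and
# blocks while it runs; on Hugging Face Spaces this script is the app's entry point.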