# Gradio text-to-image demo.
# NOTE(review): this file was recovered from a scraped web page; the original
# page chrome ("Spaces: Running", file size, commit hash, line-number gutter)
# was removed here because it was not part of the program.
import gradio as gr
from huggingface_hub import InferenceClient
# Candidate text-to-image model repo ids on the Hugging Face Hub,
# offered to the user via the dropdown below.
# NOTE(review): availability of these repos on the hosted Inference API is
# not verified here — confirm each id before shipping.
models = [
    "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5",
    "hakurei/waifu-diffusion",
    "prompthero/openjourney",
    # Removed "jacek-huszar/$(IMG2TEXT_MODEL_NAME)": an unexpanded template
    # placeholder, not a valid Hub repo id.
    "cjwbw/canvae-text-to-image",
    "tdbooth/hollow-kingdom-ddim",
    "kuprel/min-dalle",
    "osanseviero/stable-diffusion-webui",
    "lambdal/text-to-image",
]
def generate_image(prompt, model_name):
    """Generate an image from *prompt* with the selected Hub model.

    Args:
        prompt: Text description of the desired image.
        model_name: Hugging Face Hub repo id of a text-to-image model.

    Returns:
        The generated ``PIL.Image.Image``. ``InferenceClient.text_to_image``
        already returns the decoded image, so it is returned directly —
        the previous ``response.image`` attribute access raised
        ``AttributeError`` on every call.
    """
    client = InferenceClient(model_name)
    # text_to_image returns a PIL image, not a response wrapper object.
    return client.text_to_image(prompt)
# Build the Gradio interface: model picker and prompt on the left,
# generated image on the right.
with gr.Blocks() as demo:
    gr.Markdown("## Text-to-Image Generation with Hugging Face Models")
    with gr.Row():
        with gr.Column():
            model_dropdown = gr.Dropdown(models, label="Select Model")
            prompt_input = gr.Textbox(label="Enter Text Prompt")
            generate_button = gr.Button("Generate Image")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
    # Wire the button: (prompt, model) -> generated image.
    generate_button.click(generate_image, inputs=[prompt_input, model_dropdown], outputs=output_image)

# Launch the interface. (Removed the stray trailing " |" scrape artifact
# that made this line a SyntaxError.)
demo.launch()