# Hugging Face Space: text-to-image demo (Gradio UI over the HF Inference API)
# Standard library
import base64
from io import BytesIO

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image
# Text-to-image model ids selectable in the UI dropdown.
# Each entry is passed verbatim to InferenceClient as the model name,
# so every id must exist on the Hugging Face Hub and support text_to_image.
models = [
    "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-2-1-base",
    "stabilityai/stable-diffusion-2-1",
    "CompVis/ldm-text2im-large-256",
    "lambdalabs/sd-text2img-base-2-0",
    "ZB-Tech/Text-to-Image",
    "cloudqi/cqi_text_to_image_pt_v0",
    "kothariyashhh/GenAi-Texttoimage",
    "sairajg/Text_To_Image",
]
def generate_image(prompt, model_name):
    """Generate an image for *prompt* with the selected Hub model.

    Args:
        prompt: Text description of the desired image.
        model_name: Hugging Face Hub model id, passed to InferenceClient.

    Returns:
        A PIL.Image.Image on success, or an error string that Gradio
        renders in place of the image on failure.
    """
    client = InferenceClient(model_name)
    try:
        response = client.text_to_image(prompt)
    except Exception as exc:  # surface API/network failures in the UI instead of crashing
        return f"Failed to generate image: {exc}"

    # Current InferenceClient.text_to_image returns a PIL image directly;
    # the original code missed this case and reported failure on success.
    if isinstance(response, Image.Image):
        return response

    # Fallback for legacy payloads: a list of dicts with base64-encoded data.
    if isinstance(response, list) and response:
        image_data = response[0]['image']
        return Image.open(BytesIO(base64.b64decode(image_data)))

    return "Failed to generate image."
# Assemble the Gradio UI: input controls in the left column,
# the generated image in the right column.
with gr.Blocks() as demo:
    gr.Markdown("## Text-to-Image Generation with Hugging Face Models")

    with gr.Row():
        with gr.Column():
            chosen_model = gr.Dropdown(models, label="Select Model")
            prompt_box = gr.Textbox(label="Enter Text Prompt")
            run_button = gr.Button("Generate Image")
        with gr.Column():
            result_image = gr.Image(label="Generated Image")

    # Wire the button: (prompt, model id) -> generate_image -> image output.
    run_button.click(
        generate_image,
        inputs=[prompt_box, chosen_model],
        outputs=result_image,
    )

# Start the local web server hosting the demo.
demo.launch()