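# Gradio demo: text-to-image with Stable Diffusion v1.4.
# The interface below is loaded from the Hugging Face Hub via gr.Interface.load;
# a local diffusers pipeline is kept commented out at the bottom of the file as
# an alternative.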
import gradio as gr

# Candidate Stable Diffusion checkpoints on the Hugging Face Hub; only the
# v1-4 checkpoint is actually loaded below.
models = ["CompVis/stable-diffusion-v1-4", "runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "stabilityai/stable-diffusion-2-1-base"]

title="Text to Image with Stable Diffusion 1.4"
description="Input text, submit and the computer will create a picture."
examples=[
["Once upon a time, Dr. Woo was walking his dog in a park. His dog was a poodle."],
["Zoe Kwan is a 20-year old singer and songwriter who has taken Hong Kong’s music scene by storm."],
["Zoe’s big break came when the godfather of Cantopop Sam Hui stumbled upon a YouTube video of Zoe singing."]
]

gr.Interface.load("huggingface/CompVis/stable-diffusion-v1-4", title=title, description=description, examples=examples, live=False, preprocess=True, postprocess=False).launch()
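
# A possible extension (sketch, not enabled): serve every checkpoint in the
# `models` list above instead of only the first one, one tab per model.
# gr.TabbedInterface is part of Gradio, but this snippet is untested here.
#demos = [gr.Interface.load("huggingface/" + m) for m in models]
#gr.TabbedInterface(demos, tab_names=models).launch()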


# Alternative (kept commented out): run Stable Diffusion locally on CPU with the
# diffusers pipeline instead of the hosted model above. Requires a Hugging Face
# token exposed through st.secrets["USER_TOKEN"].
#import torch
#import streamlit as st
#from diffusers import StableDiffusionPipeline
#
#device = "cpu"
#pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", use_auth_token=st.secrets["USER_TOKEN"])
#pipe = pipe.to(device)
#
#def generate_image(prompt, guidance, steps, seed):
#    # Seed a CPU generator and pass it to the pipeline so results are reproducible.
#    generator = torch.Generator(device).manual_seed(seed)
#    image = pipe(prompt, num_inference_steps=steps, guidance_scale=guidance, generator=generator).images[0]
#    return image
#
#iface = gr.Interface(fn=generate_image, inputs=[
#    gr.Textbox(label='Prompt Input Text'),
#    gr.Slider(2, 15, value=7, label='Guidance Scale'),
#    gr.Slider(10, 100, value=25, step=1, label='Number of Iterations'),
#    gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
#    ],
#    outputs='image')
#iface.launch()