Update app.py
app.py CHANGED
@@ -15,105 +15,31 @@ def get_prompts(prompt_text):
         return text_gen(prompt_text + " Dream")
     else:
         return text_gen("")
-proc1=gr.Interface.load("models/Yntec/DreamAnything")
-
-    if noise_level == 0:
-        noise_level = 0.00
-    percentage_noise = noise_level * 5
-    num_noise_chars = int(len(prompt) * (percentage_noise/100))
-    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
-    prompt_list = list(prompt)
-    noise_chars = list(string.ascii_letters + string.punctuation + '' + string.digits)
-    noise_chars.extend([''])
-    for index in noise_indices:
-        prompt_list[index] = random.choice(noise_chars)
-    return "".join(prompt_list)
-
-#normal behavior
-def add_random_noise(prompt, noise_level=0.00):
-    if noise_level == 0:
-        noise_level = 0.00
-    percentage_noise = noise_level * 5
-    num_noise_chars = int(len(prompt) * (percentage_noise/100))
-    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
-    prompt_list = list(prompt)
-    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
-    noise_chars.extend(['😍', 'beautiful', '😂', '🤔', '😊', '🤗', '😭', '🙄', 'pretty', '🤯', '🤫', '🥴', 'sitting', '🤩', '🥳', '😔', '😩', '🤪', '😇', 'retro', '😈', '👹', 'masterpiece', '🤖', '👽', 'high quality', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', 'visible', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', 'cute', 'kawaii', 'little'])
-    for index in noise_indices:
-        prompt_list[index] = random.choice(noise_chars)
-    return "".join(prompt_list)
-
-def send_it1(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = noadd_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    output1 = proc1(prompt_with_noise)
-    return output1
-
-def send_it2(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = add_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    output2 = proc1(prompt_with_noise)
-    return output2
-
-def send_itX(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = add_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    outputX = proc1(prompt_with_noise)
-    return outputX
-
-def send_it3(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = add_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    output3 = proc1(prompt_with_noise)
-    return output3
-
-def send_it4(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = add_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    output4 = proc1(prompt_with_noise)
-    return output4
-
-def send_it5(inputs, noise_level, proc1=proc1):
-    prompt_with_noise = add_random_noise(inputs, noise_level)
-    while queue.qsize() >= queue_threshold:
-        time.sleep(2)
-    queue.put(prompt_with_noise)
-    output5 = proc1(prompt_with_noise)
-    return output5
-
-#def send_it7(inputs, noise_level, proc1=proc1):
-#    prompt_with_noise = add_random_noise(inputs, noise_level)
-#    while queue.qsize() >= queue_threshold:
-#        time.sleep(2)
-#    queue.put(prompt_with_noise)
-#    output5 = proc1(prompt_with_noise)
-#    return output0
+models = [
+    "Yntec/OpenLexica",
+    "Yntec/MapleSyrup",
+]
+current_model = models[0]
+models2=[
+    gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False),
+    gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False),
+]
+
+def text_it1(inputs,text_gen1=text_gen1):
+    go_t1=text_gen1(inputs)
+    return(go_t1)
+
+def set_model(current_model):
+    current_model = models[current_model]
+    return gr.update(label=(f"{current_model}"))
+
+def send_it1(inputs, model_choice):
+    proc1=models2[model_choice]
+    output1=proc1(inputs)
+    return(output1)
+
+css=""""""

 with gr.Blocks(css='style.css') as demo:
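In place of the single hard-coded DreamAnything pipeline and the prompt-noise helpers, the new code keeps a small registry of hosted models that are loaded once and then selected by index at call time. Below is a minimal sketch of that pattern, assuming a Gradio 3.x environment where `gr.Interface.load` is still available; the model ids and keyword arguments come from the diff, while `loaded` and `generate` are illustrative stand-ins for the diff's `models2` and `send_it1`.

```python
import gradio as gr

# Hosted model endpoints mirrored from the diff. Each loaded interface acts as
# a callable that takes a prompt string and returns the generated image.
models = ["Yntec/OpenLexica", "Yntec/MapleSyrup"]
loaded = [gr.Interface.load(f"models/{m}", live=True, preprocess=False) for m in models]

def generate(prompt, model_index):
    # model_index is the integer position supplied by a type="index" Dropdown,
    # which is why the diff's send_it1 can index straight into models2.
    return loaded[model_index](prompt)
```

The diff's `set_model` does the same index lookup but returns only `gr.update(label=...)`, so switching the dropdown relabels the output image without triggering a new generation.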
@@ -145,73 +71,41 @@ with gr.Blocks(css='style.css') as demo:
         </div>
     """
     )
-    with gr.
-        with gr.
-
-            )
-
-            )
-
-            output5=gr.Image(label="DreamAnything",show_label=False,min_width=640)
-            outputX=gr.Image(label="DreamAnything",show_label=False,min_width=640)
-    #with gr.Row():
-        #with gr.Row():
-            #output0=gr.Image(label="DreamAnything",show_label=False,min_width=640)
-
-    see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
-    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
-    #run.click(send_it7, inputs=[prompt, noise_level], outputs=[output0])
-    run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
-    run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3])
-    run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4])
-    run.click(send_it5, inputs=[prompt, noise_level], outputs=[output5])
-    run.click(send_itX, inputs=[prompt, noise_level], outputs=[outputX])
-
-    with gr.Row():
-        gr.HTML(
-            """
-            <div class="footer">
-                <p> Demo for <a href="https://huggingface.co/Yntec/DreamAnything">DreamAnything</a> Stable Diffusion model
-                </p>
-            </div>
-            <div class="acknowledgments" style="font-size: 115%; color: #ffffff;">
-                <p> Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!
-                </p>
-            </div>
-            """
-        )
-
-demo.launch(enable_queue=True, inline=True)
-block.queue(concurrency_count=100)
+    with gr.Row():
+        with gr.Column(scale=100):
+            magic1=gr.Textbox(lines=4)
+            gr.HTML("""<style> .gr-button {
+                color: white !important;
+                border-color: #000000 !important;
+                background: #006699 !important;
+            }</style>""")
+            run=gr.Button("Generate Image")
+    with gr.Row():
+        with gr.Column(scale=100):
+            #Model selection dropdown
+            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
+    with gr.Row():
+        with gr.Column(style="width=800px"):
+            output1=gr.Image(label=(f"{current_model}"))
+
+    with gr.Row():
+        with gr.Column(scale=50):
+            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
+            use_short=gr.Button("Use Short Prompt")
+            see_prompts=gr.Button("Extend Idea")
+
+    def short_prompt(inputs):
+        return(inputs)
+
+    model_name1.change(set_model,inputs=model_name1,outputs=[output1])
+
+    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
+
+    use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
+
+    see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
+
+myface.queue(concurrency_count=200)
+myface.launch(inline=True, show_api=False, max_threads=400)
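The rebuilt UI wires everything through the dropdown: `type="index"` makes `model_name1` hand its integer position to `send_it1`, and `model_name1.change` calls `set_model` so the output image is relabeled with the chosen model. Note that the unchanged context line binds the Blocks to `demo`, while the added tail queues and launches `myface`; within the lines shown here, nothing defines `myface`. The sketch below assumes the two names are meant to be the same Blocks object and swaps in stub pieces (a text output and an echo handler) so the wiring can be run on its own; all identifiers are illustrative, not the Space's actual code.

```python
import gradio as gr

models = ["Yntec/OpenLexica", "Yntec/MapleSyrup"]

def generate(prompt, model_index):
    # Stand-in for send_it1: echoes its inputs instead of calling a model,
    # so the dropdown -> handler wiring can be exercised without downloads.
    return f"{models[model_index]}: {prompt}"

with gr.Blocks() as myface:
    magic1 = gr.Textbox(lines=4, label="Full Prompt")
    model_name1 = gr.Dropdown(choices=models, value=models[0], type="index",
                              label="Select Model", interactive=True)
    output1 = gr.Textbox(label=models[0])  # gr.Image in the actual Space
    run = gr.Button("Generate Image")

    # Relabel the output when the dropdown changes, as set_model does.
    model_name1.change(lambda i: gr.update(label=models[i]),
                       inputs=model_name1, outputs=[output1])
    run.click(generate, inputs=[magic1, model_name1], outputs=[output1])

# Gradio 3.x queue/launch arguments, matching the added lines.
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)
```

As committed, the `demo`/`myface` mismatch would have to be reconciled one way or the other (either `as myface` in the `with` statement, or `demo.queue(...)` and `demo.launch(...)` at the end) before the script can start.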