Upload 2 files
- app.py +23 -86
- externalmod.py +5 -4
app.py
CHANGED
@@ -15,10 +15,6 @@ inference_timeout = 300
 MAX_SEED = 2**32-1
 current_model = models[0]
 text_gen1 = extend_prompt
-#text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
-#text_gen1=gr.Interface.load("spaces/Yntec/prompt-extend")
-#text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
-#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
 
 models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
 
@@ -54,11 +50,17 @@ async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
-    except (Exception, asyncio.TimeoutError) as e:
+    except asyncio.TimeoutError as e:
         print(e)
         print(f"Task timed out: {models2[model_index]}")
         if not task.done(): task.cancel()
         result = None
+        raise Exception(f"Task timed out: {models2[model_index]}")
+    except Exception as e:
+        print(e)
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(e)
     if task.done() and result is not None:
         with lock:
             png_path = "image.png"
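This hunk splits the old catch-all except into a TimeoutError branch and a generic Exception branch, and both now re-raise, so the caller sees a failure instead of a silent None result. A minimal sketch of the same wait-cancel-reraise pattern (run_with_timeout is a hypothetical helper, not a name from the commit):

    import asyncio

    async def run_with_timeout(coro, timeout: float):
        # Wrap in a task so it can be cancelled explicitly.
        task = asyncio.create_task(coro)
        try:
            return await asyncio.wait_for(task, timeout=timeout)
        except asyncio.TimeoutError:
            if not task.done():
                task.cancel()  # wait_for normally cancels on timeout; guard anyway
            raise  # let the caller surface the failure instead of returning None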
@@ -76,6 +78,7 @@ def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None,
         print(e)
         print(f"Task aborted: {models2[model_index]}")
         result = None
+        raise gr.Error(f"Task aborted: {models2[model_index]}, Error: {e}")
     finally:
         loop.close()
     return result
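Raising gr.Error inside an event handler is the idiomatic way to surface a failure in the Gradio UI: the frontend displays the message as an error popup rather than failing silently. A small self-contained sketch of that mechanism (illustrative names, not from this app):

    import gradio as gr

    def generate(prompt: str):
        if not prompt.strip():
            # Gradio catches gr.Error and shows its message to the user.
            raise gr.Error("Please enter a prompt.")
        return prompt.upper()  # stand-in for the real generation call

    with gr.Blocks() as demo:
        box = gr.Textbox(label="Prompt")
        out = gr.Textbox(label="Result")
        gr.Button("Run").click(generate, box, out)

    demo.launch()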
@@ -90,93 +93,27 @@ h4 {display: inline-block; color: #ffffff !important;}
 .wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
 display: inline-block !important; color: #ffffff !important;}
 .wrapper {color: #ffffff !important;}
-.text-gray-500 {color: #ffc99f !important;}
 .gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
 border-top-color: #000000 !important; border-right-color: #ffffff !important;
 border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-input {color: #ffc99f; !important; background-color: #254150 !important;}
-:root {--neutral-100: #000000 !important;}
-.gr-button {color: #ffffff !important; text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
-background-image: linear-gradient(#76635a, #d2a489) !important; border-radius: 24px !important;
-border: solid 1px !important; border-top-color: #ffc99f !important; border-right-color: #000000 !important;
-border-bottom-color: #000000 !important; border-left-color: #ffc99f !important; padding: 6px 30px;}
-.gr-button:active {color: #ffc99f !important; font-size: 98% !important;
-text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important; background-image: linear-gradient(#d2a489, #76635a) !important;
-border-top-color: #000000 !important; border-right-color: #ffffff !important;
-border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-button:hover {filter: brightness(130%);}
 """
 
 with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
     gr.HTML(f"""
-        […opening markup and CSS rules not recovered from the page…]
-        color: #ffc99f; !important;
-        }}
-        h4 {{
-        display: inline-block;
-        color: #ffffff !important;
-        }}
-        .wrapper img {{
-        font-size: 98% !important;
-        white-space: nowrap !important;
-        text-align: center !important;
-        display: inline-block !important;
-        color: #ffffff !important;
-        }}
-        .wrapper {{
-        color: #ffffff !important;
-        }}
-        .gradio-container {{
-        background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
-        color: #ffaa66 !important;
-        font-family: 'IBM Plex Sans', sans-serif !important;
-        }}
-        .text-gray-500 {{
-        color: #ffc99f !important;
-        }}
-        .gr-box {{
-        background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
-        border-top-color: #000000 !important;
-        border-right-color: #ffffff !important;
-        border-bottom-color: #ffffff !important;
-        border-left-color: #000000 !important;
-        }}
-        .gr-input {{
-        color: #ffc99f; !important;
-        background-color: #254150 !important;
-        }}
-        :root {{
-        --neutral-100: #000000 !important;
-        }}
-        </style>
-        <body>
-        <div class="center"><h1>Blitz Diffusion</h1>
-        </div>
-        </body>
-        </div>
-        <p style="margin-bottom: 1px; color: #ffaa66;">
-        <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
-        <br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 25 new models since last update!</div>
-        <p style="margin-bottom: 1px; font-size: 98%">
-        <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
-        <p style="margin-bottom: 1px; color: #ffffff;">
-        <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
-        </p></p>
-        </div>
-    """, elem_classes="gr-box")
+        <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
+        <div class="center"><h1>Blitz Diffusion</h1></div>
+        <p style="margin-bottom: 1px; color: #ffaa66;">
+        <h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
+        <br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 25 new models since last update!</div>
+        <p style="margin-bottom: 1px; font-size: 98%">
+        <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
+        <p style="margin-bottom: 1px; color: #ffffff;">
+        <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
+        </p></p></div>
+    """, elem_classes="gr-box")
     with gr.Row():
         with gr.Column(scale=100):
-            #Model selection dropdown
+            # Model selection dropdown
            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index",
                value=current_model, interactive=True, elem_classes=["gr-box", "gr-input"])
     with gr.Row():
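The doubled braces throughout the removed block are f-string escapes, not a typo: inside gr.HTML(f"""...""") every literal CSS brace must be written {{ or }}, while single braces such as {int(len(models))} interpolate. A quick illustration:

    models = ["a", "b", "c"]
    # Inside an f-string, {{ and }} produce literal braces; {expr} interpolates.
    html = f"""
    <style>h1 {{ color: #ffc99f; }}</style>
    <h3>{len(models)} models loaded</h3>
    """
    print(html)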
@@ -192,7 +129,7 @@ with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
             steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, elem_classes=["gr-box", "gr-input"])
             cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, elem_classes=["gr-box", "gr-input"])
             seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
-            run = gr.Button("Generate Image", elem_classes="gr-button")
+            run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
 
     with gr.Row():
         with gr.Column():
@@ -202,8 +139,8 @@ with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
     with gr.Row():
         with gr.Column(scale=50):
             input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes=["gr-box", "gr-input"])
-            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", elem_classes="gr-button")
-            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", elem_classes="gr-button")
+            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
+            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
     def short_prompt(inputs):
         return (inputs)
 
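The variant="primary" added to the three buttons above is a built-in gr.Button option ("primary", "secondary", or "stop") that lets the theme style them, complementing the custom .gr-button CSS. For instance:

    import gradio as gr

    with gr.Blocks() as demo:
        # "primary" renders as the theme's highlighted call-to-action;
        # "secondary" is the default look, "stop" the red variant.
        gr.Button("Generate Image", variant="primary")
        gr.Button("Reset", variant="secondary")

    demo.launch()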
externalmod.py
CHANGED
@@ -33,6 +33,7 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600
 
 
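A small simplification is possible here: os.environ.get() already returns None for an unset variable, so the conditional only adds the mapping of an empty string to None, which `or None` expresses more compactly:

    import os

    # Equivalent to the diff's conditional: unset *or empty* -> None.
    HF_TOKEN = os.environ.get("HF_TOKEN") or None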
@@ -543,8 +544,8 @@ def list_uniq(l):
 
 
 def get_status(model_name: str):
-    from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)
 
 
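Note that AsyncInferenceClient.get_model_status is a coroutine, so get_status now returns an awaitable rather than a ModelStatus value; callers must await it (the synchronous InferenceClient returns the value directly). A sketch, assuming a huggingface_hub version that still provides get_model_status:

    import asyncio
    from huggingface_hub import AsyncInferenceClient

    async def main():
        client = AsyncInferenceClient(timeout=10)
        # With the async client, the call must be awaited.
        status = await client.get_model_status("stabilityai/stable-diffusion-2-1")
        print(status.state, status.loaded)

    asyncio.run(main())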
@@ -563,7 +564,7 @@ def is_loadable(model_name: str, force_gpu: bool = False):
 
 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
@@ -576,7 +577,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
             loadable = is_loadable(model.id, force_gpu) if check_status else True
             if not_tag and not_tag in model.tags or not loadable: continue
             models.append(model.id)
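Because `and` binds tighter than `or`, the new condition parses as (not model.private and not model.gated) or (HF_TOKEN is not None): public models always pass, and once a token is set every model passes, which matches the intent of using the token to reach private or gated models. A quick check of the precedence:

    # `and` binds tighter than `or`.
    private, gated, token = True, False, "hf_xxx"
    assert (not private and not gated or token is not None) == \
           ((not private and not gated) or (token is not None))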