Upload 4 files
- app.py +10 -17
- env.py +6 -0
- modutils.py +9 -5
- requirements.txt +1 -1
app.py
CHANGED
@@ -355,7 +355,7 @@ class GuiSD:
 
             print("Loading model...")
             self.model = Model_Diffusers(
-                base_model_id="
+                base_model_id="Lykon/dreamshaper-8",
                 task_name="txt2img",
                 vae_model=None,
                 type_model_precision=torch.float16,
@@ -363,16 +363,6 @@ class GuiSD:
                 device="cpu",
             )
 
-    def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
-        progress(0, desc="Start inference...")
-        images, image_list = model(**pipe_params)
-        progress(1, desc="Inference completed.")
-        if not isinstance(images, list): images = [images]
-        img = []
-        for image in images:
-            img.append((image, None))
-        return img
-
     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
 
         yield f"Loading model: {model_name}"
@@ -394,12 +384,13 @@ class GuiSD:
             model_name,
             task_name=task_stablepy[task],
             vae_model=vae_model if vae_model != "None" else None,
-            type_model_precision=torch.float16,
+            type_model_precision=torch.float16 if "flux" not in model_name.lower() else torch.bfloat16,
             retain_task_model_in_cache=False,
         )
         yield f"Model loaded: {model_name}"
 
     @spaces.GPU
+    @torch.inference_mode()
     def generate_pipeline(
         self,
         prompt,
@@ -539,7 +530,7 @@ class GuiSD:
         vae_model = None
 
         for la in loras_list:
-            if la is not None and la != "None" and la in lora_model_list:
+            if la is not None and la != "None" and la != "" and la in lora_model_list:
                 print(la)
                 lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
                 if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
@@ -569,9 +560,9 @@ class GuiSD:
            params_ip_mode.append(modeip)
            params_ip_scale.append(scaleip)
 
+        model_precision = torch.float16 if "flux" not in model_name.lower() else torch.bfloat16
+
        # First load
-        model_precision = torch.float16
-        self.model.device = torch.device("cuda:0")
        if not self.model:
            print("Loading model...")
            self.model = Model_Diffusers(
@@ -726,8 +717,10 @@ class GuiSD:
            "ip_adapter_scale": params_ip_scale,
        }
 
-
-
+        self.model.device = torch.device("cuda:0")
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
+            self.model.pipe.transformer.to(self.model.device)
+            print("transformer to cuda")
 
        #progress(0, desc="Preparation completed. Starting inference...")
 
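The substantive app.py changes are the FLUX-aware dtype switch and deferring the transformer's move to the GPU until inside the @spaces.GPU-decorated call. A minimal sketch of that pattern, assuming stablepy's Model_Diffusers exposes .pipe and .device as the diff uses them (the helper names select_precision and move_transformer_to_gpu are illustrative, not part of the commit):

import torch

def select_precision(model_name: str) -> torch.dtype:
    # Mirrors the diff's substring check: FLUX checkpoints run in bfloat16,
    # everything else stays in float16.
    return torch.bfloat16 if "flux" in model_name.lower() else torch.float16

def move_transformer_to_gpu(model, loras_list):
    # Only transformer-based pipelines (e.g. FLUX) have a .transformer module;
    # the move is skipped when all five LoRA slots are "None", as in the diff.
    model.device = torch.device("cuda:0")
    if hasattr(model.pipe, "transformer") and loras_list != ["None"] * 5:
        model.pipe.transformer.to(model.device)
        print("transformer to cuda")

Keeping the model on CPU at startup and performing this move inside the GPU-decorated function fits how ZeroGPU Spaces work: a GPU is attached only for the duration of an @spaces.GPU call.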
env.py
CHANGED
@@ -64,6 +64,12 @@ load_diffusers_format_model = [
     'Eugeoter/artiwaifu-diffusion-2.0',
     'Raelina/Rae-Diffusion-XL-V2',
     'Raelina/Raemu-XL-V4',
+    "camenduru/FLUX.1-dev-diffusers",
+    "black-forest-labs/FLUX.1-schnell",
+    "sayakpaul/FLUX.1-merged",
+    "ostris/OpenFLUX.1",
+    "multimodalart/FLUX.1-dev2pro-full",
+    "Raelina/Raemu-Flux",
 ]
 
 # List all Models for specified user
modutils.py
CHANGED
@@ -227,11 +227,16 @@ def get_model_id_list():
         model_ids.append(model.id) if not model.private else ""
     anime_models = []
     real_models = []
+    anime_models_flux = []
+    real_models_flux = []
     for model in models_ex:
-        if not model.private and not model.gated
-
+        if not model.private and not model.gated:
+            if "diffusers:FluxPipeline" in model.tags: anime_models_flux.append(model.id) if "anime" in model.tags else real_models_flux.append(model.id)
+            else: anime_models.append(model.id) if "anime" in model.tags else real_models.append(model.id)
     model_ids.extend(anime_models)
     model_ids.extend(real_models)
+    model_ids.extend(anime_models_flux)
+    model_ids.extend(real_models_flux)
     model_id_list = model_ids.copy()
     return model_ids
 
@@ -745,8 +750,7 @@ def move_file_lora(filepaths):
 
 
 def get_civitai_info(path):
-    global civitai_not_exists_list
-    global loras_url_to_path_dict
+    global civitai_not_exists_list, loras_url_to_path_dict
     import requests
     from requests.adapters import HTTPAdapter
     from urllib3.util import Retry
@@ -1242,7 +1246,7 @@ def get_model_pipeline(repo_id: str):
     try:
         if " " in repo_id or not api.repo_exists(repo_id): return default
         model = api.model_info(repo_id=repo_id)
-    except Exception
+    except Exception:
         return default
     if model.private or model.gated: return default
     tags = model.tags
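The rewritten branch in get_model_id_list packs each append into a one-line conditional expression; the sketch below spells out the same grouping and ordering (sort_model_ids is an illustrative name, not a function in the file; the tag strings come from the diff):

def sort_model_ids(models_ex):
    # Same ordering as the diff: non-FLUX models first (anime before real),
    # then FLUX models (anime before real); private and gated repos are skipped.
    anime, real, anime_flux, real_flux = [], [], [], []
    for model in models_ex:
        if model.private or model.gated:
            continue
        is_anime = "anime" in model.tags
        if "diffusers:FluxPipeline" in model.tags:
            (anime_flux if is_anime else real_flux).append(model.id)
        else:
            (anime if is_anime else real).append(model.id)
    return anime + real + anime_flux + real_flux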
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/R3gm/stablepy.git
+git+https://github.com/R3gm/stablepy.git@flux_beta
 torch==2.2.0
 gdown
 opencv-python