Update app.py
app.py CHANGED
@@ -268,7 +268,7 @@ if torch.cuda.is_available():
         pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
         print("Model RealVisXL_V5.0_Lightning Compiled!")
 
-    # Load
+    # Load second model (RealVisXL_V4.0)
     pipe2 = StableDiffusionXLPipeline.from_pretrained(
         "SG161222/RealVisXL_V4.0",
         torch_dtype=dtype,
@@ -286,8 +286,27 @@ if torch.cuda.is_available():
     if USE_TORCH_COMPILE:
         pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
         print("Model RealVisXL_V4.0 Compiled!")
+
+    # Load third model (Animagine XL 4.0)
+    pipe3 = StableDiffusionXLPipeline.from_pretrained(
+        "cagliostrolab/animagine-xl-4.0",
+        torch_dtype=dtype,
+        use_safetensors=True,
+        add_watermarker=False,
+    ).to(device)
+    pipe3.text_encoder = pipe3.text_encoder.half()
+
+    if ENABLE_CPU_OFFLOAD:
+        pipe3.enable_model_cpu_offload()
+    else:
+        pipe3.to(device)
+        print("Loaded Animagine XL 4.0 on Device!")
+
+    if USE_TORCH_COMPILE:
+        pipe3.unet = torch.compile(pipe3.unet, mode="reduce-overhead", fullgraph=True)
+        print("Model Animagine XL 4.0 Compiled!")
 else:
-    # On CPU load
+    # On CPU, load all models in float32
     pipe = StableDiffusionXLPipeline.from_pretrained(
         "SG161222/RealVisXL_V5.0_Lightning",
         torch_dtype=dtype,
@@ -300,14 +319,21 @@ else:
         use_safetensors=True,
         add_watermarker=False,
     ).to(device)
+    pipe3 = StableDiffusionXLPipeline.from_pretrained(
+        "cagliostrolab/animagine-xl-4.0",
+        torch_dtype=dtype,
+        use_safetensors=True,
+        add_watermarker=False,
+    ).to(device)
     print("Running on CPU; models loaded in float32.")
 
 # A dictionary to easily choose the model based on selection.
 DEFAULT_MODEL = "RealVisXL_V5.0_Lightning"
-MODEL_CHOICES = [DEFAULT_MODEL, "RealVisXL_V4.0"]
+MODEL_CHOICES = [DEFAULT_MODEL, "RealVisXL_V4.0", "Animagine XL 4.0"]
 models = {
     "RealVisXL_V5.0_Lightning": pipe,
-    "RealVisXL_V4.0": pipe2
+    "RealVisXL_V4.0": pipe2,
+    "Animagine XL 4.0": pipe3
 }
 
 def save_image(img, path):
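For context on how this commit is meant to be used: the `models` dictionary and `MODEL_CHOICES` list exist, per the in-code comment, so the app can pick a pipeline by the name selected in the UI. The snippet below is a minimal sketch of that lookup, assuming a `generate`-style function that receives the chosen model name and reuses the `models`, `DEFAULT_MODEL`, and `device` names defined earlier in app.py; the function name, its parameters, and the seed handling are illustrative assumptions and are not part of this diff.

import random
import torch

def generate(prompt: str,
             model_choice: str = DEFAULT_MODEL,
             seed: int = 0,
             randomize_seed: bool = False,
             num_inference_steps: int = 25,
             guidance_scale: float = 7.0):
    # Look up the pipeline by its display name; fall back to the default model.
    pipe_selected = models.get(model_choice, models[DEFAULT_MODEL])
    if randomize_seed:
        seed = random.randint(0, 2**32 - 1)
    generator = torch.Generator(device=device).manual_seed(seed)
    # Standard StableDiffusionXLPipeline call; returns PIL images.
    result = pipe_selected(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    )
    return result.images[0], seed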