Update app.py
app.py CHANGED
@@ -179,8 +179,11 @@ def load_and_prepare_model():
     #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1) #,use_karras_sigmas=True)
     pipe.vae = vaeXL #.to(torch.bfloat16)
     pipe.scheduler = sched
-
-
+
+    pipe.vae.do_resize = False
+    #pipe.vae.vae_scale_factor = 8
+    pipe.vae.do_convert_rgb = True
+
     pipe.vae.set_default_attn_processor()
     #pipe.to(device)
     #pipe.to(torch.bfloat16)

@@ -199,7 +202,7 @@ def load_and_prepare_model():

     #pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)

-    pipe.unet.to(memory_format=torch.channels_last)
+    #pipe.unet.to(memory_format=torch.channels_last)
     #pipe.enable_vae_tiling()
     #pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, mode='max-autotune') #.to(device=device, dtype=torch.bfloat16)
     #pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, mode='max-autotune-no-cudagraphs') #.to(device=device, dtype=torch.bfloat16)
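Taken together, the commit sets three VAE-related flags and disables the channels_last conversion of the UNet. Below is a minimal, self-contained sketch of the resulting setup, assuming the checkpoint is an SDXL pipeline, that vaeXL comes from the same repo's 'vae' subfolder, and that the surrounding code lives inside load_and_prepare_model(); those details are not visible in the hunks above, and the variable names are kept only for illustration.

# Minimal sketch of the configuration after this commit (assumptions noted above).
import torch  # only needed if the channels_last call below is re-enabled
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

repo = 'ford442/RealVisXL_V5.0_BF16'
vaeXL = AutoencoderKL.from_pretrained(repo, subfolder='vae')  # assumed source of vaeXL
sched = EulerAncestralDiscreteScheduler.from_pretrained(repo, subfolder='scheduler')
pipe = StableDiffusionXLPipeline.from_pretrained(repo)

pipe.vae = vaeXL
pipe.scheduler = sched

# New in this commit: flags assigned directly on the VAE object.
pipe.vae.do_resize = False
pipe.vae.do_convert_rgb = True
# pipe.vae.vae_scale_factor = 8   # left commented out, as in the diff

pipe.vae.set_default_attn_processor()

# Second hunk: the channels_last conversion is now disabled, so the UNet
# keeps PyTorch's default (contiguous) memory format.
# pipe.unet.to(memory_format=torch.channels_last)

channels_last is a PyTorch memory-format option mainly aimed at convolution-heavy models; with the line commented out, the UNet simply stays in the default contiguous layout.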