Update app.py
app.py CHANGED
@@ -141,7 +141,8 @@ def load_and_prepare_model(model_id):
         # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
         #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
     )
-    pipe.unet=UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='unet').to(torch.bfloat16)
+    #pipe.unet=UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='unet').to(torch.bfloat16)
+    #pipe.unet=UNet2DConditionModel.from_pretrained('SG161222/RealVisXL_V5.0',subfolder='unet').to(torch.bfloat16)
     #pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
     pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
     #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
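Context for the hunk above: the commit stops swapping in the RealVisXL FP64 UNet (both candidate UNet loads are now commented out), while the Juggernaut VAE swap stays. A minimal sketch of the resulting loading pattern, assuming the surrounding code in load_and_prepare_model builds a diffusers StableDiffusionXLPipeline named pipe (the real from_pretrained call, its arguments, and the model_id handling are outside the hunk):

    import torch
    from diffusers import StableDiffusionXLPipeline, AutoencoderKL

    def load_and_prepare_model(model_id):
        # Assumed pipeline construction; the actual call and its arguments are not shown in the diff.
        pipe = StableDiffusionXLPipeline.from_pretrained(model_id)
        # UNet: left at the pipeline default after this commit (the custom UNet loads are commented out).
        # VAE: swap in the Juggernaut VAE and cast it to bfloat16 before any move to CUDA.
        pipe.vae = AutoencoderKL.from_pretrained(
            'ford442/Juggernaut-XI-v11-fp32', subfolder='vae'
        ).to(torch.bfloat16)
        return pipe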
@@ -161,7 +162,7 @@ def load_and_prepare_model(model_id):
 
     #apply_hidiffusion(pipe)
 
-
+    pipe.unet.set_default_attn_processor()
     pipe.vae.set_default_attn_processor()
 
     print(f'Pipeline: ')
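The functional change in this second hunk is the added pipe.unet.set_default_attn_processor() call, mirroring the existing call on the VAE; in diffusers this method exists on both UNet2DConditionModel and AutoencoderKL and restores the stock attention processors. As a hedged aside (not part of the commit), one way to check what ends up installed is the attn_processors mapping the UNet exposes:

    # Reset both sub-models to the default attention processors, then inspect a few of them.
    pipe.unet.set_default_attn_processor()
    pipe.vae.set_default_attn_processor()
    for name, proc in list(pipe.unet.attn_processors.items())[:3]:
        print(name, type(proc).__name__)  # typically AttnProcessor2_0 on recent diffusers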
@@ -219,7 +220,7 @@ def uploadNote():
         f.write(f"Use Model Dtype: no \n")
         f.write(f"Model Scheduler: Euler_a custom before cuda \n")
         f.write(f"Model VAE: juggernaut to bfloat before cuda then attn_proc \n")
-        f.write(f"Model UNET: default \n")
+        f.write(f"Model UNET: default to bfloat before cuda then attn_proc \n")
         f.write(f"Model HiDiffusion OFF \n")
         f.write(f"Model do_resize OFF \n")
     upload_to_ftp(filename)
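In this third hunk only the UNET line of the run-notes file changes, so the uploaded note matches the pipeline changes above. A sketch of the note-writing pattern, with filename and upload_to_ftp() assumed to come from elsewhere in app.py:

    # Assumed usage; upload_to_ftp() and filename are defined elsewhere in app.py.
    with open(filename, "a") as f:
        f.write(f"Model VAE: juggernaut to bfloat before cuda then attn_proc \n")
        f.write(f"Model UNET: default to bfloat before cuda then attn_proc \n")
        f.write(f"Model HiDiffusion OFF \n")
        f.write(f"Model do_resize OFF \n")
    upload_to_ftp(filename)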