ford442 committed on
Commit
703c5e6
·
verified ·
1 Parent(s): e24b5ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -1
app.py CHANGED
@@ -143,12 +143,32 @@ def load_and_prepare_model(model_id):
143
  )
144
  #pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
145
  pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
147
  #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
148
  #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
149
  #pipe.vae = vaeX
150
  #pipe.unet = unetX
 
151
  pipe.vae.do_resize=False
 
 
 
152
  #pipe.scheduler = sched
153
  #pipe.vae=vae.to(torch.bfloat16)
154
  #pipe.unet=pipeX.unet
@@ -157,7 +177,6 @@ def load_and_prepare_model(model_id):
157
 
158
  pipe.to(device)
159
  pipe.to(torch.bfloat16)
160
-
161
  #apply_hidiffusion(pipe)
162
 
163
  #pipe.unet.set_default_attn_processor()
 
143
  )
144
  #pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
145
  pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
146
+
147
+ '''
148
+ scaling_factor (`float`, *optional*, defaults to 0.18215):
149
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
150
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
151
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
152
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
153
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
154
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
155
+ force_upcast (`bool`, *optional*, default to `True`):
156
+ If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
157
+ can be fine-tuned / trained to a lower range without losing too much precision in which case
158
+ `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
159
+
160
+ '''
161
+
162
  #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
163
  #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
164
  #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
165
  #pipe.vae = vaeX
166
  #pipe.unet = unetX
167
+
168
  pipe.vae.do_resize=False
169
+ #pipe.vae.do_rescale=False
170
+ #pipe.vae.do_convert_rgb=True
171
+
172
  #pipe.scheduler = sched
173
  #pipe.vae=vae.to(torch.bfloat16)
174
  #pipe.unet=pipeX.unet
 
177
 
178
  pipe.to(device)
179
  pipe.to(torch.bfloat16)
 
180
  #apply_hidiffusion(pipe)
181
 
182
  #pipe.unet.set_default_attn_processor()