Spaces: Running on Zero
Update app.py

app.py CHANGED
@@ -198,11 +198,11 @@ def load_and_prepare_model():
 
     #pipe.unet.to(memory_format=torch.channels_last)
     #pipe.enable_vae_tiling()
-    pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, mode='max-autotune')
+    pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, mode='max-autotune') #.to(device=device, dtype=torch.bfloat16)
     #pipe.unet = torch.compile(pipe.unet, backend="hidet")
     #pipe.unet = torch.compile(pipe.unet, backend="cudagraphs", dynamic=False)
     #pipe.unet = torch.compile(pipe.unet, backend="torch_tensorrt", dynamic=False, options={"precision": torch.bfloat16,"optimization_level": 4,})
-
+    pipe.to(device=device, dtype=torch.bfloat16)
 
     return pipe
 
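In effect, the commit keeps the hidet-compiled UNet and adds an explicit pipe.to(device=device, dtype=torch.bfloat16) before the return, so the whole pipeline is cast to bfloat16 on the target device rather than the UNet alone (the cast appended to the compile line stays commented out). Below is a minimal, self-contained sketch of that pattern, not the Space's actual app.py: the model id, the from_pretrained call, and the device variable are placeholders, and the "hidet" backend assumes the hidet package is installed.

import torch
from diffusers import DiffusionPipeline

# Placeholder device setup; app.py defines its own `device` elsewhere.
device = "cuda" if torch.cuda.is_available() else "cpu"

def load_and_prepare_model():
    # Placeholder checkpoint; the Space loads its own model elsewhere in app.py.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16
    )

    # Compile only the UNet, mirroring the commit's call. The hidet backend
    # must be available (pip install hidet); compilation happens lazily on the
    # first forward pass.
    pipe.unet = torch.compile(
        pipe.unet, backend="hidet", dynamic=False, mode="max-autotune"
    )

    # New in this commit: move the entire pipeline (UNet, VAE, text encoders)
    # to the target device in bfloat16 instead of leaving components behind.
    pipe.to(device=device, dtype=torch.bfloat16)

    return pipe

Note that mode="max-autotune" is simply forwarded to the hidet backend here, as in the commit; the plain backend="hidet" variant left commented out in the diff is the simpler fallback if that combination is not accepted. Calling pipe.to(...) after torch.compile should be fine, since the compiled UNet is still an nn.Module wrapper around the original weights; the main visible effect is a slower first generation while the backend tunes its kernels.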