ford442 committed (verified)
Commit 7df282f · Parent(s): 8871dfa

Update app.py

Files changed (1):
  1. app.py +5 -5
app.py CHANGED

@@ -202,7 +202,7 @@ def load_and_prepare_model():
 #pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, mode='max-autotune-no-cudagraphs') #.to(device=device, dtype=torch.bfloat16)
 #pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, options={'epilogue_fusion': True, 'shape_padding': True}) #.to(device=device, dtype=torch.bfloat16)
 #pipe.unet = torch.compile(pipe.unet, backend="hidet")
-pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False)
+pipe.unet = torch.compile(pipe.unet, backend="hidet", dynamic=False, options={"search_space": 0})
 #pipe.unet = torch.compile(pipe.unet, backend="torch_tensorrt", dynamic=False, options={"precision": torch.bfloat16,"optimization_level": 4,})
 pipe.to(device=device, dtype=torch.bfloat16)

@@ -210,11 +210,11 @@ def load_and_prepare_model():

 hidet.option.parallel_build(True)
 hidet.option.parallel_tune(2,2.0)
-torch._dynamo.config.suppress_errors = True
+#torch._dynamo.config.suppress_errors = True
 torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)

 # more search
-hidet.torch.dynamo_config.search_space(0)
+#hidet.torch.dynamo_config.search_space(0)
 #hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
 hidet.option.cache_dir("local_cache")
 # automatically transform the model to use float16 data type

@@ -222,8 +222,8 @@ hidet.option.cache_dir("local_cache")
 # use float16 data type as the accumulate data type in operators with reduction
 #hidet.torch.dynamo_config.use_fp16_reduction(True)
 # use tensorcore
-hidet.torch.dynamo_config.use_tensor_core()
-hidet.torch.dynamo_config.steal_weights(False)
+#hidet.torch.dynamo_config.use_tensor_core()
+#hidet.torch.dynamo_config.steal_weights(False)

 # Preload and compile both models
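
For readers following the hidet-related changes, here is a minimal, self-contained sketch of the setup these hunks edit. The checkpoint name and device string are placeholders chosen for illustration (the model actually loaded by app.py is not visible in this diff); the hidet and torch calls mirror the ones that already appear in the file.

import torch
import torch._dynamo
import hidet
from diffusers import DiffusionPipeline
from diffusers.models.attention import BasicTransformerBlock

device = "cuda"  # placeholder; app.py defines its own device elsewhere

# Placeholder checkpoint, not the one used by this Space.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16
)

# Hidet build/tuning options, as configured at module level in app.py.
hidet.option.parallel_build(True)
hidet.option.parallel_tune(2, 2.0)
hidet.option.cache_dir("local_cache")

# Keep the attention block out of the captured graph, as app.py does.
torch._dynamo.disallow_in_graph(BasicTransformerBlock)

# Compile only the UNet with the hidet Dynamo backend; the options dict
# mirrors the line added in this commit.
pipe.unet = torch.compile(
    pipe.unet, backend="hidet", dynamic=False, options={"search_space": 0}
)

pipe.to(device=device, dtype=torch.bfloat16)

Net effect of the commit: the hidet kernel search space is now scoped to this single torch.compile call via its options dict, while the settings that previously applied process-wide (torch._dynamo.config.suppress_errors and the hidet.torch.dynamo_config calls for search_space, use_tensor_core, and steal_weights) are commented out.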