3v324v23 committed on
Commit
157f270
·
1 Parent(s): 1dd11df
anydoor/configs/anydoor.yaml CHANGED
@@ -80,6 +80,4 @@ model:
80
 
81
  cond_stage_config:
82
  target: ldm.modules.encoders.modules.FrozenDinoV2Encoder
83
- weight: /workspace/train-wefadoor-master/dinov2_vitg14_pretrain.pth
84
-
85
-
 
80
 
81
  cond_stage_config:
82
  target: ldm.modules.encoders.modules.FrozenDinoV2Encoder
83
+ weight: workspace/train-wefadoor-master/dinov2_vitg14_pretrain.pth
 
 
anydoor/configs/inference.yaml CHANGED
@@ -1,3 +1,4 @@
1
- pretrained_model: /workspace/300k_wefa_boys_slim/lightning_logs/version_0/checkpoints/boys.ckpt
 
2
  config_file: configs/anydoor.yaml
3
  save_memory: False
 
1
+
2
+ pretrained_model: /workspace/mixed_wefa_unanon/step_300k_slim.ckpt
3
  config_file: configs/anydoor.yaml
4
  save_memory: False
anydoor/run_train_anydoor.py CHANGED
@@ -42,7 +42,7 @@ if save_memory:
42
  # accumulate_grad_batches=1
43
 
44
  # Configs
45
- resume_path = '/workspace/train-wefadoor-master/anydoor/lightning_logs/version_5/checkpoints/step_300k.ckpt'
46
  batch_size = 8
47
  logger_freq = 1000
48
  learning_rate = 1e-5
 
42
  # accumulate_grad_batches=1
43
 
44
  # Configs
45
+ resume_path = '/workspace/mixed_wefa_unanon/step_300k_slim.ckpt'
46
  batch_size = 8
47
  logger_freq = 1000
48
  learning_rate = 1e-5