anydoor/configs/anydoor.yaml
CHANGED
@@ -80,6 +80,4 @@ model:
|
|
80 |
|
81 |
cond_stage_config:
|
82 |
target: ldm.modules.encoders.modules.FrozenDinoV2Encoder
|
83 |
-
weight:
|
84 |
-
|
85 |
-
|
|
|
80 |
|
81 |
cond_stage_config:
|
82 |
target: ldm.modules.encoders.modules.FrozenDinoV2Encoder
|
83 |
+
weight: /workspace/train-wefadoor-master/dinov2_vitg14_pretrain.pth
|
|
|
|
anydoor/configs/inference.yaml
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
-
|
|
|
2 |
config_file: configs/anydoor.yaml
|
3 |
save_memory: False
|
|
|
1 |
+
|
2 |
+
pretrained_model: /workspace/mixed_wefa_unanon/step_300k_slim.ckpt
|
3 |
config_file: configs/anydoor.yaml
|
4 |
save_memory: False
|
anydoor/run_train_anydoor.py
CHANGED
@@ -42,7 +42,7 @@ if save_memory:
|
|
42 |
# accumulate_grad_batches=1
|
43 |
|
44 |
# Configs
|
45 |
-
resume_path = '/workspace/
|
46 |
batch_size = 8
|
47 |
logger_freq = 1000
|
48 |
learning_rate = 1e-5
|
|
|
42 |
# accumulate_grad_batches=1
|
43 |
|
44 |
# Configs
|
45 |
+
resume_path = '/workspace/mixed_wefa_unanon/step_300k_slim.ckpt'
|
46 |
batch_size = 8
|
47 |
logger_freq = 1000
|
48 |
learning_rate = 1e-5
|