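# align-anything configuration (YAML dump) for a DPO run on a local
# Chameleon-7B checkpoint, trained on pre-tokenized multimodal preference data.
# No evaluation dataset is configured (all eval_* fields are null).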
data_cfgs:
  eval_data_files: null
  eval_datasets: null
  eval_optional_args: []
  eval_size: null
  eval_split: null
  eval_subset: null
  eval_template: null
  train_data_files: q0_80_preference.pt
  train_datasets: /data/align-anything/hantao/data/mm_interp/AA_preference_cosi_new_step10/tokenized
  train_optional_args: []
  train_size: null
  train_split: train
  train_subset: null
  train_template: Chameleon_preference
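
# Logging and checkpointing: metrics are reported to Weights & Biases;
# save_interval is presumably measured in optimizer steps.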
logger_cfgs:
  cache_dir: null
  log_project: align-anything
  log_run_name: dpo
  log_type: wandb
  output_dir: /data/align-anything/hantao/align-anything/outputs/mm_interp/AA_preference_cosi_new_step10/q0_80_preference
  save_interval: 400.0
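
# Base model: a local Chameleon-7B checkpoint with a 4096-token context window.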
model_cfgs:
  model_max_length: 4096
  model_name_or_path: /data/align-anything/hantao/models/chameleon-7b
  trust_remote_code: true
  special_tokens: null
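
# Optimization hyperparameters. With per_device_train_batch_size 4 and
# gradient_accumulation_steps 2, the effective batch size is 8 per device.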
train_cfgs:
  adam_betas:
  - 0.9
  - 0.95
  bf16: true
  ds_cfgs: ds_z3_config.json
  epochs: 3.0
  eval_interval: 10
  eval_strategy: epoch
  fp16: false
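  # Only the vision tower is left trainable; the language model and the
  # multimodal projector are frozen.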
  freeze_language_model: true
  freeze_mm_proj: true
  freeze_vision_tower: false
  gradient_accumulation_steps: 2.0
  gradient_checkpointing: true
  learning_rate: 1.0e-06
  lr_scheduler_type: cosine
  lr_warmup_ratio: 0.03
  per_device_eval_batch_size: 4.0
  per_device_train_batch_size: 4.0
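  # scale_coeff is presumably the DPO preference-scaling coefficient (beta)
  # used by align-anything-style trainers; regularization adds a small
  # auxiliary penalty term (assumption based on the field names).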
  regularization: 0.001
  scale_coeff: 0.1
  seed: 42
  weight_decay: 0.01