config:
  (): colpali_engine.trainer.colmodel_training.ColModelTrainingConfig
  output_dir: !path ../../../models/qwenstella_8e-5_tabfquad_3e_cont_1e-5
  processor:
    (): colpali_engine.utils.transformers_wrappers.AllPurposeWrapper
    class_to_instanciate: !ext colpali_engine.models.ColQwenStellaProcessor
    pretrained_model_name_or_path: "./models/qwenstella-base" # "vidore/colqwen2-base" # "./models/colqwen2-base" # "./models/paligemma-3b-mix-448"
    # num_image_tokens: 2048
    # max_length: 50

  model:
    (): colpali_engine.utils.transformers_wrappers.AllPurposeWrapper
    class_to_instanciate: !ext colpali_engine.models.ColQwenStella
    pretrained_model_name_or_path: "./models/qwenstella-base" # "vidore/colqwen2-base" # "./models/colqwen2-base"
    torch_dtype: !ext torch.bfloat16
    # weights_only: false
    # use_cache: false
    attn_implementation: "flash_attention_2"
    # device_map: "auto"
    # quantization_config:
    #   (): transformers.BitsAndBytesConfig
    #   load_in_4bit: true
    #   bnb_4bit_quant_type: "nf4"
    #   bnb_4bit_compute_dtype: "bfloat16"
    #   bnb_4bit_use_double_quant: true

  dataset_loading_func: !ext colpali_engine.utils.dataset_transformation.load_mixed_train_set
  eval_dataset_loader: !import ../data/test_data.yaml
  pretrained_peft_model_name_or_path: models/qwenstella_8e-5_tabfquad_3e
  # max_length: 50
  run_eval: true

  loss_func:
    (): colpali_engine.loss.late_interaction_losses.ColbertPairwiseCELoss

  tr_args:
    (): transformers.training_args.TrainingArguments
    output_dir: null
    overwrite_output_dir: true
    num_train_epochs: 5
    per_device_train_batch_size: 64
    gradient_checkpointing: true
    gradient_checkpointing_kwargs: { "use_reentrant": false }
    # gradient_checkpointing: true
    # 6 x 8 gpus = 48 batch size
    gradient_accumulation_steps: 2
    per_device_eval_batch_size: 32
    eval_strategy: "steps"
    dataloader_num_workers: 8
    # bf16: true
    save_steps: 100
    logging_steps: 1
    eval_steps: 400
    warmup_steps: 100
    learning_rate: 1e-5
    save_total_limit: 100
    lr_scheduler_type: cosine
    # lr_scheduler_kwargs: min_lr
    # resume_from_checkpoint: true
    # optim: "paged_adamw_8bit"

    # wandb logging
    # wandb_name: "colqwen2"
    run_name: "qwenstella_8e-5_tabfquad_3e_cont_1e-5"
    report_to: "wandb"

  peft_config:
    (): peft.LoraConfig
    r: 128
    lora_alpha: 128
    lora_dropout: 0.1
    init_lora_weights: "gaussian"
    bias: "none"
    task_type: "FEATURE_EXTRACTION"
    target_modules: "(.*(model).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$|.*(vector_linear_1024).*$|.*(vision_model).*(merger).*(mlp).*(0|2).*$)"
    # target_modules: '(.*(language_model).*(down_proj|gate_proj|up_proj|k_proj|q_proj|v_proj|o_proj).*$|.*(custom_text_proj).*$)'
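
# ---------------------------------------------------------------------------
# Usage note (a sketch, not part of the original config): files in this format
# are consumed by the colpali-engine training entry point, whose loader
# resolves each `():` key into the named class and instantiates it with the
# sibling keys as kwargs. The script path and config filename below are
# assumptions based on the upstream colpali repository layout; adjust both to
# match your checkout:
#
#   USE_LOCAL_DATASET=0 python scripts/train/train_colbert.py \
#       path/to/this_config.yaml
#
# Since `pretrained_peft_model_name_or_path` points at the earlier
# qwenstella_8e-5_tabfquad_3e adapters, this run appears to continue training
# those LoRA weights at the lower 1e-5 learning rate (hence the `_cont_1e-5`
# suffix in `output_dir` and `run_name`) rather than initializing new ones.
# ---------------------------------------------------------------------------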