model_family: llama2-7b
LoRA:
  r: 8
  alpha: 16
  dropout: 0.1
data_path: ../../dataset/KnowUnDo/privacy/full.json
batch_size: 16
gradient_accumulation_steps: 4
num_epochs: 10
save_dir: ../../paper_models/llama2-7b_lora_kud_privacy
lr: 0.0003
weight_decay: 0.0001
seed: 100
max_length: 512
ds_config: ../config/ds_z2_config.json
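
# Notes derived from the values above (the ZeRO-stage reading of the filename is an
# assumption, not confirmed by this file):
# - Effective batch size: batch_size * gradient_accumulation_steps = 16 * 4 = 64
#   sequences per optimizer step.
# - LoRA scaling: alpha / r = 16 / 8 = 2, the multiplier applied to the low-rank update.
# - ds_config presumably selects a DeepSpeed ZeRO stage-2 setup (ds_z2_config.json).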