{
  "best_metric": 0.47813913226127625,
  "best_model_checkpoint": "/mnt/beegfs/farid/mlora/outputs/xnli/aya-101/ar/rank4_lr5e-5/checkpoint-6000",
  "epoch": 0.24445893089960888,
  "eval_steps": 500,
  "global_step": 6000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020371577574967405,
      "grad_norm": 3.1687538623809814,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.1228,
      "step": 500
    },
    {
      "epoch": 0.020371577574967405,
      "eval_accuracy": 0.4465863453815261,
      "eval_f1": 0.4398462893270641,
      "eval_loss": 1.0515310764312744,
      "eval_runtime": 413.6686,
      "eval_samples_per_second": 6.019,
      "eval_steps_per_second": 0.377,
      "step": 500
    },
    {
      "epoch": 0.04074315514993481,
      "grad_norm": 10.303964614868164,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.901,
      "step": 1000
    },
    {
      "epoch": 0.04074315514993481,
      "eval_accuracy": 0.7469879518072289,
      "eval_f1": 0.7498668828149658,
      "eval_loss": 0.6499696969985962,
      "eval_runtime": 411.8975,
      "eval_samples_per_second": 6.045,
      "eval_steps_per_second": 0.379,
      "step": 1000
    },
    {
      "epoch": 0.06111473272490222,
      "grad_norm": 27.607500076293945,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.6865,
      "step": 1500
    },
    {
      "epoch": 0.06111473272490222,
      "eval_accuracy": 0.7763052208835342,
      "eval_f1": 0.7760746295957563,
      "eval_loss": 0.6157358884811401,
      "eval_runtime": 411.7973,
      "eval_samples_per_second": 6.047,
      "eval_steps_per_second": 0.379,
      "step": 1500
    },
    {
      "epoch": 0.08148631029986962,
      "grad_norm": 10.66073989868164,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.6189,
      "step": 2000
    },
    {
      "epoch": 0.08148631029986962,
      "eval_accuracy": 0.7799196787148595,
      "eval_f1": 0.7829493062150646,
      "eval_loss": 0.5638271570205688,
      "eval_runtime": 411.2214,
      "eval_samples_per_second": 6.055,
      "eval_steps_per_second": 0.379,
      "step": 2000
    },
    {
      "epoch": 0.10185788787483703,
      "grad_norm": 4.5490522384643555,
      "learning_rate": 3.240740740740741e-05,
      "loss": 0.621,
      "step": 2500
    },
    {
      "epoch": 0.10185788787483703,
      "eval_accuracy": 0.8012048192771084,
      "eval_f1": 0.8020131355543914,
      "eval_loss": 0.5373384952545166,
      "eval_runtime": 411.4783,
      "eval_samples_per_second": 6.051,
      "eval_steps_per_second": 0.379,
      "step": 2500
    },
    {
      "epoch": 0.12222946544980444,
      "grad_norm": 9.11120319366455,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.5901,
      "step": 3000
    },
    {
      "epoch": 0.12222946544980444,
      "eval_accuracy": 0.8132530120481928,
      "eval_f1": 0.813184387788095,
      "eval_loss": 0.5145502090454102,
      "eval_runtime": 411.6491,
      "eval_samples_per_second": 6.049,
      "eval_steps_per_second": 0.379,
      "step": 3000
    },
    {
      "epoch": 0.14260104302477183,
      "grad_norm": 7.9284515380859375,
      "learning_rate": 2.314814814814815e-05,
      "loss": 0.5831,
      "step": 3500
    },
    {
      "epoch": 0.14260104302477183,
      "eval_accuracy": 0.8068273092369478,
      "eval_f1": 0.807843707897922,
      "eval_loss": 0.5040690302848816,
      "eval_runtime": 411.4454,
      "eval_samples_per_second": 6.052,
      "eval_steps_per_second": 0.379,
      "step": 3500
    },
    {
      "epoch": 0.16297262059973924,
      "grad_norm": 5.931895732879639,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.5816,
      "step": 4000
    },
    {
      "epoch": 0.16297262059973924,
      "eval_accuracy": 0.8164658634538152,
      "eval_f1": 0.8168966682189732,
      "eval_loss": 0.49493011832237244,
      "eval_runtime": 411.9765,
      "eval_samples_per_second": 6.044,
      "eval_steps_per_second": 0.379,
      "step": 4000
    },
    {
      "epoch": 0.18334419817470665,
      "grad_norm": 5.388001918792725,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.5606,
      "step": 4500
    },
    {
      "epoch": 0.18334419817470665,
      "eval_accuracy": 0.8220883534136546,
      "eval_f1": 0.8223408197075474,
      "eval_loss": 0.48176905512809753,
      "eval_runtime": 411.5334,
      "eval_samples_per_second": 6.051,
      "eval_steps_per_second": 0.379,
      "step": 4500
    },
    {
      "epoch": 0.20371577574967406,
      "grad_norm": 8.442779541015625,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.5742,
      "step": 5000
    },
    {
      "epoch": 0.20371577574967406,
      "eval_accuracy": 0.8152610441767069,
      "eval_f1": 0.8151921057938069,
      "eval_loss": 0.49218347668647766,
      "eval_runtime": 411.5662,
      "eval_samples_per_second": 6.05,
      "eval_steps_per_second": 0.379,
      "step": 5000
    },
    {
      "epoch": 0.22408735332464147,
      "grad_norm": 4.897560119628906,
      "learning_rate": 4.6296296296296296e-06,
      "loss": 0.5648,
      "step": 5500
    },
    {
      "epoch": 0.22408735332464147,
      "eval_accuracy": 0.8236947791164658,
      "eval_f1": 0.823621391191149,
      "eval_loss": 0.47885623574256897,
      "eval_runtime": 431.6447,
      "eval_samples_per_second": 5.769,
      "eval_steps_per_second": 0.361,
      "step": 5500
    },
    {
      "epoch": 0.24445893089960888,
      "grad_norm": 6.240907669067383,
      "learning_rate": 0.0,
      "loss": 0.5461,
      "step": 6000
    },
    {
      "epoch": 0.24445893089960888,
      "eval_accuracy": 0.8253012048192772,
      "eval_f1": 0.8254650982786048,
      "eval_loss": 0.47813913226127625,
      "eval_runtime": 412.3144,
      "eval_samples_per_second": 6.039,
      "eval_steps_per_second": 0.378,
      "step": 6000
    }
  ],
  "logging_steps": 500,
  "max_steps": 6000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 8.03166870528e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}