{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.984771573604061,
  "eval_steps": 500,
  "global_step": 147,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 32.67877197265625,
      "learning_rate": 4.8239436619718316e-05,
      "loss": 3.4262,
      "step": 10
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 18.402006149291992,
      "learning_rate": 4.471830985915493e-05,
      "loss": 2.8928,
      "step": 20
    },
    {
      "epoch": 0.6091370558375635,
      "grad_norm": 48.83979415893555,
      "learning_rate": 4.119718309859155e-05,
      "loss": 2.3612,
      "step": 30
    },
    {
      "epoch": 0.8121827411167513,
      "grad_norm": 6.010754585266113,
      "learning_rate": 3.767605633802817e-05,
      "loss": 2.2193,
      "step": 40
    },
    {
      "epoch": 1.015228426395939,
      "grad_norm": 5.850377559661865,
      "learning_rate": 3.4154929577464786e-05,
      "loss": 2.0969,
      "step": 50
    },
    {
      "epoch": 1.218274111675127,
      "grad_norm": 8.595719337463379,
      "learning_rate": 3.063380281690141e-05,
      "loss": 2.0547,
      "step": 60
    },
    {
      "epoch": 1.4213197969543148,
      "grad_norm": 9.501204490661621,
      "learning_rate": 2.711267605633803e-05,
      "loss": 1.9979,
      "step": 70
    },
    {
      "epoch": 1.6243654822335025,
      "grad_norm": 6.856517791748047,
      "learning_rate": 2.359154929577465e-05,
      "loss": 1.9259,
      "step": 80
    },
    {
      "epoch": 1.8274111675126905,
      "grad_norm": 13.690930366516113,
      "learning_rate": 2.007042253521127e-05,
      "loss": 1.9244,
      "step": 90
    },
    {
      "epoch": 2.030456852791878,
      "grad_norm": 10.869937896728516,
      "learning_rate": 1.6549295774647887e-05,
      "loss": 1.8519,
      "step": 100
    },
    {
      "epoch": 2.233502538071066,
      "grad_norm": 12.451278686523438,
      "learning_rate": 1.3028169014084506e-05,
      "loss": 1.8955,
      "step": 110
    },
    {
      "epoch": 2.436548223350254,
      "grad_norm": 9.575093269348145,
      "learning_rate": 9.507042253521127e-06,
      "loss": 1.7803,
      "step": 120
    },
    {
      "epoch": 2.6395939086294415,
      "grad_norm": 6.249921798706055,
      "learning_rate": 5.9859154929577465e-06,
      "loss": 1.8202,
      "step": 130
    },
    {
      "epoch": 2.8426395939086295,
      "grad_norm": 8.334336280822754,
      "learning_rate": 2.464788732394366e-06,
      "loss": 1.8493,
      "step": 140
    }
  ],
  "logging_steps": 10,
  "max_steps": 147,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4746645092499456.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}