{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0021108179419525065,
  "eval_steps": 3,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 8.443271767810026e-05,
      "grad_norm": 11.129352569580078,
      "learning_rate": 2e-05,
      "loss": 7.1042,
      "step": 1
    },
    {
      "epoch": 8.443271767810026e-05,
      "eval_loss": 7.151242256164551,
      "eval_runtime": 420.6162,
      "eval_samples_per_second": 11.856,
      "eval_steps_per_second": 5.929,
      "step": 1
    },
    {
      "epoch": 0.00016886543535620053,
      "grad_norm": 11.819311141967773,
      "learning_rate": 4e-05,
      "loss": 6.2728,
      "step": 2
    },
    {
      "epoch": 0.0002532981530343008,
      "grad_norm": 12.868980407714844,
      "learning_rate": 6e-05,
      "loss": 7.5328,
      "step": 3
    },
    {
      "epoch": 0.0002532981530343008,
      "eval_loss": 7.0206756591796875,
      "eval_runtime": 422.3489,
      "eval_samples_per_second": 11.808,
      "eval_steps_per_second": 5.905,
      "step": 3
    },
    {
      "epoch": 0.00033773087071240106,
      "grad_norm": 9.709155082702637,
      "learning_rate": 8e-05,
      "loss": 6.0845,
      "step": 4
    },
    {
      "epoch": 0.00042216358839050134,
      "grad_norm": 12.9670991897583,
      "learning_rate": 0.0001,
      "loss": 6.0418,
      "step": 5
    },
    {
      "epoch": 0.0005065963060686016,
      "grad_norm": 13.192237854003906,
      "learning_rate": 9.938441702975689e-05,
      "loss": 6.0895,
      "step": 6
    },
    {
      "epoch": 0.0005065963060686016,
      "eval_loss": 4.537143230438232,
      "eval_runtime": 422.4237,
      "eval_samples_per_second": 11.806,
      "eval_steps_per_second": 5.904,
      "step": 6
    },
    {
      "epoch": 0.0005910290237467018,
      "grad_norm": 22.602550506591797,
      "learning_rate": 9.755282581475769e-05,
      "loss": 4.155,
      "step": 7
    },
    {
      "epoch": 0.0006754617414248021,
      "grad_norm": 15.708683967590332,
      "learning_rate": 9.45503262094184e-05,
      "loss": 3.0346,
      "step": 8
    },
    {
      "epoch": 0.0007598944591029024,
      "grad_norm": 17.4925594329834,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.8198,
      "step": 9
    },
    {
      "epoch": 0.0007598944591029024,
      "eval_loss": 1.1246894598007202,
      "eval_runtime": 422.3964,
      "eval_samples_per_second": 11.806,
      "eval_steps_per_second": 5.904,
      "step": 9
    },
    {
      "epoch": 0.0008443271767810027,
      "grad_norm": 8.983549118041992,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.2177,
      "step": 10
    },
    {
      "epoch": 0.000928759894459103,
      "grad_norm": 7.7590765953063965,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.1052,
      "step": 11
    },
    {
      "epoch": 0.0010131926121372031,
      "grad_norm": 7.711614608764648,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.3309,
      "step": 12
    },
    {
      "epoch": 0.0010131926121372031,
      "eval_loss": 0.4180523753166199,
      "eval_runtime": 422.3609,
      "eval_samples_per_second": 11.807,
      "eval_steps_per_second": 5.905,
      "step": 12
    },
    {
      "epoch": 0.0010976253298153034,
      "grad_norm": 5.040955066680908,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.2055,
      "step": 13
    },
    {
      "epoch": 0.0011820580474934037,
      "grad_norm": 10.291000366210938,
      "learning_rate": 5.782172325201155e-05,
      "loss": 0.4698,
      "step": 14
    },
    {
      "epoch": 0.001266490765171504,
      "grad_norm": 9.118146896362305,
      "learning_rate": 5e-05,
      "loss": 0.231,
      "step": 15
    },
    {
      "epoch": 0.001266490765171504,
      "eval_loss": 0.28742024302482605,
      "eval_runtime": 422.1719,
      "eval_samples_per_second": 11.813,
      "eval_steps_per_second": 5.908,
      "step": 15
    },
    {
      "epoch": 0.0013509234828496042,
      "grad_norm": 9.136611938476562,
      "learning_rate": 4.2178276747988446e-05,
      "loss": 0.2849,
      "step": 16
    },
    {
      "epoch": 0.0014353562005277045,
      "grad_norm": 7.953118324279785,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.1662,
      "step": 17
    },
    {
      "epoch": 0.0015197889182058048,
      "grad_norm": 2.821458578109741,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 0.0864,
      "step": 18
    },
    {
      "epoch": 0.0015197889182058048,
      "eval_loss": 0.24295015633106232,
      "eval_runtime": 422.3791,
      "eval_samples_per_second": 11.807,
      "eval_steps_per_second": 5.905,
      "step": 18
    },
    {
      "epoch": 0.001604221635883905,
      "grad_norm": 1.723188042640686,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.044,
      "step": 19
    },
    {
      "epoch": 0.0016886543535620053,
      "grad_norm": 6.9622321128845215,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.1891,
      "step": 20
    },
    {
      "epoch": 0.0017730870712401056,
      "grad_norm": 5.619901180267334,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.1357,
      "step": 21
    },
    {
      "epoch": 0.0017730870712401056,
      "eval_loss": 0.21679861843585968,
      "eval_runtime": 422.402,
      "eval_samples_per_second": 11.806,
      "eval_steps_per_second": 5.904,
      "step": 21
    },
    {
      "epoch": 0.001857519788918206,
      "grad_norm": 8.403288841247559,
      "learning_rate": 5.449673790581611e-06,
      "loss": 0.2989,
      "step": 22
    },
    {
      "epoch": 0.001941952506596306,
      "grad_norm": 2.9916210174560547,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.0602,
      "step": 23
    },
    {
      "epoch": 0.0020263852242744062,
      "grad_norm": 4.566269874572754,
      "learning_rate": 6.15582970243117e-07,
      "loss": 0.1002,
      "step": 24
    },
    {
      "epoch": 0.0020263852242744062,
      "eval_loss": 0.20803606510162354,
      "eval_runtime": 422.4259,
      "eval_samples_per_second": 11.806,
      "eval_steps_per_second": 5.904,
      "step": 24
    },
    {
      "epoch": 0.0021108179419525065,
      "grad_norm": 3.1905064582824707,
      "learning_rate": 0.0,
      "loss": 0.0737,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9247821240729600.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}