{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.14705882352941177,
"eval_steps": 4,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004901960784313725,
"grad_norm": 1.948913812637329,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.0601,
"step": 1
},
{
"epoch": 0.004901960784313725,
"eval_loss": 0.9626200199127197,
"eval_runtime": 11.2758,
"eval_samples_per_second": 7.627,
"eval_steps_per_second": 1.951,
"step": 1
},
{
"epoch": 0.00980392156862745,
"grad_norm": 2.0441207885742188,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9237,
"step": 2
},
{
"epoch": 0.014705882352941176,
"grad_norm": 1.0346183776855469,
"learning_rate": 6e-06,
"loss": 1.7715,
"step": 3
},
{
"epoch": 0.0196078431372549,
"grad_norm": 1.7915290594100952,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9189,
"step": 4
},
{
"epoch": 0.0196078431372549,
"eval_loss": 0.9602966904640198,
"eval_runtime": 10.5025,
"eval_samples_per_second": 8.189,
"eval_steps_per_second": 2.095,
"step": 4
},
{
"epoch": 0.024509803921568627,
"grad_norm": 1.6641079187393188,
"learning_rate": 1e-05,
"loss": 0.7885,
"step": 5
},
{
"epoch": 0.029411764705882353,
"grad_norm": 1.4073126316070557,
"learning_rate": 9.960573506572391e-06,
"loss": 0.6591,
"step": 6
},
{
"epoch": 0.03431372549019608,
"grad_norm": 1.7350611686706543,
"learning_rate": 9.842915805643156e-06,
"loss": 0.7968,
"step": 7
},
{
"epoch": 0.0392156862745098,
"grad_norm": 1.7413434982299805,
"learning_rate": 9.648882429441258e-06,
"loss": 0.8927,
"step": 8
},
{
"epoch": 0.0392156862745098,
"eval_loss": 0.934045672416687,
"eval_runtime": 10.5931,
"eval_samples_per_second": 8.119,
"eval_steps_per_second": 2.077,
"step": 8
},
{
"epoch": 0.04411764705882353,
"grad_norm": 1.2970942258834839,
"learning_rate": 9.381533400219319e-06,
"loss": 0.6153,
"step": 9
},
{
"epoch": 0.049019607843137254,
"grad_norm": 2.589458465576172,
"learning_rate": 9.045084971874738e-06,
"loss": 1.0173,
"step": 10
},
{
"epoch": 0.05392156862745098,
"grad_norm": 1.39173424243927,
"learning_rate": 8.644843137107058e-06,
"loss": 0.7402,
"step": 11
},
{
"epoch": 0.058823529411764705,
"grad_norm": 1.885929822921753,
"learning_rate": 8.18711994874345e-06,
"loss": 0.6374,
"step": 12
},
{
"epoch": 0.058823529411764705,
"eval_loss": 0.9021902680397034,
"eval_runtime": 10.6144,
"eval_samples_per_second": 8.102,
"eval_steps_per_second": 2.073,
"step": 12
},
{
"epoch": 0.06372549019607843,
"grad_norm": 1.750763177871704,
"learning_rate": 7.679133974894984e-06,
"loss": 0.8322,
"step": 13
},
{
"epoch": 0.06862745098039216,
"grad_norm": 1.578403353691101,
"learning_rate": 7.128896457825364e-06,
"loss": 0.8538,
"step": 14
},
{
"epoch": 0.07352941176470588,
"grad_norm": 1.8658047914505005,
"learning_rate": 6.545084971874738e-06,
"loss": 0.7806,
"step": 15
},
{
"epoch": 0.0784313725490196,
"grad_norm": 2.268649101257324,
"learning_rate": 5.936906572928625e-06,
"loss": 0.9588,
"step": 16
},
{
"epoch": 0.0784313725490196,
"eval_loss": 0.8679822087287903,
"eval_runtime": 10.5908,
"eval_samples_per_second": 8.12,
"eval_steps_per_second": 2.077,
"step": 16
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.655454695224762,
"learning_rate": 5.3139525976465675e-06,
"loss": 0.7884,
"step": 17
},
{
"epoch": 0.08823529411764706,
"grad_norm": 2.9148428440093994,
"learning_rate": 4.686047402353433e-06,
"loss": 0.9067,
"step": 18
},
{
"epoch": 0.09313725490196079,
"grad_norm": 0.9642003774642944,
"learning_rate": 4.063093427071376e-06,
"loss": 0.636,
"step": 19
},
{
"epoch": 0.09803921568627451,
"grad_norm": 1.121366262435913,
"learning_rate": 3.4549150281252635e-06,
"loss": 0.7324,
"step": 20
},
{
"epoch": 0.09803921568627451,
"eval_loss": 0.8456867933273315,
"eval_runtime": 10.6004,
"eval_samples_per_second": 8.113,
"eval_steps_per_second": 2.075,
"step": 20
},
{
"epoch": 0.10294117647058823,
"grad_norm": 2.0282931327819824,
"learning_rate": 2.871103542174637e-06,
"loss": 0.8578,
"step": 21
},
{
"epoch": 0.10784313725490197,
"grad_norm": 0.8347979784011841,
"learning_rate": 2.320866025105016e-06,
"loss": 0.775,
"step": 22
},
{
"epoch": 0.11274509803921569,
"grad_norm": 1.1945385932922363,
"learning_rate": 1.8128800512565514e-06,
"loss": 0.6854,
"step": 23
},
{
"epoch": 0.11764705882352941,
"grad_norm": 1.330310583114624,
"learning_rate": 1.3551568628929434e-06,
"loss": 0.8785,
"step": 24
},
{
"epoch": 0.11764705882352941,
"eval_loss": 0.8344882726669312,
"eval_runtime": 10.6019,
"eval_samples_per_second": 8.112,
"eval_steps_per_second": 2.075,
"step": 24
},
{
"epoch": 0.12254901960784313,
"grad_norm": 1.4803705215454102,
"learning_rate": 9.549150281252633e-07,
"loss": 0.6359,
"step": 25
},
{
"epoch": 0.12745098039215685,
"grad_norm": 1.536838173866272,
"learning_rate": 6.184665997806832e-07,
"loss": 0.6286,
"step": 26
},
{
"epoch": 0.1323529411764706,
"grad_norm": 1.0436162948608398,
"learning_rate": 3.511175705587433e-07,
"loss": 0.7679,
"step": 27
},
{
"epoch": 0.13725490196078433,
"grad_norm": 2.5319721698760986,
"learning_rate": 1.5708419435684463e-07,
"loss": 1.1476,
"step": 28
},
{
"epoch": 0.13725490196078433,
"eval_loss": 0.8303804397583008,
"eval_runtime": 10.618,
"eval_samples_per_second": 8.099,
"eval_steps_per_second": 2.072,
"step": 28
},
{
"epoch": 0.14215686274509803,
"grad_norm": 1.8032675981521606,
"learning_rate": 3.9426493427611177e-08,
"loss": 0.8761,
"step": 29
},
{
"epoch": 0.14705882352941177,
"grad_norm": 2.128256320953369,
"learning_rate": 0.0,
"loss": 1.0381,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 20,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.041223234093056e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
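
The JSON above is the trainer_state.json that the Hugging Face Trainer writes into a checkpoint directory. A minimal sketch of how one might inspect it, assuming the file is saved locally under the name trainer_state.json (the path is an assumption for illustration, not part of the checkpoint contents):

# Hedged sketch: parse the checkpoint's trainer_state.json (assumed path) and
# print the train/eval loss trajectory recorded in log_history.
import json

with open("trainer_state.json") as f:  # assumed filename/location
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # training log entries carry "loss" and "learning_rate"
        print(f"step {entry['step']:>3}  train loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
    elif "eval_loss" in entry:
        # evaluation entries (every eval_steps=4 steps) carry "eval_loss"
        print(f"step {entry['step']:>3}  eval loss  {entry['eval_loss']:.4f}")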