llama3-1_8b_math_100000_samples / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 252,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11904761904761904,
"grad_norm": 26.617824687216622,
"learning_rate": 5e-06,
"loss": 0.7508,
"step": 10
},
{
"epoch": 0.23809523809523808,
"grad_norm": 1.8689351819227027,
"learning_rate": 5e-06,
"loss": 0.6727,
"step": 20
},
{
"epoch": 0.35714285714285715,
"grad_norm": 1.5792334038916935,
"learning_rate": 5e-06,
"loss": 0.633,
"step": 30
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.6218650960930915,
"learning_rate": 5e-06,
"loss": 0.6164,
"step": 40
},
{
"epoch": 0.5952380952380952,
"grad_norm": 1.140712919876654,
"learning_rate": 5e-06,
"loss": 0.6066,
"step": 50
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.8602592078396757,
"learning_rate": 5e-06,
"loss": 0.5966,
"step": 60
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.8320424935867776,
"learning_rate": 5e-06,
"loss": 0.5944,
"step": 70
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.49165318108555545,
"learning_rate": 5e-06,
"loss": 0.5861,
"step": 80
},
{
"epoch": 1.0,
"eval_loss": 0.5798346996307373,
"eval_runtime": 8.4592,
"eval_samples_per_second": 266.928,
"eval_steps_per_second": 1.064,
"step": 84
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.6573425533819024,
"learning_rate": 5e-06,
"loss": 0.5663,
"step": 90
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.4670077635391962,
"learning_rate": 5e-06,
"loss": 0.5517,
"step": 100
},
{
"epoch": 1.3095238095238095,
"grad_norm": 0.5237829600316058,
"learning_rate": 5e-06,
"loss": 0.5546,
"step": 110
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.5547024135708416,
"learning_rate": 5e-06,
"loss": 0.5526,
"step": 120
},
{
"epoch": 1.5476190476190477,
"grad_norm": 0.47689471851329457,
"learning_rate": 5e-06,
"loss": 0.5466,
"step": 130
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.5230783383262022,
"learning_rate": 5e-06,
"loss": 0.5472,
"step": 140
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.5674014749330538,
"learning_rate": 5e-06,
"loss": 0.5464,
"step": 150
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.47822121321020516,
"learning_rate": 5e-06,
"loss": 0.543,
"step": 160
},
{
"epoch": 2.0,
"eval_loss": 0.5688267946243286,
"eval_runtime": 8.5104,
"eval_samples_per_second": 265.321,
"eval_steps_per_second": 1.058,
"step": 168
},
{
"epoch": 2.0238095238095237,
"grad_norm": 0.9393861948324864,
"learning_rate": 5e-06,
"loss": 0.5387,
"step": 170
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.5880801701644413,
"learning_rate": 5e-06,
"loss": 0.513,
"step": 180
},
{
"epoch": 2.261904761904762,
"grad_norm": 0.7573868967310241,
"learning_rate": 5e-06,
"loss": 0.5084,
"step": 190
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.5149453817597104,
"learning_rate": 5e-06,
"loss": 0.5065,
"step": 200
},
{
"epoch": 2.5,
"grad_norm": 0.48769983126777056,
"learning_rate": 5e-06,
"loss": 0.5096,
"step": 210
},
{
"epoch": 2.619047619047619,
"grad_norm": 0.6168117709375799,
"learning_rate": 5e-06,
"loss": 0.51,
"step": 220
},
{
"epoch": 2.738095238095238,
"grad_norm": 0.6182244851253141,
"learning_rate": 5e-06,
"loss": 0.5075,
"step": 230
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5115158649844643,
"learning_rate": 5e-06,
"loss": 0.5129,
"step": 240
},
{
"epoch": 2.9761904761904763,
"grad_norm": 0.6056742291106577,
"learning_rate": 5e-06,
"loss": 0.51,
"step": 250
},
{
"epoch": 3.0,
"eval_loss": 0.5704450607299805,
"eval_runtime": 8.2035,
"eval_samples_per_second": 275.249,
"eval_steps_per_second": 1.097,
"step": 252
},
{
"epoch": 3.0,
"step": 252,
"total_flos": 422109385850880.0,
"train_loss": 0.5631871157222323,
"train_runtime": 1874.1971,
"train_samples_per_second": 68.655,
"train_steps_per_second": 0.134
}
],
"logging_steps": 10,
"max_steps": 252,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 422109385850880.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
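
A minimal sketch of how this trainer state can be inspected programmatically, assuming the JSON above is saved locally as "trainer_state.json" (the path is an assumption, not part of the file):

# Sketch: load the Hugging Face Trainer state above and print the loss curves.
# Assumes the JSON is saved as "trainer_state.json" in the working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs carry "loss"; per-epoch eval logs carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.2f}  loss {e['loss']:.4f}")
for e in eval_logs:
    print(f"epoch {e['epoch']:.1f}  eval_loss {e['eval_loss']:.4f}")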