{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.17185821697099893,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008592910848549946,
"eval_loss": 10.378536224365234,
"eval_runtime": 2.4628,
"eval_samples_per_second": 198.962,
"eval_steps_per_second": 99.481,
"step": 1
},
{
"epoch": 0.004296455424274973,
"grad_norm": 0.010985192842781544,
"learning_rate": 5e-05,
"loss": 10.3786,
"step": 5
},
{
"epoch": 0.008592910848549946,
"grad_norm": 0.010209411382675171,
"learning_rate": 0.0001,
"loss": 10.3791,
"step": 10
},
{
"epoch": 0.01288936627282492,
"grad_norm": 0.01133144460618496,
"learning_rate": 9.98292246503335e-05,
"loss": 10.3782,
"step": 15
},
{
"epoch": 0.017185821697099892,
"grad_norm": 0.011832349933683872,
"learning_rate": 9.931806517013612e-05,
"loss": 10.3785,
"step": 20
},
{
"epoch": 0.021482277121374866,
"grad_norm": 0.0108720101416111,
"learning_rate": 9.847001329696653e-05,
"loss": 10.3781,
"step": 25
},
{
"epoch": 0.02577873254564984,
"grad_norm": 0.016251012682914734,
"learning_rate": 9.729086208503174e-05,
"loss": 10.3776,
"step": 30
},
{
"epoch": 0.03007518796992481,
"grad_norm": 0.012109613977372646,
"learning_rate": 9.578866633275288e-05,
"loss": 10.3775,
"step": 35
},
{
"epoch": 0.034371643394199784,
"grad_norm": 0.015744652599096298,
"learning_rate": 9.397368756032445e-05,
"loss": 10.3769,
"step": 40
},
{
"epoch": 0.03866809881847476,
"grad_norm": 0.013376533053815365,
"learning_rate": 9.185832391312644e-05,
"loss": 10.3772,
"step": 45
},
{
"epoch": 0.04296455424274973,
"grad_norm": 0.02371077798306942,
"learning_rate": 8.945702546981969e-05,
"loss": 10.3756,
"step": 50
},
{
"epoch": 0.04296455424274973,
"eval_loss": 10.37713623046875,
"eval_runtime": 2.4659,
"eval_samples_per_second": 198.71,
"eval_steps_per_second": 99.355,
"step": 50
},
{
"epoch": 0.047261009667024706,
"grad_norm": 0.01536468043923378,
"learning_rate": 8.678619553365659e-05,
"loss": 10.3781,
"step": 55
},
{
"epoch": 0.05155746509129968,
"grad_norm": 0.015035259537398815,
"learning_rate": 8.386407858128706e-05,
"loss": 10.378,
"step": 60
},
{
"epoch": 0.055853920515574654,
"grad_norm": 0.017331380397081375,
"learning_rate": 8.07106356344834e-05,
"loss": 10.3773,
"step": 65
},
{
"epoch": 0.06015037593984962,
"grad_norm": 0.01693269982933998,
"learning_rate": 7.734740790612136e-05,
"loss": 10.3768,
"step": 70
},
{
"epoch": 0.0644468313641246,
"grad_norm": 0.018788564950227737,
"learning_rate": 7.379736965185368e-05,
"loss": 10.3763,
"step": 75
},
{
"epoch": 0.06874328678839957,
"grad_norm": 0.01848599687218666,
"learning_rate": 7.008477123264848e-05,
"loss": 10.3765,
"step": 80
},
{
"epoch": 0.07303974221267455,
"grad_norm": 0.022367339581251144,
"learning_rate": 6.623497346023418e-05,
"loss": 10.3759,
"step": 85
},
{
"epoch": 0.07733619763694952,
"grad_norm": 0.026913169771432877,
"learning_rate": 6.227427435703997e-05,
"loss": 10.3743,
"step": 90
},
{
"epoch": 0.08163265306122448,
"grad_norm": 0.025651535019278526,
"learning_rate": 5.8229729514036705e-05,
"loss": 10.3754,
"step": 95
},
{
"epoch": 0.08592910848549946,
"grad_norm": 0.035040900111198425,
"learning_rate": 5.4128967273616625e-05,
"loss": 10.3744,
"step": 100
},
{
"epoch": 0.08592910848549946,
"eval_loss": 10.375018119812012,
"eval_runtime": 2.4717,
"eval_samples_per_second": 198.245,
"eval_steps_per_second": 99.123,
"step": 100
},
{
"epoch": 0.09022556390977443,
"grad_norm": 0.024812066927552223,
"learning_rate": 5e-05,
"loss": 10.375,
"step": 105
},
{
"epoch": 0.09452201933404941,
"grad_norm": 0.029139433056116104,
"learning_rate": 4.5871032726383386e-05,
"loss": 10.375,
"step": 110
},
{
"epoch": 0.09881847475832438,
"grad_norm": 0.026183877140283585,
"learning_rate": 4.17702704859633e-05,
"loss": 10.3747,
"step": 115
},
{
"epoch": 0.10311493018259936,
"grad_norm": 0.028735578060150146,
"learning_rate": 3.772572564296005e-05,
"loss": 10.374,
"step": 120
},
{
"epoch": 0.10741138560687433,
"grad_norm": 0.02600599266588688,
"learning_rate": 3.3765026539765834e-05,
"loss": 10.3744,
"step": 125
},
{
"epoch": 0.11170784103114931,
"grad_norm": 0.037724222987890244,
"learning_rate": 2.991522876735154e-05,
"loss": 10.3742,
"step": 130
},
{
"epoch": 0.11600429645542427,
"grad_norm": 0.0350051075220108,
"learning_rate": 2.6202630348146324e-05,
"loss": 10.3731,
"step": 135
},
{
"epoch": 0.12030075187969924,
"grad_norm": 0.04115588963031769,
"learning_rate": 2.2652592093878666e-05,
"loss": 10.3737,
"step": 140
},
{
"epoch": 0.12459720730397422,
"grad_norm": 0.034164655953645706,
"learning_rate": 1.928936436551661e-05,
"loss": 10.3718,
"step": 145
},
{
"epoch": 0.1288936627282492,
"grad_norm": 0.04169544205069542,
"learning_rate": 1.6135921418712956e-05,
"loss": 10.3717,
"step": 150
},
{
"epoch": 0.1288936627282492,
"eval_loss": 10.373159408569336,
"eval_runtime": 2.4689,
"eval_samples_per_second": 198.468,
"eval_steps_per_second": 99.234,
"step": 150
},
{
"epoch": 0.13319011815252416,
"grad_norm": 0.035568829625844955,
"learning_rate": 1.3213804466343421e-05,
"loss": 10.3739,
"step": 155
},
{
"epoch": 0.13748657357679914,
"grad_norm": 0.028946030884981155,
"learning_rate": 1.0542974530180327e-05,
"loss": 10.3732,
"step": 160
},
{
"epoch": 0.14178302900107412,
"grad_norm": 0.029522452503442764,
"learning_rate": 8.141676086873572e-06,
"loss": 10.374,
"step": 165
},
{
"epoch": 0.1460794844253491,
"grad_norm": 0.03400672227144241,
"learning_rate": 6.026312439675552e-06,
"loss": 10.3731,
"step": 170
},
{
"epoch": 0.15037593984962405,
"grad_norm": 0.026614105328917503,
"learning_rate": 4.2113336672471245e-06,
"loss": 10.3742,
"step": 175
},
{
"epoch": 0.15467239527389903,
"grad_norm": 0.034079305827617645,
"learning_rate": 2.7091379149682685e-06,
"loss": 10.3729,
"step": 180
},
{
"epoch": 0.158968850698174,
"grad_norm": 0.031550854444503784,
"learning_rate": 1.5299867030334814e-06,
"loss": 10.373,
"step": 185
},
{
"epoch": 0.16326530612244897,
"grad_norm": 0.03797019645571709,
"learning_rate": 6.819348298638839e-07,
"loss": 10.3725,
"step": 190
},
{
"epoch": 0.16756176154672395,
"grad_norm": 0.036687079817056656,
"learning_rate": 1.7077534966650766e-07,
"loss": 10.3732,
"step": 195
},
{
"epoch": 0.17185821697099893,
"grad_norm": 0.046533871442079544,
"learning_rate": 0.0,
"loss": 10.3721,
"step": 200
},
{
"epoch": 0.17185821697099893,
"eval_loss": 10.372811317443848,
"eval_runtime": 2.4726,
"eval_samples_per_second": 198.175,
"eval_steps_per_second": 99.087,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 10124901089280.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}