{
"best_metric": 0.9015458226203918,
"best_model_checkpoint": "data/Llama-31-8B_task-1_120-samples_config-3_full/checkpoint-396",
"epoch": 43.0,
"eval_steps": 500,
"global_step": 473,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09090909090909091,
"grad_norm": 2.021212100982666,
"learning_rate": 6.060606060606061e-08,
"loss": 2.4844,
"step": 1
},
{
"epoch": 0.18181818181818182,
"grad_norm": 1.8868809938430786,
"learning_rate": 1.2121212121212122e-07,
"loss": 2.5082,
"step": 2
},
{
"epoch": 0.36363636363636365,
"grad_norm": 1.9227168560028076,
"learning_rate": 2.4242424242424244e-07,
"loss": 2.5149,
"step": 4
},
{
"epoch": 0.5454545454545454,
"grad_norm": 2.164538860321045,
"learning_rate": 3.6363636363636366e-07,
"loss": 2.4938,
"step": 6
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.9525693655014038,
"learning_rate": 4.848484848484849e-07,
"loss": 2.4402,
"step": 8
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.973608136177063,
"learning_rate": 6.060606060606061e-07,
"loss": 2.4681,
"step": 10
},
{
"epoch": 1.0,
"eval_loss": 2.453854560852051,
"eval_runtime": 9.6392,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 11
},
{
"epoch": 1.0909090909090908,
"grad_norm": 1.9462313652038574,
"learning_rate": 7.272727272727273e-07,
"loss": 2.4751,
"step": 12
},
{
"epoch": 1.2727272727272727,
"grad_norm": 1.8247989416122437,
"learning_rate": 8.484848484848486e-07,
"loss": 2.523,
"step": 14
},
{
"epoch": 1.4545454545454546,
"grad_norm": 1.8403159379959106,
"learning_rate": 9.696969696969698e-07,
"loss": 2.4192,
"step": 16
},
{
"epoch": 1.6363636363636362,
"grad_norm": 1.6670445203781128,
"learning_rate": 1.090909090909091e-06,
"loss": 2.5069,
"step": 18
},
{
"epoch": 1.8181818181818183,
"grad_norm": 1.802819013595581,
"learning_rate": 1.2121212121212122e-06,
"loss": 2.4854,
"step": 20
},
{
"epoch": 2.0,
"grad_norm": 1.4742677211761475,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.3894,
"step": 22
},
{
"epoch": 2.0,
"eval_loss": 2.4259960651397705,
"eval_runtime": 9.6188,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 22
},
{
"epoch": 2.1818181818181817,
"grad_norm": 1.6892441511154175,
"learning_rate": 1.4545454545454546e-06,
"loss": 2.4004,
"step": 24
},
{
"epoch": 2.3636363636363638,
"grad_norm": 1.5024837255477905,
"learning_rate": 1.5757575757575759e-06,
"loss": 2.4339,
"step": 26
},
{
"epoch": 2.5454545454545454,
"grad_norm": 1.6032872200012207,
"learning_rate": 1.6969696969696973e-06,
"loss": 2.4351,
"step": 28
},
{
"epoch": 2.7272727272727275,
"grad_norm": 1.526031732559204,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.3962,
"step": 30
},
{
"epoch": 2.909090909090909,
"grad_norm": 1.6687572002410889,
"learning_rate": 1.9393939393939395e-06,
"loss": 2.4746,
"step": 32
},
{
"epoch": 3.0,
"eval_loss": 2.382659912109375,
"eval_runtime": 9.6201,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 33
},
{
"epoch": 3.090909090909091,
"grad_norm": 1.5408605337142944,
"learning_rate": 2.0606060606060607e-06,
"loss": 2.4125,
"step": 34
},
{
"epoch": 3.2727272727272725,
"grad_norm": 1.6628339290618896,
"learning_rate": 2.181818181818182e-06,
"loss": 2.3574,
"step": 36
},
{
"epoch": 3.4545454545454546,
"grad_norm": 1.6864051818847656,
"learning_rate": 2.303030303030303e-06,
"loss": 2.3033,
"step": 38
},
{
"epoch": 3.6363636363636362,
"grad_norm": 1.705551266670227,
"learning_rate": 2.4242424242424244e-06,
"loss": 2.4227,
"step": 40
},
{
"epoch": 3.8181818181818183,
"grad_norm": 1.6931661367416382,
"learning_rate": 2.5454545454545456e-06,
"loss": 2.3626,
"step": 42
},
{
"epoch": 4.0,
"grad_norm": 1.910327434539795,
"learning_rate": 2.666666666666667e-06,
"loss": 2.4177,
"step": 44
},
{
"epoch": 4.0,
"eval_loss": 2.313791036605835,
"eval_runtime": 9.6408,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 44
},
{
"epoch": 4.181818181818182,
"grad_norm": 1.9009881019592285,
"learning_rate": 2.7878787878787885e-06,
"loss": 2.2818,
"step": 46
},
{
"epoch": 4.363636363636363,
"grad_norm": 1.9625577926635742,
"learning_rate": 2.9090909090909093e-06,
"loss": 2.4051,
"step": 48
},
{
"epoch": 4.545454545454545,
"grad_norm": 1.4953864812850952,
"learning_rate": 3.0303030303030305e-06,
"loss": 2.2829,
"step": 50
},
{
"epoch": 4.7272727272727275,
"grad_norm": 1.5853980779647827,
"learning_rate": 3.1515151515151517e-06,
"loss": 2.2705,
"step": 52
},
{
"epoch": 4.909090909090909,
"grad_norm": 1.5361964702606201,
"learning_rate": 3.272727272727273e-06,
"loss": 2.1959,
"step": 54
},
{
"epoch": 5.0,
"eval_loss": 2.226909875869751,
"eval_runtime": 9.6204,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 55
},
{
"epoch": 5.090909090909091,
"grad_norm": 1.919047236442566,
"learning_rate": 3.3939393939393946e-06,
"loss": 2.2901,
"step": 56
},
{
"epoch": 5.2727272727272725,
"grad_norm": 2.0125391483306885,
"learning_rate": 3.5151515151515154e-06,
"loss": 2.2584,
"step": 58
},
{
"epoch": 5.454545454545454,
"grad_norm": 1.8322199583053589,
"learning_rate": 3.6363636363636366e-06,
"loss": 2.1565,
"step": 60
},
{
"epoch": 5.636363636363637,
"grad_norm": 1.318441390991211,
"learning_rate": 3.757575757575758e-06,
"loss": 2.2066,
"step": 62
},
{
"epoch": 5.818181818181818,
"grad_norm": 1.2064168453216553,
"learning_rate": 3.878787878787879e-06,
"loss": 2.1475,
"step": 64
},
{
"epoch": 6.0,
"grad_norm": 1.1312469244003296,
"learning_rate": 4.000000000000001e-06,
"loss": 2.16,
"step": 66
},
{
"epoch": 6.0,
"eval_loss": 2.1177315711975098,
"eval_runtime": 9.6288,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 66
},
{
"epoch": 6.181818181818182,
"grad_norm": 1.0877413749694824,
"learning_rate": 4.1212121212121215e-06,
"loss": 2.0577,
"step": 68
},
{
"epoch": 6.363636363636363,
"grad_norm": 1.1328318119049072,
"learning_rate": 4.242424242424243e-06,
"loss": 2.1584,
"step": 70
},
{
"epoch": 6.545454545454545,
"grad_norm": 1.1114661693572998,
"learning_rate": 4.363636363636364e-06,
"loss": 2.1214,
"step": 72
},
{
"epoch": 6.7272727272727275,
"grad_norm": 1.0736863613128662,
"learning_rate": 4.4848484848484855e-06,
"loss": 2.0338,
"step": 74
},
{
"epoch": 6.909090909090909,
"grad_norm": 1.1103681325912476,
"learning_rate": 4.606060606060606e-06,
"loss": 2.0388,
"step": 76
},
{
"epoch": 7.0,
"eval_loss": 1.9843875169754028,
"eval_runtime": 9.6304,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 77
},
{
"epoch": 7.090909090909091,
"grad_norm": 1.0429500341415405,
"learning_rate": 4.727272727272728e-06,
"loss": 1.9515,
"step": 78
},
{
"epoch": 7.2727272727272725,
"grad_norm": 1.0388323068618774,
"learning_rate": 4.848484848484849e-06,
"loss": 2.0436,
"step": 80
},
{
"epoch": 7.454545454545454,
"grad_norm": 0.9600175023078918,
"learning_rate": 4.9696969696969696e-06,
"loss": 1.8726,
"step": 82
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.9380725026130676,
"learning_rate": 5.090909090909091e-06,
"loss": 1.9307,
"step": 84
},
{
"epoch": 7.818181818181818,
"grad_norm": 0.8361033797264099,
"learning_rate": 5.212121212121213e-06,
"loss": 1.869,
"step": 86
},
{
"epoch": 8.0,
"grad_norm": 0.8874518275260925,
"learning_rate": 5.333333333333334e-06,
"loss": 1.8932,
"step": 88
},
{
"epoch": 8.0,
"eval_loss": 1.8441652059555054,
"eval_runtime": 9.6269,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 88
},
{
"epoch": 8.181818181818182,
"grad_norm": 0.8483957052230835,
"learning_rate": 5.4545454545454545e-06,
"loss": 1.8148,
"step": 90
},
{
"epoch": 8.363636363636363,
"grad_norm": 0.9277289509773254,
"learning_rate": 5.575757575757577e-06,
"loss": 1.8286,
"step": 92
},
{
"epoch": 8.545454545454545,
"grad_norm": 0.9386701583862305,
"learning_rate": 5.696969696969698e-06,
"loss": 1.7935,
"step": 94
},
{
"epoch": 8.727272727272727,
"grad_norm": 0.9653158783912659,
"learning_rate": 5.8181818181818185e-06,
"loss": 1.7714,
"step": 96
},
{
"epoch": 8.909090909090908,
"grad_norm": 0.929076075553894,
"learning_rate": 5.93939393939394e-06,
"loss": 1.7199,
"step": 98
},
{
"epoch": 9.0,
"eval_loss": 1.6830488443374634,
"eval_runtime": 9.6281,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 99
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.9527233839035034,
"learning_rate": 6.060606060606061e-06,
"loss": 1.7054,
"step": 100
},
{
"epoch": 9.272727272727273,
"grad_norm": 1.1190400123596191,
"learning_rate": 6.181818181818182e-06,
"loss": 1.623,
"step": 102
},
{
"epoch": 9.454545454545455,
"grad_norm": 1.1004133224487305,
"learning_rate": 6.303030303030303e-06,
"loss": 1.6696,
"step": 104
},
{
"epoch": 9.636363636363637,
"grad_norm": 1.036831259727478,
"learning_rate": 6.424242424242425e-06,
"loss": 1.5955,
"step": 106
},
{
"epoch": 9.818181818181818,
"grad_norm": 0.9210329055786133,
"learning_rate": 6.545454545454546e-06,
"loss": 1.5252,
"step": 108
},
{
"epoch": 10.0,
"grad_norm": 1.0149990320205688,
"learning_rate": 6.666666666666667e-06,
"loss": 1.4973,
"step": 110
},
{
"epoch": 10.0,
"eval_loss": 1.4929395914077759,
"eval_runtime": 9.6201,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 110
},
{
"epoch": 10.181818181818182,
"grad_norm": 0.9690227508544922,
"learning_rate": 6.787878787878789e-06,
"loss": 1.4956,
"step": 112
},
{
"epoch": 10.363636363636363,
"grad_norm": 0.8670191168785095,
"learning_rate": 6.90909090909091e-06,
"loss": 1.4899,
"step": 114
},
{
"epoch": 10.545454545454545,
"grad_norm": 0.9516677260398865,
"learning_rate": 7.030303030303031e-06,
"loss": 1.3702,
"step": 116
},
{
"epoch": 10.727272727272727,
"grad_norm": 0.987469494342804,
"learning_rate": 7.151515151515152e-06,
"loss": 1.4109,
"step": 118
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.9726764559745789,
"learning_rate": 7.272727272727273e-06,
"loss": 1.2726,
"step": 120
},
{
"epoch": 11.0,
"eval_loss": 1.2980190515518188,
"eval_runtime": 9.6227,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 121
},
{
"epoch": 11.090909090909092,
"grad_norm": 0.925747811794281,
"learning_rate": 7.393939393939395e-06,
"loss": 1.2873,
"step": 122
},
{
"epoch": 11.272727272727273,
"grad_norm": 1.0416390895843506,
"learning_rate": 7.515151515151516e-06,
"loss": 1.2906,
"step": 124
},
{
"epoch": 11.454545454545455,
"grad_norm": 0.8796542286872864,
"learning_rate": 7.636363636363638e-06,
"loss": 1.2497,
"step": 126
},
{
"epoch": 11.636363636363637,
"grad_norm": 0.7352049350738525,
"learning_rate": 7.757575757575758e-06,
"loss": 1.2023,
"step": 128
},
{
"epoch": 11.818181818181818,
"grad_norm": 0.7811641693115234,
"learning_rate": 7.87878787878788e-06,
"loss": 1.1205,
"step": 130
},
{
"epoch": 12.0,
"grad_norm": 0.6574164032936096,
"learning_rate": 8.000000000000001e-06,
"loss": 1.204,
"step": 132
},
{
"epoch": 12.0,
"eval_loss": 1.1553701162338257,
"eval_runtime": 9.6262,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 132
},
{
"epoch": 12.181818181818182,
"grad_norm": 0.7204536199569702,
"learning_rate": 8.121212121212121e-06,
"loss": 1.134,
"step": 134
},
{
"epoch": 12.363636363636363,
"grad_norm": 0.7305138111114502,
"learning_rate": 8.242424242424243e-06,
"loss": 1.046,
"step": 136
},
{
"epoch": 12.545454545454545,
"grad_norm": 0.7755628228187561,
"learning_rate": 8.363636363636365e-06,
"loss": 1.1237,
"step": 138
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.8240987062454224,
"learning_rate": 8.484848484848486e-06,
"loss": 1.1215,
"step": 140
},
{
"epoch": 12.909090909090908,
"grad_norm": 0.716643750667572,
"learning_rate": 8.606060606060606e-06,
"loss": 1.0597,
"step": 142
},
{
"epoch": 13.0,
"eval_loss": 1.0772147178649902,
"eval_runtime": 9.6246,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 143
},
{
"epoch": 13.090909090909092,
"grad_norm": 0.6686062812805176,
"learning_rate": 8.727272727272728e-06,
"loss": 1.09,
"step": 144
},
{
"epoch": 13.272727272727273,
"grad_norm": 0.5461896061897278,
"learning_rate": 8.84848484848485e-06,
"loss": 1.0405,
"step": 146
},
{
"epoch": 13.454545454545455,
"grad_norm": 0.5121557712554932,
"learning_rate": 8.969696969696971e-06,
"loss": 1.0367,
"step": 148
},
{
"epoch": 13.636363636363637,
"grad_norm": 0.5350603461265564,
"learning_rate": 9.090909090909091e-06,
"loss": 1.0327,
"step": 150
},
{
"epoch": 13.818181818181818,
"grad_norm": 0.48038652539253235,
"learning_rate": 9.212121212121213e-06,
"loss": 1.0409,
"step": 152
},
{
"epoch": 14.0,
"grad_norm": 0.5158300399780273,
"learning_rate": 9.333333333333334e-06,
"loss": 1.0642,
"step": 154
},
{
"epoch": 14.0,
"eval_loss": 1.0425277948379517,
"eval_runtime": 9.6218,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 154
},
{
"epoch": 14.181818181818182,
"grad_norm": 0.5284631252288818,
"learning_rate": 9.454545454545456e-06,
"loss": 1.0258,
"step": 156
},
{
"epoch": 14.363636363636363,
"grad_norm": 0.5030417442321777,
"learning_rate": 9.575757575757576e-06,
"loss": 1.0207,
"step": 158
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.5023478269577026,
"learning_rate": 9.696969696969698e-06,
"loss": 0.9857,
"step": 160
},
{
"epoch": 14.727272727272727,
"grad_norm": 0.4412921071052551,
"learning_rate": 9.81818181818182e-06,
"loss": 1.006,
"step": 162
},
{
"epoch": 14.909090909090908,
"grad_norm": 0.4654785096645355,
"learning_rate": 9.939393939393939e-06,
"loss": 1.0466,
"step": 164
},
{
"epoch": 15.0,
"eval_loss": 1.020145297050476,
"eval_runtime": 9.6194,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 165
},
{
"epoch": 15.090909090909092,
"grad_norm": 0.4788985848426819,
"learning_rate": 9.999988811118232e-06,
"loss": 0.9711,
"step": 166
},
{
"epoch": 15.272727272727273,
"grad_norm": 0.505489706993103,
"learning_rate": 9.999899300364534e-06,
"loss": 0.9704,
"step": 168
},
{
"epoch": 15.454545454545455,
"grad_norm": 0.5644559860229492,
"learning_rate": 9.999720280459576e-06,
"loss": 1.0248,
"step": 170
},
{
"epoch": 15.636363636363637,
"grad_norm": 0.4749113619327545,
"learning_rate": 9.999451754608208e-06,
"loss": 0.9284,
"step": 172
},
{
"epoch": 15.818181818181818,
"grad_norm": 0.658306896686554,
"learning_rate": 9.99909372761763e-06,
"loss": 1.0245,
"step": 174
},
{
"epoch": 16.0,
"grad_norm": 0.5014739036560059,
"learning_rate": 9.99864620589731e-06,
"loss": 1.0044,
"step": 176
},
{
"epoch": 16.0,
"eval_loss": 1.0009642839431763,
"eval_runtime": 9.6199,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 176
},
{
"epoch": 16.181818181818183,
"grad_norm": 0.5191582441329956,
"learning_rate": 9.998109197458865e-06,
"loss": 0.9723,
"step": 178
},
{
"epoch": 16.363636363636363,
"grad_norm": 0.5656840205192566,
"learning_rate": 9.997482711915926e-06,
"loss": 0.9901,
"step": 180
},
{
"epoch": 16.545454545454547,
"grad_norm": 0.49254581332206726,
"learning_rate": 9.996766760483955e-06,
"loss": 0.9321,
"step": 182
},
{
"epoch": 16.727272727272727,
"grad_norm": 0.500410795211792,
"learning_rate": 9.995961355980052e-06,
"loss": 0.9564,
"step": 184
},
{
"epoch": 16.90909090909091,
"grad_norm": 0.6280571222305298,
"learning_rate": 9.99506651282272e-06,
"loss": 0.9967,
"step": 186
},
{
"epoch": 17.0,
"eval_loss": 0.9866144061088562,
"eval_runtime": 9.621,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 187
},
{
"epoch": 17.09090909090909,
"grad_norm": 0.578583300113678,
"learning_rate": 9.994082247031613e-06,
"loss": 0.9784,
"step": 188
},
{
"epoch": 17.272727272727273,
"grad_norm": 0.5218169093132019,
"learning_rate": 9.993008576227248e-06,
"loss": 0.8817,
"step": 190
},
{
"epoch": 17.454545454545453,
"grad_norm": 0.6009635329246521,
"learning_rate": 9.991845519630679e-06,
"loss": 0.9515,
"step": 192
},
{
"epoch": 17.636363636363637,
"grad_norm": 0.5707380771636963,
"learning_rate": 9.99059309806317e-06,
"loss": 0.9694,
"step": 194
},
{
"epoch": 17.818181818181817,
"grad_norm": 0.6348333358764648,
"learning_rate": 9.989251333945813e-06,
"loss": 0.9498,
"step": 196
},
{
"epoch": 18.0,
"grad_norm": 0.5231234431266785,
"learning_rate": 9.987820251299121e-06,
"loss": 0.9863,
"step": 198
},
{
"epoch": 18.0,
"eval_loss": 0.9735579490661621,
"eval_runtime": 9.6192,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 198
},
{
"epoch": 18.181818181818183,
"grad_norm": 0.6517296433448792,
"learning_rate": 9.986299875742612e-06,
"loss": 0.9591,
"step": 200
},
{
"epoch": 18.363636363636363,
"grad_norm": 0.6254110932350159,
"learning_rate": 9.984690234494338e-06,
"loss": 0.9296,
"step": 202
},
{
"epoch": 18.545454545454547,
"grad_norm": 0.7348714470863342,
"learning_rate": 9.982991356370404e-06,
"loss": 0.9482,
"step": 204
},
{
"epoch": 18.727272727272727,
"grad_norm": 0.5867448449134827,
"learning_rate": 9.98120327178445e-06,
"loss": 0.9276,
"step": 206
},
{
"epoch": 18.90909090909091,
"grad_norm": 0.6433852910995483,
"learning_rate": 9.979326012747106e-06,
"loss": 0.9065,
"step": 208
},
{
"epoch": 19.0,
"eval_loss": 0.9643786549568176,
"eval_runtime": 9.647,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 209
},
{
"epoch": 19.09090909090909,
"grad_norm": 0.650622546672821,
"learning_rate": 9.977359612865424e-06,
"loss": 0.8947,
"step": 210
},
{
"epoch": 19.272727272727273,
"grad_norm": 0.7716985940933228,
"learning_rate": 9.975304107342268e-06,
"loss": 0.929,
"step": 212
},
{
"epoch": 19.454545454545453,
"grad_norm": 0.5945254564285278,
"learning_rate": 9.973159532975691e-06,
"loss": 0.9743,
"step": 214
},
{
"epoch": 19.636363636363637,
"grad_norm": 0.6686264872550964,
"learning_rate": 9.970925928158275e-06,
"loss": 0.9398,
"step": 216
},
{
"epoch": 19.818181818181817,
"grad_norm": 0.6576296091079712,
"learning_rate": 9.968603332876435e-06,
"loss": 0.9251,
"step": 218
},
{
"epoch": 20.0,
"grad_norm": 0.5890394449234009,
"learning_rate": 9.966191788709716e-06,
"loss": 0.8669,
"step": 220
},
{
"epoch": 20.0,
"eval_loss": 0.9538940787315369,
"eval_runtime": 9.6225,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 220
},
{
"epoch": 20.181818181818183,
"grad_norm": 0.7149389386177063,
"learning_rate": 9.963691338830045e-06,
"loss": 0.9322,
"step": 222
},
{
"epoch": 20.363636363636363,
"grad_norm": 0.6404098272323608,
"learning_rate": 9.961102028000948e-06,
"loss": 0.8914,
"step": 224
},
{
"epoch": 20.545454545454547,
"grad_norm": 0.6958956122398376,
"learning_rate": 9.958423902576764e-06,
"loss": 0.9489,
"step": 226
},
{
"epoch": 20.727272727272727,
"grad_norm": 0.6515837907791138,
"learning_rate": 9.955657010501807e-06,
"loss": 0.8477,
"step": 228
},
{
"epoch": 20.90909090909091,
"grad_norm": 0.8464462161064148,
"learning_rate": 9.952801401309504e-06,
"loss": 0.9253,
"step": 230
},
{
"epoch": 21.0,
"eval_loss": 0.9454106688499451,
"eval_runtime": 9.6279,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 231
},
{
"epoch": 21.09090909090909,
"grad_norm": 0.7701188325881958,
"learning_rate": 9.949857126121519e-06,
"loss": 0.9027,
"step": 232
},
{
"epoch": 21.272727272727273,
"grad_norm": 0.6819570660591125,
"learning_rate": 9.946824237646823e-06,
"loss": 0.8545,
"step": 234
},
{
"epoch": 21.454545454545453,
"grad_norm": 0.7400511503219604,
"learning_rate": 9.94370279018077e-06,
"loss": 0.9229,
"step": 236
},
{
"epoch": 21.636363636363637,
"grad_norm": 0.6731954216957092,
"learning_rate": 9.940492839604103e-06,
"loss": 0.8596,
"step": 238
},
{
"epoch": 21.818181818181817,
"grad_norm": 0.7251449227333069,
"learning_rate": 9.937194443381972e-06,
"loss": 0.9052,
"step": 240
},
{
"epoch": 22.0,
"grad_norm": 0.7023078799247742,
"learning_rate": 9.933807660562898e-06,
"loss": 0.872,
"step": 242
},
{
"epoch": 22.0,
"eval_loss": 0.9398066997528076,
"eval_runtime": 9.6205,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 242
},
{
"epoch": 22.181818181818183,
"grad_norm": 0.6639079451560974,
"learning_rate": 9.930332551777709e-06,
"loss": 0.8602,
"step": 244
},
{
"epoch": 22.363636363636363,
"grad_norm": 0.8378622531890869,
"learning_rate": 9.926769179238467e-06,
"loss": 0.9034,
"step": 246
},
{
"epoch": 22.545454545454547,
"grad_norm": 0.8481977581977844,
"learning_rate": 9.923117606737347e-06,
"loss": 0.8493,
"step": 248
},
{
"epoch": 22.727272727272727,
"grad_norm": 0.8623536825180054,
"learning_rate": 9.919377899645497e-06,
"loss": 0.8883,
"step": 250
},
{
"epoch": 22.90909090909091,
"grad_norm": 0.8115867376327515,
"learning_rate": 9.915550124911866e-06,
"loss": 0.8824,
"step": 252
},
{
"epoch": 23.0,
"eval_loss": 0.9327845573425293,
"eval_runtime": 9.6274,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 253
},
{
"epoch": 23.09090909090909,
"grad_norm": 0.7263345122337341,
"learning_rate": 9.91163435106201e-06,
"loss": 0.8292,
"step": 254
},
{
"epoch": 23.272727272727273,
"grad_norm": 0.8343206644058228,
"learning_rate": 9.907630648196857e-06,
"loss": 0.9044,
"step": 256
},
{
"epoch": 23.454545454545453,
"grad_norm": 0.7706195116043091,
"learning_rate": 9.903539087991462e-06,
"loss": 0.8363,
"step": 258
},
{
"epoch": 23.636363636363637,
"grad_norm": 0.7953319549560547,
"learning_rate": 9.899359743693715e-06,
"loss": 0.8528,
"step": 260
},
{
"epoch": 23.818181818181817,
"grad_norm": 0.8244236707687378,
"learning_rate": 9.895092690123036e-06,
"loss": 0.8787,
"step": 262
},
{
"epoch": 24.0,
"grad_norm": 0.9236502647399902,
"learning_rate": 9.890738003669029e-06,
"loss": 0.8582,
"step": 264
},
{
"epoch": 24.0,
"eval_loss": 0.9282767176628113,
"eval_runtime": 9.6299,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 264
},
{
"epoch": 24.181818181818183,
"grad_norm": 0.7912314534187317,
"learning_rate": 9.886295762290125e-06,
"loss": 0.8291,
"step": 266
},
{
"epoch": 24.363636363636363,
"grad_norm": 0.8443674445152283,
"learning_rate": 9.881766045512176e-06,
"loss": 0.8599,
"step": 268
},
{
"epoch": 24.545454545454547,
"grad_norm": 0.9810454249382019,
"learning_rate": 9.877148934427037e-06,
"loss": 0.844,
"step": 270
},
{
"epoch": 24.727272727272727,
"grad_norm": 0.827754020690918,
"learning_rate": 9.872444511691108e-06,
"loss": 0.8447,
"step": 272
},
{
"epoch": 24.90909090909091,
"grad_norm": 0.9015039801597595,
"learning_rate": 9.867652861523866e-06,
"loss": 0.8763,
"step": 274
},
{
"epoch": 25.0,
"eval_loss": 0.9221246838569641,
"eval_runtime": 9.6209,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 275
},
{
"epoch": 25.09090909090909,
"grad_norm": 0.8841392993927002,
"learning_rate": 9.862774069706346e-06,
"loss": 0.8184,
"step": 276
},
{
"epoch": 25.272727272727273,
"grad_norm": 0.8848661780357361,
"learning_rate": 9.85780822357961e-06,
"loss": 0.8496,
"step": 278
},
{
"epoch": 25.454545454545453,
"grad_norm": 0.7846701145172119,
"learning_rate": 9.85275541204318e-06,
"loss": 0.8165,
"step": 280
},
{
"epoch": 25.636363636363637,
"grad_norm": 1.0641032457351685,
"learning_rate": 9.847615725553457e-06,
"loss": 0.8485,
"step": 282
},
{
"epoch": 25.818181818181817,
"grad_norm": 0.9293933510780334,
"learning_rate": 9.842389256122086e-06,
"loss": 0.829,
"step": 284
},
{
"epoch": 26.0,
"grad_norm": 0.8131471872329712,
"learning_rate": 9.83707609731432e-06,
"loss": 0.8199,
"step": 286
},
{
"epoch": 26.0,
"eval_loss": 0.917729377746582,
"eval_runtime": 9.6285,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 286
},
{
"epoch": 26.181818181818183,
"grad_norm": 0.8052296042442322,
"learning_rate": 9.831676344247343e-06,
"loss": 0.8356,
"step": 288
},
{
"epoch": 26.363636363636363,
"grad_norm": 0.9019801020622253,
"learning_rate": 9.826190093588564e-06,
"loss": 0.8427,
"step": 290
},
{
"epoch": 26.545454545454547,
"grad_norm": 0.8782145380973816,
"learning_rate": 9.820617443553889e-06,
"loss": 0.8175,
"step": 292
},
{
"epoch": 26.727272727272727,
"grad_norm": 1.0153470039367676,
"learning_rate": 9.814958493905962e-06,
"loss": 0.8059,
"step": 294
},
{
"epoch": 26.90909090909091,
"grad_norm": 0.9501240253448486,
"learning_rate": 9.80921334595238e-06,
"loss": 0.7986,
"step": 296
},
{
"epoch": 27.0,
"eval_loss": 0.9146122336387634,
"eval_runtime": 9.6275,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 297
},
{
"epoch": 27.09090909090909,
"grad_norm": 0.8864769339561462,
"learning_rate": 9.80338210254388e-06,
"loss": 0.7819,
"step": 298
},
{
"epoch": 27.272727272727273,
"grad_norm": 0.8944150805473328,
"learning_rate": 9.797464868072489e-06,
"loss": 0.7875,
"step": 300
},
{
"epoch": 27.454545454545453,
"grad_norm": 0.9001815915107727,
"learning_rate": 9.791461748469669e-06,
"loss": 0.8734,
"step": 302
},
{
"epoch": 27.636363636363637,
"grad_norm": 0.9148157835006714,
"learning_rate": 9.785372851204415e-06,
"loss": 0.8076,
"step": 304
},
{
"epoch": 27.818181818181817,
"grad_norm": 1.0519267320632935,
"learning_rate": 9.779198285281326e-06,
"loss": 0.7918,
"step": 306
},
{
"epoch": 28.0,
"grad_norm": 0.8914878964424133,
"learning_rate": 9.77293816123866e-06,
"loss": 0.7754,
"step": 308
},
{
"epoch": 28.0,
"eval_loss": 0.9141943454742432,
"eval_runtime": 9.6378,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 308
},
{
"epoch": 28.181818181818183,
"grad_norm": 0.9089798331260681,
"learning_rate": 9.766592591146353e-06,
"loss": 0.7795,
"step": 310
},
{
"epoch": 28.363636363636363,
"grad_norm": 1.054513931274414,
"learning_rate": 9.760161688604008e-06,
"loss": 0.7935,
"step": 312
},
{
"epoch": 28.545454545454547,
"grad_norm": 0.9446949362754822,
"learning_rate": 9.753645568738872e-06,
"loss": 0.797,
"step": 314
},
{
"epoch": 28.727272727272727,
"grad_norm": 0.9865514039993286,
"learning_rate": 9.747044348203766e-06,
"loss": 0.7824,
"step": 316
},
{
"epoch": 28.90909090909091,
"grad_norm": 1.0658093690872192,
"learning_rate": 9.740358145174999e-06,
"loss": 0.7893,
"step": 318
},
{
"epoch": 29.0,
"eval_loss": 0.9086329340934753,
"eval_runtime": 9.6391,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 319
},
{
"epoch": 29.09090909090909,
"grad_norm": 0.9681562185287476,
"learning_rate": 9.733587079350254e-06,
"loss": 0.7736,
"step": 320
},
{
"epoch": 29.272727272727273,
"grad_norm": 0.9801912903785706,
"learning_rate": 9.72673127194644e-06,
"loss": 0.8747,
"step": 322
},
{
"epoch": 29.454545454545453,
"grad_norm": 1.076412558555603,
"learning_rate": 9.719790845697534e-06,
"loss": 0.7737,
"step": 324
},
{
"epoch": 29.636363636363637,
"grad_norm": 1.0102134943008423,
"learning_rate": 9.71276592485237e-06,
"loss": 0.7632,
"step": 326
},
{
"epoch": 29.818181818181817,
"grad_norm": 1.1170337200164795,
"learning_rate": 9.705656635172418e-06,
"loss": 0.7417,
"step": 328
},
{
"epoch": 30.0,
"grad_norm": 1.0282917022705078,
"learning_rate": 9.698463103929542e-06,
"loss": 0.7312,
"step": 330
},
{
"epoch": 30.0,
"eval_loss": 0.9086909294128418,
"eval_runtime": 9.6293,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 330
},
{
"epoch": 30.181818181818183,
"grad_norm": 1.050240397453308,
"learning_rate": 9.69118545990371e-06,
"loss": 0.7415,
"step": 332
},
{
"epoch": 30.363636363636363,
"grad_norm": 1.090142011642456,
"learning_rate": 9.683823833380692e-06,
"loss": 0.7346,
"step": 334
},
{
"epoch": 30.545454545454547,
"grad_norm": 1.1241227388381958,
"learning_rate": 9.676378356149733e-06,
"loss": 0.7492,
"step": 336
},
{
"epoch": 30.727272727272727,
"grad_norm": 1.0831716060638428,
"learning_rate": 9.668849161501186e-06,
"loss": 0.7854,
"step": 338
},
{
"epoch": 30.90909090909091,
"grad_norm": 0.9916213154792786,
"learning_rate": 9.66123638422413e-06,
"loss": 0.7431,
"step": 340
},
{
"epoch": 31.0,
"eval_loss": 0.9049713611602783,
"eval_runtime": 9.6277,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 341
},
{
"epoch": 31.09090909090909,
"grad_norm": 0.9737184047698975,
"learning_rate": 9.653540160603956e-06,
"loss": 0.8,
"step": 342
},
{
"epoch": 31.272727272727273,
"grad_norm": 1.002685308456421,
"learning_rate": 9.64576062841993e-06,
"loss": 0.7644,
"step": 344
},
{
"epoch": 31.454545454545453,
"grad_norm": 1.2203502655029297,
"learning_rate": 9.637897926942716e-06,
"loss": 0.7335,
"step": 346
},
{
"epoch": 31.636363636363637,
"grad_norm": 1.2266589403152466,
"learning_rate": 9.629952196931902e-06,
"loss": 0.7724,
"step": 348
},
{
"epoch": 31.818181818181817,
"grad_norm": 1.3024228811264038,
"learning_rate": 9.621923580633462e-06,
"loss": 0.7164,
"step": 350
},
{
"epoch": 32.0,
"grad_norm": 1.1638389825820923,
"learning_rate": 9.613812221777212e-06,
"loss": 0.7103,
"step": 352
},
{
"epoch": 32.0,
"eval_loss": 0.9036614298820496,
"eval_runtime": 9.6223,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 352
},
{
"epoch": 32.18181818181818,
"grad_norm": 1.1456729173660278,
"learning_rate": 9.60561826557425e-06,
"loss": 0.7032,
"step": 354
},
{
"epoch": 32.36363636363637,
"grad_norm": 1.0486174821853638,
"learning_rate": 9.597341858714344e-06,
"loss": 0.7216,
"step": 356
},
{
"epoch": 32.54545454545455,
"grad_norm": 1.2181857824325562,
"learning_rate": 9.588983149363307e-06,
"loss": 0.7591,
"step": 358
},
{
"epoch": 32.72727272727273,
"grad_norm": 1.0791609287261963,
"learning_rate": 9.580542287160348e-06,
"loss": 0.7719,
"step": 360
},
{
"epoch": 32.90909090909091,
"grad_norm": 1.1914441585540771,
"learning_rate": 9.572019423215395e-06,
"loss": 0.6967,
"step": 362
},
{
"epoch": 33.0,
"eval_loss": 0.9091615080833435,
"eval_runtime": 9.619,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 363
},
{
"epoch": 33.09090909090909,
"grad_norm": 1.206595778465271,
"learning_rate": 9.563414710106382e-06,
"loss": 0.6713,
"step": 364
},
{
"epoch": 33.27272727272727,
"grad_norm": 1.2539595365524292,
"learning_rate": 9.554728301876525e-06,
"loss": 0.7171,
"step": 366
},
{
"epoch": 33.45454545454545,
"grad_norm": 1.135501503944397,
"learning_rate": 9.545960354031564e-06,
"loss": 0.7324,
"step": 368
},
{
"epoch": 33.63636363636363,
"grad_norm": 1.2056708335876465,
"learning_rate": 9.537111023536973e-06,
"loss": 0.7101,
"step": 370
},
{
"epoch": 33.81818181818182,
"grad_norm": 1.1787803173065186,
"learning_rate": 9.528180468815155e-06,
"loss": 0.7468,
"step": 372
},
{
"epoch": 34.0,
"grad_norm": 1.490050196647644,
"learning_rate": 9.519168849742603e-06,
"loss": 0.6502,
"step": 374
},
{
"epoch": 34.0,
"eval_loss": 0.9070794582366943,
"eval_runtime": 9.63,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 2.492,
"step": 374
},
{
"epoch": 34.18181818181818,
"grad_norm": 1.2995131015777588,
"learning_rate": 9.510076327647043e-06,
"loss": 0.6144,
"step": 376
},
{
"epoch": 34.36363636363637,
"grad_norm": 1.3441383838653564,
"learning_rate": 9.50090306530454e-06,
"loss": 0.7332,
"step": 378
},
{
"epoch": 34.54545454545455,
"grad_norm": 1.1878114938735962,
"learning_rate": 9.491649226936586e-06,
"loss": 0.7056,
"step": 380
},
{
"epoch": 34.72727272727273,
"grad_norm": 1.3715388774871826,
"learning_rate": 9.48231497820716e-06,
"loss": 0.67,
"step": 382
},
{
"epoch": 34.90909090909091,
"grad_norm": 1.466930866241455,
"learning_rate": 9.47290048621977e-06,
"loss": 0.6659,
"step": 384
},
{
"epoch": 35.0,
"eval_loss": 0.9019081592559814,
"eval_runtime": 9.6208,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 385
},
{
"epoch": 35.09090909090909,
"grad_norm": 1.4354052543640137,
"learning_rate": 9.46340591951444e-06,
"loss": 0.7589,
"step": 386
},
{
"epoch": 35.27272727272727,
"grad_norm": 1.2188067436218262,
"learning_rate": 9.453831448064717e-06,
"loss": 0.6215,
"step": 388
},
{
"epoch": 35.45454545454545,
"grad_norm": 1.3039608001708984,
"learning_rate": 9.444177243274619e-06,
"loss": 0.6502,
"step": 390
},
{
"epoch": 35.63636363636363,
"grad_norm": 1.3067864179611206,
"learning_rate": 9.434443477975557e-06,
"loss": 0.6884,
"step": 392
},
{
"epoch": 35.81818181818182,
"grad_norm": 1.33348548412323,
"learning_rate": 9.42463032642326e-06,
"loss": 0.6852,
"step": 394
},
{
"epoch": 36.0,
"grad_norm": 1.5278242826461792,
"learning_rate": 9.414737964294636e-06,
"loss": 0.7003,
"step": 396
},
{
"epoch": 36.0,
"eval_loss": 0.9015458226203918,
"eval_runtime": 9.6251,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 396
},
{
"epoch": 36.18181818181818,
"grad_norm": 1.3526606559753418,
"learning_rate": 9.40476656868464e-06,
"loss": 0.6329,
"step": 398
},
{
"epoch": 36.36363636363637,
"grad_norm": 1.6108198165893555,
"learning_rate": 9.394716318103098e-06,
"loss": 0.6626,
"step": 400
},
{
"epoch": 36.54545454545455,
"grad_norm": 1.5193760395050049,
"learning_rate": 9.384587392471516e-06,
"loss": 0.6816,
"step": 402
},
{
"epoch": 36.72727272727273,
"grad_norm": 1.2977081537246704,
"learning_rate": 9.37437997311985e-06,
"loss": 0.6422,
"step": 404
},
{
"epoch": 36.90909090909091,
"grad_norm": 1.4797639846801758,
"learning_rate": 9.364094242783272e-06,
"loss": 0.629,
"step": 406
},
{
"epoch": 37.0,
"eval_loss": 0.9018394351005554,
"eval_runtime": 9.6212,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 407
},
{
"epoch": 37.09090909090909,
"grad_norm": 1.2966375350952148,
"learning_rate": 9.353730385598887e-06,
"loss": 0.6654,
"step": 408
},
{
"epoch": 37.27272727272727,
"grad_norm": 1.3821908235549927,
"learning_rate": 9.343288587102444e-06,
"loss": 0.6054,
"step": 410
},
{
"epoch": 37.45454545454545,
"grad_norm": 1.3437285423278809,
"learning_rate": 9.332769034225012e-06,
"loss": 0.6449,
"step": 412
},
{
"epoch": 37.63636363636363,
"grad_norm": 1.4815727472305298,
"learning_rate": 9.322171915289635e-06,
"loss": 0.658,
"step": 414
},
{
"epoch": 37.81818181818182,
"grad_norm": 1.273409366607666,
"learning_rate": 9.311497420007955e-06,
"loss": 0.6629,
"step": 416
},
{
"epoch": 38.0,
"grad_norm": 1.4128069877624512,
"learning_rate": 9.30074573947683e-06,
"loss": 0.6299,
"step": 418
},
{
"epoch": 38.0,
"eval_loss": 0.9080850481987,
"eval_runtime": 9.6337,
"eval_samples_per_second": 2.491,
"eval_steps_per_second": 2.491,
"step": 418
},
{
"epoch": 38.18181818181818,
"grad_norm": 1.467628836631775,
"learning_rate": 9.289917066174887e-06,
"loss": 0.6516,
"step": 420
},
{
"epoch": 38.36363636363637,
"grad_norm": 1.4847270250320435,
"learning_rate": 9.279011593959107e-06,
"loss": 0.6093,
"step": 422
},
{
"epoch": 38.54545454545455,
"grad_norm": 1.7236661911010742,
"learning_rate": 9.268029518061335e-06,
"loss": 0.6272,
"step": 424
},
{
"epoch": 38.72727272727273,
"grad_norm": 1.563481330871582,
"learning_rate": 9.256971035084786e-06,
"loss": 0.6293,
"step": 426
},
{
"epoch": 38.90909090909091,
"grad_norm": 1.4896105527877808,
"learning_rate": 9.245836343000534e-06,
"loss": 0.6259,
"step": 428
},
{
"epoch": 39.0,
"eval_loss": 0.9161927103996277,
"eval_runtime": 9.6218,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 429
},
{
"epoch": 39.09090909090909,
"grad_norm": 1.36057448387146,
"learning_rate": 9.234625641143962e-06,
"loss": 0.5801,
"step": 430
},
{
"epoch": 39.27272727272727,
"grad_norm": 1.5864324569702148,
"learning_rate": 9.223339130211194e-06,
"loss": 0.5785,
"step": 432
},
{
"epoch": 39.45454545454545,
"grad_norm": 1.609424352645874,
"learning_rate": 9.211977012255497e-06,
"loss": 0.5777,
"step": 434
},
{
"epoch": 39.63636363636363,
"grad_norm": 1.4554574489593506,
"learning_rate": 9.200539490683682e-06,
"loss": 0.5927,
"step": 436
},
{
"epoch": 39.81818181818182,
"grad_norm": 1.5687090158462524,
"learning_rate": 9.189026770252437e-06,
"loss": 0.6421,
"step": 438
},
{
"epoch": 40.0,
"grad_norm": 1.4782568216323853,
"learning_rate": 9.177439057064684e-06,
"loss": 0.6262,
"step": 440
},
{
"epoch": 40.0,
"eval_loss": 0.9212110638618469,
"eval_runtime": 9.6258,
"eval_samples_per_second": 2.493,
"eval_steps_per_second": 2.493,
"step": 440
},
{
"epoch": 40.18181818181818,
"grad_norm": 1.4512465000152588,
"learning_rate": 9.16577655856587e-06,
"loss": 0.5827,
"step": 442
},
{
"epoch": 40.36363636363637,
"grad_norm": 2.0493810176849365,
"learning_rate": 9.154039483540273e-06,
"loss": 0.5539,
"step": 444
},
{
"epoch": 40.54545454545455,
"grad_norm": 1.6594229936599731,
"learning_rate": 9.142228042107248e-06,
"loss": 0.5907,
"step": 446
},
{
"epoch": 40.72727272727273,
"grad_norm": 1.4589492082595825,
"learning_rate": 9.130342445717474e-06,
"loss": 0.6161,
"step": 448
},
{
"epoch": 40.90909090909091,
"grad_norm": 1.7103596925735474,
"learning_rate": 9.118382907149164e-06,
"loss": 0.5707,
"step": 450
},
{
"epoch": 41.0,
"eval_loss": 0.9212350845336914,
"eval_runtime": 9.6173,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 2.495,
"step": 451
},
{
"epoch": 41.09090909090909,
"grad_norm": 1.3275471925735474,
"learning_rate": 9.10634964050426e-06,
"loss": 0.6536,
"step": 452
},
{
"epoch": 41.27272727272727,
"grad_norm": 1.6759672164916992,
"learning_rate": 9.094242861204598e-06,
"loss": 0.5257,
"step": 454
},
{
"epoch": 41.45454545454545,
"grad_norm": 1.9791761636734009,
"learning_rate": 9.08206278598805e-06,
"loss": 0.5604,
"step": 456
},
{
"epoch": 41.63636363636363,
"grad_norm": 1.740043044090271,
"learning_rate": 9.069809632904647e-06,
"loss": 0.544,
"step": 458
},
{
"epoch": 41.81818181818182,
"grad_norm": 1.7312424182891846,
"learning_rate": 9.057483621312671e-06,
"loss": 0.5987,
"step": 460
},
{
"epoch": 42.0,
"grad_norm": 1.5688552856445312,
"learning_rate": 9.045084971874738e-06,
"loss": 0.5749,
"step": 462
},
{
"epoch": 42.0,
"eval_loss": 0.927391529083252,
"eval_runtime": 9.6244,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 462
},
{
"epoch": 42.18181818181818,
"grad_norm": 1.69509756565094,
"learning_rate": 9.032613906553833e-06,
"loss": 0.6091,
"step": 464
},
{
"epoch": 42.36363636363637,
"grad_norm": 2.032390832901001,
"learning_rate": 9.020070648609347e-06,
"loss": 0.5022,
"step": 466
},
{
"epoch": 42.54545454545455,
"grad_norm": 1.9728387594223022,
"learning_rate": 9.007455422593077e-06,
"loss": 0.5395,
"step": 468
},
{
"epoch": 42.72727272727273,
"grad_norm": 1.7745451927185059,
"learning_rate": 8.994768454345207e-06,
"loss": 0.5662,
"step": 470
},
{
"epoch": 42.90909090909091,
"grad_norm": 1.9252541065216064,
"learning_rate": 8.982009970990262e-06,
"loss": 0.533,
"step": 472
},
{
"epoch": 43.0,
"eval_loss": 0.9368504881858826,
"eval_runtime": 9.6216,
"eval_samples_per_second": 2.494,
"eval_steps_per_second": 2.494,
"step": 473
},
{
"epoch": 43.0,
"step": 473,
"total_flos": 5.808369757165978e+16,
"train_loss": 1.1408403902678863,
"train_runtime": 4431.3714,
"train_samples_per_second": 2.979,
"train_steps_per_second": 0.372
}
],
"logging_steps": 2,
"max_steps": 1650,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.808369757165978e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}