{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0099251907266127,
"eval_steps": 11,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009480779201540626,
"grad_norm": 0.38671875,
"learning_rate": 2e-05,
"loss": 0.4197,
"step": 1
},
{
"epoch": 0.009480779201540626,
"eval_loss": 0.41279804706573486,
"eval_runtime": 34.6855,
"eval_samples_per_second": 20.498,
"eval_steps_per_second": 20.498,
"step": 1
},
{
"epoch": 0.018961558403081252,
"grad_norm": 0.359375,
"learning_rate": 4e-05,
"loss": 0.3966,
"step": 2
},
{
"epoch": 0.02844233760462188,
"grad_norm": 0.388671875,
"learning_rate": 6e-05,
"loss": 0.4257,
"step": 3
},
{
"epoch": 0.037923116806162505,
"grad_norm": 0.408203125,
"learning_rate": 8e-05,
"loss": 0.4026,
"step": 4
},
{
"epoch": 0.04740389600770313,
"grad_norm": 0.384765625,
"learning_rate": 0.0001,
"loss": 0.3229,
"step": 5
},
{
"epoch": 0.05688467520924376,
"grad_norm": 0.251953125,
"learning_rate": 0.00012,
"loss": 0.2408,
"step": 6
},
{
"epoch": 0.06636545441078438,
"grad_norm": 0.203125,
"learning_rate": 0.00014,
"loss": 0.1709,
"step": 7
},
{
"epoch": 0.07584623361232501,
"grad_norm": 0.150390625,
"learning_rate": 0.00016,
"loss": 0.1328,
"step": 8
},
{
"epoch": 0.08532701281386564,
"grad_norm": 0.1201171875,
"learning_rate": 0.00018,
"loss": 0.0993,
"step": 9
},
{
"epoch": 0.09480779201540626,
"grad_norm": 0.10888671875,
"learning_rate": 0.0002,
"loss": 0.0914,
"step": 10
},
{
"epoch": 0.10428857121694689,
"grad_norm": 0.119140625,
"learning_rate": 0.00019999469523400122,
"loss": 0.0885,
"step": 11
},
{
"epoch": 0.10428857121694689,
"eval_loss": 0.07809103280305862,
"eval_runtime": 34.2892,
"eval_samples_per_second": 20.735,
"eval_steps_per_second": 20.735,
"step": 11
},
{
"epoch": 0.11376935041848751,
"grad_norm": 0.09228515625,
"learning_rate": 0.00019997878149881574,
"loss": 0.0761,
"step": 12
},
{
"epoch": 0.12325012962002814,
"grad_norm": 0.0703125,
"learning_rate": 0.0001999522604828164,
"loss": 0.0683,
"step": 13
},
{
"epoch": 0.13273090882156877,
"grad_norm": 0.059326171875,
"learning_rate": 0.00019991513499975882,
"loss": 0.0696,
"step": 14
},
{
"epoch": 0.1422116880231094,
"grad_norm": 0.044921875,
"learning_rate": 0.00019986740898848306,
"loss": 0.0615,
"step": 15
},
{
"epoch": 0.15169246722465002,
"grad_norm": 0.054443359375,
"learning_rate": 0.00019980908751249555,
"loss": 0.0625,
"step": 16
},
{
"epoch": 0.16117324642619066,
"grad_norm": 0.04736328125,
"learning_rate": 0.00019974017675943192,
"loss": 0.0598,
"step": 17
},
{
"epoch": 0.17065402562773127,
"grad_norm": 0.039306640625,
"learning_rate": 0.0001996606840404006,
"loss": 0.0573,
"step": 18
},
{
"epoch": 0.1801348048292719,
"grad_norm": 0.036376953125,
"learning_rate": 0.00019957061778920701,
"loss": 0.0482,
"step": 19
},
{
"epoch": 0.18961558403081252,
"grad_norm": 0.039306640625,
"learning_rate": 0.0001994699875614589,
"loss": 0.0555,
"step": 20
},
{
"epoch": 0.19909636323235316,
"grad_norm": 0.0380859375,
"learning_rate": 0.00019935880403355253,
"loss": 0.0495,
"step": 21
},
{
"epoch": 0.20857714243389378,
"grad_norm": 0.038818359375,
"learning_rate": 0.00019923707900153982,
"loss": 0.0482,
"step": 22
},
{
"epoch": 0.20857714243389378,
"eval_loss": 0.05171125754714012,
"eval_runtime": 34.4113,
"eval_samples_per_second": 20.662,
"eval_steps_per_second": 20.662,
"step": 22
},
{
"epoch": 0.21805792163543442,
"grad_norm": 0.038818359375,
"learning_rate": 0.00019910482537987702,
"loss": 0.0529,
"step": 23
},
{
"epoch": 0.22753870083697503,
"grad_norm": 0.031494140625,
"learning_rate": 0.0001989620572000544,
"loss": 0.0495,
"step": 24
},
{
"epoch": 0.23701948003851567,
"grad_norm": 0.0380859375,
"learning_rate": 0.00019880878960910772,
"loss": 0.059,
"step": 25
},
{
"epoch": 0.24650025924005628,
"grad_norm": 0.032470703125,
"learning_rate": 0.00019864503886801106,
"loss": 0.0545,
"step": 26
},
{
"epoch": 0.2559810384415969,
"grad_norm": 0.029296875,
"learning_rate": 0.00019847082234995171,
"loss": 0.0417,
"step": 27
},
{
"epoch": 0.26546181764313753,
"grad_norm": 0.037109375,
"learning_rate": 0.00019828615853848688,
"loss": 0.0459,
"step": 28
},
{
"epoch": 0.27494259684467814,
"grad_norm": 0.027099609375,
"learning_rate": 0.00019809106702558277,
"loss": 0.0412,
"step": 29
},
{
"epoch": 0.2844233760462188,
"grad_norm": 0.032470703125,
"learning_rate": 0.0001978855685095358,
"loss": 0.0403,
"step": 30
},
{
"epoch": 0.2939041552477594,
"grad_norm": 0.0289306640625,
"learning_rate": 0.00019766968479277683,
"loss": 0.0463,
"step": 31
},
{
"epoch": 0.30338493444930004,
"grad_norm": 0.0301513671875,
"learning_rate": 0.00019744343877955788,
"loss": 0.041,
"step": 32
},
{
"epoch": 0.3128657136508407,
"grad_norm": 0.033447265625,
"learning_rate": 0.00019720685447352209,
"loss": 0.045,
"step": 33
},
{
"epoch": 0.3128657136508407,
"eval_loss": 0.04294149950146675,
"eval_runtime": 34.2719,
"eval_samples_per_second": 20.746,
"eval_steps_per_second": 20.746,
"step": 33
},
{
"epoch": 0.3223464928523813,
"grad_norm": 0.028076171875,
"learning_rate": 0.0001969599569751571,
"loss": 0.0369,
"step": 34
},
{
"epoch": 0.33182727205392193,
"grad_norm": 0.029296875,
"learning_rate": 0.00019670277247913205,
"loss": 0.0465,
"step": 35
},
{
"epoch": 0.34130805125546254,
"grad_norm": 0.032958984375,
"learning_rate": 0.0001964353282715183,
"loss": 0.0382,
"step": 36
},
{
"epoch": 0.3507888304570032,
"grad_norm": 0.0299072265625,
"learning_rate": 0.00019615765272689461,
"loss": 0.0442,
"step": 37
},
{
"epoch": 0.3602696096585438,
"grad_norm": 0.0322265625,
"learning_rate": 0.00019586977530533677,
"loss": 0.0481,
"step": 38
},
{
"epoch": 0.36975038886008443,
"grad_norm": 0.028076171875,
"learning_rate": 0.00019557172654929196,
"loss": 0.0423,
"step": 39
},
{
"epoch": 0.37923116806162505,
"grad_norm": 0.0299072265625,
"learning_rate": 0.00019526353808033825,
"loss": 0.0384,
"step": 40
},
{
"epoch": 0.3887119472631657,
"grad_norm": 0.027587890625,
"learning_rate": 0.00019494524259582992,
"loss": 0.0404,
"step": 41
},
{
"epoch": 0.3981927264647063,
"grad_norm": 0.0286865234375,
"learning_rate": 0.00019461687386542826,
"loss": 0.0394,
"step": 42
},
{
"epoch": 0.40767350566624694,
"grad_norm": 0.032958984375,
"learning_rate": 0.00019427846672751873,
"loss": 0.0351,
"step": 43
},
{
"epoch": 0.41715428486778755,
"grad_norm": 0.02783203125,
"learning_rate": 0.00019393005708551498,
"loss": 0.0425,
"step": 44
},
{
"epoch": 0.41715428486778755,
"eval_loss": 0.03996235132217407,
"eval_runtime": 34.3007,
"eval_samples_per_second": 20.728,
"eval_steps_per_second": 20.728,
"step": 44
},
{
"epoch": 0.4266350640693282,
"grad_norm": 0.030517578125,
"learning_rate": 0.00019357168190404936,
"loss": 0.0365,
"step": 45
},
{
"epoch": 0.43611584327086883,
"grad_norm": 0.032958984375,
"learning_rate": 0.00019320337920505153,
"loss": 0.0438,
"step": 46
},
{
"epoch": 0.44559662247240944,
"grad_norm": 0.0306396484375,
"learning_rate": 0.00019282518806371414,
"loss": 0.044,
"step": 47
},
{
"epoch": 0.45507740167395005,
"grad_norm": 0.037841796875,
"learning_rate": 0.0001924371486043473,
"loss": 0.0496,
"step": 48
},
{
"epoch": 0.4645581808754907,
"grad_norm": 0.0284423828125,
"learning_rate": 0.0001920393019961217,
"loss": 0.0369,
"step": 49
},
{
"epoch": 0.47403896007703133,
"grad_norm": 0.0341796875,
"learning_rate": 0.0001916316904487005,
"loss": 0.0399,
"step": 50
},
{
"epoch": 0.48351973927857195,
"grad_norm": 0.0255126953125,
"learning_rate": 0.00019121435720776122,
"loss": 0.0363,
"step": 51
},
{
"epoch": 0.49300051848011256,
"grad_norm": 0.0311279296875,
"learning_rate": 0.0001907873465504076,
"loss": 0.0384,
"step": 52
},
{
"epoch": 0.5024812976816532,
"grad_norm": 0.03271484375,
"learning_rate": 0.00019035070378047204,
"loss": 0.0371,
"step": 53
},
{
"epoch": 0.5119620768831938,
"grad_norm": 0.0263671875,
"learning_rate": 0.00018990447522370884,
"loss": 0.0351,
"step": 54
},
{
"epoch": 0.5214428560847345,
"grad_norm": 0.033203125,
"learning_rate": 0.00018944870822287956,
"loss": 0.0411,
"step": 55
},
{
"epoch": 0.5214428560847345,
"eval_loss": 0.03788134828209877,
"eval_runtime": 34.6048,
"eval_samples_per_second": 20.546,
"eval_steps_per_second": 20.546,
"step": 55
},
{
"epoch": 0.5309236352862751,
"grad_norm": 0.0272216796875,
"learning_rate": 0.00018898345113272998,
"loss": 0.0337,
"step": 56
},
{
"epoch": 0.5404044144878157,
"grad_norm": 0.0245361328125,
"learning_rate": 0.00018850875331485995,
"loss": 0.0309,
"step": 57
},
{
"epoch": 0.5498851936893563,
"grad_norm": 0.027587890625,
"learning_rate": 0.00018802466513248632,
"loss": 0.0329,
"step": 58
},
{
"epoch": 0.559365972890897,
"grad_norm": 0.0267333984375,
"learning_rate": 0.00018753123794509974,
"loss": 0.0365,
"step": 59
},
{
"epoch": 0.5688467520924376,
"grad_norm": 0.02734375,
"learning_rate": 0.00018702852410301554,
"loss": 0.0373,
"step": 60
},
{
"epoch": 0.5783275312939782,
"grad_norm": 0.025390625,
"learning_rate": 0.0001865165769418196,
"loss": 0.0375,
"step": 61
},
{
"epoch": 0.5878083104955188,
"grad_norm": 0.02978515625,
"learning_rate": 0.00018599545077670985,
"loss": 0.0376,
"step": 62
},
{
"epoch": 0.5972890896970595,
"grad_norm": 0.0242919921875,
"learning_rate": 0.0001854652008967335,
"loss": 0.0337,
"step": 63
},
{
"epoch": 0.6067698688986001,
"grad_norm": 0.02880859375,
"learning_rate": 0.00018492588355892124,
"loss": 0.0379,
"step": 64
},
{
"epoch": 0.6162506481001407,
"grad_norm": 0.0322265625,
"learning_rate": 0.00018437755598231856,
"loss": 0.0392,
"step": 65
},
{
"epoch": 0.6257314273016814,
"grad_norm": 0.0269775390625,
"learning_rate": 0.00018382027634191524,
"loss": 0.0348,
"step": 66
},
{
"epoch": 0.6257314273016814,
"eval_loss": 0.03594927862286568,
"eval_runtime": 34.2756,
"eval_samples_per_second": 20.744,
"eval_steps_per_second": 20.744,
"step": 66
},
{
"epoch": 0.635212206503222,
"grad_norm": 0.03515625,
"learning_rate": 0.00018325410376247294,
"loss": 0.0379,
"step": 67
},
{
"epoch": 0.6446929857047626,
"grad_norm": 0.0301513671875,
"learning_rate": 0.0001826790983122527,
"loss": 0.0317,
"step": 68
},
{
"epoch": 0.6541737649063032,
"grad_norm": 0.0296630859375,
"learning_rate": 0.00018209532099664174,
"loss": 0.0366,
"step": 69
},
{
"epoch": 0.6636545441078439,
"grad_norm": 0.0272216796875,
"learning_rate": 0.00018150283375168114,
"loss": 0.0345,
"step": 70
},
{
"epoch": 0.6731353233093845,
"grad_norm": 0.03125,
"learning_rate": 0.00018090169943749476,
"loss": 0.0368,
"step": 71
},
{
"epoch": 0.6826161025109251,
"grad_norm": 0.032470703125,
"learning_rate": 0.00018029198183161998,
"loss": 0.0455,
"step": 72
},
{
"epoch": 0.6920968817124658,
"grad_norm": 0.029296875,
"learning_rate": 0.00017967374562224132,
"loss": 0.0362,
"step": 73
},
{
"epoch": 0.7015776609140064,
"grad_norm": 0.0311279296875,
"learning_rate": 0.00017904705640132718,
"loss": 0.0388,
"step": 74
},
{
"epoch": 0.711058440115547,
"grad_norm": 0.02685546875,
"learning_rate": 0.00017841198065767107,
"loss": 0.0288,
"step": 75
},
{
"epoch": 0.7205392193170876,
"grad_norm": 0.0302734375,
"learning_rate": 0.00017776858576983712,
"loss": 0.0349,
"step": 76
},
{
"epoch": 0.7300199985186282,
"grad_norm": 0.026611328125,
"learning_rate": 0.0001771169399990119,
"loss": 0.0288,
"step": 77
},
{
"epoch": 0.7300199985186282,
"eval_loss": 0.03419337794184685,
"eval_runtime": 34.3669,
"eval_samples_per_second": 20.688,
"eval_steps_per_second": 20.688,
"step": 77
},
{
"epoch": 0.7395007777201689,
"grad_norm": 0.0240478515625,
"learning_rate": 0.00017645711248176195,
"loss": 0.0285,
"step": 78
},
{
"epoch": 0.7489815569217095,
"grad_norm": 0.025390625,
"learning_rate": 0.00017578917322269886,
"loss": 0.0338,
"step": 79
},
{
"epoch": 0.7584623361232501,
"grad_norm": 0.0281982421875,
"learning_rate": 0.00017511319308705198,
"loss": 0.0316,
"step": 80
},
{
"epoch": 0.7679431153247908,
"grad_norm": 0.025146484375,
"learning_rate": 0.0001744292437931502,
"loss": 0.0306,
"step": 81
},
{
"epoch": 0.7774238945263314,
"grad_norm": 0.027099609375,
"learning_rate": 0.00017373739790481262,
"loss": 0.0335,
"step": 82
},
{
"epoch": 0.786904673727872,
"grad_norm": 0.0242919921875,
"learning_rate": 0.00017303772882365016,
"loss": 0.0271,
"step": 83
},
{
"epoch": 0.7963854529294127,
"grad_norm": 0.028564453125,
"learning_rate": 0.00017233031078127788,
"loss": 0.0334,
"step": 84
},
{
"epoch": 0.8058662321309532,
"grad_norm": 0.0299072265625,
"learning_rate": 0.00017161521883143934,
"loss": 0.0403,
"step": 85
},
{
"epoch": 0.8153470113324939,
"grad_norm": 0.0294189453125,
"learning_rate": 0.00017089252884204377,
"loss": 0.0368,
"step": 86
},
{
"epoch": 0.8248277905340345,
"grad_norm": 0.03369140625,
"learning_rate": 0.0001701623174871168,
"loss": 0.0335,
"step": 87
},
{
"epoch": 0.8343085697355751,
"grad_norm": 0.0277099609375,
"learning_rate": 0.0001694246622386658,
"loss": 0.0339,
"step": 88
},
{
"epoch": 0.8343085697355751,
"eval_loss": 0.033091045916080475,
"eval_runtime": 34.4475,
"eval_samples_per_second": 20.64,
"eval_steps_per_second": 20.64,
"step": 88
},
{
"epoch": 0.8437893489371158,
"grad_norm": 0.02392578125,
"learning_rate": 0.00016867964135846043,
"loss": 0.0241,
"step": 89
},
{
"epoch": 0.8532701281386564,
"grad_norm": 0.0361328125,
"learning_rate": 0.00016792733388972932,
"loss": 0.0387,
"step": 90
},
{
"epoch": 0.862750907340197,
"grad_norm": 0.0260009765625,
"learning_rate": 0.0001671678196487741,
"loss": 0.0346,
"step": 91
},
{
"epoch": 0.8722316865417377,
"grad_norm": 0.0284423828125,
"learning_rate": 0.00016640117921650117,
"loss": 0.0378,
"step": 92
},
{
"epoch": 0.8817124657432783,
"grad_norm": 0.0279541015625,
"learning_rate": 0.00016562749392987254,
"loss": 0.0294,
"step": 93
},
{
"epoch": 0.8911932449448189,
"grad_norm": 0.0272216796875,
"learning_rate": 0.0001648468458732762,
"loss": 0.0308,
"step": 94
},
{
"epoch": 0.9006740241463596,
"grad_norm": 0.0242919921875,
"learning_rate": 0.00016405931786981755,
"loss": 0.0326,
"step": 95
},
{
"epoch": 0.9101548033479001,
"grad_norm": 0.0262451171875,
"learning_rate": 0.00016326499347253207,
"loss": 0.032,
"step": 96
},
{
"epoch": 0.9196355825494408,
"grad_norm": 0.0264892578125,
"learning_rate": 0.00016246395695552085,
"loss": 0.0321,
"step": 97
},
{
"epoch": 0.9291163617509814,
"grad_norm": 0.0244140625,
"learning_rate": 0.00016165629330500952,
"loss": 0.0301,
"step": 98
},
{
"epoch": 0.938597140952522,
"grad_norm": 0.029052734375,
"learning_rate": 0.0001608420882103315,
"loss": 0.0297,
"step": 99
},
{
"epoch": 0.938597140952522,
"eval_loss": 0.03182319179177284,
"eval_runtime": 34.2304,
"eval_samples_per_second": 20.771,
"eval_steps_per_second": 20.771,
"step": 99
},
{
"epoch": 0.9480779201540627,
"grad_norm": 0.0302734375,
"learning_rate": 0.00016002142805483685,
"loss": 0.0327,
"step": 100
},
{
"epoch": 0.9575586993556033,
"grad_norm": 0.0260009765625,
"learning_rate": 0.0001591943999067273,
"loss": 0.0285,
"step": 101
},
{
"epoch": 0.9670394785571439,
"grad_norm": 0.032470703125,
"learning_rate": 0.00015836109150981886,
"loss": 0.0308,
"step": 102
},
{
"epoch": 0.9765202577586846,
"grad_norm": 0.0283203125,
"learning_rate": 0.00015752159127423263,
"loss": 0.0301,
"step": 103
},
{
"epoch": 0.9860010369602251,
"grad_norm": 0.027587890625,
"learning_rate": 0.0001566759882670146,
"loss": 0.0287,
"step": 104
},
{
"epoch": 0.9954818161617658,
"grad_norm": 0.025634765625,
"learning_rate": 0.00015582437220268647,
"loss": 0.031,
"step": 105
},
{
"epoch": 1.0049625953633063,
"grad_norm": 0.02490234375,
"learning_rate": 0.0001549668334337271,
"loss": 0.0275,
"step": 106
},
{
"epoch": 1.0144433745648471,
"grad_norm": 0.03369140625,
"learning_rate": 0.0001541034629409865,
"loss": 0.0302,
"step": 107
},
{
"epoch": 1.0239241537663877,
"grad_norm": 0.027099609375,
"learning_rate": 0.00015323435232403337,
"loss": 0.0308,
"step": 108
},
{
"epoch": 1.0334049329679282,
"grad_norm": 0.0240478515625,
"learning_rate": 0.00015235959379143678,
"loss": 0.0247,
"step": 109
},
{
"epoch": 1.042885712169469,
"grad_norm": 0.0255126953125,
"learning_rate": 0.0001514792801509831,
"loss": 0.0281,
"step": 110
},
{
"epoch": 1.042885712169469,
"eval_loss": 0.03116695210337639,
"eval_runtime": 34.429,
"eval_samples_per_second": 20.651,
"eval_steps_per_second": 20.651,
"step": 110
},
{
"epoch": 1.0523664913710096,
"grad_norm": 0.0277099609375,
"learning_rate": 0.00015059350479982965,
"loss": 0.0243,
"step": 111
},
{
"epoch": 1.0618472705725501,
"grad_norm": 0.028564453125,
"learning_rate": 0.0001497023617145958,
"loss": 0.0274,
"step": 112
},
{
"epoch": 1.071328049774091,
"grad_norm": 0.0301513671875,
"learning_rate": 0.0001488059454413923,
"loss": 0.0265,
"step": 113
},
{
"epoch": 1.0808088289756315,
"grad_norm": 0.0296630859375,
"learning_rate": 0.00014790435108579048,
"loss": 0.0287,
"step": 114
},
{
"epoch": 1.090289608177172,
"grad_norm": 0.025390625,
"learning_rate": 0.000146997674302732,
"loss": 0.0255,
"step": 115
},
{
"epoch": 1.0997703873787126,
"grad_norm": 0.02685546875,
"learning_rate": 0.00014608601128638027,
"loss": 0.0258,
"step": 116
},
{
"epoch": 1.1092511665802534,
"grad_norm": 0.0277099609375,
"learning_rate": 0.00014516945875991472,
"loss": 0.0326,
"step": 117
},
{
"epoch": 1.118731945781794,
"grad_norm": 0.0260009765625,
"learning_rate": 0.00014424811396526892,
"loss": 0.0241,
"step": 118
},
{
"epoch": 1.1282127249833347,
"grad_norm": 0.0284423828125,
"learning_rate": 0.00014332207465281364,
"loss": 0.0276,
"step": 119
},
{
"epoch": 1.1376935041848752,
"grad_norm": 0.0306396484375,
"learning_rate": 0.0001423914390709861,
"loss": 0.0283,
"step": 120
},
{
"epoch": 1.1471742833864158,
"grad_norm": 0.0274658203125,
"learning_rate": 0.00014145630595586607,
"loss": 0.027,
"step": 121
},
{
"epoch": 1.1471742833864158,
"eval_loss": 0.030295666307210922,
"eval_runtime": 34.4847,
"eval_samples_per_second": 20.618,
"eval_steps_per_second": 20.618,
"step": 121
},
{
"epoch": 1.1566550625879564,
"grad_norm": 0.0244140625,
"learning_rate": 0.00014051677452070065,
"loss": 0.0248,
"step": 122
},
{
"epoch": 1.1661358417894971,
"grad_norm": 0.023193359375,
"learning_rate": 0.00013957294444537808,
"loss": 0.0226,
"step": 123
},
{
"epoch": 1.1756166209910377,
"grad_norm": 0.02880859375,
"learning_rate": 0.0001386249158658522,
"loss": 0.0332,
"step": 124
},
{
"epoch": 1.1850974001925783,
"grad_norm": 0.028076171875,
"learning_rate": 0.00013767278936351854,
"loss": 0.0292,
"step": 125
},
{
"epoch": 1.194578179394119,
"grad_norm": 0.0264892578125,
"learning_rate": 0.00013671666595454295,
"loss": 0.0224,
"step": 126
},
{
"epoch": 1.2040589585956596,
"grad_norm": 0.027099609375,
"learning_rate": 0.00013575664707914448,
"loss": 0.0234,
"step": 127
},
{
"epoch": 1.2135397377972001,
"grad_norm": 0.03125,
"learning_rate": 0.0001347928345908329,
"loss": 0.029,
"step": 128
},
{
"epoch": 1.223020516998741,
"grad_norm": 0.0269775390625,
"learning_rate": 0.00013382533074560255,
"loss": 0.0249,
"step": 129
},
{
"epoch": 1.2325012962002815,
"grad_norm": 0.0291748046875,
"learning_rate": 0.0001328542381910835,
"loss": 0.026,
"step": 130
},
{
"epoch": 1.241982075401822,
"grad_norm": 0.0291748046875,
"learning_rate": 0.00013187965995565098,
"loss": 0.0252,
"step": 131
},
{
"epoch": 1.2514628546033628,
"grad_norm": 0.0279541015625,
"learning_rate": 0.00013090169943749476,
"loss": 0.023,
"step": 132
},
{
"epoch": 1.2514628546033628,
"eval_loss": 0.029845552518963814,
"eval_runtime": 34.1739,
"eval_samples_per_second": 20.805,
"eval_steps_per_second": 20.805,
"step": 132
},
{
"epoch": 1.2609436338049034,
"grad_norm": 0.02734375,
"learning_rate": 0.00012992046039364893,
"loss": 0.0259,
"step": 133
},
{
"epoch": 1.270424413006444,
"grad_norm": 0.0284423828125,
"learning_rate": 0.0001289360469289838,
"loss": 0.0244,
"step": 134
},
{
"epoch": 1.2799051922079845,
"grad_norm": 0.0286865234375,
"learning_rate": 0.00012794856348516095,
"loss": 0.0237,
"step": 135
},
{
"epoch": 1.2893859714095253,
"grad_norm": 0.0289306640625,
"learning_rate": 0.00012695811482955227,
"loss": 0.0248,
"step": 136
},
{
"epoch": 1.2988667506110658,
"grad_norm": 0.028076171875,
"learning_rate": 0.00012596480604412484,
"loss": 0.0283,
"step": 137
},
{
"epoch": 1.3083475298126066,
"grad_norm": 0.0260009765625,
"learning_rate": 0.000124968742514292,
"loss": 0.021,
"step": 138
},
{
"epoch": 1.3178283090141472,
"grad_norm": 0.0279541015625,
"learning_rate": 0.00012397002991773275,
"loss": 0.0249,
"step": 139
},
{
"epoch": 1.3273090882156877,
"grad_norm": 0.026611328125,
"learning_rate": 0.0001229687742131796,
"loss": 0.0231,
"step": 140
},
{
"epoch": 1.3367898674172283,
"grad_norm": 0.0274658203125,
"learning_rate": 0.00012196508162917677,
"loss": 0.0276,
"step": 141
},
{
"epoch": 1.346270646618769,
"grad_norm": 0.03173828125,
"learning_rate": 0.00012095905865281025,
"loss": 0.0268,
"step": 142
},
{
"epoch": 1.3557514258203096,
"grad_norm": 0.027587890625,
"learning_rate": 0.00011995081201840956,
"loss": 0.0259,
"step": 143
},
{
"epoch": 1.3557514258203096,
"eval_loss": 0.029736338183283806,
"eval_runtime": 34.4542,
"eval_samples_per_second": 20.636,
"eval_steps_per_second": 20.636,
"step": 143
},
{
"epoch": 1.3652322050218502,
"grad_norm": 0.02734375,
"learning_rate": 0.00011894044869622403,
"loss": 0.025,
"step": 144
},
{
"epoch": 1.374712984223391,
"grad_norm": 0.0263671875,
"learning_rate": 0.00011792807588107357,
"loss": 0.0221,
"step": 145
},
{
"epoch": 1.3841937634249315,
"grad_norm": 0.031982421875,
"learning_rate": 0.00011691380098097597,
"loss": 0.0281,
"step": 146
},
{
"epoch": 1.393674542626472,
"grad_norm": 0.025146484375,
"learning_rate": 0.0001158977316057513,
"loss": 0.021,
"step": 147
},
{
"epoch": 1.4031553218280126,
"grad_norm": 0.032958984375,
"learning_rate": 0.00011487997555560503,
"loss": 0.0256,
"step": 148
},
{
"epoch": 1.4126361010295534,
"grad_norm": 0.0306396484375,
"learning_rate": 0.00011386064080969094,
"loss": 0.0267,
"step": 149
},
{
"epoch": 1.422116880231094,
"grad_norm": 0.0267333984375,
"learning_rate": 0.00011283983551465511,
"loss": 0.0211,
"step": 150
},
{
"epoch": 1.4315976594326347,
"grad_norm": 0.0291748046875,
"learning_rate": 0.0001118176679731619,
"loss": 0.025,
"step": 151
},
{
"epoch": 1.4410784386341753,
"grad_norm": 0.0296630859375,
"learning_rate": 0.00011079424663240372,
"loss": 0.0273,
"step": 152
},
{
"epoch": 1.4505592178357158,
"grad_norm": 0.0284423828125,
"learning_rate": 0.00010976968007259519,
"loss": 0.0243,
"step": 153
},
{
"epoch": 1.4600399970372564,
"grad_norm": 0.0272216796875,
"learning_rate": 0.00010874407699545328,
"loss": 0.0232,
"step": 154
},
{
"epoch": 1.4600399970372564,
"eval_loss": 0.029951849952340126,
"eval_runtime": 34.3508,
"eval_samples_per_second": 20.698,
"eval_steps_per_second": 20.698,
"step": 154
},
{
"epoch": 1.4695207762387972,
"grad_norm": 0.03271484375,
"learning_rate": 0.00010771754621266466,
"loss": 0.0283,
"step": 155
},
{
"epoch": 1.4790015554403377,
"grad_norm": 0.024169921875,
"learning_rate": 0.00010669019663434117,
"loss": 0.0218,
"step": 156
},
{
"epoch": 1.4884823346418785,
"grad_norm": 0.029296875,
"learning_rate": 0.00010566213725746506,
"loss": 0.0273,
"step": 157
},
{
"epoch": 1.497963113843419,
"grad_norm": 0.0263671875,
"learning_rate": 0.00010463347715432488,
"loss": 0.0229,
"step": 158
},
{
"epoch": 1.5074438930449596,
"grad_norm": 0.02978515625,
"learning_rate": 0.00010360432546094341,
"loss": 0.0242,
"step": 159
},
{
"epoch": 1.5169246722465002,
"grad_norm": 0.031494140625,
"learning_rate": 0.00010257479136549889,
"loss": 0.0252,
"step": 160
},
{
"epoch": 1.5264054514480407,
"grad_norm": 0.0283203125,
"learning_rate": 0.00010154498409674051,
"loss": 0.0275,
"step": 161
},
{
"epoch": 1.5358862306495815,
"grad_norm": 0.035400390625,
"learning_rate": 0.00010051501291240008,
"loss": 0.0299,
"step": 162
},
{
"epoch": 1.5453670098511223,
"grad_norm": 0.031982421875,
"learning_rate": 9.948498708759993e-05,
"loss": 0.0264,
"step": 163
},
{
"epoch": 1.5548477890526629,
"grad_norm": 0.0302734375,
"learning_rate": 9.845501590325948e-05,
"loss": 0.025,
"step": 164
},
{
"epoch": 1.5643285682542034,
"grad_norm": 0.029296875,
"learning_rate": 9.742520863450115e-05,
"loss": 0.0203,
"step": 165
},
{
"epoch": 1.5643285682542034,
"eval_loss": 0.02908056415617466,
"eval_runtime": 34.2083,
"eval_samples_per_second": 20.784,
"eval_steps_per_second": 20.784,
"step": 165
},
{
"epoch": 1.573809347455744,
"grad_norm": 0.0322265625,
"learning_rate": 9.639567453905661e-05,
"loss": 0.0262,
"step": 166
},
{
"epoch": 1.5832901266572845,
"grad_norm": 0.0245361328125,
"learning_rate": 9.536652284567513e-05,
"loss": 0.0213,
"step": 167
},
{
"epoch": 1.5927709058588253,
"grad_norm": 0.031005859375,
"learning_rate": 9.433786274253495e-05,
"loss": 0.0272,
"step": 168
},
{
"epoch": 1.6022516850603659,
"grad_norm": 0.0262451171875,
"learning_rate": 9.330980336565887e-05,
"loss": 0.0217,
"step": 169
},
{
"epoch": 1.6117324642619066,
"grad_norm": 0.0296630859375,
"learning_rate": 9.228245378733537e-05,
"loss": 0.0258,
"step": 170
},
{
"epoch": 1.6212132434634472,
"grad_norm": 0.03466796875,
"learning_rate": 9.125592300454676e-05,
"loss": 0.0278,
"step": 171
},
{
"epoch": 1.6306940226649878,
"grad_norm": 0.0284423828125,
"learning_rate": 9.023031992740488e-05,
"loss": 0.0234,
"step": 172
},
{
"epoch": 1.6401748018665283,
"grad_norm": 0.0301513671875,
"learning_rate": 8.920575336759629e-05,
"loss": 0.025,
"step": 173
},
{
"epoch": 1.6496555810680689,
"grad_norm": 0.03173828125,
"learning_rate": 8.818233202683814e-05,
"loss": 0.0268,
"step": 174
},
{
"epoch": 1.6591363602696096,
"grad_norm": 0.0260009765625,
"learning_rate": 8.71601644853449e-05,
"loss": 0.0241,
"step": 175
},
{
"epoch": 1.6686171394711504,
"grad_norm": 0.028564453125,
"learning_rate": 8.613935919030907e-05,
"loss": 0.0241,
"step": 176
},
{
"epoch": 1.6686171394711504,
"eval_loss": 0.02843022532761097,
"eval_runtime": 34.2598,
"eval_samples_per_second": 20.753,
"eval_steps_per_second": 20.753,
"step": 176
},
{
"epoch": 1.678097918672691,
"grad_norm": 0.029052734375,
"learning_rate": 8.512002444439502e-05,
"loss": 0.0228,
"step": 177
},
{
"epoch": 1.6875786978742315,
"grad_norm": 0.02880859375,
"learning_rate": 8.410226839424871e-05,
"loss": 0.0272,
"step": 178
},
{
"epoch": 1.697059477075772,
"grad_norm": 0.0238037109375,
"learning_rate": 8.308619901902406e-05,
"loss": 0.0196,
"step": 179
},
{
"epoch": 1.7065402562773127,
"grad_norm": 0.02783203125,
"learning_rate": 8.207192411892646e-05,
"loss": 0.0218,
"step": 180
},
{
"epoch": 1.7160210354788534,
"grad_norm": 0.0277099609375,
"learning_rate": 8.1059551303776e-05,
"loss": 0.022,
"step": 181
},
{
"epoch": 1.7255018146803942,
"grad_norm": 0.0284423828125,
"learning_rate": 8.004918798159045e-05,
"loss": 0.0219,
"step": 182
},
{
"epoch": 1.7349825938819348,
"grad_norm": 0.02734375,
"learning_rate": 7.904094134718976e-05,
"loss": 0.0244,
"step": 183
},
{
"epoch": 1.7444633730834753,
"grad_norm": 0.036376953125,
"learning_rate": 7.803491837082324e-05,
"loss": 0.0301,
"step": 184
},
{
"epoch": 1.7539441522850159,
"grad_norm": 0.03125,
"learning_rate": 7.703122578682046e-05,
"loss": 0.0259,
"step": 185
},
{
"epoch": 1.7634249314865564,
"grad_norm": 0.025634765625,
"learning_rate": 7.602997008226726e-05,
"loss": 0.0216,
"step": 186
},
{
"epoch": 1.7729057106880972,
"grad_norm": 0.02783203125,
"learning_rate": 7.5031257485708e-05,
"loss": 0.0245,
"step": 187
},
{
"epoch": 1.7729057106880972,
"eval_loss": 0.028160251677036285,
"eval_runtime": 34.3215,
"eval_samples_per_second": 20.716,
"eval_steps_per_second": 20.716,
"step": 187
},
{
"epoch": 1.7823864898896378,
"grad_norm": 0.024658203125,
"learning_rate": 7.403519395587521e-05,
"loss": 0.0221,
"step": 188
},
{
"epoch": 1.7918672690911785,
"grad_norm": 0.0263671875,
"learning_rate": 7.304188517044774e-05,
"loss": 0.0228,
"step": 189
},
{
"epoch": 1.801348048292719,
"grad_norm": 0.02587890625,
"learning_rate": 7.205143651483906e-05,
"loss": 0.0226,
"step": 190
},
{
"epoch": 1.8108288274942597,
"grad_norm": 0.028076171875,
"learning_rate": 7.106395307101621e-05,
"loss": 0.0213,
"step": 191
},
{
"epoch": 1.8203096066958002,
"grad_norm": 0.0299072265625,
"learning_rate": 7.007953960635109e-05,
"loss": 0.0247,
"step": 192
},
{
"epoch": 1.8297903858973408,
"grad_norm": 0.0341796875,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0276,
"step": 193
},
{
"epoch": 1.8392711650988816,
"grad_norm": 0.031494140625,
"learning_rate": 6.812034004434903e-05,
"loss": 0.0233,
"step": 194
},
{
"epoch": 1.8487519443004223,
"grad_norm": 0.0269775390625,
"learning_rate": 6.714576180891654e-05,
"loss": 0.0213,
"step": 195
},
{
"epoch": 1.858232723501963,
"grad_norm": 0.0296630859375,
"learning_rate": 6.617466925439746e-05,
"loss": 0.0213,
"step": 196
},
{
"epoch": 1.8677135027035034,
"grad_norm": 0.02685546875,
"learning_rate": 6.520716540916709e-05,
"loss": 0.0227,
"step": 197
},
{
"epoch": 1.877194281905044,
"grad_norm": 0.02734375,
"learning_rate": 6.424335292085553e-05,
"loss": 0.0222,
"step": 198
},
{
"epoch": 1.877194281905044,
"eval_loss": 0.027654768899083138,
"eval_runtime": 34.2048,
"eval_samples_per_second": 20.787,
"eval_steps_per_second": 20.787,
"step": 198
},
{
"epoch": 1.8866750611065846,
"grad_norm": 0.0255126953125,
"learning_rate": 6.32833340454571e-05,
"loss": 0.021,
"step": 199
},
{
"epoch": 1.8961558403081253,
"grad_norm": 0.02978515625,
"learning_rate": 6.232721063648148e-05,
"loss": 0.0249,
"step": 200
},
{
"epoch": 1.905636619509666,
"grad_norm": 0.029052734375,
"learning_rate": 6.137508413414784e-05,
"loss": 0.0238,
"step": 201
},
{
"epoch": 1.9151173987112067,
"grad_norm": 0.0294189453125,
"learning_rate": 6.0427055554621913e-05,
"loss": 0.0246,
"step": 202
},
{
"epoch": 1.9245981779127472,
"grad_norm": 0.0272216796875,
"learning_rate": 5.948322547929939e-05,
"loss": 0.0202,
"step": 203
},
{
"epoch": 1.9340789571142878,
"grad_norm": 0.024658203125,
"learning_rate": 5.854369404413398e-05,
"loss": 0.0165,
"step": 204
},
{
"epoch": 1.9435597363158283,
"grad_norm": 0.026611328125,
"learning_rate": 5.7608560929013946e-05,
"loss": 0.0226,
"step": 205
},
{
"epoch": 1.9530405155173691,
"grad_norm": 0.033203125,
"learning_rate": 5.667792534718639e-05,
"loss": 0.0265,
"step": 206
},
{
"epoch": 1.9625212947189097,
"grad_norm": 0.0269775390625,
"learning_rate": 5.5751886034731115e-05,
"loss": 0.0233,
"step": 207
},
{
"epoch": 1.9720020739204505,
"grad_norm": 0.02734375,
"learning_rate": 5.483054124008528e-05,
"loss": 0.0209,
"step": 208
},
{
"epoch": 1.981482853121991,
"grad_norm": 0.0286865234375,
"learning_rate": 5.391398871361972e-05,
"loss": 0.0231,
"step": 209
},
{
"epoch": 1.981482853121991,
"eval_loss": 0.027809714898467064,
"eval_runtime": 34.3727,
"eval_samples_per_second": 20.685,
"eval_steps_per_second": 20.685,
"step": 209
},
{
"epoch": 1.9909636323235316,
"grad_norm": 0.03271484375,
"learning_rate": 5.300232569726804e-05,
"loss": 0.0246,
"step": 210
},
{
"epoch": 2.000444411525072,
"grad_norm": 0.032470703125,
"learning_rate": 5.2095648914209525e-05,
"loss": 0.0285,
"step": 211
},
{
"epoch": 2.0099251907266127,
"grad_norm": 0.026123046875,
"learning_rate": 5.119405455860772e-05,
"loss": 0.0201,
"step": 212
}
],
"logging_steps": 1,
"max_steps": 315,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 53,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.881706086558597e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}