{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 500,
"global_step": 7662,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07830853563038372,
"grad_norm": 9.886259078979492,
"learning_rate": 9.874706342991386e-05,
"loss": 2.2961,
"step": 100
},
{
"epoch": 0.15661707126076743,
"grad_norm": 9.754347801208496,
"learning_rate": 9.744192116940747e-05,
"loss": 2.1652,
"step": 200
},
{
"epoch": 0.23492560689115113,
"grad_norm": 10.011685371398926,
"learning_rate": 9.613677890890107e-05,
"loss": 2.2701,
"step": 300
},
{
"epoch": 0.31323414252153486,
"grad_norm": 10.389019966125488,
"learning_rate": 9.483163664839469e-05,
"loss": 2.1357,
"step": 400
},
{
"epoch": 0.39154267815191857,
"grad_norm": 8.717257499694824,
"learning_rate": 9.352649438788829e-05,
"loss": 2.0979,
"step": 500
},
{
"epoch": 0.46985121378230227,
"grad_norm": 10.2243070602417,
"learning_rate": 9.222135212738188e-05,
"loss": 2.0926,
"step": 600
},
{
"epoch": 0.548159749412686,
"grad_norm": 9.24715518951416,
"learning_rate": 9.091620986687549e-05,
"loss": 2.0199,
"step": 700
},
{
"epoch": 0.6264682850430697,
"grad_norm": 8.67659854888916,
"learning_rate": 8.96110676063691e-05,
"loss": 2.071,
"step": 800
},
{
"epoch": 0.7047768206734534,
"grad_norm": 6.2473015785217285,
"learning_rate": 8.830592534586271e-05,
"loss": 2.0342,
"step": 900
},
{
"epoch": 0.7830853563038371,
"grad_norm": 9.428214073181152,
"learning_rate": 8.700078308535631e-05,
"loss": 1.9497,
"step": 1000
},
{
"epoch": 0.8613938919342208,
"grad_norm": 7.909013271331787,
"learning_rate": 8.569564082484992e-05,
"loss": 2.006,
"step": 1100
},
{
"epoch": 0.9397024275646045,
"grad_norm": 7.305349826812744,
"learning_rate": 8.43904985643435e-05,
"loss": 1.9751,
"step": 1200
},
{
"epoch": 1.0180109631949883,
"grad_norm": 6.404293060302734,
"learning_rate": 8.308535630383712e-05,
"loss": 1.7841,
"step": 1300
},
{
"epoch": 1.096319498825372,
"grad_norm": 6.121842384338379,
"learning_rate": 8.178021404333073e-05,
"loss": 1.2646,
"step": 1400
},
{
"epoch": 1.1746280344557556,
"grad_norm": 5.871735572814941,
"learning_rate": 8.047507178282433e-05,
"loss": 1.2666,
"step": 1500
},
{
"epoch": 1.2529365700861395,
"grad_norm": 6.930913925170898,
"learning_rate": 7.916992952231794e-05,
"loss": 1.295,
"step": 1600
},
{
"epoch": 1.331245105716523,
"grad_norm": 5.334323406219482,
"learning_rate": 7.786478726181154e-05,
"loss": 1.3333,
"step": 1700
},
{
"epoch": 1.4095536413469067,
"grad_norm": 7.161041259765625,
"learning_rate": 7.655964500130514e-05,
"loss": 1.3355,
"step": 1800
},
{
"epoch": 1.4878621769772904,
"grad_norm": 7.43231725692749,
"learning_rate": 7.525450274079875e-05,
"loss": 1.284,
"step": 1900
},
{
"epoch": 1.5661707126076743,
"grad_norm": 6.2656354904174805,
"learning_rate": 7.394936048029235e-05,
"loss": 1.287,
"step": 2000
},
{
"epoch": 1.644479248238058,
"grad_norm": 5.97381067276001,
"learning_rate": 7.264421821978597e-05,
"loss": 1.3196,
"step": 2100
},
{
"epoch": 1.7227877838684416,
"grad_norm": 7.828345775604248,
"learning_rate": 7.133907595927957e-05,
"loss": 1.2884,
"step": 2200
},
{
"epoch": 1.8010963194988254,
"grad_norm": 7.866464614868164,
"learning_rate": 7.003393369877316e-05,
"loss": 1.3136,
"step": 2300
},
{
"epoch": 1.879404855129209,
"grad_norm": 5.8095598220825195,
"learning_rate": 6.872879143826677e-05,
"loss": 1.2181,
"step": 2400
},
{
"epoch": 1.9577133907595927,
"grad_norm": 6.666238784790039,
"learning_rate": 6.742364917776038e-05,
"loss": 1.2461,
"step": 2500
},
{
"epoch": 2.0360219263899766,
"grad_norm": 7.129757881164551,
"learning_rate": 6.611850691725399e-05,
"loss": 0.9798,
"step": 2600
},
{
"epoch": 2.11433046202036,
"grad_norm": 6.111073017120361,
"learning_rate": 6.481336465674759e-05,
"loss": 0.6804,
"step": 2700
},
{
"epoch": 2.192638997650744,
"grad_norm": 5.344665050506592,
"learning_rate": 6.35082223962412e-05,
"loss": 0.6946,
"step": 2800
},
{
"epoch": 2.2709475332811278,
"grad_norm": 5.611790657043457,
"learning_rate": 6.220308013573479e-05,
"loss": 0.6516,
"step": 2900
},
{
"epoch": 2.349256068911511,
"grad_norm": 7.892216682434082,
"learning_rate": 6.0897937875228404e-05,
"loss": 0.6718,
"step": 3000
},
{
"epoch": 2.427564604541895,
"grad_norm": 8.479430198669434,
"learning_rate": 5.959279561472201e-05,
"loss": 0.6601,
"step": 3100
},
{
"epoch": 2.505873140172279,
"grad_norm": 6.140246391296387,
"learning_rate": 5.828765335421561e-05,
"loss": 0.6619,
"step": 3200
},
{
"epoch": 2.5841816758026623,
"grad_norm": 6.076893329620361,
"learning_rate": 5.6982511093709215e-05,
"loss": 0.69,
"step": 3300
},
{
"epoch": 2.662490211433046,
"grad_norm": 6.6155900955200195,
"learning_rate": 5.5677368833202826e-05,
"loss": 0.6866,
"step": 3400
},
{
"epoch": 2.74079874706343,
"grad_norm": 6.685502052307129,
"learning_rate": 5.437222657269643e-05,
"loss": 0.6615,
"step": 3500
},
{
"epoch": 2.8191072826938135,
"grad_norm": 8.586551666259766,
"learning_rate": 5.306708431219003e-05,
"loss": 0.7118,
"step": 3600
},
{
"epoch": 2.8974158183241974,
"grad_norm": 5.001748085021973,
"learning_rate": 5.176194205168363e-05,
"loss": 0.7117,
"step": 3700
},
{
"epoch": 2.975724353954581,
"grad_norm": 5.666826248168945,
"learning_rate": 5.045679979117724e-05,
"loss": 0.6868,
"step": 3800
},
{
"epoch": 3.0540328895849647,
"grad_norm": 5.463041305541992,
"learning_rate": 4.9151657530670846e-05,
"loss": 0.4208,
"step": 3900
},
{
"epoch": 3.1323414252153485,
"grad_norm": 6.563859939575195,
"learning_rate": 4.784651527016445e-05,
"loss": 0.2744,
"step": 4000
},
{
"epoch": 3.210649960845732,
"grad_norm": 5.918147087097168,
"learning_rate": 4.654137300965806e-05,
"loss": 0.309,
"step": 4100
},
{
"epoch": 3.288958496476116,
"grad_norm": 4.2588958740234375,
"learning_rate": 4.523623074915166e-05,
"loss": 0.3063,
"step": 4200
},
{
"epoch": 3.3672670321064997,
"grad_norm": 5.194436073303223,
"learning_rate": 4.393108848864527e-05,
"loss": 0.3002,
"step": 4300
},
{
"epoch": 3.445575567736883,
"grad_norm": 4.340348720550537,
"learning_rate": 4.262594622813887e-05,
"loss": 0.2957,
"step": 4400
},
{
"epoch": 3.523884103367267,
"grad_norm": 3.6878747940063477,
"learning_rate": 4.1320803967632476e-05,
"loss": 0.2968,
"step": 4500
},
{
"epoch": 3.602192638997651,
"grad_norm": 4.565743923187256,
"learning_rate": 4.001566170712608e-05,
"loss": 0.2921,
"step": 4600
},
{
"epoch": 3.6805011746280343,
"grad_norm": 4.834578514099121,
"learning_rate": 3.8710519446619684e-05,
"loss": 0.2937,
"step": 4700
},
{
"epoch": 3.758809710258418,
"grad_norm": 4.321404933929443,
"learning_rate": 3.740537718611329e-05,
"loss": 0.2927,
"step": 4800
},
{
"epoch": 3.837118245888802,
"grad_norm": 5.987460136413574,
"learning_rate": 3.610023492560689e-05,
"loss": 0.2813,
"step": 4900
},
{
"epoch": 3.9154267815191854,
"grad_norm": 4.690079689025879,
"learning_rate": 3.4795092665100495e-05,
"loss": 0.2848,
"step": 5000
},
{
"epoch": 3.9937353171495693,
"grad_norm": 4.9851298332214355,
"learning_rate": 3.34899504045941e-05,
"loss": 0.2776,
"step": 5100
},
{
"epoch": 4.072043852779953,
"grad_norm": 4.890774726867676,
"learning_rate": 3.218480814408771e-05,
"loss": 0.1309,
"step": 5200
},
{
"epoch": 4.150352388410337,
"grad_norm": 3.3844189643859863,
"learning_rate": 3.087966588358131e-05,
"loss": 0.1197,
"step": 5300
},
{
"epoch": 4.22866092404072,
"grad_norm": 3.3200528621673584,
"learning_rate": 2.9574523623074918e-05,
"loss": 0.1252,
"step": 5400
},
{
"epoch": 4.306969459671104,
"grad_norm": 2.3074514865875244,
"learning_rate": 2.826938136256852e-05,
"loss": 0.1181,
"step": 5500
},
{
"epoch": 4.385277995301488,
"grad_norm": 4.1699724197387695,
"learning_rate": 2.696423910206213e-05,
"loss": 0.1234,
"step": 5600
},
{
"epoch": 4.463586530931871,
"grad_norm": 3.833683729171753,
"learning_rate": 2.565909684155573e-05,
"loss": 0.1161,
"step": 5700
},
{
"epoch": 4.5418950665622555,
"grad_norm": 2.7541275024414062,
"learning_rate": 2.4353954581049333e-05,
"loss": 0.1101,
"step": 5800
},
{
"epoch": 4.620203602192639,
"grad_norm": 4.193445205688477,
"learning_rate": 2.304881232054294e-05,
"loss": 0.1118,
"step": 5900
},
{
"epoch": 4.698512137823022,
"grad_norm": 3.851712465286255,
"learning_rate": 2.1743670060036544e-05,
"loss": 0.1144,
"step": 6000
},
{
"epoch": 4.776820673453407,
"grad_norm": 3.333712100982666,
"learning_rate": 2.043852779953015e-05,
"loss": 0.1117,
"step": 6100
},
{
"epoch": 4.85512920908379,
"grad_norm": 3.7569446563720703,
"learning_rate": 1.9133385539023756e-05,
"loss": 0.1049,
"step": 6200
},
{
"epoch": 4.9334377447141735,
"grad_norm": 3.2152910232543945,
"learning_rate": 1.782824327851736e-05,
"loss": 0.1064,
"step": 6300
},
{
"epoch": 5.011746280344558,
"grad_norm": 1.593434453010559,
"learning_rate": 1.6523101018010963e-05,
"loss": 0.0936,
"step": 6400
},
{
"epoch": 5.090054815974941,
"grad_norm": 1.8612021207809448,
"learning_rate": 1.5217958757504569e-05,
"loss": 0.047,
"step": 6500
},
{
"epoch": 5.168363351605325,
"grad_norm": 2.482856035232544,
"learning_rate": 1.3912816496998175e-05,
"loss": 0.0448,
"step": 6600
},
{
"epoch": 5.246671887235709,
"grad_norm": 1.4229090213775635,
"learning_rate": 1.2607674236491779e-05,
"loss": 0.044,
"step": 6700
},
{
"epoch": 5.324980422866092,
"grad_norm": 1.7034039497375488,
"learning_rate": 1.1302531975985382e-05,
"loss": 0.0459,
"step": 6800
},
{
"epoch": 5.403288958496476,
"grad_norm": 2.2560577392578125,
"learning_rate": 9.997389715478986e-06,
"loss": 0.0453,
"step": 6900
},
{
"epoch": 5.48159749412686,
"grad_norm": 2.826206922531128,
"learning_rate": 8.692247454972592e-06,
"loss": 0.0439,
"step": 7000
},
{
"epoch": 5.559906029757244,
"grad_norm": 1.7038235664367676,
"learning_rate": 7.387105194466197e-06,
"loss": 0.0421,
"step": 7100
},
{
"epoch": 5.638214565387627,
"grad_norm": 2.713496685028076,
"learning_rate": 6.081962933959802e-06,
"loss": 0.0417,
"step": 7200
},
{
"epoch": 5.716523101018011,
"grad_norm": 0.9614657163619995,
"learning_rate": 4.776820673453407e-06,
"loss": 0.0403,
"step": 7300
},
{
"epoch": 5.794831636648395,
"grad_norm": 1.6873942613601685,
"learning_rate": 3.471678412947011e-06,
"loss": 0.0407,
"step": 7400
},
{
"epoch": 5.873140172278778,
"grad_norm": 2.548095464706421,
"learning_rate": 2.166536152440616e-06,
"loss": 0.0397,
"step": 7500
},
{
"epoch": 5.9514487079091625,
"grad_norm": 1.1459206342697144,
"learning_rate": 8.613938919342209e-07,
"loss": 0.0366,
"step": 7600
}
],
"logging_steps": 100,
"max_steps": 7662,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8424085506490368.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}