{
"best_metric": 11.844514846801758,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.022397043590246087,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00011198521795123043,
"grad_norm": 0.286604106426239,
"learning_rate": 1e-05,
"loss": 11.9329,
"step": 1
},
{
"epoch": 0.00011198521795123043,
"eval_loss": 11.934624671936035,
"eval_runtime": 147.4029,
"eval_samples_per_second": 102.033,
"eval_steps_per_second": 25.508,
"step": 1
},
{
"epoch": 0.00022397043590246087,
"grad_norm": 0.3014761209487915,
"learning_rate": 2e-05,
"loss": 11.9347,
"step": 2
},
{
"epoch": 0.00033595565385369133,
"grad_norm": 0.2701459228992462,
"learning_rate": 3e-05,
"loss": 11.9335,
"step": 3
},
{
"epoch": 0.00044794087180492174,
"grad_norm": 0.2966476380825043,
"learning_rate": 4e-05,
"loss": 11.9318,
"step": 4
},
{
"epoch": 0.0005599260897561521,
"grad_norm": 0.3018394708633423,
"learning_rate": 5e-05,
"loss": 11.9339,
"step": 5
},
{
"epoch": 0.0006719113077073827,
"grad_norm": 0.3023911714553833,
"learning_rate": 6e-05,
"loss": 11.9336,
"step": 6
},
{
"epoch": 0.0007838965256586131,
"grad_norm": 0.3025539517402649,
"learning_rate": 7e-05,
"loss": 11.9324,
"step": 7
},
{
"epoch": 0.0008958817436098435,
"grad_norm": 0.3113081455230713,
"learning_rate": 8e-05,
"loss": 11.9316,
"step": 8
},
{
"epoch": 0.0010078669615610739,
"grad_norm": 0.31129753589630127,
"learning_rate": 9e-05,
"loss": 11.9315,
"step": 9
},
{
"epoch": 0.0011198521795123043,
"grad_norm": 0.3733045756816864,
"learning_rate": 0.0001,
"loss": 11.9273,
"step": 10
},
{
"epoch": 0.001231837397463535,
"grad_norm": 0.3920263946056366,
"learning_rate": 9.999316524962345e-05,
"loss": 11.924,
"step": 11
},
{
"epoch": 0.0013438226154147653,
"grad_norm": 0.39984649419784546,
"learning_rate": 9.997266286704631e-05,
"loss": 11.9202,
"step": 12
},
{
"epoch": 0.0014558078333659957,
"grad_norm": 0.43256324529647827,
"learning_rate": 9.993849845741524e-05,
"loss": 11.9165,
"step": 13
},
{
"epoch": 0.0015677930513172261,
"grad_norm": 0.47162097692489624,
"learning_rate": 9.989068136093873e-05,
"loss": 11.914,
"step": 14
},
{
"epoch": 0.0016797782692684565,
"grad_norm": 0.46797800064086914,
"learning_rate": 9.98292246503335e-05,
"loss": 11.9122,
"step": 15
},
{
"epoch": 0.001791763487219687,
"grad_norm": 0.46917322278022766,
"learning_rate": 9.975414512725057e-05,
"loss": 11.9055,
"step": 16
},
{
"epoch": 0.0019037487051709173,
"grad_norm": 0.44359341263771057,
"learning_rate": 9.966546331768191e-05,
"loss": 11.9063,
"step": 17
},
{
"epoch": 0.0020157339231221478,
"grad_norm": 0.47372058033943176,
"learning_rate": 9.956320346634876e-05,
"loss": 11.9007,
"step": 18
},
{
"epoch": 0.0021277191410733784,
"grad_norm": 0.42819708585739136,
"learning_rate": 9.944739353007344e-05,
"loss": 11.8978,
"step": 19
},
{
"epoch": 0.0022397043590246086,
"grad_norm": 0.422730028629303,
"learning_rate": 9.931806517013612e-05,
"loss": 11.8936,
"step": 20
},
{
"epoch": 0.002351689576975839,
"grad_norm": 0.41111958026885986,
"learning_rate": 9.917525374361912e-05,
"loss": 11.8876,
"step": 21
},
{
"epoch": 0.00246367479492707,
"grad_norm": 0.43906450271606445,
"learning_rate": 9.901899829374047e-05,
"loss": 11.8911,
"step": 22
},
{
"epoch": 0.0025756600128783,
"grad_norm": 0.4011225402355194,
"learning_rate": 9.884934153917997e-05,
"loss": 11.884,
"step": 23
},
{
"epoch": 0.0026876452308295306,
"grad_norm": 0.3697347640991211,
"learning_rate": 9.86663298624003e-05,
"loss": 11.8866,
"step": 24
},
{
"epoch": 0.002799630448780761,
"grad_norm": 0.3722085654735565,
"learning_rate": 9.847001329696653e-05,
"loss": 11.8841,
"step": 25
},
{
"epoch": 0.0029116156667319914,
"grad_norm": 0.35108664631843567,
"learning_rate": 9.826044551386744e-05,
"loss": 11.8889,
"step": 26
},
{
"epoch": 0.0030236008846832216,
"grad_norm": 0.3389376997947693,
"learning_rate": 9.803768380684242e-05,
"loss": 11.8853,
"step": 27
},
{
"epoch": 0.0031355861026344523,
"grad_norm": 0.3340710997581482,
"learning_rate": 9.780178907671789e-05,
"loss": 11.8794,
"step": 28
},
{
"epoch": 0.003247571320585683,
"grad_norm": 0.2880931496620178,
"learning_rate": 9.755282581475769e-05,
"loss": 11.8731,
"step": 29
},
{
"epoch": 0.003359556538536913,
"grad_norm": 0.27614906430244446,
"learning_rate": 9.729086208503174e-05,
"loss": 11.874,
"step": 30
},
{
"epoch": 0.0034715417564881437,
"grad_norm": 0.2922820746898651,
"learning_rate": 9.701596950580806e-05,
"loss": 11.8713,
"step": 31
},
{
"epoch": 0.003583526974439374,
"grad_norm": 0.29635900259017944,
"learning_rate": 9.672822322997305e-05,
"loss": 11.8757,
"step": 32
},
{
"epoch": 0.0036955121923906045,
"grad_norm": 0.29963231086730957,
"learning_rate": 9.642770192448536e-05,
"loss": 11.8637,
"step": 33
},
{
"epoch": 0.0038074974103418347,
"grad_norm": 0.2212974578142166,
"learning_rate": 9.611448774886924e-05,
"loss": 11.87,
"step": 34
},
{
"epoch": 0.003919482628293065,
"grad_norm": 0.2441217005252838,
"learning_rate": 9.578866633275288e-05,
"loss": 11.8686,
"step": 35
},
{
"epoch": 0.0040314678462442955,
"grad_norm": 0.25918489694595337,
"learning_rate": 9.545032675245813e-05,
"loss": 11.8721,
"step": 36
},
{
"epoch": 0.0041434530641955266,
"grad_norm": 0.22317498922348022,
"learning_rate": 9.509956150664796e-05,
"loss": 11.8705,
"step": 37
},
{
"epoch": 0.004255438282146757,
"grad_norm": 0.2156185507774353,
"learning_rate": 9.473646649103818e-05,
"loss": 11.8713,
"step": 38
},
{
"epoch": 0.004367423500097987,
"grad_norm": 0.19167621433734894,
"learning_rate": 9.43611409721806e-05,
"loss": 11.8653,
"step": 39
},
{
"epoch": 0.004479408718049217,
"grad_norm": 0.2138894498348236,
"learning_rate": 9.397368756032445e-05,
"loss": 11.8629,
"step": 40
},
{
"epoch": 0.004591393936000448,
"grad_norm": 0.1945975422859192,
"learning_rate": 9.357421218136386e-05,
"loss": 11.8646,
"step": 41
},
{
"epoch": 0.004703379153951678,
"grad_norm": 0.17435677349567413,
"learning_rate": 9.316282404787871e-05,
"loss": 11.8607,
"step": 42
},
{
"epoch": 0.004815364371902909,
"grad_norm": 0.19533629715442657,
"learning_rate": 9.273963562927695e-05,
"loss": 11.8633,
"step": 43
},
{
"epoch": 0.00492734958985414,
"grad_norm": 0.20290258526802063,
"learning_rate": 9.230476262104677e-05,
"loss": 11.8632,
"step": 44
},
{
"epoch": 0.00503933480780537,
"grad_norm": 0.1901685744524002,
"learning_rate": 9.185832391312644e-05,
"loss": 11.8634,
"step": 45
},
{
"epoch": 0.0051513200257566,
"grad_norm": 0.2550676465034485,
"learning_rate": 9.140044155740101e-05,
"loss": 11.8588,
"step": 46
},
{
"epoch": 0.00526330524370783,
"grad_norm": 0.21027059853076935,
"learning_rate": 9.093124073433463e-05,
"loss": 11.861,
"step": 47
},
{
"epoch": 0.005375290461659061,
"grad_norm": 0.2024654746055603,
"learning_rate": 9.045084971874738e-05,
"loss": 11.8582,
"step": 48
},
{
"epoch": 0.0054872756796102914,
"grad_norm": 0.2265324592590332,
"learning_rate": 8.995939984474624e-05,
"loss": 11.8656,
"step": 49
},
{
"epoch": 0.005599260897561522,
"grad_norm": 0.28348395228385925,
"learning_rate": 8.945702546981969e-05,
"loss": 11.8576,
"step": 50
},
{
"epoch": 0.005599260897561522,
"eval_loss": 11.863534927368164,
"eval_runtime": 144.7237,
"eval_samples_per_second": 103.922,
"eval_steps_per_second": 25.981,
"step": 50
},
{
"epoch": 0.005711246115512753,
"grad_norm": 0.2238164246082306,
"learning_rate": 8.894386393810563e-05,
"loss": 11.8706,
"step": 51
},
{
"epoch": 0.005823231333463983,
"grad_norm": 0.2522401213645935,
"learning_rate": 8.842005554284296e-05,
"loss": 11.8634,
"step": 52
},
{
"epoch": 0.005935216551415213,
"grad_norm": 0.23112688958644867,
"learning_rate": 8.788574348801675e-05,
"loss": 11.8656,
"step": 53
},
{
"epoch": 0.006047201769366443,
"grad_norm": 0.17010186612606049,
"learning_rate": 8.73410738492077e-05,
"loss": 11.8671,
"step": 54
},
{
"epoch": 0.006159186987317674,
"grad_norm": 0.2186678797006607,
"learning_rate": 8.678619553365659e-05,
"loss": 11.8632,
"step": 55
},
{
"epoch": 0.0062711722052689045,
"grad_norm": 0.18585319817066193,
"learning_rate": 8.622126023955446e-05,
"loss": 11.8643,
"step": 56
},
{
"epoch": 0.006383157423220135,
"grad_norm": 0.147291362285614,
"learning_rate": 8.564642241456986e-05,
"loss": 11.8625,
"step": 57
},
{
"epoch": 0.006495142641171366,
"grad_norm": 0.15081480145454407,
"learning_rate": 8.506183921362443e-05,
"loss": 11.8612,
"step": 58
},
{
"epoch": 0.006607127859122596,
"grad_norm": 0.1829891949892044,
"learning_rate": 8.44676704559283e-05,
"loss": 11.8587,
"step": 59
},
{
"epoch": 0.006719113077073826,
"grad_norm": 0.13547901809215546,
"learning_rate": 8.386407858128706e-05,
"loss": 11.8642,
"step": 60
},
{
"epoch": 0.006831098295025056,
"grad_norm": 0.11329905688762665,
"learning_rate": 8.32512286056924e-05,
"loss": 11.8602,
"step": 61
},
{
"epoch": 0.006943083512976287,
"grad_norm": 0.21004189550876617,
"learning_rate": 8.262928807620843e-05,
"loss": 11.8641,
"step": 62
},
{
"epoch": 0.007055068730927518,
"grad_norm": 0.10746277868747711,
"learning_rate": 8.199842702516583e-05,
"loss": 11.8642,
"step": 63
},
{
"epoch": 0.007167053948878748,
"grad_norm": 0.11390742659568787,
"learning_rate": 8.135881792367686e-05,
"loss": 11.8582,
"step": 64
},
{
"epoch": 0.007279039166829979,
"grad_norm": 0.10696551203727722,
"learning_rate": 8.07106356344834e-05,
"loss": 11.8567,
"step": 65
},
{
"epoch": 0.007391024384781209,
"grad_norm": 0.11884024739265442,
"learning_rate": 8.005405736415126e-05,
"loss": 11.8564,
"step": 66
},
{
"epoch": 0.007503009602732439,
"grad_norm": 0.107220858335495,
"learning_rate": 7.938926261462366e-05,
"loss": 11.8583,
"step": 67
},
{
"epoch": 0.007614994820683669,
"grad_norm": 0.0726424902677536,
"learning_rate": 7.871643313414718e-05,
"loss": 11.8596,
"step": 68
},
{
"epoch": 0.0077269800386349004,
"grad_norm": 0.10806351900100708,
"learning_rate": 7.803575286758364e-05,
"loss": 11.8558,
"step": 69
},
{
"epoch": 0.00783896525658613,
"grad_norm": 0.10926433652639389,
"learning_rate": 7.734740790612136e-05,
"loss": 11.8669,
"step": 70
},
{
"epoch": 0.00795095047453736,
"grad_norm": 0.10724564641714096,
"learning_rate": 7.66515864363997e-05,
"loss": 11.8552,
"step": 71
},
{
"epoch": 0.008062935692488591,
"grad_norm": 0.10973364114761353,
"learning_rate": 7.594847868906076e-05,
"loss": 11.8627,
"step": 72
},
{
"epoch": 0.008174920910439821,
"grad_norm": 0.11084305495023727,
"learning_rate": 7.52382768867422e-05,
"loss": 11.8653,
"step": 73
},
{
"epoch": 0.008286906128391053,
"grad_norm": 0.10954280197620392,
"learning_rate": 7.452117519152542e-05,
"loss": 11.8611,
"step": 74
},
{
"epoch": 0.008398891346342283,
"grad_norm": 0.1328384280204773,
"learning_rate": 7.379736965185368e-05,
"loss": 11.8611,
"step": 75
},
{
"epoch": 0.008510876564293514,
"grad_norm": 0.11508401483297348,
"learning_rate": 7.30670581489344e-05,
"loss": 11.8592,
"step": 76
},
{
"epoch": 0.008622861782244744,
"grad_norm": 0.12055990099906921,
"learning_rate": 7.233044034264034e-05,
"loss": 11.8663,
"step": 77
},
{
"epoch": 0.008734847000195974,
"grad_norm": 0.12198225408792496,
"learning_rate": 7.158771761692464e-05,
"loss": 11.8593,
"step": 78
},
{
"epoch": 0.008846832218147204,
"grad_norm": 0.10362587124109268,
"learning_rate": 7.083909302476453e-05,
"loss": 11.852,
"step": 79
},
{
"epoch": 0.008958817436098434,
"grad_norm": 0.13018785417079926,
"learning_rate": 7.008477123264848e-05,
"loss": 11.8567,
"step": 80
},
{
"epoch": 0.009070802654049666,
"grad_norm": 0.1769609898328781,
"learning_rate": 6.932495846462261e-05,
"loss": 11.8535,
"step": 81
},
{
"epoch": 0.009182787872000896,
"grad_norm": 0.14807429909706116,
"learning_rate": 6.855986244591104e-05,
"loss": 11.8521,
"step": 82
},
{
"epoch": 0.009294773089952127,
"grad_norm": 0.1289256364107132,
"learning_rate": 6.778969234612584e-05,
"loss": 11.8539,
"step": 83
},
{
"epoch": 0.009406758307903357,
"grad_norm": 0.1351771056652069,
"learning_rate": 6.701465872208216e-05,
"loss": 11.8656,
"step": 84
},
{
"epoch": 0.009518743525854587,
"grad_norm": 0.12823566794395447,
"learning_rate": 6.623497346023418e-05,
"loss": 11.8609,
"step": 85
},
{
"epoch": 0.009630728743805817,
"grad_norm": 0.10501829534769058,
"learning_rate": 6.545084971874738e-05,
"loss": 11.8538,
"step": 86
},
{
"epoch": 0.009742713961757047,
"grad_norm": 0.14413680136203766,
"learning_rate": 6.466250186922325e-05,
"loss": 11.8592,
"step": 87
},
{
"epoch": 0.00985469917970828,
"grad_norm": 0.1515997350215912,
"learning_rate": 6.387014543809223e-05,
"loss": 11.8531,
"step": 88
},
{
"epoch": 0.00996668439765951,
"grad_norm": 0.15429282188415527,
"learning_rate": 6.307399704769099e-05,
"loss": 11.8554,
"step": 89
},
{
"epoch": 0.01007866961561074,
"grad_norm": 0.1753351390361786,
"learning_rate": 6.227427435703997e-05,
"loss": 11.8517,
"step": 90
},
{
"epoch": 0.01019065483356197,
"grad_norm": 0.16229097545146942,
"learning_rate": 6.147119600233758e-05,
"loss": 11.8538,
"step": 91
},
{
"epoch": 0.0103026400515132,
"grad_norm": 0.1595967710018158,
"learning_rate": 6.066498153718735e-05,
"loss": 11.857,
"step": 92
},
{
"epoch": 0.01041462526946443,
"grad_norm": 0.2020295411348343,
"learning_rate": 5.985585137257401e-05,
"loss": 11.854,
"step": 93
},
{
"epoch": 0.01052661048741566,
"grad_norm": 0.14769580960273743,
"learning_rate": 5.90440267166055e-05,
"loss": 11.8552,
"step": 94
},
{
"epoch": 0.010638595705366892,
"grad_norm": 0.16329923272132874,
"learning_rate": 5.8229729514036705e-05,
"loss": 11.8494,
"step": 95
},
{
"epoch": 0.010750580923318123,
"grad_norm": 0.17727980017662048,
"learning_rate": 5.74131823855921e-05,
"loss": 11.8546,
"step": 96
},
{
"epoch": 0.010862566141269353,
"grad_norm": 0.1587761789560318,
"learning_rate": 5.6594608567103456e-05,
"loss": 11.8562,
"step": 97
},
{
"epoch": 0.010974551359220583,
"grad_norm": 0.17432230710983276,
"learning_rate": 5.577423184847932e-05,
"loss": 11.8509,
"step": 98
},
{
"epoch": 0.011086536577171813,
"grad_norm": 0.2063705027103424,
"learning_rate": 5.495227651252315e-05,
"loss": 11.8532,
"step": 99
},
{
"epoch": 0.011198521795123043,
"grad_norm": 0.2938782572746277,
"learning_rate": 5.4128967273616625e-05,
"loss": 11.8502,
"step": 100
},
{
"epoch": 0.011198521795123043,
"eval_loss": 11.852167129516602,
"eval_runtime": 144.7085,
"eval_samples_per_second": 103.933,
"eval_steps_per_second": 25.983,
"step": 100
},
{
"epoch": 0.011310507013074273,
"grad_norm": 0.15994684398174286,
"learning_rate": 5.330452921628497e-05,
"loss": 11.8639,
"step": 101
},
{
"epoch": 0.011422492231025505,
"grad_norm": 0.1284467577934265,
"learning_rate": 5.247918773366112e-05,
"loss": 11.8527,
"step": 102
},
{
"epoch": 0.011534477448976736,
"grad_norm": 0.1346336007118225,
"learning_rate": 5.165316846586541e-05,
"loss": 11.8539,
"step": 103
},
{
"epoch": 0.011646462666927966,
"grad_norm": 0.10573034733533859,
"learning_rate": 5.0826697238317935e-05,
"loss": 11.858,
"step": 104
},
{
"epoch": 0.011758447884879196,
"grad_norm": 0.13644260168075562,
"learning_rate": 5e-05,
"loss": 11.8526,
"step": 105
},
{
"epoch": 0.011870433102830426,
"grad_norm": 0.11927726119756699,
"learning_rate": 4.917330276168208e-05,
"loss": 11.8548,
"step": 106
},
{
"epoch": 0.011982418320781656,
"grad_norm": 0.1183391883969307,
"learning_rate": 4.834683153413459e-05,
"loss": 11.8571,
"step": 107
},
{
"epoch": 0.012094403538732887,
"grad_norm": 0.10421369224786758,
"learning_rate": 4.7520812266338885e-05,
"loss": 11.8537,
"step": 108
},
{
"epoch": 0.012206388756684118,
"grad_norm": 0.12772461771965027,
"learning_rate": 4.669547078371504e-05,
"loss": 11.8442,
"step": 109
},
{
"epoch": 0.012318373974635349,
"grad_norm": 0.13019393384456635,
"learning_rate": 4.5871032726383386e-05,
"loss": 11.8439,
"step": 110
},
{
"epoch": 0.012430359192586579,
"grad_norm": 0.10987130552530289,
"learning_rate": 4.504772348747687e-05,
"loss": 11.843,
"step": 111
},
{
"epoch": 0.012542344410537809,
"grad_norm": 0.11590322107076645,
"learning_rate": 4.4225768151520694e-05,
"loss": 11.8505,
"step": 112
},
{
"epoch": 0.01265432962848904,
"grad_norm": 0.11244717240333557,
"learning_rate": 4.3405391432896555e-05,
"loss": 11.8502,
"step": 113
},
{
"epoch": 0.01276631484644027,
"grad_norm": 0.12638477981090546,
"learning_rate": 4.2586817614407895e-05,
"loss": 11.8516,
"step": 114
},
{
"epoch": 0.0128783000643915,
"grad_norm": 0.08632209151983261,
"learning_rate": 4.17702704859633e-05,
"loss": 11.8589,
"step": 115
},
{
"epoch": 0.012990285282342732,
"grad_norm": 0.09909665584564209,
"learning_rate": 4.095597328339452e-05,
"loss": 11.8499,
"step": 116
},
{
"epoch": 0.013102270500293962,
"grad_norm": 0.07591623067855835,
"learning_rate": 4.0144148627425993e-05,
"loss": 11.8475,
"step": 117
},
{
"epoch": 0.013214255718245192,
"grad_norm": 0.08854926377534866,
"learning_rate": 3.933501846281267e-05,
"loss": 11.8485,
"step": 118
},
{
"epoch": 0.013326240936196422,
"grad_norm": 0.09631261229515076,
"learning_rate": 3.852880399766243e-05,
"loss": 11.8508,
"step": 119
},
{
"epoch": 0.013438226154147652,
"grad_norm": 0.08313702791929245,
"learning_rate": 3.772572564296005e-05,
"loss": 11.8471,
"step": 120
},
{
"epoch": 0.013550211372098882,
"grad_norm": 0.10649950802326202,
"learning_rate": 3.6926002952309016e-05,
"loss": 11.8554,
"step": 121
},
{
"epoch": 0.013662196590050113,
"grad_norm": 0.06774821877479553,
"learning_rate": 3.612985456190778e-05,
"loss": 11.8525,
"step": 122
},
{
"epoch": 0.013774181808001345,
"grad_norm": 0.08589000999927521,
"learning_rate": 3.533749813077677e-05,
"loss": 11.8455,
"step": 123
},
{
"epoch": 0.013886167025952575,
"grad_norm": 0.11686256527900696,
"learning_rate": 3.4549150281252636e-05,
"loss": 11.8541,
"step": 124
},
{
"epoch": 0.013998152243903805,
"grad_norm": 0.16135923564434052,
"learning_rate": 3.3765026539765834e-05,
"loss": 11.8509,
"step": 125
},
{
"epoch": 0.014110137461855035,
"grad_norm": 0.15247495472431183,
"learning_rate": 3.298534127791785e-05,
"loss": 11.852,
"step": 126
},
{
"epoch": 0.014222122679806265,
"grad_norm": 0.11234267801046371,
"learning_rate": 3.221030765387417e-05,
"loss": 11.8454,
"step": 127
},
{
"epoch": 0.014334107897757496,
"grad_norm": 0.09969646483659744,
"learning_rate": 3.144013755408895e-05,
"loss": 11.8512,
"step": 128
},
{
"epoch": 0.014446093115708726,
"grad_norm": 0.10679420828819275,
"learning_rate": 3.0675041535377405e-05,
"loss": 11.8502,
"step": 129
},
{
"epoch": 0.014558078333659958,
"grad_norm": 0.12656456232070923,
"learning_rate": 2.991522876735154e-05,
"loss": 11.8393,
"step": 130
},
{
"epoch": 0.014670063551611188,
"grad_norm": 0.17909297347068787,
"learning_rate": 2.916090697523549e-05,
"loss": 11.8419,
"step": 131
},
{
"epoch": 0.014782048769562418,
"grad_norm": 0.09594359993934631,
"learning_rate": 2.8412282383075363e-05,
"loss": 11.8441,
"step": 132
},
{
"epoch": 0.014894033987513648,
"grad_norm": 0.09326756745576859,
"learning_rate": 2.766955965735968e-05,
"loss": 11.8494,
"step": 133
},
{
"epoch": 0.015006019205464878,
"grad_norm": 0.11459993571043015,
"learning_rate": 2.693294185106562e-05,
"loss": 11.8463,
"step": 134
},
{
"epoch": 0.015118004423416109,
"grad_norm": 0.0951642319560051,
"learning_rate": 2.6202630348146324e-05,
"loss": 11.8548,
"step": 135
},
{
"epoch": 0.015229989641367339,
"grad_norm": 0.1266089677810669,
"learning_rate": 2.547882480847461e-05,
"loss": 11.8445,
"step": 136
},
{
"epoch": 0.01534197485931857,
"grad_norm": 0.13178473711013794,
"learning_rate": 2.476172311325783e-05,
"loss": 11.855,
"step": 137
},
{
"epoch": 0.015453960077269801,
"grad_norm": 0.1270277500152588,
"learning_rate": 2.405152131093926e-05,
"loss": 11.8473,
"step": 138
},
{
"epoch": 0.015565945295221031,
"grad_norm": 0.11386235803365707,
"learning_rate": 2.3348413563600325e-05,
"loss": 11.8415,
"step": 139
},
{
"epoch": 0.01567793051317226,
"grad_norm": 0.12678323686122894,
"learning_rate": 2.2652592093878666e-05,
"loss": 11.8508,
"step": 140
},
{
"epoch": 0.015789915731123493,
"grad_norm": 0.15571513772010803,
"learning_rate": 2.196424713241637e-05,
"loss": 11.8456,
"step": 141
},
{
"epoch": 0.01590190094907472,
"grad_norm": 0.13216663897037506,
"learning_rate": 2.128356686585282e-05,
"loss": 11.8442,
"step": 142
},
{
"epoch": 0.016013886167025954,
"grad_norm": 0.14941446483135223,
"learning_rate": 2.061073738537635e-05,
"loss": 11.8523,
"step": 143
},
{
"epoch": 0.016125871384977182,
"grad_norm": 0.13734516501426697,
"learning_rate": 1.9945942635848748e-05,
"loss": 11.8433,
"step": 144
},
{
"epoch": 0.016237856602928414,
"grad_norm": 0.1225147545337677,
"learning_rate": 1.928936436551661e-05,
"loss": 11.8414,
"step": 145
},
{
"epoch": 0.016349841820879642,
"grad_norm": 0.15480488538742065,
"learning_rate": 1.8641182076323148e-05,
"loss": 11.8438,
"step": 146
},
{
"epoch": 0.016461827038830874,
"grad_norm": 0.1925436556339264,
"learning_rate": 1.800157297483417e-05,
"loss": 11.843,
"step": 147
},
{
"epoch": 0.016573812256782106,
"grad_norm": 0.1457533836364746,
"learning_rate": 1.7370711923791567e-05,
"loss": 11.8503,
"step": 148
},
{
"epoch": 0.016685797474733335,
"grad_norm": 0.22455795109272003,
"learning_rate": 1.6748771394307585e-05,
"loss": 11.8477,
"step": 149
},
{
"epoch": 0.016797782692684567,
"grad_norm": 0.2925475239753723,
"learning_rate": 1.6135921418712956e-05,
"loss": 11.8482,
"step": 150
},
{
"epoch": 0.016797782692684567,
"eval_loss": 11.845553398132324,
"eval_runtime": 145.7746,
"eval_samples_per_second": 103.173,
"eval_steps_per_second": 25.793,
"step": 150
},
{
"epoch": 0.016909767910635795,
"grad_norm": 0.14274629950523376,
"learning_rate": 1.553232954407171e-05,
"loss": 11.8537,
"step": 151
},
{
"epoch": 0.017021753128587027,
"grad_norm": 0.1093517318367958,
"learning_rate": 1.4938160786375572e-05,
"loss": 11.85,
"step": 152
},
{
"epoch": 0.017133738346538255,
"grad_norm": 0.10719018429517746,
"learning_rate": 1.435357758543015e-05,
"loss": 11.8467,
"step": 153
},
{
"epoch": 0.017245723564489487,
"grad_norm": 0.11384471505880356,
"learning_rate": 1.3778739760445552e-05,
"loss": 11.8534,
"step": 154
},
{
"epoch": 0.01735770878244072,
"grad_norm": 0.08552500605583191,
"learning_rate": 1.3213804466343421e-05,
"loss": 11.836,
"step": 155
},
{
"epoch": 0.017469694000391948,
"grad_norm": 0.09449688345193863,
"learning_rate": 1.2658926150792322e-05,
"loss": 11.8365,
"step": 156
},
{
"epoch": 0.01758167921834318,
"grad_norm": 0.08517363667488098,
"learning_rate": 1.2114256511983274e-05,
"loss": 11.8444,
"step": 157
},
{
"epoch": 0.017693664436294408,
"grad_norm": 0.07527995109558105,
"learning_rate": 1.157994445715706e-05,
"loss": 11.8375,
"step": 158
},
{
"epoch": 0.01780564965424564,
"grad_norm": 0.07749849557876587,
"learning_rate": 1.1056136061894384e-05,
"loss": 11.8421,
"step": 159
},
{
"epoch": 0.01791763487219687,
"grad_norm": 0.08391092717647552,
"learning_rate": 1.0542974530180327e-05,
"loss": 11.8392,
"step": 160
},
{
"epoch": 0.0180296200901481,
"grad_norm": 0.08061125874519348,
"learning_rate": 1.0040600155253765e-05,
"loss": 11.846,
"step": 161
},
{
"epoch": 0.018141605308099332,
"grad_norm": 0.09921222180128098,
"learning_rate": 9.549150281252633e-06,
"loss": 11.8467,
"step": 162
},
{
"epoch": 0.01825359052605056,
"grad_norm": 0.11230499297380447,
"learning_rate": 9.068759265665384e-06,
"loss": 11.851,
"step": 163
},
{
"epoch": 0.018365575744001793,
"grad_norm": 0.08359543979167938,
"learning_rate": 8.599558442598998e-06,
"loss": 11.8378,
"step": 164
},
{
"epoch": 0.01847756096195302,
"grad_norm": 0.11416777223348618,
"learning_rate": 8.141676086873572e-06,
"loss": 11.8494,
"step": 165
},
{
"epoch": 0.018589546179904253,
"grad_norm": 0.0862763449549675,
"learning_rate": 7.695237378953223e-06,
"loss": 11.847,
"step": 166
},
{
"epoch": 0.01870153139785548,
"grad_norm": 0.09926173090934753,
"learning_rate": 7.260364370723044e-06,
"loss": 11.8447,
"step": 167
},
{
"epoch": 0.018813516615806714,
"grad_norm": 0.07171810418367386,
"learning_rate": 6.837175952121306e-06,
"loss": 11.8455,
"step": 168
},
{
"epoch": 0.018925501833757945,
"grad_norm": 0.0772084966301918,
"learning_rate": 6.425787818636131e-06,
"loss": 11.8516,
"step": 169
},
{
"epoch": 0.019037487051709174,
"grad_norm": 0.10193199664354324,
"learning_rate": 6.026312439675552e-06,
"loss": 11.8474,
"step": 170
},
{
"epoch": 0.019149472269660406,
"grad_norm": 0.09078315645456314,
"learning_rate": 5.6388590278194096e-06,
"loss": 11.8477,
"step": 171
},
{
"epoch": 0.019261457487611634,
"grad_norm": 0.0762549489736557,
"learning_rate": 5.263533508961827e-06,
"loss": 11.8453,
"step": 172
},
{
"epoch": 0.019373442705562866,
"grad_norm": 0.09394077211618423,
"learning_rate": 4.900438493352055e-06,
"loss": 11.8464,
"step": 173
},
{
"epoch": 0.019485427923514095,
"grad_norm": 0.10061314702033997,
"learning_rate": 4.549673247541875e-06,
"loss": 11.8438,
"step": 174
},
{
"epoch": 0.019597413141465327,
"grad_norm": 0.08596616983413696,
"learning_rate": 4.2113336672471245e-06,
"loss": 11.8477,
"step": 175
},
{
"epoch": 0.01970939835941656,
"grad_norm": 0.11032987385988235,
"learning_rate": 3.885512251130763e-06,
"loss": 11.8479,
"step": 176
},
{
"epoch": 0.019821383577367787,
"grad_norm": 0.08890503644943237,
"learning_rate": 3.5722980755146517e-06,
"loss": 11.8395,
"step": 177
},
{
"epoch": 0.01993336879531902,
"grad_norm": 0.08024253696203232,
"learning_rate": 3.271776770026963e-06,
"loss": 11.8459,
"step": 178
},
{
"epoch": 0.020045354013270247,
"grad_norm": 0.11825075000524521,
"learning_rate": 2.9840304941919415e-06,
"loss": 11.8433,
"step": 179
},
{
"epoch": 0.02015733923122148,
"grad_norm": 0.09822863340377808,
"learning_rate": 2.7091379149682685e-06,
"loss": 11.8448,
"step": 180
},
{
"epoch": 0.020269324449172708,
"grad_norm": 0.09055879712104797,
"learning_rate": 2.4471741852423237e-06,
"loss": 11.8374,
"step": 181
},
{
"epoch": 0.02038130966712394,
"grad_norm": 0.1232486441731453,
"learning_rate": 2.1982109232821178e-06,
"loss": 11.8401,
"step": 182
},
{
"epoch": 0.02049329488507517,
"grad_norm": 0.12297049909830093,
"learning_rate": 1.962316193157593e-06,
"loss": 11.8461,
"step": 183
},
{
"epoch": 0.0206052801030264,
"grad_norm": 0.10573378205299377,
"learning_rate": 1.7395544861325718e-06,
"loss": 11.8488,
"step": 184
},
{
"epoch": 0.020717265320977632,
"grad_norm": 0.11282268166542053,
"learning_rate": 1.5299867030334814e-06,
"loss": 11.8382,
"step": 185
},
{
"epoch": 0.02082925053892886,
"grad_norm": 0.10707976669073105,
"learning_rate": 1.333670137599713e-06,
"loss": 11.8423,
"step": 186
},
{
"epoch": 0.020941235756880092,
"grad_norm": 0.11269718408584595,
"learning_rate": 1.1506584608200367e-06,
"loss": 11.8388,
"step": 187
},
{
"epoch": 0.02105322097483132,
"grad_norm": 0.11932256072759628,
"learning_rate": 9.810017062595322e-07,
"loss": 11.8354,
"step": 188
},
{
"epoch": 0.021165206192782553,
"grad_norm": 0.10518424957990646,
"learning_rate": 8.247462563808817e-07,
"loss": 11.8511,
"step": 189
},
{
"epoch": 0.021277191410733785,
"grad_norm": 0.10518964380025864,
"learning_rate": 6.819348298638839e-07,
"loss": 11.8408,
"step": 190
},
{
"epoch": 0.021389176628685013,
"grad_norm": 0.09963524341583252,
"learning_rate": 5.526064699265753e-07,
"loss": 11.8478,
"step": 191
},
{
"epoch": 0.021501161846636245,
"grad_norm": 0.1250251680612564,
"learning_rate": 4.367965336512403e-07,
"loss": 11.8427,
"step": 192
},
{
"epoch": 0.021613147064587473,
"grad_norm": 0.13727028667926788,
"learning_rate": 3.3453668231809286e-07,
"loss": 11.8497,
"step": 193
},
{
"epoch": 0.021725132282538705,
"grad_norm": 0.14538681507110596,
"learning_rate": 2.458548727494292e-07,
"loss": 11.8512,
"step": 194
},
{
"epoch": 0.021837117500489934,
"grad_norm": 0.1351637840270996,
"learning_rate": 1.7077534966650766e-07,
"loss": 11.8517,
"step": 195
},
{
"epoch": 0.021949102718441166,
"grad_norm": 0.12191486358642578,
"learning_rate": 1.0931863906127327e-07,
"loss": 11.847,
"step": 196
},
{
"epoch": 0.022061087936392398,
"grad_norm": 0.15278801321983337,
"learning_rate": 6.150154258476315e-08,
"loss": 11.8536,
"step": 197
},
{
"epoch": 0.022173073154343626,
"grad_norm": 0.1961815506219864,
"learning_rate": 2.7337132953697554e-08,
"loss": 11.8494,
"step": 198
},
{
"epoch": 0.022285058372294858,
"grad_norm": 0.19913120567798615,
"learning_rate": 6.834750376549792e-09,
"loss": 11.8546,
"step": 199
},
{
"epoch": 0.022397043590246087,
"grad_norm": 0.21238204836845398,
"learning_rate": 0.0,
"loss": 11.8428,
"step": 200
},
{
"epoch": 0.022397043590246087,
"eval_loss": 11.844514846801758,
"eval_runtime": 145.385,
"eval_samples_per_second": 103.449,
"eval_steps_per_second": 25.862,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 220358246400000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}