colSmol-256M-base / checkpoint-11082 / trainer_state.json
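What follows is the Hugging Face Trainer `trainer_state.json` for this checkpoint: `log_history` records a training entry (loss, grad_norm, learning_rate) every 10 steps and an evaluation entry at every 100th step (`eval_steps`: 100), across 3 epochs and 11,082 global steps. A minimal sketch for pulling the train/eval loss curves out of the file, assuming it is available locally under the checkpoint path shown above:

```python
# Minimal sketch (assumption: trainer_state.json sits locally under the
# checkpoint path above; field names follow the Trainer state format shown below).
import json

with open("checkpoint-11082/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries (every 100 steps) carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"epochs: {state['epoch']}, global steps: {state['global_step']}")
print("last train loss:", train[-1])
print("last eval loss:", evals[-1])
```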
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 11082,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027070925825663237,
"grad_norm": 4.34375,
"learning_rate": 5e-05,
"loss": 1.3537,
"step": 10
},
{
"epoch": 0.005414185165132647,
"grad_norm": 2.515625,
"learning_rate": 0.0001,
"loss": 1.0202,
"step": 20
},
{
"epoch": 0.008121277747698972,
"grad_norm": 1.3984375,
"learning_rate": 0.00015,
"loss": 0.8165,
"step": 30
},
{
"epoch": 0.010828370330265295,
"grad_norm": 0.77734375,
"learning_rate": 0.0002,
"loss": 0.789,
"step": 40
},
{
"epoch": 0.01353546291283162,
"grad_norm": 1.3359375,
"learning_rate": 0.00025,
"loss": 0.7576,
"step": 50
},
{
"epoch": 0.016242555495397944,
"grad_norm": 0.8828125,
"learning_rate": 0.0003,
"loss": 0.7408,
"step": 60
},
{
"epoch": 0.018949648077964266,
"grad_norm": 0.80859375,
"learning_rate": 0.00035,
"loss": 0.7243,
"step": 70
},
{
"epoch": 0.02165674066053059,
"grad_norm": 0.625,
"learning_rate": 0.0004,
"loss": 0.708,
"step": 80
},
{
"epoch": 0.024363833243096916,
"grad_norm": 1.0625,
"learning_rate": 0.00045000000000000004,
"loss": 0.6916,
"step": 90
},
{
"epoch": 0.02707092582566324,
"grad_norm": 0.6796875,
"learning_rate": 0.0005,
"loss": 0.6795,
"step": 100
},
{
"epoch": 0.02707092582566324,
"eval_loss": 0.7104170918464661,
"eval_runtime": 52.5058,
"eval_samples_per_second": 9.523,
"eval_steps_per_second": 0.305,
"step": 100
},
{
"epoch": 0.02977801840822956,
"grad_norm": 0.59375,
"learning_rate": 0.0004995447095246767,
"loss": 0.606,
"step": 110
},
{
"epoch": 0.03248511099079589,
"grad_norm": 0.7421875,
"learning_rate": 0.0004990894190493535,
"loss": 0.492,
"step": 120
},
{
"epoch": 0.03519220357336221,
"grad_norm": 0.98828125,
"learning_rate": 0.0004986341285740302,
"loss": 0.4467,
"step": 130
},
{
"epoch": 0.03789929615592853,
"grad_norm": 0.625,
"learning_rate": 0.000498178838098707,
"loss": 0.3712,
"step": 140
},
{
"epoch": 0.040606388738494856,
"grad_norm": 0.353515625,
"learning_rate": 0.0004977235476233838,
"loss": 0.3499,
"step": 150
},
{
"epoch": 0.04331348132106118,
"grad_norm": 0.6484375,
"learning_rate": 0.0004972682571480605,
"loss": 0.3513,
"step": 160
},
{
"epoch": 0.0460205739036275,
"grad_norm": 0.59375,
"learning_rate": 0.0004968129666727372,
"loss": 0.3074,
"step": 170
},
{
"epoch": 0.04872766648619383,
"grad_norm": 0.5859375,
"learning_rate": 0.0004963576761974139,
"loss": 0.3166,
"step": 180
},
{
"epoch": 0.051434759068760154,
"grad_norm": 0.3671875,
"learning_rate": 0.0004959023857220907,
"loss": 0.3321,
"step": 190
},
{
"epoch": 0.05414185165132648,
"grad_norm": 0.37890625,
"learning_rate": 0.0004954470952467675,
"loss": 0.2764,
"step": 200
},
{
"epoch": 0.05414185165132648,
"eval_loss": 0.2984282970428467,
"eval_runtime": 51.6076,
"eval_samples_per_second": 9.688,
"eval_steps_per_second": 0.31,
"step": 200
},
{
"epoch": 0.0568489442338928,
"grad_norm": 0.328125,
"learning_rate": 0.0004949918047714442,
"loss": 0.3399,
"step": 210
},
{
"epoch": 0.05955603681645912,
"grad_norm": 0.484375,
"learning_rate": 0.0004945365142961209,
"loss": 0.2881,
"step": 220
},
{
"epoch": 0.062263129399025445,
"grad_norm": 0.625,
"learning_rate": 0.0004940812238207977,
"loss": 0.2848,
"step": 230
},
{
"epoch": 0.06497022198159177,
"grad_norm": 0.4453125,
"learning_rate": 0.0004936259333454744,
"loss": 0.2854,
"step": 240
},
{
"epoch": 0.0676773145641581,
"grad_norm": 0.36328125,
"learning_rate": 0.0004931706428701511,
"loss": 0.2628,
"step": 250
},
{
"epoch": 0.07038440714672442,
"grad_norm": 0.7109375,
"learning_rate": 0.0004927153523948279,
"loss": 0.2686,
"step": 260
},
{
"epoch": 0.07309149972929074,
"grad_norm": 0.3203125,
"learning_rate": 0.0004922600619195047,
"loss": 0.2626,
"step": 270
},
{
"epoch": 0.07579859231185707,
"grad_norm": 0.5859375,
"learning_rate": 0.0004918047714441814,
"loss": 0.2475,
"step": 280
},
{
"epoch": 0.07850568489442339,
"grad_norm": 0.29296875,
"learning_rate": 0.0004913494809688581,
"loss": 0.2104,
"step": 290
},
{
"epoch": 0.08121277747698971,
"grad_norm": 0.7421875,
"learning_rate": 0.0004908941904935349,
"loss": 0.2537,
"step": 300
},
{
"epoch": 0.08121277747698971,
"eval_loss": 0.27854180335998535,
"eval_runtime": 50.3171,
"eval_samples_per_second": 9.937,
"eval_steps_per_second": 0.318,
"step": 300
},
{
"epoch": 0.08391987005955603,
"grad_norm": 0.5859375,
"learning_rate": 0.0004904389000182116,
"loss": 0.2649,
"step": 310
},
{
"epoch": 0.08662696264212236,
"grad_norm": 0.59765625,
"learning_rate": 0.0004899836095428884,
"loss": 0.2188,
"step": 320
},
{
"epoch": 0.08933405522468868,
"grad_norm": 0.3359375,
"learning_rate": 0.0004895283190675652,
"loss": 0.2715,
"step": 330
},
{
"epoch": 0.092041147807255,
"grad_norm": 0.349609375,
"learning_rate": 0.0004890730285922419,
"loss": 0.2425,
"step": 340
},
{
"epoch": 0.09474824038982133,
"grad_norm": 0.451171875,
"learning_rate": 0.0004886177381169186,
"loss": 0.2697,
"step": 350
},
{
"epoch": 0.09745533297238766,
"grad_norm": 0.51953125,
"learning_rate": 0.00048816244764159533,
"loss": 0.2589,
"step": 360
},
{
"epoch": 0.10016242555495398,
"grad_norm": 0.279296875,
"learning_rate": 0.00048770715716627206,
"loss": 0.2436,
"step": 370
},
{
"epoch": 0.10286951813752031,
"grad_norm": 0.26171875,
"learning_rate": 0.00048725186669094884,
"loss": 0.2405,
"step": 380
},
{
"epoch": 0.10557661072008663,
"grad_norm": 0.1845703125,
"learning_rate": 0.00048679657621562557,
"loss": 0.1929,
"step": 390
},
{
"epoch": 0.10828370330265295,
"grad_norm": 0.5859375,
"learning_rate": 0.00048634128574030235,
"loss": 0.2524,
"step": 400
},
{
"epoch": 0.10828370330265295,
"eval_loss": 0.2730386257171631,
"eval_runtime": 50.4403,
"eval_samples_per_second": 9.913,
"eval_steps_per_second": 0.317,
"step": 400
},
{
"epoch": 0.11099079588521928,
"grad_norm": 0.474609375,
"learning_rate": 0.0004858859952649791,
"loss": 0.2384,
"step": 410
},
{
"epoch": 0.1136978884677856,
"grad_norm": 0.640625,
"learning_rate": 0.0004854307047896558,
"loss": 0.2345,
"step": 420
},
{
"epoch": 0.11640498105035192,
"grad_norm": 0.490234375,
"learning_rate": 0.0004849754143143326,
"loss": 0.2222,
"step": 430
},
{
"epoch": 0.11911207363291824,
"grad_norm": 0.2890625,
"learning_rate": 0.0004845201238390093,
"loss": 0.2448,
"step": 440
},
{
"epoch": 0.12181916621548457,
"grad_norm": 0.306640625,
"learning_rate": 0.0004840648333636861,
"loss": 0.2293,
"step": 450
},
{
"epoch": 0.12452625879805089,
"grad_norm": 0.3671875,
"learning_rate": 0.00048360954288836276,
"loss": 0.2342,
"step": 460
},
{
"epoch": 0.12723335138061723,
"grad_norm": 0.181640625,
"learning_rate": 0.0004831542524130395,
"loss": 0.2127,
"step": 470
},
{
"epoch": 0.12994044396318355,
"grad_norm": 0.2080078125,
"learning_rate": 0.00048269896193771627,
"loss": 0.2081,
"step": 480
},
{
"epoch": 0.13264753654574987,
"grad_norm": 0.3203125,
"learning_rate": 0.000482243671462393,
"loss": 0.2002,
"step": 490
},
{
"epoch": 0.1353546291283162,
"grad_norm": 0.4296875,
"learning_rate": 0.0004817883809870698,
"loss": 0.2682,
"step": 500
},
{
"epoch": 0.1353546291283162,
"eval_loss": 0.2406972348690033,
"eval_runtime": 50.5337,
"eval_samples_per_second": 9.894,
"eval_steps_per_second": 0.317,
"step": 500
},
{
"epoch": 0.13806172171088252,
"grad_norm": 0.453125,
"learning_rate": 0.0004813330905117465,
"loss": 0.2076,
"step": 510
},
{
"epoch": 0.14076881429344884,
"grad_norm": 0.412109375,
"learning_rate": 0.00048087780003642323,
"loss": 0.2251,
"step": 520
},
{
"epoch": 0.14347590687601516,
"grad_norm": 0.431640625,
"learning_rate": 0.0004804225095611,
"loss": 0.2211,
"step": 530
},
{
"epoch": 0.1461829994585815,
"grad_norm": 0.83203125,
"learning_rate": 0.00047996721908577674,
"loss": 0.2015,
"step": 540
},
{
"epoch": 0.1488900920411478,
"grad_norm": 0.388671875,
"learning_rate": 0.00047951192861045346,
"loss": 0.2195,
"step": 550
},
{
"epoch": 0.15159718462371413,
"grad_norm": 0.47265625,
"learning_rate": 0.00047905663813513024,
"loss": 0.2289,
"step": 560
},
{
"epoch": 0.15430427720628045,
"grad_norm": 0.287109375,
"learning_rate": 0.00047860134765980697,
"loss": 0.2015,
"step": 570
},
{
"epoch": 0.15701136978884678,
"grad_norm": 0.49609375,
"learning_rate": 0.00047814605718448375,
"loss": 0.2575,
"step": 580
},
{
"epoch": 0.1597184623714131,
"grad_norm": 0.2333984375,
"learning_rate": 0.0004776907667091605,
"loss": 0.2308,
"step": 590
},
{
"epoch": 0.16242555495397942,
"grad_norm": 0.322265625,
"learning_rate": 0.00047723547623383715,
"loss": 0.166,
"step": 600
},
{
"epoch": 0.16242555495397942,
"eval_loss": 0.2522674798965454,
"eval_runtime": 50.2817,
"eval_samples_per_second": 9.944,
"eval_steps_per_second": 0.318,
"step": 600
},
{
"epoch": 0.16513264753654575,
"grad_norm": 0.353515625,
"learning_rate": 0.00047678018575851393,
"loss": 0.2034,
"step": 610
},
{
"epoch": 0.16783974011911207,
"grad_norm": 0.337890625,
"learning_rate": 0.00047632489528319066,
"loss": 0.2435,
"step": 620
},
{
"epoch": 0.1705468327016784,
"grad_norm": 0.44921875,
"learning_rate": 0.00047586960480786744,
"loss": 0.1984,
"step": 630
},
{
"epoch": 0.17325392528424471,
"grad_norm": 0.296875,
"learning_rate": 0.00047541431433254416,
"loss": 0.2178,
"step": 640
},
{
"epoch": 0.17596101786681104,
"grad_norm": 0.3828125,
"learning_rate": 0.0004749590238572209,
"loss": 0.2117,
"step": 650
},
{
"epoch": 0.17866811044937736,
"grad_norm": 0.359375,
"learning_rate": 0.00047450373338189767,
"loss": 0.2056,
"step": 660
},
{
"epoch": 0.18137520303194368,
"grad_norm": 0.287109375,
"learning_rate": 0.0004740484429065744,
"loss": 0.171,
"step": 670
},
{
"epoch": 0.18408229561451,
"grad_norm": 0.6015625,
"learning_rate": 0.0004735931524312512,
"loss": 0.2604,
"step": 680
},
{
"epoch": 0.18678938819707633,
"grad_norm": 0.306640625,
"learning_rate": 0.0004731378619559279,
"loss": 0.2011,
"step": 690
},
{
"epoch": 0.18949648077964265,
"grad_norm": 0.90234375,
"learning_rate": 0.00047268257148060463,
"loss": 0.1928,
"step": 700
},
{
"epoch": 0.18949648077964265,
"eval_loss": 0.25613531470298767,
"eval_runtime": 49.6244,
"eval_samples_per_second": 10.076,
"eval_steps_per_second": 0.322,
"step": 700
},
{
"epoch": 0.19220357336220897,
"grad_norm": 0.56640625,
"learning_rate": 0.0004722272810052814,
"loss": 0.2571,
"step": 710
},
{
"epoch": 0.19491066594477532,
"grad_norm": 0.341796875,
"learning_rate": 0.00047177199052995814,
"loss": 0.2028,
"step": 720
},
{
"epoch": 0.19761775852734165,
"grad_norm": 0.1748046875,
"learning_rate": 0.00047131670005463487,
"loss": 0.2207,
"step": 730
},
{
"epoch": 0.20032485110990797,
"grad_norm": 0.244140625,
"learning_rate": 0.0004708614095793116,
"loss": 0.1788,
"step": 740
},
{
"epoch": 0.2030319436924743,
"grad_norm": 0.322265625,
"learning_rate": 0.0004704061191039883,
"loss": 0.1913,
"step": 750
},
{
"epoch": 0.20573903627504062,
"grad_norm": 0.32421875,
"learning_rate": 0.0004699508286286651,
"loss": 0.2213,
"step": 760
},
{
"epoch": 0.20844612885760694,
"grad_norm": 0.4140625,
"learning_rate": 0.0004694955381533418,
"loss": 0.1681,
"step": 770
},
{
"epoch": 0.21115322144017326,
"grad_norm": 0.373046875,
"learning_rate": 0.00046904024767801855,
"loss": 0.1984,
"step": 780
},
{
"epoch": 0.21386031402273958,
"grad_norm": 0.52734375,
"learning_rate": 0.00046858495720269533,
"loss": 0.2184,
"step": 790
},
{
"epoch": 0.2165674066053059,
"grad_norm": 0.203125,
"learning_rate": 0.00046812966672737206,
"loss": 0.1834,
"step": 800
},
{
"epoch": 0.2165674066053059,
"eval_loss": 0.229017972946167,
"eval_runtime": 50.6851,
"eval_samples_per_second": 9.865,
"eval_steps_per_second": 0.316,
"step": 800
},
{
"epoch": 0.21927449918787223,
"grad_norm": 0.416015625,
"learning_rate": 0.00046767437625204884,
"loss": 0.1681,
"step": 810
},
{
"epoch": 0.22198159177043855,
"grad_norm": 0.34375,
"learning_rate": 0.00046721908577672557,
"loss": 0.1968,
"step": 820
},
{
"epoch": 0.22468868435300487,
"grad_norm": 0.251953125,
"learning_rate": 0.0004667637953014023,
"loss": 0.1499,
"step": 830
},
{
"epoch": 0.2273957769355712,
"grad_norm": 0.275390625,
"learning_rate": 0.0004663085048260791,
"loss": 0.1987,
"step": 840
},
{
"epoch": 0.23010286951813752,
"grad_norm": 0.408203125,
"learning_rate": 0.0004658532143507558,
"loss": 0.2192,
"step": 850
},
{
"epoch": 0.23280996210070384,
"grad_norm": 0.3203125,
"learning_rate": 0.0004653979238754326,
"loss": 0.1981,
"step": 860
},
{
"epoch": 0.23551705468327017,
"grad_norm": 0.267578125,
"learning_rate": 0.0004649426334001093,
"loss": 0.1985,
"step": 870
},
{
"epoch": 0.2382241472658365,
"grad_norm": 0.1787109375,
"learning_rate": 0.000464487342924786,
"loss": 0.1495,
"step": 880
},
{
"epoch": 0.2409312398484028,
"grad_norm": 0.361328125,
"learning_rate": 0.00046403205244946276,
"loss": 0.1736,
"step": 890
},
{
"epoch": 0.24363833243096913,
"grad_norm": 0.19921875,
"learning_rate": 0.0004635767619741395,
"loss": 0.1499,
"step": 900
},
{
"epoch": 0.24363833243096913,
"eval_loss": 0.240362286567688,
"eval_runtime": 50.0701,
"eval_samples_per_second": 9.986,
"eval_steps_per_second": 0.32,
"step": 900
},
{
"epoch": 0.24634542501353546,
"grad_norm": 0.484375,
"learning_rate": 0.0004631214714988162,
"loss": 0.1793,
"step": 910
},
{
"epoch": 0.24905251759610178,
"grad_norm": 0.34765625,
"learning_rate": 0.000462666181023493,
"loss": 0.2043,
"step": 920
},
{
"epoch": 0.2517596101786681,
"grad_norm": 0.51171875,
"learning_rate": 0.0004622108905481697,
"loss": 0.211,
"step": 930
},
{
"epoch": 0.25446670276123445,
"grad_norm": 0.53125,
"learning_rate": 0.0004617556000728465,
"loss": 0.2002,
"step": 940
},
{
"epoch": 0.25717379534380075,
"grad_norm": 0.1435546875,
"learning_rate": 0.00046130030959752323,
"loss": 0.1758,
"step": 950
},
{
"epoch": 0.2598808879263671,
"grad_norm": 0.263671875,
"learning_rate": 0.00046084501912219995,
"loss": 0.1695,
"step": 960
},
{
"epoch": 0.2625879805089334,
"grad_norm": 0.29296875,
"learning_rate": 0.00046038972864687674,
"loss": 0.184,
"step": 970
},
{
"epoch": 0.26529507309149974,
"grad_norm": 0.1103515625,
"learning_rate": 0.00045993443817155346,
"loss": 0.177,
"step": 980
},
{
"epoch": 0.26800216567406604,
"grad_norm": 0.138671875,
"learning_rate": 0.00045947914769623024,
"loss": 0.1733,
"step": 990
},
{
"epoch": 0.2707092582566324,
"grad_norm": 0.2080078125,
"learning_rate": 0.00045902385722090697,
"loss": 0.1469,
"step": 1000
},
{
"epoch": 0.2707092582566324,
"eval_loss": 0.24039389193058014,
"eval_runtime": 50.3119,
"eval_samples_per_second": 9.938,
"eval_steps_per_second": 0.318,
"step": 1000
},
{
"epoch": 0.2734163508391987,
"grad_norm": 0.46875,
"learning_rate": 0.0004585685667455837,
"loss": 0.1747,
"step": 1010
},
{
"epoch": 0.27612344342176504,
"grad_norm": 0.375,
"learning_rate": 0.0004581132762702604,
"loss": 0.1451,
"step": 1020
},
{
"epoch": 0.27883053600433133,
"grad_norm": 0.375,
"learning_rate": 0.00045765798579493715,
"loss": 0.1525,
"step": 1030
},
{
"epoch": 0.2815376285868977,
"grad_norm": 0.185546875,
"learning_rate": 0.00045720269531961393,
"loss": 0.1606,
"step": 1040
},
{
"epoch": 0.284244721169464,
"grad_norm": 0.56640625,
"learning_rate": 0.00045674740484429066,
"loss": 0.1908,
"step": 1050
},
{
"epoch": 0.2869518137520303,
"grad_norm": 0.158203125,
"learning_rate": 0.0004562921143689674,
"loss": 0.1894,
"step": 1060
},
{
"epoch": 0.2896589063345966,
"grad_norm": 0.35546875,
"learning_rate": 0.00045583682389364416,
"loss": 0.1646,
"step": 1070
},
{
"epoch": 0.292365998917163,
"grad_norm": 0.419921875,
"learning_rate": 0.0004553815334183209,
"loss": 0.1536,
"step": 1080
},
{
"epoch": 0.29507309149972927,
"grad_norm": 0.2734375,
"learning_rate": 0.0004549262429429976,
"loss": 0.1654,
"step": 1090
},
{
"epoch": 0.2977801840822956,
"grad_norm": 0.318359375,
"learning_rate": 0.0004544709524676744,
"loss": 0.1598,
"step": 1100
},
{
"epoch": 0.2977801840822956,
"eval_loss": 0.22400033473968506,
"eval_runtime": 50.8906,
"eval_samples_per_second": 9.825,
"eval_steps_per_second": 0.314,
"step": 1100
},
{
"epoch": 0.3004872766648619,
"grad_norm": 0.24609375,
"learning_rate": 0.0004540156619923511,
"loss": 0.2339,
"step": 1110
},
{
"epoch": 0.30319436924742826,
"grad_norm": 0.2275390625,
"learning_rate": 0.0004535603715170279,
"loss": 0.1969,
"step": 1120
},
{
"epoch": 0.30590146182999456,
"grad_norm": 0.18359375,
"learning_rate": 0.00045310508104170463,
"loss": 0.167,
"step": 1130
},
{
"epoch": 0.3086085544125609,
"grad_norm": 0.4765625,
"learning_rate": 0.00045264979056638136,
"loss": 0.1763,
"step": 1140
},
{
"epoch": 0.31131564699512726,
"grad_norm": 0.314453125,
"learning_rate": 0.00045219450009105814,
"loss": 0.14,
"step": 1150
},
{
"epoch": 0.31402273957769355,
"grad_norm": 0.326171875,
"learning_rate": 0.0004517392096157348,
"loss": 0.1583,
"step": 1160
},
{
"epoch": 0.3167298321602599,
"grad_norm": 0.53515625,
"learning_rate": 0.0004512839191404116,
"loss": 0.1911,
"step": 1170
},
{
"epoch": 0.3194369247428262,
"grad_norm": 0.384765625,
"learning_rate": 0.0004508286286650883,
"loss": 0.2127,
"step": 1180
},
{
"epoch": 0.32214401732539255,
"grad_norm": 0.19140625,
"learning_rate": 0.00045037333818976504,
"loss": 0.1617,
"step": 1190
},
{
"epoch": 0.32485110990795885,
"grad_norm": 0.41015625,
"learning_rate": 0.0004499180477144418,
"loss": 0.146,
"step": 1200
},
{
"epoch": 0.32485110990795885,
"eval_loss": 0.21456098556518555,
"eval_runtime": 51.3811,
"eval_samples_per_second": 9.731,
"eval_steps_per_second": 0.311,
"step": 1200
},
{
"epoch": 0.3275582024905252,
"grad_norm": 0.224609375,
"learning_rate": 0.00044946275723911855,
"loss": 0.1529,
"step": 1210
},
{
"epoch": 0.3302652950730915,
"grad_norm": 0.31640625,
"learning_rate": 0.00044900746676379533,
"loss": 0.1624,
"step": 1220
},
{
"epoch": 0.33297238765565784,
"grad_norm": 0.2275390625,
"learning_rate": 0.00044855217628847206,
"loss": 0.2105,
"step": 1230
},
{
"epoch": 0.33567948023822414,
"grad_norm": 0.166015625,
"learning_rate": 0.0004480968858131488,
"loss": 0.1419,
"step": 1240
},
{
"epoch": 0.3383865728207905,
"grad_norm": 0.41015625,
"learning_rate": 0.00044764159533782557,
"loss": 0.1838,
"step": 1250
},
{
"epoch": 0.3410936654033568,
"grad_norm": 0.234375,
"learning_rate": 0.0004471863048625023,
"loss": 0.1583,
"step": 1260
},
{
"epoch": 0.34380075798592313,
"grad_norm": 0.30859375,
"learning_rate": 0.000446731014387179,
"loss": 0.1838,
"step": 1270
},
{
"epoch": 0.34650785056848943,
"grad_norm": 0.23046875,
"learning_rate": 0.0004462757239118558,
"loss": 0.1976,
"step": 1280
},
{
"epoch": 0.3492149431510558,
"grad_norm": 0.4609375,
"learning_rate": 0.0004458204334365325,
"loss": 0.1799,
"step": 1290
},
{
"epoch": 0.3519220357336221,
"grad_norm": 0.515625,
"learning_rate": 0.0004453651429612093,
"loss": 0.2087,
"step": 1300
},
{
"epoch": 0.3519220357336221,
"eval_loss": 0.20530980825424194,
"eval_runtime": 51.1083,
"eval_samples_per_second": 9.783,
"eval_steps_per_second": 0.313,
"step": 1300
},
{
"epoch": 0.3546291283161884,
"grad_norm": 0.1318359375,
"learning_rate": 0.000444909852485886,
"loss": 0.174,
"step": 1310
},
{
"epoch": 0.3573362208987547,
"grad_norm": 0.390625,
"learning_rate": 0.0004444545620105627,
"loss": 0.1743,
"step": 1320
},
{
"epoch": 0.36004331348132107,
"grad_norm": 0.36328125,
"learning_rate": 0.0004439992715352395,
"loss": 0.188,
"step": 1330
},
{
"epoch": 0.36275040606388737,
"grad_norm": 0.330078125,
"learning_rate": 0.0004435439810599162,
"loss": 0.1952,
"step": 1340
},
{
"epoch": 0.3654574986464537,
"grad_norm": 0.28515625,
"learning_rate": 0.000443088690584593,
"loss": 0.1742,
"step": 1350
},
{
"epoch": 0.36816459122902,
"grad_norm": 0.2138671875,
"learning_rate": 0.0004426334001092697,
"loss": 0.1285,
"step": 1360
},
{
"epoch": 0.37087168381158636,
"grad_norm": 0.2578125,
"learning_rate": 0.00044217810963394645,
"loss": 0.1621,
"step": 1370
},
{
"epoch": 0.37357877639415266,
"grad_norm": 0.38671875,
"learning_rate": 0.0004417228191586232,
"loss": 0.1448,
"step": 1380
},
{
"epoch": 0.376285868976719,
"grad_norm": 0.28125,
"learning_rate": 0.00044126752868329995,
"loss": 0.1837,
"step": 1390
},
{
"epoch": 0.3789929615592853,
"grad_norm": 0.16015625,
"learning_rate": 0.00044081223820797673,
"loss": 0.1831,
"step": 1400
},
{
"epoch": 0.3789929615592853,
"eval_loss": 0.19879956543445587,
"eval_runtime": 49.8699,
"eval_samples_per_second": 10.026,
"eval_steps_per_second": 0.321,
"step": 1400
},
{
"epoch": 0.38170005414185165,
"grad_norm": 0.271484375,
"learning_rate": 0.00044035694773265346,
"loss": 0.1556,
"step": 1410
},
{
"epoch": 0.38440714672441795,
"grad_norm": 0.2412109375,
"learning_rate": 0.0004399016572573302,
"loss": 0.1856,
"step": 1420
},
{
"epoch": 0.3871142393069843,
"grad_norm": 0.341796875,
"learning_rate": 0.00043944636678200697,
"loss": 0.1639,
"step": 1430
},
{
"epoch": 0.38982133188955065,
"grad_norm": 0.2734375,
"learning_rate": 0.0004389910763066837,
"loss": 0.1609,
"step": 1440
},
{
"epoch": 0.39252842447211694,
"grad_norm": 0.1865234375,
"learning_rate": 0.00043853578583136037,
"loss": 0.1906,
"step": 1450
},
{
"epoch": 0.3952355170546833,
"grad_norm": 0.25390625,
"learning_rate": 0.00043808049535603715,
"loss": 0.1423,
"step": 1460
},
{
"epoch": 0.3979426096372496,
"grad_norm": 0.248046875,
"learning_rate": 0.0004376252048807139,
"loss": 0.1549,
"step": 1470
},
{
"epoch": 0.40064970221981594,
"grad_norm": 0.25,
"learning_rate": 0.00043716991440539065,
"loss": 0.1808,
"step": 1480
},
{
"epoch": 0.40335679480238223,
"grad_norm": 0.2392578125,
"learning_rate": 0.0004367146239300674,
"loss": 0.1612,
"step": 1490
},
{
"epoch": 0.4060638873849486,
"grad_norm": 0.1982421875,
"learning_rate": 0.0004362593334547441,
"loss": 0.1655,
"step": 1500
},
{
"epoch": 0.4060638873849486,
"eval_loss": 0.20820540189743042,
"eval_runtime": 49.8508,
"eval_samples_per_second": 10.03,
"eval_steps_per_second": 0.321,
"step": 1500
},
{
"epoch": 0.4087709799675149,
"grad_norm": 0.40625,
"learning_rate": 0.0004358040429794209,
"loss": 0.172,
"step": 1510
},
{
"epoch": 0.41147807255008123,
"grad_norm": 0.439453125,
"learning_rate": 0.0004353487525040976,
"loss": 0.1782,
"step": 1520
},
{
"epoch": 0.4141851651326475,
"grad_norm": 0.4140625,
"learning_rate": 0.0004348934620287744,
"loss": 0.2182,
"step": 1530
},
{
"epoch": 0.4168922577152139,
"grad_norm": 0.205078125,
"learning_rate": 0.0004344381715534511,
"loss": 0.1459,
"step": 1540
},
{
"epoch": 0.41959935029778017,
"grad_norm": 0.361328125,
"learning_rate": 0.00043398288107812785,
"loss": 0.165,
"step": 1550
},
{
"epoch": 0.4223064428803465,
"grad_norm": 0.3203125,
"learning_rate": 0.00043352759060280463,
"loss": 0.1253,
"step": 1560
},
{
"epoch": 0.4250135354629128,
"grad_norm": 0.17578125,
"learning_rate": 0.00043307230012748136,
"loss": 0.1682,
"step": 1570
},
{
"epoch": 0.42772062804547917,
"grad_norm": 0.474609375,
"learning_rate": 0.00043261700965215814,
"loss": 0.1921,
"step": 1580
},
{
"epoch": 0.43042772062804546,
"grad_norm": 0.326171875,
"learning_rate": 0.0004321617191768348,
"loss": 0.1683,
"step": 1590
},
{
"epoch": 0.4331348132106118,
"grad_norm": 0.1767578125,
"learning_rate": 0.00043170642870151154,
"loss": 0.1625,
"step": 1600
},
{
"epoch": 0.4331348132106118,
"eval_loss": 0.2006607949733734,
"eval_runtime": 50.6005,
"eval_samples_per_second": 9.881,
"eval_steps_per_second": 0.316,
"step": 1600
},
{
"epoch": 0.4358419057931781,
"grad_norm": 0.11279296875,
"learning_rate": 0.0004312511382261883,
"loss": 0.1418,
"step": 1610
},
{
"epoch": 0.43854899837574446,
"grad_norm": 0.177734375,
"learning_rate": 0.00043079584775086504,
"loss": 0.1838,
"step": 1620
},
{
"epoch": 0.44125609095831075,
"grad_norm": 0.2265625,
"learning_rate": 0.00043034055727554177,
"loss": 0.1515,
"step": 1630
},
{
"epoch": 0.4439631835408771,
"grad_norm": 0.578125,
"learning_rate": 0.00042988526680021855,
"loss": 0.1601,
"step": 1640
},
{
"epoch": 0.4466702761234434,
"grad_norm": 0.58203125,
"learning_rate": 0.0004294299763248953,
"loss": 0.1776,
"step": 1650
},
{
"epoch": 0.44937736870600975,
"grad_norm": 0.2314453125,
"learning_rate": 0.00042897468584957206,
"loss": 0.1732,
"step": 1660
},
{
"epoch": 0.45208446128857604,
"grad_norm": 0.33984375,
"learning_rate": 0.0004285193953742488,
"loss": 0.1717,
"step": 1670
},
{
"epoch": 0.4547915538711424,
"grad_norm": 0.6015625,
"learning_rate": 0.0004280641048989255,
"loss": 0.1935,
"step": 1680
},
{
"epoch": 0.4574986464537087,
"grad_norm": 0.154296875,
"learning_rate": 0.0004276088144236023,
"loss": 0.1489,
"step": 1690
},
{
"epoch": 0.46020573903627504,
"grad_norm": 0.11962890625,
"learning_rate": 0.000427153523948279,
"loss": 0.1372,
"step": 1700
},
{
"epoch": 0.46020573903627504,
"eval_loss": 0.2127913236618042,
"eval_runtime": 51.3425,
"eval_samples_per_second": 9.739,
"eval_steps_per_second": 0.312,
"step": 1700
},
{
"epoch": 0.4629128316188414,
"grad_norm": 0.298828125,
"learning_rate": 0.0004266982334729558,
"loss": 0.17,
"step": 1710
},
{
"epoch": 0.4656199242014077,
"grad_norm": 0.224609375,
"learning_rate": 0.0004262429429976325,
"loss": 0.1759,
"step": 1720
},
{
"epoch": 0.46832701678397404,
"grad_norm": 0.2353515625,
"learning_rate": 0.0004257876525223092,
"loss": 0.1776,
"step": 1730
},
{
"epoch": 0.47103410936654033,
"grad_norm": 0.2060546875,
"learning_rate": 0.000425332362046986,
"loss": 0.1677,
"step": 1740
},
{
"epoch": 0.4737412019491067,
"grad_norm": 0.255859375,
"learning_rate": 0.0004248770715716627,
"loss": 0.1782,
"step": 1750
},
{
"epoch": 0.476448294531673,
"grad_norm": 0.2314453125,
"learning_rate": 0.0004244217810963395,
"loss": 0.1623,
"step": 1760
},
{
"epoch": 0.47915538711423933,
"grad_norm": 0.322265625,
"learning_rate": 0.0004239664906210162,
"loss": 0.1823,
"step": 1770
},
{
"epoch": 0.4818624796968056,
"grad_norm": 0.134765625,
"learning_rate": 0.00042351120014569294,
"loss": 0.1942,
"step": 1780
},
{
"epoch": 0.484569572279372,
"grad_norm": 0.275390625,
"learning_rate": 0.0004230559096703697,
"loss": 0.1567,
"step": 1790
},
{
"epoch": 0.48727666486193827,
"grad_norm": 0.41796875,
"learning_rate": 0.00042260061919504645,
"loss": 0.1743,
"step": 1800
},
{
"epoch": 0.48727666486193827,
"eval_loss": 0.19061319530010223,
"eval_runtime": 51.1815,
"eval_samples_per_second": 9.769,
"eval_steps_per_second": 0.313,
"step": 1800
},
{
"epoch": 0.4899837574445046,
"grad_norm": 0.2158203125,
"learning_rate": 0.00042214532871972317,
"loss": 0.1434,
"step": 1810
},
{
"epoch": 0.4926908500270709,
"grad_norm": 0.283203125,
"learning_rate": 0.00042169003824439995,
"loss": 0.1843,
"step": 1820
},
{
"epoch": 0.49539794260963727,
"grad_norm": 0.58203125,
"learning_rate": 0.0004212347477690767,
"loss": 0.1689,
"step": 1830
},
{
"epoch": 0.49810503519220356,
"grad_norm": 0.220703125,
"learning_rate": 0.00042077945729375346,
"loss": 0.1576,
"step": 1840
},
{
"epoch": 0.5008121277747699,
"grad_norm": 0.287109375,
"learning_rate": 0.0004203241668184302,
"loss": 0.1912,
"step": 1850
},
{
"epoch": 0.5035192203573362,
"grad_norm": 0.33203125,
"learning_rate": 0.0004198688763431069,
"loss": 0.17,
"step": 1860
},
{
"epoch": 0.5062263129399025,
"grad_norm": 0.095703125,
"learning_rate": 0.00041941358586778364,
"loss": 0.1596,
"step": 1870
},
{
"epoch": 0.5089334055224689,
"grad_norm": 0.359375,
"learning_rate": 0.00041895829539246037,
"loss": 0.1591,
"step": 1880
},
{
"epoch": 0.5116404981050352,
"grad_norm": 0.2392578125,
"learning_rate": 0.00041850300491713715,
"loss": 0.1489,
"step": 1890
},
{
"epoch": 0.5143475906876015,
"grad_norm": 0.1611328125,
"learning_rate": 0.0004180477144418139,
"loss": 0.141,
"step": 1900
},
{
"epoch": 0.5143475906876015,
"eval_loss": 0.18286480009555817,
"eval_runtime": 51.4361,
"eval_samples_per_second": 9.721,
"eval_steps_per_second": 0.311,
"step": 1900
},
{
"epoch": 0.5170546832701678,
"grad_norm": 0.1513671875,
"learning_rate": 0.0004175924239664906,
"loss": 0.1435,
"step": 1910
},
{
"epoch": 0.5197617758527342,
"grad_norm": 0.150390625,
"learning_rate": 0.0004171371334911674,
"loss": 0.1549,
"step": 1920
},
{
"epoch": 0.5224688684353005,
"grad_norm": 0.5546875,
"learning_rate": 0.0004166818430158441,
"loss": 0.1548,
"step": 1930
},
{
"epoch": 0.5251759610178668,
"grad_norm": 0.44140625,
"learning_rate": 0.0004162265525405209,
"loss": 0.1838,
"step": 1940
},
{
"epoch": 0.5278830536004331,
"grad_norm": 0.283203125,
"learning_rate": 0.0004157712620651976,
"loss": 0.1906,
"step": 1950
},
{
"epoch": 0.5305901461829995,
"grad_norm": 0.462890625,
"learning_rate": 0.00041531597158987434,
"loss": 0.1393,
"step": 1960
},
{
"epoch": 0.5332972387655658,
"grad_norm": 0.423828125,
"learning_rate": 0.0004148606811145511,
"loss": 0.167,
"step": 1970
},
{
"epoch": 0.5360043313481321,
"grad_norm": 0.1748046875,
"learning_rate": 0.00041440539063922785,
"loss": 0.1789,
"step": 1980
},
{
"epoch": 0.5387114239306985,
"grad_norm": 0.2099609375,
"learning_rate": 0.0004139501001639046,
"loss": 0.1693,
"step": 1990
},
{
"epoch": 0.5414185165132648,
"grad_norm": 0.142578125,
"learning_rate": 0.00041349480968858136,
"loss": 0.1407,
"step": 2000
},
{
"epoch": 0.5414185165132648,
"eval_loss": 0.1786070615053177,
"eval_runtime": 51.1698,
"eval_samples_per_second": 9.771,
"eval_steps_per_second": 0.313,
"step": 2000
},
{
"epoch": 0.5441256090958311,
"grad_norm": 0.3671875,
"learning_rate": 0.00041303951921325803,
"loss": 0.1487,
"step": 2010
},
{
"epoch": 0.5468327016783974,
"grad_norm": 0.45703125,
"learning_rate": 0.0004125842287379348,
"loss": 0.1458,
"step": 2020
},
{
"epoch": 0.5495397942609638,
"grad_norm": 0.2578125,
"learning_rate": 0.00041212893826261153,
"loss": 0.1763,
"step": 2030
},
{
"epoch": 0.5522468868435301,
"grad_norm": 0.1708984375,
"learning_rate": 0.00041167364778728826,
"loss": 0.1262,
"step": 2040
},
{
"epoch": 0.5549539794260964,
"grad_norm": 0.341796875,
"learning_rate": 0.00041121835731196504,
"loss": 0.1468,
"step": 2050
},
{
"epoch": 0.5576610720086627,
"grad_norm": 0.1669921875,
"learning_rate": 0.00041076306683664177,
"loss": 0.1275,
"step": 2060
},
{
"epoch": 0.5603681645912291,
"grad_norm": 0.4609375,
"learning_rate": 0.00041030777636131855,
"loss": 0.1663,
"step": 2070
},
{
"epoch": 0.5630752571737954,
"grad_norm": 0.359375,
"learning_rate": 0.0004098524858859953,
"loss": 0.158,
"step": 2080
},
{
"epoch": 0.5657823497563617,
"grad_norm": 0.2216796875,
"learning_rate": 0.000409397195410672,
"loss": 0.146,
"step": 2090
},
{
"epoch": 0.568489442338928,
"grad_norm": 0.412109375,
"learning_rate": 0.0004089419049353488,
"loss": 0.136,
"step": 2100
},
{
"epoch": 0.568489442338928,
"eval_loss": 0.1932000368833542,
"eval_runtime": 50.2017,
"eval_samples_per_second": 9.96,
"eval_steps_per_second": 0.319,
"step": 2100
},
{
"epoch": 0.5711965349214944,
"grad_norm": 0.244140625,
"learning_rate": 0.0004084866144600255,
"loss": 0.1242,
"step": 2110
},
{
"epoch": 0.5739036275040607,
"grad_norm": 0.2265625,
"learning_rate": 0.0004080313239847023,
"loss": 0.1162,
"step": 2120
},
{
"epoch": 0.576610720086627,
"grad_norm": 0.2734375,
"learning_rate": 0.000407576033509379,
"loss": 0.1615,
"step": 2130
},
{
"epoch": 0.5793178126691932,
"grad_norm": 0.349609375,
"learning_rate": 0.00040712074303405574,
"loss": 0.1513,
"step": 2140
},
{
"epoch": 0.5820249052517596,
"grad_norm": 0.34765625,
"learning_rate": 0.00040666545255873247,
"loss": 0.1466,
"step": 2150
},
{
"epoch": 0.584731997834326,
"grad_norm": 0.142578125,
"learning_rate": 0.0004062101620834092,
"loss": 0.1348,
"step": 2160
},
{
"epoch": 0.5874390904168922,
"grad_norm": 0.357421875,
"learning_rate": 0.0004057548716080859,
"loss": 0.1708,
"step": 2170
},
{
"epoch": 0.5901461829994585,
"grad_norm": 0.263671875,
"learning_rate": 0.0004052995811327627,
"loss": 0.1304,
"step": 2180
},
{
"epoch": 0.5928532755820249,
"grad_norm": 0.376953125,
"learning_rate": 0.00040484429065743943,
"loss": 0.1435,
"step": 2190
},
{
"epoch": 0.5955603681645912,
"grad_norm": 0.1689453125,
"learning_rate": 0.0004043890001821162,
"loss": 0.134,
"step": 2200
},
{
"epoch": 0.5955603681645912,
"eval_loss": 0.18312786519527435,
"eval_runtime": 51.6541,
"eval_samples_per_second": 9.68,
"eval_steps_per_second": 0.31,
"step": 2200
},
{
"epoch": 0.5982674607471575,
"grad_norm": 0.2158203125,
"learning_rate": 0.00040393370970679294,
"loss": 0.1884,
"step": 2210
},
{
"epoch": 0.6009745533297238,
"grad_norm": 0.2041015625,
"learning_rate": 0.00040347841923146966,
"loss": 0.1432,
"step": 2220
},
{
"epoch": 0.6036816459122902,
"grad_norm": 0.5390625,
"learning_rate": 0.00040302312875614644,
"loss": 0.1962,
"step": 2230
},
{
"epoch": 0.6063887384948565,
"grad_norm": 0.3828125,
"learning_rate": 0.00040256783828082317,
"loss": 0.1512,
"step": 2240
},
{
"epoch": 0.6090958310774228,
"grad_norm": 0.232421875,
"learning_rate": 0.00040211254780549995,
"loss": 0.1313,
"step": 2250
},
{
"epoch": 0.6118029236599891,
"grad_norm": 0.3203125,
"learning_rate": 0.0004016572573301767,
"loss": 0.1391,
"step": 2260
},
{
"epoch": 0.6145100162425555,
"grad_norm": 0.625,
"learning_rate": 0.0004012019668548534,
"loss": 0.1812,
"step": 2270
},
{
"epoch": 0.6172171088251218,
"grad_norm": 0.1875,
"learning_rate": 0.0004007466763795302,
"loss": 0.1542,
"step": 2280
},
{
"epoch": 0.6199242014076881,
"grad_norm": 0.298828125,
"learning_rate": 0.00040029138590420686,
"loss": 0.1433,
"step": 2290
},
{
"epoch": 0.6226312939902545,
"grad_norm": 0.419921875,
"learning_rate": 0.00039983609542888364,
"loss": 0.1541,
"step": 2300
},
{
"epoch": 0.6226312939902545,
"eval_loss": 0.17906835675239563,
"eval_runtime": 53.0935,
"eval_samples_per_second": 9.417,
"eval_steps_per_second": 0.301,
"step": 2300
},
{
"epoch": 0.6253383865728208,
"grad_norm": 0.220703125,
"learning_rate": 0.00039938080495356037,
"loss": 0.1498,
"step": 2310
},
{
"epoch": 0.6280454791553871,
"grad_norm": 0.41796875,
"learning_rate": 0.0003989255144782371,
"loss": 0.1688,
"step": 2320
},
{
"epoch": 0.6307525717379534,
"grad_norm": 0.380859375,
"learning_rate": 0.00039847022400291387,
"loss": 0.1467,
"step": 2330
},
{
"epoch": 0.6334596643205198,
"grad_norm": 0.2060546875,
"learning_rate": 0.0003980149335275906,
"loss": 0.1255,
"step": 2340
},
{
"epoch": 0.6361667569030861,
"grad_norm": 0.11083984375,
"learning_rate": 0.0003975596430522673,
"loss": 0.1284,
"step": 2350
},
{
"epoch": 0.6388738494856524,
"grad_norm": 0.33984375,
"learning_rate": 0.0003971043525769441,
"loss": 0.1225,
"step": 2360
},
{
"epoch": 0.6415809420682187,
"grad_norm": 0.208984375,
"learning_rate": 0.00039664906210162083,
"loss": 0.1784,
"step": 2370
},
{
"epoch": 0.6442880346507851,
"grad_norm": 0.4453125,
"learning_rate": 0.0003961937716262976,
"loss": 0.1707,
"step": 2380
},
{
"epoch": 0.6469951272333514,
"grad_norm": 0.1943359375,
"learning_rate": 0.00039573848115097434,
"loss": 0.1754,
"step": 2390
},
{
"epoch": 0.6497022198159177,
"grad_norm": 0.287109375,
"learning_rate": 0.00039528319067565107,
"loss": 0.1581,
"step": 2400
},
{
"epoch": 0.6497022198159177,
"eval_loss": 0.1758279949426651,
"eval_runtime": 50.8478,
"eval_samples_per_second": 9.833,
"eval_steps_per_second": 0.315,
"step": 2400
},
{
"epoch": 0.652409312398484,
"grad_norm": 0.474609375,
"learning_rate": 0.00039482790020032785,
"loss": 0.1885,
"step": 2410
},
{
"epoch": 0.6551164049810504,
"grad_norm": 0.51171875,
"learning_rate": 0.0003943726097250046,
"loss": 0.167,
"step": 2420
},
{
"epoch": 0.6578234975636167,
"grad_norm": 0.2734375,
"learning_rate": 0.00039391731924968135,
"loss": 0.1616,
"step": 2430
},
{
"epoch": 0.660530590146183,
"grad_norm": 0.2578125,
"learning_rate": 0.000393462028774358,
"loss": 0.1327,
"step": 2440
},
{
"epoch": 0.6632376827287493,
"grad_norm": 0.181640625,
"learning_rate": 0.00039300673829903475,
"loss": 0.155,
"step": 2450
},
{
"epoch": 0.6659447753113157,
"grad_norm": 0.291015625,
"learning_rate": 0.00039255144782371153,
"loss": 0.125,
"step": 2460
},
{
"epoch": 0.668651867893882,
"grad_norm": 0.23046875,
"learning_rate": 0.00039209615734838826,
"loss": 0.1478,
"step": 2470
},
{
"epoch": 0.6713589604764483,
"grad_norm": 0.3203125,
"learning_rate": 0.00039164086687306504,
"loss": 0.1478,
"step": 2480
},
{
"epoch": 0.6740660530590146,
"grad_norm": 0.53125,
"learning_rate": 0.00039118557639774177,
"loss": 0.1711,
"step": 2490
},
{
"epoch": 0.676773145641581,
"grad_norm": 0.34765625,
"learning_rate": 0.0003907302859224185,
"loss": 0.1608,
"step": 2500
},
{
"epoch": 0.676773145641581,
"eval_loss": 0.16929227113723755,
"eval_runtime": 52.5378,
"eval_samples_per_second": 9.517,
"eval_steps_per_second": 0.305,
"step": 2500
},
{
"epoch": 0.6794802382241473,
"grad_norm": 0.470703125,
"learning_rate": 0.0003902749954470953,
"loss": 0.1785,
"step": 2510
},
{
"epoch": 0.6821873308067136,
"grad_norm": 0.2431640625,
"learning_rate": 0.000389819704971772,
"loss": 0.1459,
"step": 2520
},
{
"epoch": 0.6848944233892799,
"grad_norm": 0.4921875,
"learning_rate": 0.00038936441449644873,
"loss": 0.1492,
"step": 2530
},
{
"epoch": 0.6876015159718463,
"grad_norm": 0.294921875,
"learning_rate": 0.0003889091240211255,
"loss": 0.1149,
"step": 2540
},
{
"epoch": 0.6903086085544126,
"grad_norm": 0.283203125,
"learning_rate": 0.00038845383354580224,
"loss": 0.1742,
"step": 2550
},
{
"epoch": 0.6930157011369789,
"grad_norm": 0.142578125,
"learning_rate": 0.000387998543070479,
"loss": 0.1301,
"step": 2560
},
{
"epoch": 0.6957227937195453,
"grad_norm": 0.083984375,
"learning_rate": 0.00038754325259515574,
"loss": 0.1445,
"step": 2570
},
{
"epoch": 0.6984298863021116,
"grad_norm": 0.38671875,
"learning_rate": 0.0003870879621198324,
"loss": 0.1139,
"step": 2580
},
{
"epoch": 0.7011369788846779,
"grad_norm": 0.326171875,
"learning_rate": 0.0003866326716445092,
"loss": 0.1363,
"step": 2590
},
{
"epoch": 0.7038440714672441,
"grad_norm": 0.27734375,
"learning_rate": 0.0003861773811691859,
"loss": 0.1236,
"step": 2600
},
{
"epoch": 0.7038440714672441,
"eval_loss": 0.1715136170387268,
"eval_runtime": 50.1305,
"eval_samples_per_second": 9.974,
"eval_steps_per_second": 0.319,
"step": 2600
},
{
"epoch": 0.7065511640498106,
"grad_norm": 0.3203125,
"learning_rate": 0.0003857220906938627,
"loss": 0.1507,
"step": 2610
},
{
"epoch": 0.7092582566323768,
"grad_norm": 0.1376953125,
"learning_rate": 0.00038526680021853943,
"loss": 0.1634,
"step": 2620
},
{
"epoch": 0.7119653492149431,
"grad_norm": 0.1669921875,
"learning_rate": 0.00038481150974321616,
"loss": 0.1723,
"step": 2630
},
{
"epoch": 0.7146724417975094,
"grad_norm": 0.23046875,
"learning_rate": 0.00038435621926789294,
"loss": 0.1341,
"step": 2640
},
{
"epoch": 0.7173795343800758,
"grad_norm": 0.2177734375,
"learning_rate": 0.00038390092879256966,
"loss": 0.1575,
"step": 2650
},
{
"epoch": 0.7200866269626421,
"grad_norm": 0.2314453125,
"learning_rate": 0.00038344563831724644,
"loss": 0.1506,
"step": 2660
},
{
"epoch": 0.7227937195452084,
"grad_norm": 0.2314453125,
"learning_rate": 0.00038299034784192317,
"loss": 0.1523,
"step": 2670
},
{
"epoch": 0.7255008121277747,
"grad_norm": 0.453125,
"learning_rate": 0.0003825350573665999,
"loss": 0.1587,
"step": 2680
},
{
"epoch": 0.7282079047103411,
"grad_norm": 0.2275390625,
"learning_rate": 0.0003820797668912767,
"loss": 0.1497,
"step": 2690
},
{
"epoch": 0.7309149972929074,
"grad_norm": 0.130859375,
"learning_rate": 0.0003816244764159534,
"loss": 0.1143,
"step": 2700
},
{
"epoch": 0.7309149972929074,
"eval_loss": 0.17223110795021057,
"eval_runtime": 51.7962,
"eval_samples_per_second": 9.653,
"eval_steps_per_second": 0.309,
"step": 2700
},
{
"epoch": 0.7336220898754737,
"grad_norm": 0.35546875,
"learning_rate": 0.0003811691859406301,
"loss": 0.1524,
"step": 2710
},
{
"epoch": 0.73632918245804,
"grad_norm": 0.19921875,
"learning_rate": 0.00038071389546530686,
"loss": 0.1468,
"step": 2720
},
{
"epoch": 0.7390362750406064,
"grad_norm": 0.2373046875,
"learning_rate": 0.0003802586049899836,
"loss": 0.1432,
"step": 2730
},
{
"epoch": 0.7417433676231727,
"grad_norm": 0.41015625,
"learning_rate": 0.00037980331451466036,
"loss": 0.1372,
"step": 2740
},
{
"epoch": 0.744450460205739,
"grad_norm": 0.228515625,
"learning_rate": 0.0003793480240393371,
"loss": 0.1337,
"step": 2750
},
{
"epoch": 0.7471575527883053,
"grad_norm": 0.61328125,
"learning_rate": 0.0003788927335640138,
"loss": 0.1624,
"step": 2760
},
{
"epoch": 0.7498646453708717,
"grad_norm": 0.2421875,
"learning_rate": 0.0003784374430886906,
"loss": 0.1501,
"step": 2770
},
{
"epoch": 0.752571737953438,
"grad_norm": 0.345703125,
"learning_rate": 0.0003779821526133673,
"loss": 0.1482,
"step": 2780
},
{
"epoch": 0.7552788305360043,
"grad_norm": 0.10986328125,
"learning_rate": 0.0003775268621380441,
"loss": 0.1289,
"step": 2790
},
{
"epoch": 0.7579859231185706,
"grad_norm": 0.35546875,
"learning_rate": 0.00037707157166272083,
"loss": 0.1625,
"step": 2800
},
{
"epoch": 0.7579859231185706,
"eval_loss": 0.16664518415927887,
"eval_runtime": 49.9223,
"eval_samples_per_second": 10.016,
"eval_steps_per_second": 0.32,
"step": 2800
},
{
"epoch": 0.760693015701137,
"grad_norm": 0.2451171875,
"learning_rate": 0.00037661628118739756,
"loss": 0.1518,
"step": 2810
},
{
"epoch": 0.7634001082837033,
"grad_norm": 0.326171875,
"learning_rate": 0.00037616099071207434,
"loss": 0.1197,
"step": 2820
},
{
"epoch": 0.7661072008662696,
"grad_norm": 0.375,
"learning_rate": 0.00037570570023675107,
"loss": 0.1053,
"step": 2830
},
{
"epoch": 0.7688142934488359,
"grad_norm": 0.283203125,
"learning_rate": 0.00037525040976142785,
"loss": 0.1734,
"step": 2840
},
{
"epoch": 0.7715213860314023,
"grad_norm": 0.1884765625,
"learning_rate": 0.00037479511928610457,
"loss": 0.1496,
"step": 2850
},
{
"epoch": 0.7742284786139686,
"grad_norm": 0.50390625,
"learning_rate": 0.00037433982881078124,
"loss": 0.1551,
"step": 2860
},
{
"epoch": 0.7769355711965349,
"grad_norm": 0.48046875,
"learning_rate": 0.000373884538335458,
"loss": 0.146,
"step": 2870
},
{
"epoch": 0.7796426637791013,
"grad_norm": 0.423828125,
"learning_rate": 0.00037342924786013475,
"loss": 0.1618,
"step": 2880
},
{
"epoch": 0.7823497563616676,
"grad_norm": 0.2392578125,
"learning_rate": 0.0003729739573848115,
"loss": 0.1487,
"step": 2890
},
{
"epoch": 0.7850568489442339,
"grad_norm": 0.1953125,
"learning_rate": 0.00037251866690948826,
"loss": 0.1107,
"step": 2900
},
{
"epoch": 0.7850568489442339,
"eval_loss": 0.17157955467700958,
"eval_runtime": 50.4579,
"eval_samples_per_second": 9.909,
"eval_steps_per_second": 0.317,
"step": 2900
},
{
"epoch": 0.7877639415268002,
"grad_norm": 0.40625,
"learning_rate": 0.000372063376434165,
"loss": 0.1585,
"step": 2910
},
{
"epoch": 0.7904710341093666,
"grad_norm": 0.365234375,
"learning_rate": 0.00037160808595884177,
"loss": 0.1348,
"step": 2920
},
{
"epoch": 0.7931781266919329,
"grad_norm": 0.369140625,
"learning_rate": 0.0003711527954835185,
"loss": 0.1585,
"step": 2930
},
{
"epoch": 0.7958852192744992,
"grad_norm": 0.1787109375,
"learning_rate": 0.0003706975050081952,
"loss": 0.1762,
"step": 2940
},
{
"epoch": 0.7985923118570655,
"grad_norm": 0.310546875,
"learning_rate": 0.000370242214532872,
"loss": 0.1445,
"step": 2950
},
{
"epoch": 0.8012994044396319,
"grad_norm": 0.263671875,
"learning_rate": 0.0003697869240575487,
"loss": 0.1678,
"step": 2960
},
{
"epoch": 0.8040064970221982,
"grad_norm": 0.265625,
"learning_rate": 0.0003693316335822255,
"loss": 0.1662,
"step": 2970
},
{
"epoch": 0.8067135896047645,
"grad_norm": 0.1884765625,
"learning_rate": 0.00036887634310690223,
"loss": 0.1478,
"step": 2980
},
{
"epoch": 0.8094206821873308,
"grad_norm": 0.099609375,
"learning_rate": 0.00036842105263157896,
"loss": 0.1412,
"step": 2990
},
{
"epoch": 0.8121277747698972,
"grad_norm": 0.251953125,
"learning_rate": 0.0003679657621562557,
"loss": 0.1212,
"step": 3000
},
{
"epoch": 0.8121277747698972,
"eval_loss": 0.17397309839725494,
"eval_runtime": 50.6131,
"eval_samples_per_second": 9.879,
"eval_steps_per_second": 0.316,
"step": 3000
},
{
"epoch": 0.8148348673524635,
"grad_norm": 0.2275390625,
"learning_rate": 0.0003675104716809324,
"loss": 0.149,
"step": 3010
},
{
"epoch": 0.8175419599350298,
"grad_norm": 0.275390625,
"learning_rate": 0.0003670551812056092,
"loss": 0.1344,
"step": 3020
},
{
"epoch": 0.8202490525175961,
"grad_norm": 0.0751953125,
"learning_rate": 0.0003665998907302859,
"loss": 0.1744,
"step": 3030
},
{
"epoch": 0.8229561451001625,
"grad_norm": 0.259765625,
"learning_rate": 0.00036614460025496265,
"loss": 0.1394,
"step": 3040
},
{
"epoch": 0.8256632376827288,
"grad_norm": 0.1865234375,
"learning_rate": 0.00036568930977963943,
"loss": 0.1359,
"step": 3050
},
{
"epoch": 0.828370330265295,
"grad_norm": 0.314453125,
"learning_rate": 0.00036523401930431615,
"loss": 0.1631,
"step": 3060
},
{
"epoch": 0.8310774228478613,
"grad_norm": 0.349609375,
"learning_rate": 0.0003647787288289929,
"loss": 0.1509,
"step": 3070
},
{
"epoch": 0.8337845154304278,
"grad_norm": 0.2119140625,
"learning_rate": 0.00036432343835366966,
"loss": 0.1534,
"step": 3080
},
{
"epoch": 0.836491608012994,
"grad_norm": 0.359375,
"learning_rate": 0.0003638681478783464,
"loss": 0.1473,
"step": 3090
},
{
"epoch": 0.8391987005955603,
"grad_norm": 0.486328125,
"learning_rate": 0.00036341285740302317,
"loss": 0.1406,
"step": 3100
},
{
"epoch": 0.8391987005955603,
"eval_loss": 0.1714637130498886,
"eval_runtime": 50.3998,
"eval_samples_per_second": 9.921,
"eval_steps_per_second": 0.317,
"step": 3100
},
{
"epoch": 0.8419057931781266,
"grad_norm": 0.3828125,
"learning_rate": 0.0003629575669276999,
"loss": 0.1712,
"step": 3110
},
{
"epoch": 0.844612885760693,
"grad_norm": 0.439453125,
"learning_rate": 0.0003625022764523766,
"loss": 0.162,
"step": 3120
},
{
"epoch": 0.8473199783432593,
"grad_norm": 0.3828125,
"learning_rate": 0.0003620469859770534,
"loss": 0.1609,
"step": 3130
},
{
"epoch": 0.8500270709258256,
"grad_norm": 0.2470703125,
"learning_rate": 0.0003615916955017301,
"loss": 0.1701,
"step": 3140
},
{
"epoch": 0.852734163508392,
"grad_norm": 0.349609375,
"learning_rate": 0.00036113640502640686,
"loss": 0.2107,
"step": 3150
},
{
"epoch": 0.8554412560909583,
"grad_norm": 0.41015625,
"learning_rate": 0.0003606811145510836,
"loss": 0.1407,
"step": 3160
},
{
"epoch": 0.8581483486735246,
"grad_norm": 0.2373046875,
"learning_rate": 0.0003602258240757603,
"loss": 0.1761,
"step": 3170
},
{
"epoch": 0.8608554412560909,
"grad_norm": 0.392578125,
"learning_rate": 0.0003597705336004371,
"loss": 0.1434,
"step": 3180
},
{
"epoch": 0.8635625338386573,
"grad_norm": 0.5625,
"learning_rate": 0.0003593152431251138,
"loss": 0.1506,
"step": 3190
},
{
"epoch": 0.8662696264212236,
"grad_norm": 0.3359375,
"learning_rate": 0.0003588599526497906,
"loss": 0.1245,
"step": 3200
},
{
"epoch": 0.8662696264212236,
"eval_loss": 0.16608840227127075,
"eval_runtime": 50.1258,
"eval_samples_per_second": 9.975,
"eval_steps_per_second": 0.319,
"step": 3200
},
{
"epoch": 0.8689767190037899,
"grad_norm": 0.2265625,
"learning_rate": 0.0003584046621744673,
"loss": 0.1403,
"step": 3210
},
{
"epoch": 0.8716838115863562,
"grad_norm": 0.12353515625,
"learning_rate": 0.00035794937169914405,
"loss": 0.1356,
"step": 3220
},
{
"epoch": 0.8743909041689226,
"grad_norm": 0.34765625,
"learning_rate": 0.00035749408122382083,
"loss": 0.1398,
"step": 3230
},
{
"epoch": 0.8770979967514889,
"grad_norm": 0.279296875,
"learning_rate": 0.00035703879074849756,
"loss": 0.1641,
"step": 3240
},
{
"epoch": 0.8798050893340552,
"grad_norm": 0.13671875,
"learning_rate": 0.0003565835002731743,
"loss": 0.1326,
"step": 3250
},
{
"epoch": 0.8825121819166215,
"grad_norm": 0.26953125,
"learning_rate": 0.00035612820979785106,
"loss": 0.1267,
"step": 3260
},
{
"epoch": 0.8852192744991879,
"grad_norm": 0.1474609375,
"learning_rate": 0.0003556729193225278,
"loss": 0.1514,
"step": 3270
},
{
"epoch": 0.8879263670817542,
"grad_norm": 0.45703125,
"learning_rate": 0.0003552176288472045,
"loss": 0.1558,
"step": 3280
},
{
"epoch": 0.8906334596643205,
"grad_norm": 0.357421875,
"learning_rate": 0.00035476233837188124,
"loss": 0.1496,
"step": 3290
},
{
"epoch": 0.8933405522468868,
"grad_norm": 0.1767578125,
"learning_rate": 0.00035430704789655797,
"loss": 0.1574,
"step": 3300
},
{
"epoch": 0.8933405522468868,
"eval_loss": 0.1689441204071045,
"eval_runtime": 50.2047,
"eval_samples_per_second": 9.959,
"eval_steps_per_second": 0.319,
"step": 3300
},
{
"epoch": 0.8960476448294532,
"grad_norm": 0.4375,
"learning_rate": 0.00035385175742123475,
"loss": 0.1382,
"step": 3310
},
{
"epoch": 0.8987547374120195,
"grad_norm": 0.30859375,
"learning_rate": 0.0003533964669459115,
"loss": 0.1095,
"step": 3320
},
{
"epoch": 0.9014618299945858,
"grad_norm": 0.3828125,
"learning_rate": 0.00035294117647058826,
"loss": 0.1643,
"step": 3330
},
{
"epoch": 0.9041689225771521,
"grad_norm": 0.279296875,
"learning_rate": 0.000352485885995265,
"loss": 0.1082,
"step": 3340
},
{
"epoch": 0.9068760151597185,
"grad_norm": 0.1904296875,
"learning_rate": 0.0003520305955199417,
"loss": 0.1377,
"step": 3350
},
{
"epoch": 0.9095831077422848,
"grad_norm": 0.2392578125,
"learning_rate": 0.0003515753050446185,
"loss": 0.1264,
"step": 3360
},
{
"epoch": 0.9122902003248511,
"grad_norm": 0.392578125,
"learning_rate": 0.0003511200145692952,
"loss": 0.1663,
"step": 3370
},
{
"epoch": 0.9149972929074174,
"grad_norm": 0.150390625,
"learning_rate": 0.000350664724093972,
"loss": 0.1463,
"step": 3380
},
{
"epoch": 0.9177043854899838,
"grad_norm": 0.1533203125,
"learning_rate": 0.0003502094336186487,
"loss": 0.1004,
"step": 3390
},
{
"epoch": 0.9204114780725501,
"grad_norm": 0.1806640625,
"learning_rate": 0.00034975414314332545,
"loss": 0.1394,
"step": 3400
},
{
"epoch": 0.9204114780725501,
"eval_loss": 0.16821011900901794,
"eval_runtime": 51.9302,
"eval_samples_per_second": 9.628,
"eval_steps_per_second": 0.308,
"step": 3400
},
{
"epoch": 0.9231185706551164,
"grad_norm": 0.455078125,
"learning_rate": 0.00034929885266800223,
"loss": 0.1351,
"step": 3410
},
{
"epoch": 0.9258256632376828,
"grad_norm": 0.310546875,
"learning_rate": 0.0003488435621926789,
"loss": 0.1314,
"step": 3420
},
{
"epoch": 0.9285327558202491,
"grad_norm": 0.2353515625,
"learning_rate": 0.00034838827171735563,
"loss": 0.1377,
"step": 3430
},
{
"epoch": 0.9312398484028154,
"grad_norm": 0.318359375,
"learning_rate": 0.0003479329812420324,
"loss": 0.143,
"step": 3440
},
{
"epoch": 0.9339469409853817,
"grad_norm": 0.34375,
"learning_rate": 0.00034747769076670914,
"loss": 0.1408,
"step": 3450
},
{
"epoch": 0.9366540335679481,
"grad_norm": 0.328125,
"learning_rate": 0.0003470224002913859,
"loss": 0.1777,
"step": 3460
},
{
"epoch": 0.9393611261505144,
"grad_norm": 0.16015625,
"learning_rate": 0.00034656710981606265,
"loss": 0.1183,
"step": 3470
},
{
"epoch": 0.9420682187330807,
"grad_norm": 0.6875,
"learning_rate": 0.0003461118193407394,
"loss": 0.1524,
"step": 3480
},
{
"epoch": 0.944775311315647,
"grad_norm": 0.265625,
"learning_rate": 0.00034565652886541615,
"loss": 0.155,
"step": 3490
},
{
"epoch": 0.9474824038982134,
"grad_norm": 0.2275390625,
"learning_rate": 0.0003452012383900929,
"loss": 0.145,
"step": 3500
},
{
"epoch": 0.9474824038982134,
"eval_loss": 0.16947884857654572,
"eval_runtime": 51.2584,
"eval_samples_per_second": 9.755,
"eval_steps_per_second": 0.312,
"step": 3500
},
{
"epoch": 0.9501894964807797,
"grad_norm": 0.71875,
"learning_rate": 0.00034474594791476966,
"loss": 0.1241,
"step": 3510
},
{
"epoch": 0.952896589063346,
"grad_norm": 0.2314453125,
"learning_rate": 0.0003442906574394464,
"loss": 0.1459,
"step": 3520
},
{
"epoch": 0.9556036816459123,
"grad_norm": 0.23046875,
"learning_rate": 0.0003438353669641231,
"loss": 0.1171,
"step": 3530
},
{
"epoch": 0.9583107742284787,
"grad_norm": 0.2109375,
"learning_rate": 0.0003433800764887999,
"loss": 0.1398,
"step": 3540
},
{
"epoch": 0.961017866811045,
"grad_norm": 0.302734375,
"learning_rate": 0.0003429247860134766,
"loss": 0.1196,
"step": 3550
},
{
"epoch": 0.9637249593936112,
"grad_norm": 0.17578125,
"learning_rate": 0.00034246949553815335,
"loss": 0.1478,
"step": 3560
},
{
"epoch": 0.9664320519761775,
"grad_norm": 0.23828125,
"learning_rate": 0.0003420142050628301,
"loss": 0.1532,
"step": 3570
},
{
"epoch": 0.969139144558744,
"grad_norm": 0.455078125,
"learning_rate": 0.0003415589145875068,
"loss": 0.1341,
"step": 3580
},
{
"epoch": 0.9718462371413102,
"grad_norm": 0.201171875,
"learning_rate": 0.0003411036241121836,
"loss": 0.1649,
"step": 3590
},
{
"epoch": 0.9745533297238765,
"grad_norm": 0.16796875,
"learning_rate": 0.0003406483336368603,
"loss": 0.1501,
"step": 3600
},
{
"epoch": 0.9745533297238765,
"eval_loss": 0.16499453783035278,
"eval_runtime": 52.0838,
"eval_samples_per_second": 9.6,
"eval_steps_per_second": 0.307,
"step": 3600
},
{
"epoch": 0.9772604223064428,
"grad_norm": 0.400390625,
"learning_rate": 0.00034019304316153703,
"loss": 0.1629,
"step": 3610
},
{
"epoch": 0.9799675148890092,
"grad_norm": 0.419921875,
"learning_rate": 0.0003397377526862138,
"loss": 0.1329,
"step": 3620
},
{
"epoch": 0.9826746074715755,
"grad_norm": 0.1728515625,
"learning_rate": 0.00033928246221089054,
"loss": 0.1379,
"step": 3630
},
{
"epoch": 0.9853817000541418,
"grad_norm": 0.1552734375,
"learning_rate": 0.0003388271717355673,
"loss": 0.1488,
"step": 3640
},
{
"epoch": 0.9880887926367081,
"grad_norm": 0.140625,
"learning_rate": 0.00033837188126024405,
"loss": 0.1597,
"step": 3650
},
{
"epoch": 0.9907958852192745,
"grad_norm": 0.45703125,
"learning_rate": 0.0003379165907849208,
"loss": 0.1496,
"step": 3660
},
{
"epoch": 0.9935029778018408,
"grad_norm": 0.140625,
"learning_rate": 0.00033746130030959756,
"loss": 0.1262,
"step": 3670
},
{
"epoch": 0.9962100703844071,
"grad_norm": 0.359375,
"learning_rate": 0.0003370060098342743,
"loss": 0.1194,
"step": 3680
},
{
"epoch": 0.9989171629669734,
"grad_norm": 0.1904296875,
"learning_rate": 0.00033655071935895106,
"loss": 0.1285,
"step": 3690
},
{
"epoch": 1.0016242555495398,
"grad_norm": 0.302734375,
"learning_rate": 0.00033609542888362774,
"loss": 0.1643,
"step": 3700
},
{
"epoch": 1.0016242555495398,
"eval_loss": 0.1689431369304657,
"eval_runtime": 50.9236,
"eval_samples_per_second": 9.819,
"eval_steps_per_second": 0.314,
"step": 3700
},
{
"epoch": 1.0043313481321061,
"grad_norm": 0.15625,
"learning_rate": 0.00033564013840830446,
"loss": 0.1191,
"step": 3710
},
{
"epoch": 1.0070384407146724,
"grad_norm": 0.234375,
"learning_rate": 0.00033518484793298124,
"loss": 0.116,
"step": 3720
},
{
"epoch": 1.0097455332972387,
"grad_norm": 0.1611328125,
"learning_rate": 0.00033472955745765797,
"loss": 0.1083,
"step": 3730
},
{
"epoch": 1.012452625879805,
"grad_norm": 0.1220703125,
"learning_rate": 0.00033427426698233475,
"loss": 0.1558,
"step": 3740
},
{
"epoch": 1.0151597184623715,
"grad_norm": 0.2265625,
"learning_rate": 0.0003338189765070115,
"loss": 0.1273,
"step": 3750
},
{
"epoch": 1.0178668110449378,
"grad_norm": 0.32421875,
"learning_rate": 0.0003333636860316882,
"loss": 0.145,
"step": 3760
},
{
"epoch": 1.020573903627504,
"grad_norm": 0.2333984375,
"learning_rate": 0.000332908395556365,
"loss": 0.1308,
"step": 3770
},
{
"epoch": 1.0232809962100704,
"grad_norm": 0.1611328125,
"learning_rate": 0.0003324531050810417,
"loss": 0.1483,
"step": 3780
},
{
"epoch": 1.0259880887926367,
"grad_norm": 0.3125,
"learning_rate": 0.00033199781460571844,
"loss": 0.1164,
"step": 3790
},
{
"epoch": 1.028695181375203,
"grad_norm": 0.384765625,
"learning_rate": 0.0003315425241303952,
"loss": 0.1587,
"step": 3800
},
{
"epoch": 1.028695181375203,
"eval_loss": 0.1659206598997116,
"eval_runtime": 50.5276,
"eval_samples_per_second": 9.896,
"eval_steps_per_second": 0.317,
"step": 3800
},
{
"epoch": 1.0314022739577693,
"grad_norm": 0.294921875,
"learning_rate": 0.00033108723365507194,
"loss": 0.125,
"step": 3810
},
{
"epoch": 1.0341093665403356,
"grad_norm": 0.1005859375,
"learning_rate": 0.0003306319431797487,
"loss": 0.1409,
"step": 3820
},
{
"epoch": 1.036816459122902,
"grad_norm": 0.134765625,
"learning_rate": 0.00033017665270442545,
"loss": 0.1236,
"step": 3830
},
{
"epoch": 1.0395235517054684,
"grad_norm": 0.134765625,
"learning_rate": 0.0003297213622291021,
"loss": 0.1327,
"step": 3840
},
{
"epoch": 1.0422306442880347,
"grad_norm": 0.162109375,
"learning_rate": 0.0003292660717537789,
"loss": 0.1083,
"step": 3850
},
{
"epoch": 1.044937736870601,
"grad_norm": 0.2431640625,
"learning_rate": 0.00032881078127845563,
"loss": 0.1372,
"step": 3860
},
{
"epoch": 1.0476448294531673,
"grad_norm": 0.412109375,
"learning_rate": 0.0003283554908031324,
"loss": 0.1111,
"step": 3870
},
{
"epoch": 1.0503519220357336,
"grad_norm": 0.298828125,
"learning_rate": 0.00032790020032780914,
"loss": 0.137,
"step": 3880
},
{
"epoch": 1.0530590146182999,
"grad_norm": 0.4765625,
"learning_rate": 0.00032744490985248587,
"loss": 0.1124,
"step": 3890
},
{
"epoch": 1.0557661072008662,
"grad_norm": 0.1650390625,
"learning_rate": 0.00032698961937716265,
"loss": 0.137,
"step": 3900
},
{
"epoch": 1.0557661072008662,
"eval_loss": 0.16376519203186035,
"eval_runtime": 50.6812,
"eval_samples_per_second": 9.866,
"eval_steps_per_second": 0.316,
"step": 3900
},
{
"epoch": 1.0584731997834327,
"grad_norm": 0.111328125,
"learning_rate": 0.00032653432890183937,
"loss": 0.1337,
"step": 3910
},
{
"epoch": 1.061180292365999,
"grad_norm": 0.2490234375,
"learning_rate": 0.00032607903842651615,
"loss": 0.0989,
"step": 3920
},
{
"epoch": 1.0638873849485653,
"grad_norm": 0.203125,
"learning_rate": 0.0003256237479511929,
"loss": 0.128,
"step": 3930
},
{
"epoch": 1.0665944775311316,
"grad_norm": 0.25390625,
"learning_rate": 0.0003251684574758696,
"loss": 0.1188,
"step": 3940
},
{
"epoch": 1.0693015701136979,
"grad_norm": 0.1982421875,
"learning_rate": 0.0003247131670005464,
"loss": 0.1374,
"step": 3950
},
{
"epoch": 1.0720086626962642,
"grad_norm": 0.177734375,
"learning_rate": 0.0003242578765252231,
"loss": 0.1486,
"step": 3960
},
{
"epoch": 1.0747157552788305,
"grad_norm": 0.12890625,
"learning_rate": 0.00032380258604989984,
"loss": 0.15,
"step": 3970
},
{
"epoch": 1.077422847861397,
"grad_norm": 0.365234375,
"learning_rate": 0.0003233472955745766,
"loss": 0.148,
"step": 3980
},
{
"epoch": 1.0801299404439633,
"grad_norm": 0.271484375,
"learning_rate": 0.0003228920050992533,
"loss": 0.1351,
"step": 3990
},
{
"epoch": 1.0828370330265296,
"grad_norm": 0.1640625,
"learning_rate": 0.0003224367146239301,
"loss": 0.1109,
"step": 4000
},
{
"epoch": 1.0828370330265296,
"eval_loss": 0.1612129956483841,
"eval_runtime": 50.3507,
"eval_samples_per_second": 9.93,
"eval_steps_per_second": 0.318,
"step": 4000
},
{
"epoch": 1.0855441256090959,
"grad_norm": 0.1376953125,
"learning_rate": 0.0003219814241486068,
"loss": 0.105,
"step": 4010
},
{
"epoch": 1.0882512181916622,
"grad_norm": 0.359375,
"learning_rate": 0.0003215261336732835,
"loss": 0.1322,
"step": 4020
},
{
"epoch": 1.0909583107742284,
"grad_norm": 0.173828125,
"learning_rate": 0.0003210708431979603,
"loss": 0.127,
"step": 4030
},
{
"epoch": 1.0936654033567947,
"grad_norm": 0.2099609375,
"learning_rate": 0.00032061555272263703,
"loss": 0.1185,
"step": 4040
},
{
"epoch": 1.096372495939361,
"grad_norm": 0.3359375,
"learning_rate": 0.0003201602622473138,
"loss": 0.129,
"step": 4050
},
{
"epoch": 1.0990795885219276,
"grad_norm": 0.10107421875,
"learning_rate": 0.00031970497177199054,
"loss": 0.0838,
"step": 4060
},
{
"epoch": 1.1017866811044938,
"grad_norm": 0.4140625,
"learning_rate": 0.00031924968129666727,
"loss": 0.1047,
"step": 4070
},
{
"epoch": 1.1044937736870601,
"grad_norm": 0.123046875,
"learning_rate": 0.00031879439082134405,
"loss": 0.1305,
"step": 4080
},
{
"epoch": 1.1072008662696264,
"grad_norm": 0.279296875,
"learning_rate": 0.0003183391003460208,
"loss": 0.1006,
"step": 4090
},
{
"epoch": 1.1099079588521927,
"grad_norm": 0.2392578125,
"learning_rate": 0.00031788380987069756,
"loss": 0.1227,
"step": 4100
},
{
"epoch": 1.1099079588521927,
"eval_loss": 0.16056661307811737,
"eval_runtime": 51.1636,
"eval_samples_per_second": 9.773,
"eval_steps_per_second": 0.313,
"step": 4100
},
{
"epoch": 1.112615051434759,
"grad_norm": 0.365234375,
"learning_rate": 0.0003174285193953743,
"loss": 0.1338,
"step": 4110
},
{
"epoch": 1.1153221440173253,
"grad_norm": 0.189453125,
"learning_rate": 0.000316973228920051,
"loss": 0.1076,
"step": 4120
},
{
"epoch": 1.1180292365998916,
"grad_norm": 0.1328125,
"learning_rate": 0.00031651793844472774,
"loss": 0.1242,
"step": 4130
},
{
"epoch": 1.1207363291824581,
"grad_norm": 0.41015625,
"learning_rate": 0.00031606264796940446,
"loss": 0.1359,
"step": 4140
},
{
"epoch": 1.1234434217650244,
"grad_norm": 0.134765625,
"learning_rate": 0.0003156073574940812,
"loss": 0.1229,
"step": 4150
},
{
"epoch": 1.1261505143475907,
"grad_norm": 0.326171875,
"learning_rate": 0.00031515206701875797,
"loss": 0.1236,
"step": 4160
},
{
"epoch": 1.128857606930157,
"grad_norm": 0.271484375,
"learning_rate": 0.0003146967765434347,
"loss": 0.1342,
"step": 4170
},
{
"epoch": 1.1315646995127233,
"grad_norm": 0.19140625,
"learning_rate": 0.0003142414860681115,
"loss": 0.0986,
"step": 4180
},
{
"epoch": 1.1342717920952896,
"grad_norm": 0.345703125,
"learning_rate": 0.0003137861955927882,
"loss": 0.1257,
"step": 4190
},
{
"epoch": 1.136978884677856,
"grad_norm": 0.1904296875,
"learning_rate": 0.00031333090511746493,
"loss": 0.1356,
"step": 4200
},
{
"epoch": 1.136978884677856,
"eval_loss": 0.1527666598558426,
"eval_runtime": 50.0731,
"eval_samples_per_second": 9.985,
"eval_steps_per_second": 0.32,
"step": 4200
},
{
"epoch": 1.1396859772604224,
"grad_norm": 0.33984375,
"learning_rate": 0.0003128756146421417,
"loss": 0.1214,
"step": 4210
},
{
"epoch": 1.1423930698429887,
"grad_norm": 0.1982421875,
"learning_rate": 0.00031242032416681844,
"loss": 0.1158,
"step": 4220
},
{
"epoch": 1.145100162425555,
"grad_norm": 0.296875,
"learning_rate": 0.0003119650336914952,
"loss": 0.1128,
"step": 4230
},
{
"epoch": 1.1478072550081213,
"grad_norm": 0.1259765625,
"learning_rate": 0.00031150974321617194,
"loss": 0.0777,
"step": 4240
},
{
"epoch": 1.1505143475906876,
"grad_norm": 0.294921875,
"learning_rate": 0.00031105445274084867,
"loss": 0.1316,
"step": 4250
},
{
"epoch": 1.153221440173254,
"grad_norm": 0.3671875,
"learning_rate": 0.00031059916226552545,
"loss": 0.1176,
"step": 4260
},
{
"epoch": 1.1559285327558202,
"grad_norm": 0.1826171875,
"learning_rate": 0.0003101438717902021,
"loss": 0.1292,
"step": 4270
},
{
"epoch": 1.1586356253383865,
"grad_norm": 0.294921875,
"learning_rate": 0.0003096885813148789,
"loss": 0.1368,
"step": 4280
},
{
"epoch": 1.1613427179209528,
"grad_norm": 0.25,
"learning_rate": 0.00030923329083955563,
"loss": 0.1283,
"step": 4290
},
{
"epoch": 1.1640498105035193,
"grad_norm": 0.283203125,
"learning_rate": 0.00030877800036423236,
"loss": 0.1383,
"step": 4300
},
{
"epoch": 1.1640498105035193,
"eval_loss": 0.1612272709608078,
"eval_runtime": 50.8695,
"eval_samples_per_second": 9.829,
"eval_steps_per_second": 0.315,
"step": 4300
},
{
"epoch": 1.1667569030860856,
"grad_norm": 0.326171875,
"learning_rate": 0.00030832270988890914,
"loss": 0.1211,
"step": 4310
},
{
"epoch": 1.169463995668652,
"grad_norm": 0.1552734375,
"learning_rate": 0.00030786741941358586,
"loss": 0.1308,
"step": 4320
},
{
"epoch": 1.1721710882512182,
"grad_norm": 0.1845703125,
"learning_rate": 0.0003074121289382626,
"loss": 0.1196,
"step": 4330
},
{
"epoch": 1.1748781808337845,
"grad_norm": 0.23046875,
"learning_rate": 0.00030695683846293937,
"loss": 0.1596,
"step": 4340
},
{
"epoch": 1.1775852734163508,
"grad_norm": 0.2216796875,
"learning_rate": 0.0003065015479876161,
"loss": 0.1029,
"step": 4350
},
{
"epoch": 1.180292365998917,
"grad_norm": 0.1220703125,
"learning_rate": 0.0003060462575122929,
"loss": 0.0975,
"step": 4360
},
{
"epoch": 1.1829994585814836,
"grad_norm": 0.5390625,
"learning_rate": 0.0003055909670369696,
"loss": 0.1485,
"step": 4370
},
{
"epoch": 1.1857065511640499,
"grad_norm": 0.451171875,
"learning_rate": 0.00030513567656164633,
"loss": 0.1329,
"step": 4380
},
{
"epoch": 1.1884136437466162,
"grad_norm": 0.2353515625,
"learning_rate": 0.0003046803860863231,
"loss": 0.1443,
"step": 4390
},
{
"epoch": 1.1911207363291825,
"grad_norm": 0.2197265625,
"learning_rate": 0.00030422509561099984,
"loss": 0.1266,
"step": 4400
},
{
"epoch": 1.1911207363291825,
"eval_loss": 0.15496616065502167,
"eval_runtime": 50.2857,
"eval_samples_per_second": 9.943,
"eval_steps_per_second": 0.318,
"step": 4400
},
{
"epoch": 1.1938278289117488,
"grad_norm": 0.119140625,
"learning_rate": 0.00030376980513567657,
"loss": 0.0877,
"step": 4410
},
{
"epoch": 1.196534921494315,
"grad_norm": 0.470703125,
"learning_rate": 0.0003033145146603533,
"loss": 0.1178,
"step": 4420
},
{
"epoch": 1.1992420140768814,
"grad_norm": 0.43359375,
"learning_rate": 0.00030285922418503,
"loss": 0.1191,
"step": 4430
},
{
"epoch": 1.2019491066594479,
"grad_norm": 0.205078125,
"learning_rate": 0.0003024039337097068,
"loss": 0.1465,
"step": 4440
},
{
"epoch": 1.2046561992420142,
"grad_norm": 0.46875,
"learning_rate": 0.0003019486432343835,
"loss": 0.1411,
"step": 4450
},
{
"epoch": 1.2073632918245805,
"grad_norm": 0.34375,
"learning_rate": 0.0003014933527590603,
"loss": 0.139,
"step": 4460
},
{
"epoch": 1.2100703844071468,
"grad_norm": 0.0703125,
"learning_rate": 0.00030103806228373703,
"loss": 0.1043,
"step": 4470
},
{
"epoch": 1.212777476989713,
"grad_norm": 0.19921875,
"learning_rate": 0.00030058277180841376,
"loss": 0.133,
"step": 4480
},
{
"epoch": 1.2154845695722793,
"grad_norm": 0.0830078125,
"learning_rate": 0.00030012748133309054,
"loss": 0.1323,
"step": 4490
},
{
"epoch": 1.2181916621548456,
"grad_norm": 0.376953125,
"learning_rate": 0.00029967219085776727,
"loss": 0.0916,
"step": 4500
},
{
"epoch": 1.2181916621548456,
"eval_loss": 0.15184161067008972,
"eval_runtime": 51.497,
"eval_samples_per_second": 9.709,
"eval_steps_per_second": 0.311,
"step": 4500
},
{
"epoch": 1.220898754737412,
"grad_norm": 0.34765625,
"learning_rate": 0.000299216900382444,
"loss": 0.1554,
"step": 4510
},
{
"epoch": 1.2236058473199782,
"grad_norm": 0.291015625,
"learning_rate": 0.0002987616099071208,
"loss": 0.119,
"step": 4520
},
{
"epoch": 1.2263129399025448,
"grad_norm": 0.3984375,
"learning_rate": 0.0002983063194317975,
"loss": 0.1219,
"step": 4530
},
{
"epoch": 1.229020032485111,
"grad_norm": 0.2109375,
"learning_rate": 0.0002978510289564743,
"loss": 0.1145,
"step": 4540
},
{
"epoch": 1.2317271250676773,
"grad_norm": 0.189453125,
"learning_rate": 0.00029739573848115095,
"loss": 0.1175,
"step": 4550
},
{
"epoch": 1.2344342176502436,
"grad_norm": 0.4453125,
"learning_rate": 0.0002969404480058277,
"loss": 0.1393,
"step": 4560
},
{
"epoch": 1.23714131023281,
"grad_norm": 0.2470703125,
"learning_rate": 0.00029648515753050446,
"loss": 0.1182,
"step": 4570
},
{
"epoch": 1.2398484028153762,
"grad_norm": 0.275390625,
"learning_rate": 0.0002960298670551812,
"loss": 0.1182,
"step": 4580
},
{
"epoch": 1.2425554953979425,
"grad_norm": 0.2333984375,
"learning_rate": 0.00029557457657985797,
"loss": 0.1228,
"step": 4590
},
{
"epoch": 1.245262587980509,
"grad_norm": 0.16796875,
"learning_rate": 0.0002951192861045347,
"loss": 0.1389,
"step": 4600
},
{
"epoch": 1.245262587980509,
"eval_loss": 0.15244832634925842,
"eval_runtime": 48.8981,
"eval_samples_per_second": 10.225,
"eval_steps_per_second": 0.327,
"step": 4600
},
{
"epoch": 1.2479696805630753,
"grad_norm": 0.2578125,
"learning_rate": 0.0002946639956292114,
"loss": 0.116,
"step": 4610
},
{
"epoch": 1.2506767731456416,
"grad_norm": 0.16796875,
"learning_rate": 0.0002942087051538882,
"loss": 0.1196,
"step": 4620
},
{
"epoch": 1.253383865728208,
"grad_norm": 0.2275390625,
"learning_rate": 0.00029375341467856493,
"loss": 0.1146,
"step": 4630
},
{
"epoch": 1.2560909583107742,
"grad_norm": 0.10791015625,
"learning_rate": 0.0002932981242032417,
"loss": 0.1242,
"step": 4640
},
{
"epoch": 1.2587980508933405,
"grad_norm": 0.1728515625,
"learning_rate": 0.00029284283372791844,
"loss": 0.0968,
"step": 4650
},
{
"epoch": 1.2615051434759068,
"grad_norm": 0.13671875,
"learning_rate": 0.00029238754325259516,
"loss": 0.1109,
"step": 4660
},
{
"epoch": 1.2642122360584733,
"grad_norm": 0.2197265625,
"learning_rate": 0.00029193225277727194,
"loss": 0.108,
"step": 4670
},
{
"epoch": 1.2669193286410394,
"grad_norm": 0.439453125,
"learning_rate": 0.00029147696230194867,
"loss": 0.1303,
"step": 4680
},
{
"epoch": 1.269626421223606,
"grad_norm": 0.185546875,
"learning_rate": 0.00029102167182662534,
"loss": 0.1217,
"step": 4690
},
{
"epoch": 1.2723335138061722,
"grad_norm": 0.294921875,
"learning_rate": 0.0002905663813513021,
"loss": 0.1103,
"step": 4700
},
{
"epoch": 1.2723335138061722,
"eval_loss": 0.15599898993968964,
"eval_runtime": 49.5398,
"eval_samples_per_second": 10.093,
"eval_steps_per_second": 0.323,
"step": 4700
},
{
"epoch": 1.2750406063887385,
"grad_norm": 0.458984375,
"learning_rate": 0.00029011109087597885,
"loss": 0.1386,
"step": 4710
},
{
"epoch": 1.2777476989713048,
"grad_norm": 0.29296875,
"learning_rate": 0.00028965580040065563,
"loss": 0.1032,
"step": 4720
},
{
"epoch": 1.280454791553871,
"grad_norm": 0.27734375,
"learning_rate": 0.00028920050992533236,
"loss": 0.1613,
"step": 4730
},
{
"epoch": 1.2831618841364374,
"grad_norm": 0.30859375,
"learning_rate": 0.0002887452194500091,
"loss": 0.1049,
"step": 4740
},
{
"epoch": 1.2858689767190037,
"grad_norm": 0.1259765625,
"learning_rate": 0.00028828992897468586,
"loss": 0.1108,
"step": 4750
},
{
"epoch": 1.2885760693015702,
"grad_norm": 0.19921875,
"learning_rate": 0.0002878346384993626,
"loss": 0.1154,
"step": 4760
},
{
"epoch": 1.2912831618841365,
"grad_norm": 0.1259765625,
"learning_rate": 0.00028737934802403937,
"loss": 0.1158,
"step": 4770
},
{
"epoch": 1.2939902544667028,
"grad_norm": 0.384765625,
"learning_rate": 0.0002869240575487161,
"loss": 0.1171,
"step": 4780
},
{
"epoch": 1.296697347049269,
"grad_norm": 0.244140625,
"learning_rate": 0.0002864687670733928,
"loss": 0.1393,
"step": 4790
},
{
"epoch": 1.2994044396318354,
"grad_norm": 0.466796875,
"learning_rate": 0.0002860134765980696,
"loss": 0.1372,
"step": 4800
},
{
"epoch": 1.2994044396318354,
"eval_loss": 0.15471972525119781,
"eval_runtime": 52.6076,
"eval_samples_per_second": 9.504,
"eval_steps_per_second": 0.304,
"step": 4800
},
{
"epoch": 1.3021115322144017,
"grad_norm": 0.1083984375,
"learning_rate": 0.00028555818612274633,
"loss": 0.0928,
"step": 4810
},
{
"epoch": 1.304818624796968,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002851028956474231,
"loss": 0.129,
"step": 4820
},
{
"epoch": 1.3075257173795345,
"grad_norm": 0.11083984375,
"learning_rate": 0.0002846476051720998,
"loss": 0.1089,
"step": 4830
},
{
"epoch": 1.3102328099621008,
"grad_norm": 0.63671875,
"learning_rate": 0.0002841923146967765,
"loss": 0.1102,
"step": 4840
},
{
"epoch": 1.312939902544667,
"grad_norm": 0.255859375,
"learning_rate": 0.0002837370242214533,
"loss": 0.119,
"step": 4850
},
{
"epoch": 1.3156469951272334,
"grad_norm": 0.1201171875,
"learning_rate": 0.00028328173374613,
"loss": 0.1385,
"step": 4860
},
{
"epoch": 1.3183540877097997,
"grad_norm": 0.333984375,
"learning_rate": 0.00028282644327080674,
"loss": 0.0835,
"step": 4870
},
{
"epoch": 1.321061180292366,
"grad_norm": 0.45703125,
"learning_rate": 0.0002823711527954835,
"loss": 0.1346,
"step": 4880
},
{
"epoch": 1.3237682728749323,
"grad_norm": 0.40625,
"learning_rate": 0.00028191586232016025,
"loss": 0.1573,
"step": 4890
},
{
"epoch": 1.3264753654574988,
"grad_norm": 0.07275390625,
"learning_rate": 0.00028146057184483703,
"loss": 0.1716,
"step": 4900
},
{
"epoch": 1.3264753654574988,
"eval_loss": 0.1505041867494583,
"eval_runtime": 51.3184,
"eval_samples_per_second": 9.743,
"eval_steps_per_second": 0.312,
"step": 4900
},
{
"epoch": 1.3291824580400649,
"grad_norm": 0.059326171875,
"learning_rate": 0.00028100528136951376,
"loss": 0.1112,
"step": 4910
},
{
"epoch": 1.3318895506226314,
"grad_norm": 0.2265625,
"learning_rate": 0.0002805499908941905,
"loss": 0.1306,
"step": 4920
},
{
"epoch": 1.3345966432051977,
"grad_norm": 0.326171875,
"learning_rate": 0.00028009470041886727,
"loss": 0.1261,
"step": 4930
},
{
"epoch": 1.337303735787764,
"grad_norm": 0.3515625,
"learning_rate": 0.000279639409943544,
"loss": 0.1284,
"step": 4940
},
{
"epoch": 1.3400108283703303,
"grad_norm": 0.326171875,
"learning_rate": 0.0002791841194682208,
"loss": 0.1275,
"step": 4950
},
{
"epoch": 1.3427179209528965,
"grad_norm": 0.1318359375,
"learning_rate": 0.0002787288289928975,
"loss": 0.1327,
"step": 4960
},
{
"epoch": 1.3454250135354628,
"grad_norm": 0.0859375,
"learning_rate": 0.00027827353851757417,
"loss": 0.1023,
"step": 4970
},
{
"epoch": 1.3481321061180291,
"grad_norm": 0.173828125,
"learning_rate": 0.00027781824804225095,
"loss": 0.1184,
"step": 4980
},
{
"epoch": 1.3508391987005957,
"grad_norm": 0.26953125,
"learning_rate": 0.0002773629575669277,
"loss": 0.1258,
"step": 4990
},
{
"epoch": 1.353546291283162,
"grad_norm": 0.26953125,
"learning_rate": 0.00027690766709160446,
"loss": 0.1334,
"step": 5000
},
{
"epoch": 1.353546291283162,
"eval_loss": 0.15792174637317657,
"eval_runtime": 51.3475,
"eval_samples_per_second": 9.738,
"eval_steps_per_second": 0.312,
"step": 5000
},
{
"epoch": 1.3562533838657282,
"grad_norm": 0.0556640625,
"learning_rate": 0.0002764523766162812,
"loss": 0.1221,
"step": 5010
},
{
"epoch": 1.3589604764482945,
"grad_norm": 0.1650390625,
"learning_rate": 0.0002759970861409579,
"loss": 0.1017,
"step": 5020
},
{
"epoch": 1.3616675690308608,
"grad_norm": 0.49609375,
"learning_rate": 0.0002755417956656347,
"loss": 0.1287,
"step": 5030
},
{
"epoch": 1.3643746616134271,
"grad_norm": 0.1923828125,
"learning_rate": 0.0002750865051903114,
"loss": 0.1031,
"step": 5040
},
{
"epoch": 1.3670817541959934,
"grad_norm": 0.2197265625,
"learning_rate": 0.00027463121471498815,
"loss": 0.1034,
"step": 5050
},
{
"epoch": 1.36978884677856,
"grad_norm": 0.267578125,
"learning_rate": 0.00027417592423966493,
"loss": 0.1253,
"step": 5060
},
{
"epoch": 1.372495939361126,
"grad_norm": 0.6328125,
"learning_rate": 0.00027372063376434165,
"loss": 0.1762,
"step": 5070
},
{
"epoch": 1.3752030319436925,
"grad_norm": 0.2060546875,
"learning_rate": 0.00027326534328901843,
"loss": 0.1169,
"step": 5080
},
{
"epoch": 1.3779101245262588,
"grad_norm": 0.51171875,
"learning_rate": 0.00027281005281369516,
"loss": 0.1402,
"step": 5090
},
{
"epoch": 1.3806172171088251,
"grad_norm": 0.314453125,
"learning_rate": 0.0002723547623383719,
"loss": 0.1372,
"step": 5100
},
{
"epoch": 1.3806172171088251,
"eval_loss": 0.1598336547613144,
"eval_runtime": 50.3768,
"eval_samples_per_second": 9.925,
"eval_steps_per_second": 0.318,
"step": 5100
},
{
"epoch": 1.3833243096913914,
"grad_norm": 0.54296875,
"learning_rate": 0.00027189947186304867,
"loss": 0.1452,
"step": 5110
},
{
"epoch": 1.3860314022739577,
"grad_norm": 0.2001953125,
"learning_rate": 0.00027144418138772534,
"loss": 0.1236,
"step": 5120
},
{
"epoch": 1.3887384948565242,
"grad_norm": 0.13671875,
"learning_rate": 0.0002709888909124021,
"loss": 0.1157,
"step": 5130
},
{
"epoch": 1.3914455874390903,
"grad_norm": 0.326171875,
"learning_rate": 0.00027053360043707885,
"loss": 0.1247,
"step": 5140
},
{
"epoch": 1.3941526800216568,
"grad_norm": 0.2490234375,
"learning_rate": 0.0002700783099617556,
"loss": 0.1463,
"step": 5150
},
{
"epoch": 1.3968597726042231,
"grad_norm": 0.1494140625,
"learning_rate": 0.00026962301948643236,
"loss": 0.1092,
"step": 5160
},
{
"epoch": 1.3995668651867894,
"grad_norm": 0.35546875,
"learning_rate": 0.0002691677290111091,
"loss": 0.1224,
"step": 5170
},
{
"epoch": 1.4022739577693557,
"grad_norm": 0.2431640625,
"learning_rate": 0.00026871243853578586,
"loss": 0.1487,
"step": 5180
},
{
"epoch": 1.404981050351922,
"grad_norm": 0.2060546875,
"learning_rate": 0.0002682571480604626,
"loss": 0.1434,
"step": 5190
},
{
"epoch": 1.4076881429344883,
"grad_norm": 0.263671875,
"learning_rate": 0.0002678018575851393,
"loss": 0.1347,
"step": 5200
},
{
"epoch": 1.4076881429344883,
"eval_loss": 0.15952111780643463,
"eval_runtime": 49.7815,
"eval_samples_per_second": 10.044,
"eval_steps_per_second": 0.321,
"step": 5200
},
{
"epoch": 1.4103952355170546,
"grad_norm": 0.1572265625,
"learning_rate": 0.0002673465671098161,
"loss": 0.1377,
"step": 5210
},
{
"epoch": 1.413102328099621,
"grad_norm": 0.1279296875,
"learning_rate": 0.0002668912766344928,
"loss": 0.1274,
"step": 5220
},
{
"epoch": 1.4158094206821874,
"grad_norm": 0.25,
"learning_rate": 0.00026643598615916955,
"loss": 0.133,
"step": 5230
},
{
"epoch": 1.4185165132647537,
"grad_norm": 0.1611328125,
"learning_rate": 0.00026598069568384633,
"loss": 0.1196,
"step": 5240
},
{
"epoch": 1.42122360584732,
"grad_norm": 0.1787109375,
"learning_rate": 0.00026552540520852306,
"loss": 0.1445,
"step": 5250
},
{
"epoch": 1.4239306984298863,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002650701147331998,
"loss": 0.1009,
"step": 5260
},
{
"epoch": 1.4266377910124526,
"grad_norm": 0.1298828125,
"learning_rate": 0.0002646148242578765,
"loss": 0.1107,
"step": 5270
},
{
"epoch": 1.4293448835950189,
"grad_norm": 0.2333984375,
"learning_rate": 0.00026415953378255324,
"loss": 0.1158,
"step": 5280
},
{
"epoch": 1.4320519761775854,
"grad_norm": 0.2119140625,
"learning_rate": 0.00026370424330723,
"loss": 0.1222,
"step": 5290
},
{
"epoch": 1.4347590687601515,
"grad_norm": 0.296875,
"learning_rate": 0.00026324895283190674,
"loss": 0.1167,
"step": 5300
},
{
"epoch": 1.4347590687601515,
"eval_loss": 0.15868441760540009,
"eval_runtime": 50.4922,
"eval_samples_per_second": 9.903,
"eval_steps_per_second": 0.317,
"step": 5300
},
{
"epoch": 1.437466161342718,
"grad_norm": 0.1630859375,
"learning_rate": 0.0002627936623565835,
"loss": 0.0958,
"step": 5310
},
{
"epoch": 1.4401732539252843,
"grad_norm": 0.4375,
"learning_rate": 0.00026233837188126025,
"loss": 0.1646,
"step": 5320
},
{
"epoch": 1.4428803465078506,
"grad_norm": 0.337890625,
"learning_rate": 0.000261883081405937,
"loss": 0.1436,
"step": 5330
},
{
"epoch": 1.4455874390904169,
"grad_norm": 0.1474609375,
"learning_rate": 0.00026142779093061376,
"loss": 0.1172,
"step": 5340
},
{
"epoch": 1.4482945316729832,
"grad_norm": 0.1943359375,
"learning_rate": 0.0002609725004552905,
"loss": 0.112,
"step": 5350
},
{
"epoch": 1.4510016242555495,
"grad_norm": 0.12255859375,
"learning_rate": 0.00026051720997996726,
"loss": 0.1186,
"step": 5360
},
{
"epoch": 1.4537087168381158,
"grad_norm": 0.119140625,
"learning_rate": 0.000260061919504644,
"loss": 0.0967,
"step": 5370
},
{
"epoch": 1.4564158094206823,
"grad_norm": 0.28515625,
"learning_rate": 0.0002596066290293207,
"loss": 0.1182,
"step": 5380
},
{
"epoch": 1.4591229020032486,
"grad_norm": 0.1845703125,
"learning_rate": 0.0002591513385539975,
"loss": 0.1395,
"step": 5390
},
{
"epoch": 1.4618299945858149,
"grad_norm": 0.1826171875,
"learning_rate": 0.00025869604807867417,
"loss": 0.1237,
"step": 5400
},
{
"epoch": 1.4618299945858149,
"eval_loss": 0.1595367193222046,
"eval_runtime": 50.3465,
"eval_samples_per_second": 9.931,
"eval_steps_per_second": 0.318,
"step": 5400
},
{
"epoch": 1.4645370871683812,
"grad_norm": 0.11181640625,
"learning_rate": 0.0002582407576033509,
"loss": 0.118,
"step": 5410
},
{
"epoch": 1.4672441797509475,
"grad_norm": 0.1904296875,
"learning_rate": 0.0002577854671280277,
"loss": 0.0953,
"step": 5420
},
{
"epoch": 1.4699512723335137,
"grad_norm": 0.22265625,
"learning_rate": 0.0002573301766527044,
"loss": 0.1219,
"step": 5430
},
{
"epoch": 1.47265836491608,
"grad_norm": 0.140625,
"learning_rate": 0.0002568748861773812,
"loss": 0.11,
"step": 5440
},
{
"epoch": 1.4753654574986466,
"grad_norm": 0.23828125,
"learning_rate": 0.0002564195957020579,
"loss": 0.1108,
"step": 5450
},
{
"epoch": 1.4780725500812129,
"grad_norm": 0.455078125,
"learning_rate": 0.00025596430522673464,
"loss": 0.1313,
"step": 5460
},
{
"epoch": 1.4807796426637791,
"grad_norm": 0.2734375,
"learning_rate": 0.0002555090147514114,
"loss": 0.163,
"step": 5470
},
{
"epoch": 1.4834867352463454,
"grad_norm": 0.291015625,
"learning_rate": 0.00025505372427608815,
"loss": 0.138,
"step": 5480
},
{
"epoch": 1.4861938278289117,
"grad_norm": 0.103515625,
"learning_rate": 0.0002545984338007649,
"loss": 0.1257,
"step": 5490
},
{
"epoch": 1.488900920411478,
"grad_norm": 0.150390625,
"learning_rate": 0.00025414314332544165,
"loss": 0.1304,
"step": 5500
},
{
"epoch": 1.488900920411478,
"eval_loss": 0.16379128396511078,
"eval_runtime": 51.0248,
"eval_samples_per_second": 9.799,
"eval_steps_per_second": 0.314,
"step": 5500
},
{
"epoch": 1.4916080129940443,
"grad_norm": 0.2197265625,
"learning_rate": 0.0002536878528501184,
"loss": 0.1124,
"step": 5510
},
{
"epoch": 1.4943151055766108,
"grad_norm": 0.298828125,
"learning_rate": 0.00025323256237479516,
"loss": 0.1048,
"step": 5520
},
{
"epoch": 1.497022198159177,
"grad_norm": 0.2578125,
"learning_rate": 0.0002527772718994719,
"loss": 0.1251,
"step": 5530
},
{
"epoch": 1.4997292907417434,
"grad_norm": 0.51953125,
"learning_rate": 0.0002523219814241486,
"loss": 0.1434,
"step": 5540
},
{
"epoch": 1.5024363833243097,
"grad_norm": 0.35546875,
"learning_rate": 0.00025186669094882534,
"loss": 0.1254,
"step": 5550
},
{
"epoch": 1.505143475906876,
"grad_norm": 0.287109375,
"learning_rate": 0.00025141140047350207,
"loss": 0.0985,
"step": 5560
},
{
"epoch": 1.5078505684894423,
"grad_norm": 0.287109375,
"learning_rate": 0.00025095610999817885,
"loss": 0.1109,
"step": 5570
},
{
"epoch": 1.5105576610720086,
"grad_norm": 0.171875,
"learning_rate": 0.0002505008195228556,
"loss": 0.116,
"step": 5580
},
{
"epoch": 1.5132647536545751,
"grad_norm": 0.396484375,
"learning_rate": 0.0002500455290475323,
"loss": 0.1411,
"step": 5590
},
{
"epoch": 1.5159718462371412,
"grad_norm": 0.154296875,
"learning_rate": 0.0002495902385722091,
"loss": 0.1172,
"step": 5600
},
{
"epoch": 1.5159718462371412,
"eval_loss": 0.15981900691986084,
"eval_runtime": 51.7233,
"eval_samples_per_second": 9.667,
"eval_steps_per_second": 0.309,
"step": 5600
},
{
"epoch": 1.5186789388197077,
"grad_norm": 0.306640625,
"learning_rate": 0.0002491349480968858,
"loss": 0.1516,
"step": 5610
},
{
"epoch": 1.5213860314022738,
"grad_norm": 0.2255859375,
"learning_rate": 0.00024867965762156253,
"loss": 0.1095,
"step": 5620
},
{
"epoch": 1.5240931239848403,
"grad_norm": 0.54296875,
"learning_rate": 0.0002482243671462393,
"loss": 0.125,
"step": 5630
},
{
"epoch": 1.5268002165674066,
"grad_norm": 0.16796875,
"learning_rate": 0.00024776907667091604,
"loss": 0.1146,
"step": 5640
},
{
"epoch": 1.529507309149973,
"grad_norm": 0.28125,
"learning_rate": 0.0002473137861955928,
"loss": 0.1103,
"step": 5650
},
{
"epoch": 1.5322144017325392,
"grad_norm": 0.2734375,
"learning_rate": 0.00024685849572026955,
"loss": 0.1014,
"step": 5660
},
{
"epoch": 1.5349214943151055,
"grad_norm": 0.203125,
"learning_rate": 0.0002464032052449463,
"loss": 0.1246,
"step": 5670
},
{
"epoch": 1.537628586897672,
"grad_norm": 0.298828125,
"learning_rate": 0.000245947914769623,
"loss": 0.1121,
"step": 5680
},
{
"epoch": 1.540335679480238,
"grad_norm": 0.255859375,
"learning_rate": 0.0002454926242942998,
"loss": 0.1614,
"step": 5690
},
{
"epoch": 1.5430427720628046,
"grad_norm": 0.259765625,
"learning_rate": 0.0002450373338189765,
"loss": 0.1091,
"step": 5700
},
{
"epoch": 1.5430427720628046,
"eval_loss": 0.16020576655864716,
"eval_runtime": 51.2639,
"eval_samples_per_second": 9.753,
"eval_steps_per_second": 0.312,
"step": 5700
},
{
"epoch": 1.545749864645371,
"grad_norm": 0.1533203125,
"learning_rate": 0.00024458204334365324,
"loss": 0.1055,
"step": 5710
},
{
"epoch": 1.5484569572279372,
"grad_norm": 0.1044921875,
"learning_rate": 0.00024412675286833,
"loss": 0.0968,
"step": 5720
},
{
"epoch": 1.5511640498105035,
"grad_norm": 0.11767578125,
"learning_rate": 0.00024367146239300674,
"loss": 0.1163,
"step": 5730
},
{
"epoch": 1.5538711423930698,
"grad_norm": 0.498046875,
"learning_rate": 0.0002432161719176835,
"loss": 0.1126,
"step": 5740
},
{
"epoch": 1.5565782349756363,
"grad_norm": 0.193359375,
"learning_rate": 0.00024276088144236022,
"loss": 0.1062,
"step": 5750
},
{
"epoch": 1.5592853275582024,
"grad_norm": 0.484375,
"learning_rate": 0.00024230559096703698,
"loss": 0.1405,
"step": 5760
},
{
"epoch": 1.561992420140769,
"grad_norm": 0.25390625,
"learning_rate": 0.00024185030049171373,
"loss": 0.1097,
"step": 5770
},
{
"epoch": 1.5646995127233352,
"grad_norm": 0.203125,
"learning_rate": 0.00024139501001639048,
"loss": 0.1447,
"step": 5780
},
{
"epoch": 1.5674066053059015,
"grad_norm": 0.2734375,
"learning_rate": 0.00024093971954106718,
"loss": 0.1413,
"step": 5790
},
{
"epoch": 1.5701136978884678,
"grad_norm": 0.267578125,
"learning_rate": 0.00024048442906574394,
"loss": 0.1561,
"step": 5800
},
{
"epoch": 1.5701136978884678,
"eval_loss": 0.16192464530467987,
"eval_runtime": 49.5357,
"eval_samples_per_second": 10.094,
"eval_steps_per_second": 0.323,
"step": 5800
},
{
"epoch": 1.572820790471034,
"grad_norm": 0.330078125,
"learning_rate": 0.0002400291385904207,
"loss": 0.1137,
"step": 5810
},
{
"epoch": 1.5755278830536006,
"grad_norm": 0.2216796875,
"learning_rate": 0.00023957384811509744,
"loss": 0.1074,
"step": 5820
},
{
"epoch": 1.5782349756361667,
"grad_norm": 0.30078125,
"learning_rate": 0.0002391185576397742,
"loss": 0.1498,
"step": 5830
},
{
"epoch": 1.5809420682187332,
"grad_norm": 0.2109375,
"learning_rate": 0.00023866326716445092,
"loss": 0.1271,
"step": 5840
},
{
"epoch": 1.5836491608012992,
"grad_norm": 0.28515625,
"learning_rate": 0.00023820797668912768,
"loss": 0.106,
"step": 5850
},
{
"epoch": 1.5863562533838658,
"grad_norm": 0.359375,
"learning_rate": 0.0002377526862138044,
"loss": 0.1288,
"step": 5860
},
{
"epoch": 1.589063345966432,
"grad_norm": 0.1455078125,
"learning_rate": 0.00023729739573848116,
"loss": 0.1038,
"step": 5870
},
{
"epoch": 1.5917704385489984,
"grad_norm": 0.431640625,
"learning_rate": 0.00023684210526315788,
"loss": 0.127,
"step": 5880
},
{
"epoch": 1.5944775311315647,
"grad_norm": 0.353515625,
"learning_rate": 0.00023638681478783464,
"loss": 0.093,
"step": 5890
},
{
"epoch": 1.597184623714131,
"grad_norm": 0.32421875,
"learning_rate": 0.0002359315243125114,
"loss": 0.111,
"step": 5900
},
{
"epoch": 1.597184623714131,
"eval_loss": 0.16383513808250427,
"eval_runtime": 50.6949,
"eval_samples_per_second": 9.863,
"eval_steps_per_second": 0.316,
"step": 5900
},
{
"epoch": 1.5998917162966975,
"grad_norm": 0.40625,
"learning_rate": 0.00023547623383718814,
"loss": 0.1285,
"step": 5910
},
{
"epoch": 1.6025988088792635,
"grad_norm": 0.3125,
"learning_rate": 0.0002350209433618649,
"loss": 0.1296,
"step": 5920
},
{
"epoch": 1.60530590146183,
"grad_norm": 0.22265625,
"learning_rate": 0.0002345656528865416,
"loss": 0.1152,
"step": 5930
},
{
"epoch": 1.6080129940443963,
"grad_norm": 0.10546875,
"learning_rate": 0.00023411036241121835,
"loss": 0.1388,
"step": 5940
},
{
"epoch": 1.6107200866269626,
"grad_norm": 0.35546875,
"learning_rate": 0.0002336550719358951,
"loss": 0.1083,
"step": 5950
},
{
"epoch": 1.613427179209529,
"grad_norm": 0.455078125,
"learning_rate": 0.00023319978146057186,
"loss": 0.1388,
"step": 5960
},
{
"epoch": 1.6161342717920952,
"grad_norm": 0.26171875,
"learning_rate": 0.00023274449098524859,
"loss": 0.1116,
"step": 5970
},
{
"epoch": 1.6188413643746618,
"grad_norm": 0.484375,
"learning_rate": 0.00023228920050992534,
"loss": 0.128,
"step": 5980
},
{
"epoch": 1.6215484569572278,
"grad_norm": 0.08984375,
"learning_rate": 0.0002318339100346021,
"loss": 0.0834,
"step": 5990
},
{
"epoch": 1.6242555495397943,
"grad_norm": 0.296875,
"learning_rate": 0.00023137861955927882,
"loss": 0.1207,
"step": 6000
},
{
"epoch": 1.6242555495397943,
"eval_loss": 0.15791058540344238,
"eval_runtime": 50.2164,
"eval_samples_per_second": 9.957,
"eval_steps_per_second": 0.319,
"step": 6000
},
{
"epoch": 1.6269626421223606,
"grad_norm": 0.1484375,
"learning_rate": 0.00023092332908395557,
"loss": 0.119,
"step": 6010
},
{
"epoch": 1.629669734704927,
"grad_norm": 0.125,
"learning_rate": 0.0002304680386086323,
"loss": 0.1005,
"step": 6020
},
{
"epoch": 1.6323768272874932,
"grad_norm": 0.37109375,
"learning_rate": 0.00023001274813330905,
"loss": 0.1397,
"step": 6030
},
{
"epoch": 1.6350839198700595,
"grad_norm": 0.1337890625,
"learning_rate": 0.0002295574576579858,
"loss": 0.1025,
"step": 6040
},
{
"epoch": 1.637791012452626,
"grad_norm": 0.1416015625,
"learning_rate": 0.00022910216718266256,
"loss": 0.1437,
"step": 6050
},
{
"epoch": 1.6404981050351921,
"grad_norm": 0.271484375,
"learning_rate": 0.0002286468767073393,
"loss": 0.1037,
"step": 6060
},
{
"epoch": 1.6432051976177586,
"grad_norm": 0.078125,
"learning_rate": 0.000228191586232016,
"loss": 0.0911,
"step": 6070
},
{
"epoch": 1.6459122902003247,
"grad_norm": 0.2294921875,
"learning_rate": 0.00022773629575669277,
"loss": 0.0996,
"step": 6080
},
{
"epoch": 1.6486193827828912,
"grad_norm": 0.439453125,
"learning_rate": 0.00022728100528136952,
"loss": 0.1274,
"step": 6090
},
{
"epoch": 1.6513264753654575,
"grad_norm": 0.29296875,
"learning_rate": 0.00022682571480604627,
"loss": 0.1214,
"step": 6100
},
{
"epoch": 1.6513264753654575,
"eval_loss": 0.15438494086265564,
"eval_runtime": 49.8084,
"eval_samples_per_second": 10.038,
"eval_steps_per_second": 0.321,
"step": 6100
},
{
"epoch": 1.6540335679480238,
"grad_norm": 0.115234375,
"learning_rate": 0.000226370424330723,
"loss": 0.1349,
"step": 6110
},
{
"epoch": 1.65674066053059,
"grad_norm": 0.328125,
"learning_rate": 0.00022591513385539975,
"loss": 0.1153,
"step": 6120
},
{
"epoch": 1.6594477531131564,
"grad_norm": 0.380859375,
"learning_rate": 0.0002254598433800765,
"loss": 0.1108,
"step": 6130
},
{
"epoch": 1.662154845695723,
"grad_norm": 0.1748046875,
"learning_rate": 0.00022500455290475323,
"loss": 0.115,
"step": 6140
},
{
"epoch": 1.664861938278289,
"grad_norm": 0.35546875,
"learning_rate": 0.00022454926242942996,
"loss": 0.1185,
"step": 6150
},
{
"epoch": 1.6675690308608555,
"grad_norm": 0.2109375,
"learning_rate": 0.00022409397195410671,
"loss": 0.1193,
"step": 6160
},
{
"epoch": 1.6702761234434218,
"grad_norm": 0.16796875,
"learning_rate": 0.00022363868147878347,
"loss": 0.1319,
"step": 6170
},
{
"epoch": 1.672983216025988,
"grad_norm": 0.1796875,
"learning_rate": 0.00022318339100346022,
"loss": 0.1,
"step": 6180
},
{
"epoch": 1.6756903086085544,
"grad_norm": 0.455078125,
"learning_rate": 0.00022272810052813698,
"loss": 0.1286,
"step": 6190
},
{
"epoch": 1.6783974011911207,
"grad_norm": 0.271484375,
"learning_rate": 0.0002222728100528137,
"loss": 0.1062,
"step": 6200
},
{
"epoch": 1.6783974011911207,
"eval_loss": 0.1509903371334076,
"eval_runtime": 50.1548,
"eval_samples_per_second": 9.969,
"eval_steps_per_second": 0.319,
"step": 6200
},
{
"epoch": 1.6811044937736872,
"grad_norm": 0.12109375,
"learning_rate": 0.00022181751957749043,
"loss": 0.1115,
"step": 6210
},
{
"epoch": 1.6838115863562533,
"grad_norm": 0.2275390625,
"learning_rate": 0.00022136222910216718,
"loss": 0.0958,
"step": 6220
},
{
"epoch": 1.6865186789388198,
"grad_norm": 0.419921875,
"learning_rate": 0.00022090693862684394,
"loss": 0.1072,
"step": 6230
},
{
"epoch": 1.6892257715213859,
"grad_norm": 0.490234375,
"learning_rate": 0.00022045164815152066,
"loss": 0.1338,
"step": 6240
},
{
"epoch": 1.6919328641039524,
"grad_norm": 0.384765625,
"learning_rate": 0.00021999635767619742,
"loss": 0.1025,
"step": 6250
},
{
"epoch": 1.6946399566865187,
"grad_norm": 0.31640625,
"learning_rate": 0.00021954106720087417,
"loss": 0.1323,
"step": 6260
},
{
"epoch": 1.697347049269085,
"grad_norm": 0.47265625,
"learning_rate": 0.00021908577672555092,
"loss": 0.1312,
"step": 6270
},
{
"epoch": 1.7000541418516515,
"grad_norm": 0.248046875,
"learning_rate": 0.00021863048625022765,
"loss": 0.1137,
"step": 6280
},
{
"epoch": 1.7027612344342176,
"grad_norm": 0.09375,
"learning_rate": 0.00021817519577490438,
"loss": 0.1201,
"step": 6290
},
{
"epoch": 1.705468327016784,
"grad_norm": 0.298828125,
"learning_rate": 0.00021771990529958113,
"loss": 0.1161,
"step": 6300
},
{
"epoch": 1.705468327016784,
"eval_loss": 0.1535915732383728,
"eval_runtime": 51.7815,
"eval_samples_per_second": 9.656,
"eval_steps_per_second": 0.309,
"step": 6300
},
{
"epoch": 1.7081754195993502,
"grad_norm": 0.1884765625,
"learning_rate": 0.00021726461482425788,
"loss": 0.109,
"step": 6310
},
{
"epoch": 1.7108825121819167,
"grad_norm": 0.13671875,
"learning_rate": 0.00021680932434893464,
"loss": 0.1241,
"step": 6320
},
{
"epoch": 1.713589604764483,
"grad_norm": 0.111328125,
"learning_rate": 0.00021635403387361136,
"loss": 0.1272,
"step": 6330
},
{
"epoch": 1.7162966973470493,
"grad_norm": 0.5390625,
"learning_rate": 0.00021589874339828812,
"loss": 0.1066,
"step": 6340
},
{
"epoch": 1.7190037899296156,
"grad_norm": 0.494140625,
"learning_rate": 0.00021544345292296484,
"loss": 0.1083,
"step": 6350
},
{
"epoch": 1.7217108825121819,
"grad_norm": 0.228515625,
"learning_rate": 0.0002149881624476416,
"loss": 0.1119,
"step": 6360
},
{
"epoch": 1.7244179750947484,
"grad_norm": 0.1494140625,
"learning_rate": 0.00021453287197231835,
"loss": 0.1142,
"step": 6370
},
{
"epoch": 1.7271250676773144,
"grad_norm": 0.15234375,
"learning_rate": 0.00021407758149699508,
"loss": 0.1237,
"step": 6380
},
{
"epoch": 1.729832160259881,
"grad_norm": 0.283203125,
"learning_rate": 0.00021362229102167183,
"loss": 0.1137,
"step": 6390
},
{
"epoch": 1.7325392528424473,
"grad_norm": 0.224609375,
"learning_rate": 0.00021316700054634858,
"loss": 0.115,
"step": 6400
},
{
"epoch": 1.7325392528424473,
"eval_loss": 0.15494155883789062,
"eval_runtime": 51.5495,
"eval_samples_per_second": 9.699,
"eval_steps_per_second": 0.31,
"step": 6400
},
{
"epoch": 1.7352463454250135,
"grad_norm": 0.07666015625,
"learning_rate": 0.00021271171007102534,
"loss": 0.1437,
"step": 6410
},
{
"epoch": 1.7379534380075798,
"grad_norm": 0.55078125,
"learning_rate": 0.00021225641959570204,
"loss": 0.1266,
"step": 6420
},
{
"epoch": 1.7406605305901461,
"grad_norm": 0.1025390625,
"learning_rate": 0.0002118011291203788,
"loss": 0.0967,
"step": 6430
},
{
"epoch": 1.7433676231727127,
"grad_norm": 0.126953125,
"learning_rate": 0.00021134583864505554,
"loss": 0.1034,
"step": 6440
},
{
"epoch": 1.7460747157552787,
"grad_norm": 0.27734375,
"learning_rate": 0.0002108905481697323,
"loss": 0.1016,
"step": 6450
},
{
"epoch": 1.7487818083378452,
"grad_norm": 0.1806640625,
"learning_rate": 0.00021043525769440905,
"loss": 0.0758,
"step": 6460
},
{
"epoch": 1.7514889009204113,
"grad_norm": 0.2275390625,
"learning_rate": 0.00020997996721908578,
"loss": 0.1228,
"step": 6470
},
{
"epoch": 1.7541959935029778,
"grad_norm": 0.1240234375,
"learning_rate": 0.00020952467674376253,
"loss": 0.0879,
"step": 6480
},
{
"epoch": 1.7569030860855441,
"grad_norm": 0.140625,
"learning_rate": 0.00020906938626843926,
"loss": 0.1235,
"step": 6490
},
{
"epoch": 1.7596101786681104,
"grad_norm": 0.25390625,
"learning_rate": 0.000208614095793116,
"loss": 0.0861,
"step": 6500
},
{
"epoch": 1.7596101786681104,
"eval_loss": 0.15266965329647064,
"eval_runtime": 49.6548,
"eval_samples_per_second": 10.07,
"eval_steps_per_second": 0.322,
"step": 6500
},
{
"epoch": 1.7623172712506767,
"grad_norm": 0.419921875,
"learning_rate": 0.00020815880531779274,
"loss": 0.1356,
"step": 6510
},
{
"epoch": 1.765024363833243,
"grad_norm": 0.2294921875,
"learning_rate": 0.0002077035148424695,
"loss": 0.1153,
"step": 6520
},
{
"epoch": 1.7677314564158095,
"grad_norm": 0.1455078125,
"learning_rate": 0.00020724822436714625,
"loss": 0.1144,
"step": 6530
},
{
"epoch": 1.7704385489983756,
"grad_norm": 0.1904296875,
"learning_rate": 0.000206792933891823,
"loss": 0.1145,
"step": 6540
},
{
"epoch": 1.7731456415809421,
"grad_norm": 0.42578125,
"learning_rate": 0.00020633764341649975,
"loss": 0.1313,
"step": 6550
},
{
"epoch": 1.7758527341635084,
"grad_norm": 0.26171875,
"learning_rate": 0.00020588235294117645,
"loss": 0.0949,
"step": 6560
},
{
"epoch": 1.7785598267460747,
"grad_norm": 0.337890625,
"learning_rate": 0.0002054270624658532,
"loss": 0.1124,
"step": 6570
},
{
"epoch": 1.781266919328641,
"grad_norm": 0.2578125,
"learning_rate": 0.00020497177199052996,
"loss": 0.1421,
"step": 6580
},
{
"epoch": 1.7839740119112073,
"grad_norm": 0.154296875,
"learning_rate": 0.0002045164815152067,
"loss": 0.1376,
"step": 6590
},
{
"epoch": 1.7866811044937738,
"grad_norm": 0.1484375,
"learning_rate": 0.00020406119103988344,
"loss": 0.1102,
"step": 6600
},
{
"epoch": 1.7866811044937738,
"eval_loss": 0.1520686000585556,
"eval_runtime": 52.089,
"eval_samples_per_second": 9.599,
"eval_steps_per_second": 0.307,
"step": 6600
},
{
"epoch": 1.78938819707634,
"grad_norm": 0.193359375,
"learning_rate": 0.0002036059005645602,
"loss": 0.1197,
"step": 6610
},
{
"epoch": 1.7920952896589064,
"grad_norm": 0.201171875,
"learning_rate": 0.00020315061008923695,
"loss": 0.1083,
"step": 6620
},
{
"epoch": 1.7948023822414727,
"grad_norm": 0.30859375,
"learning_rate": 0.00020269531961391367,
"loss": 0.1413,
"step": 6630
},
{
"epoch": 1.797509474824039,
"grad_norm": 0.169921875,
"learning_rate": 0.00020224002913859043,
"loss": 0.1038,
"step": 6640
},
{
"epoch": 1.8002165674066053,
"grad_norm": 0.154296875,
"learning_rate": 0.00020178473866326715,
"loss": 0.1429,
"step": 6650
},
{
"epoch": 1.8029236599891716,
"grad_norm": 0.173828125,
"learning_rate": 0.0002013294481879439,
"loss": 0.1182,
"step": 6660
},
{
"epoch": 1.805630752571738,
"grad_norm": 0.1044921875,
"learning_rate": 0.00020087415771262066,
"loss": 0.109,
"step": 6670
},
{
"epoch": 1.8083378451543042,
"grad_norm": 0.115234375,
"learning_rate": 0.00020041886723729741,
"loss": 0.1167,
"step": 6680
},
{
"epoch": 1.8110449377368707,
"grad_norm": 0.2177734375,
"learning_rate": 0.00019996357676197414,
"loss": 0.1191,
"step": 6690
},
{
"epoch": 1.8137520303194368,
"grad_norm": 0.248046875,
"learning_rate": 0.00019950828628665087,
"loss": 0.1249,
"step": 6700
},
{
"epoch": 1.8137520303194368,
"eval_loss": 0.14892610907554626,
"eval_runtime": 50.9547,
"eval_samples_per_second": 9.813,
"eval_steps_per_second": 0.314,
"step": 6700
},
{
"epoch": 1.8164591229020033,
"grad_norm": 0.39453125,
"learning_rate": 0.00019905299581132762,
"loss": 0.1164,
"step": 6710
},
{
"epoch": 1.8191662154845696,
"grad_norm": 0.287109375,
"learning_rate": 0.00019859770533600437,
"loss": 0.1257,
"step": 6720
},
{
"epoch": 1.8218733080671359,
"grad_norm": 0.25390625,
"learning_rate": 0.00019814241486068113,
"loss": 0.1283,
"step": 6730
},
{
"epoch": 1.8245804006497022,
"grad_norm": 0.1650390625,
"learning_rate": 0.00019768712438535786,
"loss": 0.1121,
"step": 6740
},
{
"epoch": 1.8272874932322685,
"grad_norm": 0.5546875,
"learning_rate": 0.0001972318339100346,
"loss": 0.0988,
"step": 6750
},
{
"epoch": 1.829994585814835,
"grad_norm": 0.275390625,
"learning_rate": 0.00019677654343471136,
"loss": 0.0845,
"step": 6760
},
{
"epoch": 1.832701678397401,
"grad_norm": 0.30078125,
"learning_rate": 0.00019632125295938812,
"loss": 0.1241,
"step": 6770
},
{
"epoch": 1.8354087709799676,
"grad_norm": 0.2236328125,
"learning_rate": 0.00019586596248406482,
"loss": 0.1042,
"step": 6780
},
{
"epoch": 1.8381158635625339,
"grad_norm": 0.173828125,
"learning_rate": 0.00019541067200874157,
"loss": 0.0877,
"step": 6790
},
{
"epoch": 1.8408229561451002,
"grad_norm": 0.150390625,
"learning_rate": 0.00019495538153341832,
"loss": 0.1287,
"step": 6800
},
{
"epoch": 1.8408229561451002,
"eval_loss": 0.1503976583480835,
"eval_runtime": 50.5525,
"eval_samples_per_second": 9.891,
"eval_steps_per_second": 0.317,
"step": 6800
},
{
"epoch": 1.8435300487276665,
"grad_norm": 0.248046875,
"learning_rate": 0.00019450009105809508,
"loss": 0.1022,
"step": 6810
},
{
"epoch": 1.8462371413102328,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019404480058277183,
"loss": 0.1109,
"step": 6820
},
{
"epoch": 1.8489442338927993,
"grad_norm": 0.123046875,
"learning_rate": 0.00019358951010744856,
"loss": 0.1186,
"step": 6830
},
{
"epoch": 1.8516513264753653,
"grad_norm": 0.267578125,
"learning_rate": 0.0001931342196321253,
"loss": 0.1238,
"step": 6840
},
{
"epoch": 1.8543584190579319,
"grad_norm": 0.373046875,
"learning_rate": 0.00019267892915680204,
"loss": 0.1492,
"step": 6850
},
{
"epoch": 1.8570655116404982,
"grad_norm": 0.1806640625,
"learning_rate": 0.0001922236386814788,
"loss": 0.1294,
"step": 6860
},
{
"epoch": 1.8597726042230645,
"grad_norm": 0.2109375,
"learning_rate": 0.00019176834820615552,
"loss": 0.1256,
"step": 6870
},
{
"epoch": 1.8624796968056307,
"grad_norm": 0.140625,
"learning_rate": 0.00019131305773083227,
"loss": 0.1055,
"step": 6880
},
{
"epoch": 1.865186789388197,
"grad_norm": 0.255859375,
"learning_rate": 0.00019085776725550902,
"loss": 0.1017,
"step": 6890
},
{
"epoch": 1.8678938819707636,
"grad_norm": 0.375,
"learning_rate": 0.00019040247678018578,
"loss": 0.1311,
"step": 6900
},
{
"epoch": 1.8678938819707636,
"eval_loss": 0.15065474808216095,
"eval_runtime": 51.105,
"eval_samples_per_second": 9.784,
"eval_steps_per_second": 0.313,
"step": 6900
},
{
"epoch": 1.8706009745533296,
"grad_norm": 0.19140625,
"learning_rate": 0.00018994718630486253,
"loss": 0.1184,
"step": 6910
},
{
"epoch": 1.8733080671358961,
"grad_norm": 0.275390625,
"learning_rate": 0.00018949189582953923,
"loss": 0.0929,
"step": 6920
},
{
"epoch": 1.8760151597184622,
"grad_norm": 0.099609375,
"learning_rate": 0.00018903660535421598,
"loss": 0.1177,
"step": 6930
},
{
"epoch": 1.8787222523010287,
"grad_norm": 0.150390625,
"learning_rate": 0.00018858131487889274,
"loss": 0.1116,
"step": 6940
},
{
"epoch": 1.881429344883595,
"grad_norm": 0.33203125,
"learning_rate": 0.0001881260244035695,
"loss": 0.1492,
"step": 6950
},
{
"epoch": 1.8841364374661613,
"grad_norm": 0.234375,
"learning_rate": 0.00018767073392824622,
"loss": 0.1335,
"step": 6960
},
{
"epoch": 1.8868435300487276,
"grad_norm": 0.3984375,
"learning_rate": 0.00018721544345292297,
"loss": 0.1445,
"step": 6970
},
{
"epoch": 1.889550622631294,
"grad_norm": 0.2177734375,
"learning_rate": 0.00018676015297759973,
"loss": 0.1011,
"step": 6980
},
{
"epoch": 1.8922577152138604,
"grad_norm": 0.059326171875,
"learning_rate": 0.00018630486250227645,
"loss": 0.1133,
"step": 6990
},
{
"epoch": 1.8949648077964265,
"grad_norm": 0.0654296875,
"learning_rate": 0.0001858495720269532,
"loss": 0.0976,
"step": 7000
},
{
"epoch": 1.8949648077964265,
"eval_loss": 0.15026648342609406,
"eval_runtime": 50.4333,
"eval_samples_per_second": 9.914,
"eval_steps_per_second": 0.317,
"step": 7000
},
{
"epoch": 1.897671900378993,
"grad_norm": 0.08056640625,
"learning_rate": 0.00018539428155162993,
"loss": 0.092,
"step": 7010
},
{
"epoch": 1.9003789929615593,
"grad_norm": 0.1416015625,
"learning_rate": 0.00018493899107630669,
"loss": 0.0998,
"step": 7020
},
{
"epoch": 1.9030860855441256,
"grad_norm": 0.267578125,
"learning_rate": 0.00018448370060098344,
"loss": 0.1095,
"step": 7030
},
{
"epoch": 1.905793178126692,
"grad_norm": 0.1474609375,
"learning_rate": 0.0001840284101256602,
"loss": 0.119,
"step": 7040
},
{
"epoch": 1.9085002707092582,
"grad_norm": 0.2421875,
"learning_rate": 0.00018357311965033692,
"loss": 0.1162,
"step": 7050
},
{
"epoch": 1.9112073632918247,
"grad_norm": 0.3515625,
"learning_rate": 0.00018311782917501365,
"loss": 0.117,
"step": 7060
},
{
"epoch": 1.9139144558743908,
"grad_norm": 0.181640625,
"learning_rate": 0.0001826625386996904,
"loss": 0.1154,
"step": 7070
},
{
"epoch": 1.9166215484569573,
"grad_norm": 0.322265625,
"learning_rate": 0.00018220724822436715,
"loss": 0.1305,
"step": 7080
},
{
"epoch": 1.9193286410395234,
"grad_norm": 0.388671875,
"learning_rate": 0.0001817519577490439,
"loss": 0.1165,
"step": 7090
},
{
"epoch": 1.92203573362209,
"grad_norm": 0.0986328125,
"learning_rate": 0.00018129666727372063,
"loss": 0.1134,
"step": 7100
},
{
"epoch": 1.92203573362209,
"eval_loss": 0.15066738426685333,
"eval_runtime": 50.4193,
"eval_samples_per_second": 9.917,
"eval_steps_per_second": 0.317,
"step": 7100
},
{
"epoch": 1.9247428262046562,
"grad_norm": 0.322265625,
"learning_rate": 0.0001808413767983974,
"loss": 0.1113,
"step": 7110
},
{
"epoch": 1.9274499187872225,
"grad_norm": 0.212890625,
"learning_rate": 0.00018038608632307414,
"loss": 0.0985,
"step": 7120
},
{
"epoch": 1.930157011369789,
"grad_norm": 0.185546875,
"learning_rate": 0.00017993079584775087,
"loss": 0.1138,
"step": 7130
},
{
"epoch": 1.932864103952355,
"grad_norm": 0.29296875,
"learning_rate": 0.0001794755053724276,
"loss": 0.1113,
"step": 7140
},
{
"epoch": 1.9355711965349216,
"grad_norm": 0.1259765625,
"learning_rate": 0.00017902021489710435,
"loss": 0.1194,
"step": 7150
},
{
"epoch": 1.9382782891174877,
"grad_norm": 0.353515625,
"learning_rate": 0.0001785649244217811,
"loss": 0.1109,
"step": 7160
},
{
"epoch": 1.9409853817000542,
"grad_norm": 0.32421875,
"learning_rate": 0.00017810963394645785,
"loss": 0.1055,
"step": 7170
},
{
"epoch": 1.9436924742826205,
"grad_norm": 0.197265625,
"learning_rate": 0.0001776543434711346,
"loss": 0.1217,
"step": 7180
},
{
"epoch": 1.9463995668651868,
"grad_norm": 0.34765625,
"learning_rate": 0.00017719905299581133,
"loss": 0.1275,
"step": 7190
},
{
"epoch": 1.949106659447753,
"grad_norm": 0.0810546875,
"learning_rate": 0.00017674376252048806,
"loss": 0.0839,
"step": 7200
},
{
"epoch": 1.949106659447753,
"eval_loss": 0.15255558490753174,
"eval_runtime": 50.1937,
"eval_samples_per_second": 9.961,
"eval_steps_per_second": 0.319,
"step": 7200
},
{
"epoch": 1.9518137520303194,
"grad_norm": 0.29296875,
"learning_rate": 0.00017628847204516481,
"loss": 0.1094,
"step": 7210
},
{
"epoch": 1.9545208446128859,
"grad_norm": 0.482421875,
"learning_rate": 0.00017583318156984157,
"loss": 0.1362,
"step": 7220
},
{
"epoch": 1.957227937195452,
"grad_norm": 0.3984375,
"learning_rate": 0.0001753778910945183,
"loss": 0.115,
"step": 7230
},
{
"epoch": 1.9599350297780185,
"grad_norm": 0.49609375,
"learning_rate": 0.00017492260061919505,
"loss": 0.1188,
"step": 7240
},
{
"epoch": 1.9626421223605848,
"grad_norm": 0.16796875,
"learning_rate": 0.0001744673101438718,
"loss": 0.1114,
"step": 7250
},
{
"epoch": 1.965349214943151,
"grad_norm": 0.26953125,
"learning_rate": 0.00017401201966854856,
"loss": 0.0983,
"step": 7260
},
{
"epoch": 1.9680563075257174,
"grad_norm": 0.216796875,
"learning_rate": 0.00017355672919322528,
"loss": 0.135,
"step": 7270
},
{
"epoch": 1.9707634001082837,
"grad_norm": 0.19140625,
"learning_rate": 0.000173101438717902,
"loss": 0.1057,
"step": 7280
},
{
"epoch": 1.9734704926908502,
"grad_norm": 0.2734375,
"learning_rate": 0.00017264614824257876,
"loss": 0.1421,
"step": 7290
},
{
"epoch": 1.9761775852734162,
"grad_norm": 0.298828125,
"learning_rate": 0.00017219085776725552,
"loss": 0.0989,
"step": 7300
},
{
"epoch": 1.9761775852734162,
"eval_loss": 0.15032486617565155,
"eval_runtime": 50.1976,
"eval_samples_per_second": 9.961,
"eval_steps_per_second": 0.319,
"step": 7300
},
{
"epoch": 1.9788846778559828,
"grad_norm": 0.2490234375,
"learning_rate": 0.00017173556729193227,
"loss": 0.1003,
"step": 7310
},
{
"epoch": 1.9815917704385488,
"grad_norm": 0.111328125,
"learning_rate": 0.000171280276816609,
"loss": 0.106,
"step": 7320
},
{
"epoch": 1.9842988630211154,
"grad_norm": 0.251953125,
"learning_rate": 0.00017082498634128575,
"loss": 0.1063,
"step": 7330
},
{
"epoch": 1.9870059556036817,
"grad_norm": 0.1875,
"learning_rate": 0.00017036969586596248,
"loss": 0.0799,
"step": 7340
},
{
"epoch": 1.989713048186248,
"grad_norm": 0.171875,
"learning_rate": 0.00016991440539063923,
"loss": 0.1114,
"step": 7350
},
{
"epoch": 1.9924201407688142,
"grad_norm": 0.205078125,
"learning_rate": 0.00016945911491531598,
"loss": 0.1166,
"step": 7360
},
{
"epoch": 1.9951272333513805,
"grad_norm": 0.369140625,
"learning_rate": 0.0001690038244399927,
"loss": 0.1113,
"step": 7370
},
{
"epoch": 1.997834325933947,
"grad_norm": 0.37890625,
"learning_rate": 0.00016854853396466946,
"loss": 0.084,
"step": 7380
},
{
"epoch": 2.000541418516513,
"grad_norm": 0.173828125,
"learning_rate": 0.00016809324348934622,
"loss": 0.1154,
"step": 7390
},
{
"epoch": 2.0032485110990796,
"grad_norm": 0.24609375,
"learning_rate": 0.00016763795301402297,
"loss": 0.0998,
"step": 7400
},
{
"epoch": 2.0032485110990796,
"eval_loss": 0.1515250951051712,
"eval_runtime": 51.4661,
"eval_samples_per_second": 9.715,
"eval_steps_per_second": 0.311,
"step": 7400
},
{
"epoch": 2.0059556036816457,
"grad_norm": 0.234375,
"learning_rate": 0.00016718266253869967,
"loss": 0.0854,
"step": 7410
},
{
"epoch": 2.0086626962642122,
"grad_norm": 0.1845703125,
"learning_rate": 0.00016672737206337642,
"loss": 0.0835,
"step": 7420
},
{
"epoch": 2.0113697888467788,
"grad_norm": 0.234375,
"learning_rate": 0.00016627208158805318,
"loss": 0.1461,
"step": 7430
},
{
"epoch": 2.014076881429345,
"grad_norm": 0.30078125,
"learning_rate": 0.00016581679111272993,
"loss": 0.0953,
"step": 7440
},
{
"epoch": 2.0167839740119113,
"grad_norm": 0.0830078125,
"learning_rate": 0.00016536150063740668,
"loss": 0.0911,
"step": 7450
},
{
"epoch": 2.0194910665944774,
"grad_norm": 0.1630859375,
"learning_rate": 0.0001649062101620834,
"loss": 0.0795,
"step": 7460
},
{
"epoch": 2.022198159177044,
"grad_norm": 0.2890625,
"learning_rate": 0.00016445091968676016,
"loss": 0.1355,
"step": 7470
},
{
"epoch": 2.02490525175961,
"grad_norm": 0.1904296875,
"learning_rate": 0.0001639956292114369,
"loss": 0.0898,
"step": 7480
},
{
"epoch": 2.0276123443421765,
"grad_norm": 0.1064453125,
"learning_rate": 0.00016354033873611364,
"loss": 0.1181,
"step": 7490
},
{
"epoch": 2.030319436924743,
"grad_norm": 0.138671875,
"learning_rate": 0.00016308504826079037,
"loss": 0.1093,
"step": 7500
},
{
"epoch": 2.030319436924743,
"eval_loss": 0.15121085941791534,
"eval_runtime": 50.3881,
"eval_samples_per_second": 9.923,
"eval_steps_per_second": 0.318,
"step": 7500
},
{
"epoch": 2.033026529507309,
"grad_norm": 0.16410693526268005,
"learning_rate": 0.00016262975778546712,
"loss": 0.1239,
"step": 7510
},
{
"epoch": 2.0357336220898756,
"grad_norm": 0.2050447016954422,
"learning_rate": 0.00016217446731014388,
"loss": 0.1089,
"step": 7520
},
{
"epoch": 2.0384407146724417,
"grad_norm": 0.3048134446144104,
"learning_rate": 0.00016171917683482063,
"loss": 0.1338,
"step": 7530
},
{
"epoch": 2.041147807255008,
"grad_norm": 0.2749716639518738,
"learning_rate": 0.00016126388635949739,
"loss": 0.125,
"step": 7540
},
{
"epoch": 2.0438548998375743,
"grad_norm": 0.2698230445384979,
"learning_rate": 0.00016080859588417409,
"loss": 0.1449,
"step": 7550
},
{
"epoch": 2.046561992420141,
"grad_norm": 0.13137570023536682,
"learning_rate": 0.00016035330540885084,
"loss": 0.0914,
"step": 7560
},
{
"epoch": 2.049269085002707,
"grad_norm": 0.5193370580673218,
"learning_rate": 0.0001598980149335276,
"loss": 0.1134,
"step": 7570
},
{
"epoch": 2.0519761775852734,
"grad_norm": 0.19694364070892334,
"learning_rate": 0.00015944272445820435,
"loss": 0.1058,
"step": 7580
},
{
"epoch": 2.05468327016784,
"grad_norm": 0.35678553581237793,
"learning_rate": 0.00015898743398288107,
"loss": 0.1209,
"step": 7590
},
{
"epoch": 2.057390362750406,
"grad_norm": 0.26627665758132935,
"learning_rate": 0.00015853214350755783,
"loss": 0.1166,
"step": 7600
},
{
"epoch": 2.057390362750406,
"eval_loss": 0.15223285555839539,
"eval_runtime": 54.6423,
"eval_samples_per_second": 9.15,
"eval_steps_per_second": 0.293,
"step": 7600
},
{
"epoch": 2.0600974553329725,
"grad_norm": 0.28212130069732666,
"learning_rate": 0.00015807685303223458,
"loss": 0.1307,
"step": 7610
},
{
"epoch": 2.0628045479155386,
"grad_norm": 0.2795688807964325,
"learning_rate": 0.0001576215625569113,
"loss": 0.1183,
"step": 7620
},
{
"epoch": 2.065511640498105,
"grad_norm": 0.17099031805992126,
"learning_rate": 0.00015716627208158806,
"loss": 0.0937,
"step": 7630
},
{
"epoch": 2.068218733080671,
"grad_norm": 0.3722110092639923,
"learning_rate": 0.0001567109816062648,
"loss": 0.1389,
"step": 7640
},
{
"epoch": 2.0709258256632377,
"grad_norm": 0.3928683400154114,
"learning_rate": 0.00015625569113094154,
"loss": 0.1033,
"step": 7650
},
{
"epoch": 2.073632918245804,
"grad_norm": 0.4039820432662964,
"learning_rate": 0.0001558004006556183,
"loss": 0.1348,
"step": 7660
},
{
"epoch": 2.0763400108283703,
"grad_norm": 0.18146918714046478,
"learning_rate": 0.00015534511018029505,
"loss": 0.1063,
"step": 7670
},
{
"epoch": 2.079047103410937,
"grad_norm": 0.27164018154144287,
"learning_rate": 0.00015488981970497177,
"loss": 0.1314,
"step": 7680
},
{
"epoch": 2.081754195993503,
"grad_norm": 0.15179571509361267,
"learning_rate": 0.0001544345292296485,
"loss": 0.1215,
"step": 7690
},
{
"epoch": 2.0844612885760694,
"grad_norm": 0.317647784948349,
"learning_rate": 0.00015397923875432525,
"loss": 0.1121,
"step": 7700
},
{
"epoch": 2.0844612885760694,
"eval_loss": 0.1496654599905014,
"eval_runtime": 51.3749,
"eval_samples_per_second": 9.732,
"eval_steps_per_second": 0.311,
"step": 7700
},
{
"epoch": 2.0871683811586355,
"grad_norm": 0.20976316928863525,
"learning_rate": 0.000153523948279002,
"loss": 0.0827,
"step": 7710
},
{
"epoch": 2.089875473741202,
"grad_norm": 0.36998632550239563,
"learning_rate": 0.00015306865780367876,
"loss": 0.0985,
"step": 7720
},
{
"epoch": 2.0925825663237685,
"grad_norm": 0.3467310070991516,
"learning_rate": 0.0001526133673283555,
"loss": 0.123,
"step": 7730
},
{
"epoch": 2.0952896589063346,
"grad_norm": 0.24463088810443878,
"learning_rate": 0.00015215807685303224,
"loss": 0.0928,
"step": 7740
},
{
"epoch": 2.097996751488901,
"grad_norm": 0.19121113419532776,
"learning_rate": 0.000151702786377709,
"loss": 0.1227,
"step": 7750
},
{
"epoch": 2.100703844071467,
"grad_norm": 0.3805799186229706,
"learning_rate": 0.00015124749590238572,
"loss": 0.1059,
"step": 7760
},
{
"epoch": 2.1034109366540337,
"grad_norm": 0.21060994267463684,
"learning_rate": 0.00015079220542706245,
"loss": 0.1047,
"step": 7770
},
{
"epoch": 2.1061180292365997,
"grad_norm": 0.27969229221343994,
"learning_rate": 0.0001503369149517392,
"loss": 0.1097,
"step": 7780
},
{
"epoch": 2.1088251218191663,
"grad_norm": 0.17218650877475739,
"learning_rate": 0.00014988162447641596,
"loss": 0.1041,
"step": 7790
},
{
"epoch": 2.1115322144017323,
"grad_norm": 0.16784489154815674,
"learning_rate": 0.0001494263340010927,
"loss": 0.1231,
"step": 7800
},
{
"epoch": 2.1115322144017323,
"eval_loss": 0.15185900032520294,
"eval_runtime": 51.7594,
"eval_samples_per_second": 9.66,
"eval_steps_per_second": 0.309,
"step": 7800
},
{
"epoch": 2.114239306984299,
"grad_norm": 0.15567447245121002,
"learning_rate": 0.00014897104352576946,
"loss": 0.1209,
"step": 7810
},
{
"epoch": 2.1169463995668654,
"grad_norm": 0.3253188729286194,
"learning_rate": 0.0001485157530504462,
"loss": 0.1214,
"step": 7820
},
{
"epoch": 2.1196534921494314,
"grad_norm": 0.20111972093582153,
"learning_rate": 0.00014806046257512292,
"loss": 0.1132,
"step": 7830
},
{
"epoch": 2.122360584731998,
"grad_norm": 0.26392456889152527,
"learning_rate": 0.00014760517209979967,
"loss": 0.1035,
"step": 7840
},
{
"epoch": 2.125067677314564,
"grad_norm": 0.21585513651371002,
"learning_rate": 0.00014714988162447642,
"loss": 0.1051,
"step": 7850
},
{
"epoch": 2.1277747698971305,
"grad_norm": 0.2535959482192993,
"learning_rate": 0.00014669459114915315,
"loss": 0.1261,
"step": 7860
},
{
"epoch": 2.1304818624796966,
"grad_norm": 0.1298813670873642,
"learning_rate": 0.0001462393006738299,
"loss": 0.0847,
"step": 7870
},
{
"epoch": 2.133188955062263,
"grad_norm": 0.25601625442504883,
"learning_rate": 0.00014578401019850666,
"loss": 0.1023,
"step": 7880
},
{
"epoch": 2.1358960476448297,
"grad_norm": 0.11331689357757568,
"learning_rate": 0.0001453287197231834,
"loss": 0.1191,
"step": 7890
},
{
"epoch": 2.1386031402273957,
"grad_norm": 0.28952756524086,
"learning_rate": 0.00014487342924786014,
"loss": 0.1102,
"step": 7900
},
{
"epoch": 2.1386031402273957,
"eval_loss": 0.14867374300956726,
"eval_runtime": 51.7768,
"eval_samples_per_second": 9.657,
"eval_steps_per_second": 0.309,
"step": 7900
},
{
"epoch": 2.1413102328099622,
"grad_norm": 0.19193996489048004,
"learning_rate": 0.00014441813877253686,
"loss": 0.1217,
"step": 7910
},
{
"epoch": 2.1440173253925283,
"grad_norm": 0.2836934030056,
"learning_rate": 0.00014396284829721362,
"loss": 0.1214,
"step": 7920
},
{
"epoch": 2.146724417975095,
"grad_norm": 0.17073260247707367,
"learning_rate": 0.00014350755782189037,
"loss": 0.1124,
"step": 7930
},
{
"epoch": 2.149431510557661,
"grad_norm": 0.18840056657791138,
"learning_rate": 0.00014305226734656712,
"loss": 0.0757,
"step": 7940
},
{
"epoch": 2.1521386031402274,
"grad_norm": 0.2775890529155731,
"learning_rate": 0.00014259697687124385,
"loss": 0.1388,
"step": 7950
},
{
"epoch": 2.154845695722794,
"grad_norm": 0.44438081979751587,
"learning_rate": 0.0001421416863959206,
"loss": 0.1055,
"step": 7960
},
{
"epoch": 2.15755278830536,
"grad_norm": 0.29212743043899536,
"learning_rate": 0.00014168639592059733,
"loss": 0.0971,
"step": 7970
},
{
"epoch": 2.1602598808879265,
"grad_norm": 0.4168376326560974,
"learning_rate": 0.00014123110544527408,
"loss": 0.109,
"step": 7980
},
{
"epoch": 2.1629669734704926,
"grad_norm": 0.3299972414970398,
"learning_rate": 0.00014077581496995084,
"loss": 0.1135,
"step": 7990
},
{
"epoch": 2.165674066053059,
"grad_norm": 0.06806978583335876,
"learning_rate": 0.00014032052449462756,
"loss": 0.1063,
"step": 8000
},
{
"epoch": 2.165674066053059,
"eval_loss": 0.14812108874320984,
"eval_runtime": 53.3268,
"eval_samples_per_second": 9.376,
"eval_steps_per_second": 0.3,
"step": 8000
},
{
"epoch": 2.168381158635625,
"grad_norm": 0.31759846210479736,
"learning_rate": 0.00013986523401930432,
"loss": 0.094,
"step": 8010
},
{
"epoch": 2.1710882512181917,
"grad_norm": 0.07891532778739929,
"learning_rate": 0.00013940994354398107,
"loss": 0.1148,
"step": 8020
},
{
"epoch": 2.173795343800758,
"grad_norm": 0.19877280294895172,
"learning_rate": 0.00013895465306865783,
"loss": 0.1104,
"step": 8030
},
{
"epoch": 2.1765024363833243,
"grad_norm": 0.2574431300163269,
"learning_rate": 0.00013849936259333452,
"loss": 0.0945,
"step": 8040
},
{
"epoch": 2.179209528965891,
"grad_norm": 0.31091296672821045,
"learning_rate": 0.00013804407211801128,
"loss": 0.1113,
"step": 8050
},
{
"epoch": 2.181916621548457,
"grad_norm": 0.2589088976383209,
"learning_rate": 0.00013758878164268803,
"loss": 0.0903,
"step": 8060
},
{
"epoch": 2.1846237141310234,
"grad_norm": 0.1652253419160843,
"learning_rate": 0.00013713349116736479,
"loss": 0.106,
"step": 8070
},
{
"epoch": 2.1873308067135895,
"grad_norm": 0.11054456979036331,
"learning_rate": 0.00013667820069204154,
"loss": 0.0956,
"step": 8080
},
{
"epoch": 2.190037899296156,
"grad_norm": 0.13949480652809143,
"learning_rate": 0.00013622291021671827,
"loss": 0.1021,
"step": 8090
},
{
"epoch": 2.192744991878722,
"grad_norm": 0.1612575799226761,
"learning_rate": 0.00013576761974139502,
"loss": 0.0975,
"step": 8100
},
{
"epoch": 2.192744991878722,
"eval_loss": 0.15088878571987152,
"eval_runtime": 52.3858,
"eval_samples_per_second": 9.545,
"eval_steps_per_second": 0.305,
"step": 8100
},
{
"epoch": 2.1954520844612886,
"grad_norm": 0.17387332022190094,
"learning_rate": 0.00013531232926607177,
"loss": 0.1086,
"step": 8110
},
{
"epoch": 2.198159177043855,
"grad_norm": 0.17473745346069336,
"learning_rate": 0.0001348570387907485,
"loss": 0.1015,
"step": 8120
},
{
"epoch": 2.200866269626421,
"grad_norm": 0.26711127161979675,
"learning_rate": 0.00013440174831542523,
"loss": 0.0953,
"step": 8130
},
{
"epoch": 2.2035733622089877,
"grad_norm": 0.407000869512558,
"learning_rate": 0.00013394645784010198,
"loss": 0.1357,
"step": 8140
},
{
"epoch": 2.2062804547915538,
"grad_norm": 0.4457509517669678,
"learning_rate": 0.00013349116736477873,
"loss": 0.1154,
"step": 8150
},
{
"epoch": 2.2089875473741203,
"grad_norm": 0.38976404070854187,
"learning_rate": 0.0001330358768894555,
"loss": 0.0974,
"step": 8160
},
{
"epoch": 2.2116946399566864,
"grad_norm": 0.15152068436145782,
"learning_rate": 0.00013258058641413224,
"loss": 0.1106,
"step": 8170
},
{
"epoch": 2.214401732539253,
"grad_norm": 0.31610003113746643,
"learning_rate": 0.00013212529593880897,
"loss": 0.1105,
"step": 8180
},
{
"epoch": 2.2171088251218194,
"grad_norm": 0.3985111117362976,
"learning_rate": 0.0001316700054634857,
"loss": 0.1184,
"step": 8190
},
{
"epoch": 2.2198159177043855,
"grad_norm": 0.28766173124313354,
"learning_rate": 0.00013121471498816245,
"loss": 0.0931,
"step": 8200
},
{
"epoch": 2.2198159177043855,
"eval_loss": 0.14935404062271118,
"eval_runtime": 52.889,
"eval_samples_per_second": 9.454,
"eval_steps_per_second": 0.303,
"step": 8200
},
{
"epoch": 2.222523010286952,
"grad_norm": 0.3414205312728882,
"learning_rate": 0.0001307594245128392,
"loss": 0.104,
"step": 8210
},
{
"epoch": 2.225230102869518,
"grad_norm": 0.49229031801223755,
"learning_rate": 0.00013030413403751593,
"loss": 0.1446,
"step": 8220
},
{
"epoch": 2.2279371954520846,
"grad_norm": 0.3560900092124939,
"learning_rate": 0.00012984884356219268,
"loss": 0.1134,
"step": 8230
},
{
"epoch": 2.2306442880346506,
"grad_norm": 0.17200881242752075,
"learning_rate": 0.00012939355308686943,
"loss": 0.1069,
"step": 8240
},
{
"epoch": 2.233351380617217,
"grad_norm": 0.3682667016983032,
"learning_rate": 0.0001289382626115462,
"loss": 0.1188,
"step": 8250
},
{
"epoch": 2.2360584731997832,
"grad_norm": 0.16727127134799957,
"learning_rate": 0.00012848297213622291,
"loss": 0.1427,
"step": 8260
},
{
"epoch": 2.2387655657823498,
"grad_norm": 0.14669375121593475,
"learning_rate": 0.00012802768166089964,
"loss": 0.0982,
"step": 8270
},
{
"epoch": 2.2414726583649163,
"grad_norm": 0.04038509353995323,
"learning_rate": 0.0001275723911855764,
"loss": 0.0908,
"step": 8280
},
{
"epoch": 2.2441797509474823,
"grad_norm": 0.10155865550041199,
"learning_rate": 0.00012711710071025315,
"loss": 0.0877,
"step": 8290
},
{
"epoch": 2.246886843530049,
"grad_norm": 0.24377843737602234,
"learning_rate": 0.0001266618102349299,
"loss": 0.0991,
"step": 8300
},
{
"epoch": 2.246886843530049,
"eval_loss": 0.15127049386501312,
"eval_runtime": 52.9728,
"eval_samples_per_second": 9.439,
"eval_steps_per_second": 0.302,
"step": 8300
},
{
"epoch": 2.249593936112615,
"grad_norm": 0.23823745548725128,
"learning_rate": 0.00012620651975960663,
"loss": 0.1043,
"step": 8310
},
{
"epoch": 2.2523010286951815,
"grad_norm": 0.227587029337883,
"learning_rate": 0.00012575122928428338,
"loss": 0.1056,
"step": 8320
},
{
"epoch": 2.2550081212777475,
"grad_norm": 0.14021603763103485,
"learning_rate": 0.0001252959388089601,
"loss": 0.0994,
"step": 8330
},
{
"epoch": 2.257715213860314,
"grad_norm": 0.23243439197540283,
"learning_rate": 0.00012484064833363686,
"loss": 0.1024,
"step": 8340
},
{
"epoch": 2.26042230644288,
"grad_norm": 0.1790105700492859,
"learning_rate": 0.00012438535785831362,
"loss": 0.084,
"step": 8350
},
{
"epoch": 2.2631293990254466,
"grad_norm": 0.2110361009836197,
"learning_rate": 0.00012393006738299034,
"loss": 0.0895,
"step": 8360
},
{
"epoch": 2.265836491608013,
"grad_norm": 0.24758604168891907,
"learning_rate": 0.0001234747769076671,
"loss": 0.1154,
"step": 8370
},
{
"epoch": 2.268543584190579,
"grad_norm": 0.16578835248947144,
"learning_rate": 0.00012301948643234385,
"loss": 0.1193,
"step": 8380
},
{
"epoch": 2.2712506767731457,
"grad_norm": 0.20040668547153473,
"learning_rate": 0.00012256419595702058,
"loss": 0.1194,
"step": 8390
},
{
"epoch": 2.273957769355712,
"grad_norm": 0.08023863285779953,
"learning_rate": 0.00012210890548169733,
"loss": 0.1105,
"step": 8400
},
{
"epoch": 2.273957769355712,
"eval_loss": 0.15107358992099762,
"eval_runtime": 51.9198,
"eval_samples_per_second": 9.63,
"eval_steps_per_second": 0.308,
"step": 8400
},
{
"epoch": 2.2766648619382783,
"grad_norm": 0.11227234452962875,
"learning_rate": 0.00012165361500637407,
"loss": 0.1002,
"step": 8410
},
{
"epoch": 2.279371954520845,
"grad_norm": 0.20993013679981232,
"learning_rate": 0.00012119832453105081,
"loss": 0.1062,
"step": 8420
},
{
"epoch": 2.282079047103411,
"grad_norm": 0.2064633071422577,
"learning_rate": 0.00012074303405572756,
"loss": 0.0985,
"step": 8430
},
{
"epoch": 2.2847861396859774,
"grad_norm": 0.19416238367557526,
"learning_rate": 0.0001202877435804043,
"loss": 0.1329,
"step": 8440
},
{
"epoch": 2.2874932322685435,
"grad_norm": 0.1850809007883072,
"learning_rate": 0.00011983245310508104,
"loss": 0.0988,
"step": 8450
},
{
"epoch": 2.29020032485111,
"grad_norm": 0.17581935226917267,
"learning_rate": 0.00011937716262975778,
"loss": 0.1033,
"step": 8460
},
{
"epoch": 2.292907417433676,
"grad_norm": 0.38977089524269104,
"learning_rate": 0.00011892187215443454,
"loss": 0.1167,
"step": 8470
},
{
"epoch": 2.2956145100162426,
"grad_norm": 0.1589565873146057,
"learning_rate": 0.00011846658167911128,
"loss": 0.0888,
"step": 8480
},
{
"epoch": 2.2983216025988087,
"grad_norm": 0.2188950628042221,
"learning_rate": 0.00011801129120378802,
"loss": 0.0936,
"step": 8490
},
{
"epoch": 2.301028695181375,
"grad_norm": 0.15488547086715698,
"learning_rate": 0.00011755600072846476,
"loss": 0.1217,
"step": 8500
},
{
"epoch": 2.301028695181375,
"eval_loss": 0.15261313319206238,
"eval_runtime": 53.1666,
"eval_samples_per_second": 9.404,
"eval_steps_per_second": 0.301,
"step": 8500
},
{
"epoch": 2.3037357877639417,
"grad_norm": 0.448257178068161,
"learning_rate": 0.00011710071025314151,
"loss": 0.1098,
"step": 8510
},
{
"epoch": 2.306442880346508,
"grad_norm": 0.2177487164735794,
"learning_rate": 0.00011664541977781825,
"loss": 0.1096,
"step": 8520
},
{
"epoch": 2.3091499729290743,
"grad_norm": 0.1455925852060318,
"learning_rate": 0.00011619012930249499,
"loss": 0.1125,
"step": 8530
},
{
"epoch": 2.3118570655116404,
"grad_norm": 0.22162719070911407,
"learning_rate": 0.00011573483882717174,
"loss": 0.1104,
"step": 8540
},
{
"epoch": 2.314564158094207,
"grad_norm": 0.31285014748573303,
"learning_rate": 0.00011527954835184849,
"loss": 0.1086,
"step": 8550
},
{
"epoch": 2.317271250676773,
"grad_norm": 0.3933559060096741,
"learning_rate": 0.00011482425787652523,
"loss": 0.1179,
"step": 8560
},
{
"epoch": 2.3199783432593395,
"grad_norm": 0.2300388514995575,
"learning_rate": 0.00011436896740120197,
"loss": 0.117,
"step": 8570
},
{
"epoch": 2.3226854358419056,
"grad_norm": 0.23163248598575592,
"learning_rate": 0.00011391367692587872,
"loss": 0.1183,
"step": 8580
},
{
"epoch": 2.325392528424472,
"grad_norm": 0.5895636677742004,
"learning_rate": 0.00011345838645055545,
"loss": 0.1439,
"step": 8590
},
{
"epoch": 2.3280996210070386,
"grad_norm": 0.2042539417743683,
"learning_rate": 0.0001130030959752322,
"loss": 0.0834,
"step": 8600
},
{
"epoch": 2.3280996210070386,
"eval_loss": 0.15408456325531006,
"eval_runtime": 51.8771,
"eval_samples_per_second": 9.638,
"eval_steps_per_second": 0.308,
"step": 8600
},
{
"epoch": 2.3308067135896047,
"grad_norm": 0.2250804603099823,
"learning_rate": 0.00011254780549990895,
"loss": 0.1125,
"step": 8610
},
{
"epoch": 2.333513806172171,
"grad_norm": 0.19512999057769775,
"learning_rate": 0.00011209251502458569,
"loss": 0.0954,
"step": 8620
},
{
"epoch": 2.3362208987547373,
"grad_norm": 0.2871667146682739,
"learning_rate": 0.00011163722454926243,
"loss": 0.1123,
"step": 8630
},
{
"epoch": 2.338927991337304,
"grad_norm": 0.5765364766120911,
"learning_rate": 0.00011118193407393917,
"loss": 0.0942,
"step": 8640
},
{
"epoch": 2.3416350839198703,
"grad_norm": 0.26905539631843567,
"learning_rate": 0.00011072664359861593,
"loss": 0.1314,
"step": 8650
},
{
"epoch": 2.3443421765024364,
"grad_norm": 0.40762558579444885,
"learning_rate": 0.00011027135312329265,
"loss": 0.0969,
"step": 8660
},
{
"epoch": 2.347049269085003,
"grad_norm": 0.2780246436595917,
"learning_rate": 0.0001098160626479694,
"loss": 0.1091,
"step": 8670
},
{
"epoch": 2.349756361667569,
"grad_norm": 0.2559877634048462,
"learning_rate": 0.00010936077217264615,
"loss": 0.0946,
"step": 8680
},
{
"epoch": 2.3524634542501355,
"grad_norm": 0.31101638078689575,
"learning_rate": 0.0001089054816973229,
"loss": 0.1132,
"step": 8690
},
{
"epoch": 2.3551705468327016,
"grad_norm": 0.2569589912891388,
"learning_rate": 0.00010845019122199964,
"loss": 0.1025,
"step": 8700
},
{
"epoch": 2.3551705468327016,
"eval_loss": 0.15364421904087067,
"eval_runtime": 53.3286,
"eval_samples_per_second": 9.376,
"eval_steps_per_second": 0.3,
"step": 8700
},
{
"epoch": 2.357877639415268,
"grad_norm": 0.11930614709854126,
"learning_rate": 0.00010799490074667638,
"loss": 0.1002,
"step": 8710
},
{
"epoch": 2.360584731997834,
"grad_norm": 0.1807277500629425,
"learning_rate": 0.00010753961027135313,
"loss": 0.107,
"step": 8720
},
{
"epoch": 2.3632918245804007,
"grad_norm": 0.12084764242172241,
"learning_rate": 0.00010708431979602986,
"loss": 0.0863,
"step": 8730
},
{
"epoch": 2.365998917162967,
"grad_norm": 0.5520146489143372,
"learning_rate": 0.00010662902932070661,
"loss": 0.1199,
"step": 8740
},
{
"epoch": 2.3687060097455332,
"grad_norm": 0.3508378267288208,
"learning_rate": 0.00010617373884538335,
"loss": 0.1079,
"step": 8750
},
{
"epoch": 2.3714131023280998,
"grad_norm": 0.414885550737381,
"learning_rate": 0.00010571844837006011,
"loss": 0.1308,
"step": 8760
},
{
"epoch": 2.374120194910666,
"grad_norm": 0.2651754319667816,
"learning_rate": 0.00010526315789473683,
"loss": 0.109,
"step": 8770
},
{
"epoch": 2.3768272874932324,
"grad_norm": 0.23027944564819336,
"learning_rate": 0.00010480786741941359,
"loss": 0.0785,
"step": 8780
},
{
"epoch": 2.3795343800757984,
"grad_norm": 0.2035895586013794,
"learning_rate": 0.00010435257694409034,
"loss": 0.1022,
"step": 8790
},
{
"epoch": 2.382241472658365,
"grad_norm": 0.3900894522666931,
"learning_rate": 0.00010389728646876707,
"loss": 0.1025,
"step": 8800
},
{
"epoch": 2.382241472658365,
"eval_loss": 0.15135331451892853,
"eval_runtime": 52.1508,
"eval_samples_per_second": 9.588,
"eval_steps_per_second": 0.307,
"step": 8800
},
{
"epoch": 2.384948565240931,
"grad_norm": 0.25562533736228943,
"learning_rate": 0.00010344199599344382,
"loss": 0.1165,
"step": 8810
},
{
"epoch": 2.3876556578234975,
"grad_norm": 0.3861636519432068,
"learning_rate": 0.00010298670551812056,
"loss": 0.1022,
"step": 8820
},
{
"epoch": 2.390362750406064,
"grad_norm": 0.11617624759674072,
"learning_rate": 0.00010253141504279732,
"loss": 0.0868,
"step": 8830
},
{
"epoch": 2.39306984298863,
"grad_norm": 0.21198932826519012,
"learning_rate": 0.00010207612456747404,
"loss": 0.1252,
"step": 8840
},
{
"epoch": 2.3957769355711966,
"grad_norm": 0.17055420577526093,
"learning_rate": 0.0001016208340921508,
"loss": 0.1306,
"step": 8850
},
{
"epoch": 2.3984840281537627,
"grad_norm": 0.11652148514986038,
"learning_rate": 0.00010116554361682754,
"loss": 0.1009,
"step": 8860
},
{
"epoch": 2.4011911207363292,
"grad_norm": 0.13691246509552002,
"learning_rate": 0.00010071025314150428,
"loss": 0.1085,
"step": 8870
},
{
"epoch": 2.4038982133188957,
"grad_norm": 0.07319015264511108,
"learning_rate": 0.00010025496266618103,
"loss": 0.0754,
"step": 8880
},
{
"epoch": 2.406605305901462,
"grad_norm": 0.3480643630027771,
"learning_rate": 9.979967219085777e-05,
"loss": 0.1308,
"step": 8890
},
{
"epoch": 2.4093123984840283,
"grad_norm": 0.15720431506633759,
"learning_rate": 9.934438171553452e-05,
"loss": 0.1385,
"step": 8900
},
{
"epoch": 2.4093123984840283,
"eval_loss": 0.14918014407157898,
"eval_runtime": 53.1709,
"eval_samples_per_second": 9.404,
"eval_steps_per_second": 0.301,
"step": 8900
},
{
"epoch": 2.4120194910665944,
"grad_norm": 0.0895061045885086,
"learning_rate": 9.888909124021125e-05,
"loss": 0.1012,
"step": 8910
},
{
"epoch": 2.414726583649161,
"grad_norm": 0.3535843789577484,
"learning_rate": 9.8433800764888e-05,
"loss": 0.1049,
"step": 8920
},
{
"epoch": 2.417433676231727,
"grad_norm": 0.303932785987854,
"learning_rate": 9.797851028956474e-05,
"loss": 0.0859,
"step": 8930
},
{
"epoch": 2.4201407688142935,
"grad_norm": 0.3384048640727997,
"learning_rate": 9.75232198142415e-05,
"loss": 0.1232,
"step": 8940
},
{
"epoch": 2.4228478613968596,
"grad_norm": 0.44046324491500854,
"learning_rate": 9.706792933891822e-05,
"loss": 0.1284,
"step": 8950
},
{
"epoch": 2.425554953979426,
"grad_norm": 0.13039818406105042,
"learning_rate": 9.661263886359498e-05,
"loss": 0.092,
"step": 8960
},
{
"epoch": 2.4282620465619926,
"grad_norm": 0.07619010657072067,
"learning_rate": 9.615734838827173e-05,
"loss": 0.0939,
"step": 8970
},
{
"epoch": 2.4309691391445587,
"grad_norm": 0.23351195454597473,
"learning_rate": 9.570205791294846e-05,
"loss": 0.1092,
"step": 8980
},
{
"epoch": 2.433676231727125,
"grad_norm": 0.14060206711292267,
"learning_rate": 9.524676743762521e-05,
"loss": 0.0893,
"step": 8990
},
{
"epoch": 2.4363833243096913,
"grad_norm": 0.18231533467769623,
"learning_rate": 9.479147696230195e-05,
"loss": 0.1229,
"step": 9000
},
{
"epoch": 2.4363833243096913,
"eval_loss": 0.14696462452411652,
"eval_runtime": 52.2474,
"eval_samples_per_second": 9.57,
"eval_steps_per_second": 0.306,
"step": 9000
},
{
"epoch": 2.439090416892258,
"grad_norm": 0.26201844215393066,
"learning_rate": 9.43361864869787e-05,
"loss": 0.1107,
"step": 9010
},
{
"epoch": 2.441797509474824,
"grad_norm": 0.16013525426387787,
"learning_rate": 9.388089601165543e-05,
"loss": 0.1007,
"step": 9020
},
{
"epoch": 2.4445046020573904,
"grad_norm": 0.3495844006538391,
"learning_rate": 9.342560553633218e-05,
"loss": 0.1171,
"step": 9030
},
{
"epoch": 2.4472116946399565,
"grad_norm": 0.323893666267395,
"learning_rate": 9.297031506100892e-05,
"loss": 0.1002,
"step": 9040
},
{
"epoch": 2.449918787222523,
"grad_norm": 0.27720922231674194,
"learning_rate": 9.251502458568566e-05,
"loss": 0.1335,
"step": 9050
},
{
"epoch": 2.4526258798050895,
"grad_norm": 0.20315824449062347,
"learning_rate": 9.205973411036242e-05,
"loss": 0.0839,
"step": 9060
},
{
"epoch": 2.4553329723876556,
"grad_norm": 0.18524716794490814,
"learning_rate": 9.160444363503916e-05,
"loss": 0.1173,
"step": 9070
},
{
"epoch": 2.458040064970222,
"grad_norm": 0.5791286826133728,
"learning_rate": 9.114915315971591e-05,
"loss": 0.1349,
"step": 9080
},
{
"epoch": 2.460747157552788,
"grad_norm": 0.0970366969704628,
"learning_rate": 9.069386268439264e-05,
"loss": 0.1141,
"step": 9090
},
{
"epoch": 2.4634542501353547,
"grad_norm": 0.425823837518692,
"learning_rate": 9.023857220906939e-05,
"loss": 0.1014,
"step": 9100
},
{
"epoch": 2.4634542501353547,
"eval_loss": 0.15250267088413239,
"eval_runtime": 52.0809,
"eval_samples_per_second": 9.6,
"eval_steps_per_second": 0.307,
"step": 9100
},
{
"epoch": 2.466161342717921,
"grad_norm": 0.2796113193035126,
"learning_rate": 8.978328173374613e-05,
"loss": 0.1077,
"step": 9110
},
{
"epoch": 2.4688684353004873,
"grad_norm": 0.32091084122657776,
"learning_rate": 8.932799125842287e-05,
"loss": 0.0973,
"step": 9120
},
{
"epoch": 2.471575527883054,
"grad_norm": 0.17554207146167755,
"learning_rate": 8.887270078309961e-05,
"loss": 0.1089,
"step": 9130
},
{
"epoch": 2.47428262046562,
"grad_norm": 0.2353290617465973,
"learning_rate": 8.841741030777637e-05,
"loss": 0.1037,
"step": 9140
},
{
"epoch": 2.4769897130481864,
"grad_norm": 0.2735700309276581,
"learning_rate": 8.796211983245312e-05,
"loss": 0.1067,
"step": 9150
},
{
"epoch": 2.4796968056307525,
"grad_norm": 0.07648351788520813,
"learning_rate": 8.750682935712985e-05,
"loss": 0.1258,
"step": 9160
},
{
"epoch": 2.482403898213319,
"grad_norm": 0.2713570296764374,
"learning_rate": 8.70515388818066e-05,
"loss": 0.1134,
"step": 9170
},
{
"epoch": 2.485110990795885,
"grad_norm": 0.2194133698940277,
"learning_rate": 8.659624840648334e-05,
"loss": 0.1103,
"step": 9180
},
{
"epoch": 2.4878180833784516,
"grad_norm": 0.34865009784698486,
"learning_rate": 8.614095793116008e-05,
"loss": 0.0919,
"step": 9190
},
{
"epoch": 2.490525175961018,
"grad_norm": 0.22364215552806854,
"learning_rate": 8.568566745583682e-05,
"loss": 0.1001,
"step": 9200
},
{
"epoch": 2.490525175961018,
"eval_loss": 0.15467584133148193,
"eval_runtime": 53.1037,
"eval_samples_per_second": 9.416,
"eval_steps_per_second": 0.301,
"step": 9200
},
{
"epoch": 2.493232268543584,
"grad_norm": 0.3470965623855591,
"learning_rate": 8.523037698051357e-05,
"loss": 0.1028,
"step": 9210
},
{
"epoch": 2.4959393611261507,
"grad_norm": 0.23215217888355255,
"learning_rate": 8.477508650519031e-05,
"loss": 0.0985,
"step": 9220
},
{
"epoch": 2.4986464537087167,
"grad_norm": 0.16627094149589539,
"learning_rate": 8.431979602986705e-05,
"loss": 0.1572,
"step": 9230
},
{
"epoch": 2.5013535462912833,
"grad_norm": 0.34447458386421204,
"learning_rate": 8.386450555454381e-05,
"loss": 0.1048,
"step": 9240
},
{
"epoch": 2.5040606388738493,
"grad_norm": 0.5015201568603516,
"learning_rate": 8.340921507922055e-05,
"loss": 0.1147,
"step": 9250
},
{
"epoch": 2.506767731456416,
"grad_norm": 0.5202590227127075,
"learning_rate": 8.295392460389729e-05,
"loss": 0.1112,
"step": 9260
},
{
"epoch": 2.509474824038982,
"grad_norm": 0.22088183462619781,
"learning_rate": 8.249863412857403e-05,
"loss": 0.1044,
"step": 9270
},
{
"epoch": 2.5121819166215484,
"grad_norm": 0.269815593957901,
"learning_rate": 8.204334365325078e-05,
"loss": 0.0893,
"step": 9280
},
{
"epoch": 2.514889009204115,
"grad_norm": 0.3005484938621521,
"learning_rate": 8.158805317792752e-05,
"loss": 0.0742,
"step": 9290
},
{
"epoch": 2.517596101786681,
"grad_norm": 0.2150215059518814,
"learning_rate": 8.113276270260426e-05,
"loss": 0.1216,
"step": 9300
},
{
"epoch": 2.517596101786681,
"eval_loss": 0.15590840578079224,
"eval_runtime": 53.3113,
"eval_samples_per_second": 9.379,
"eval_steps_per_second": 0.3,
"step": 9300
},
{
"epoch": 2.5203031943692475,
"grad_norm": 0.3834645748138428,
"learning_rate": 8.0677472227281e-05,
"loss": 0.1044,
"step": 9310
},
{
"epoch": 2.5230102869518136,
"grad_norm": 0.22386111319065094,
"learning_rate": 8.022218175195775e-05,
"loss": 0.1021,
"step": 9320
},
{
"epoch": 2.52571737953438,
"grad_norm": 0.3309231996536255,
"learning_rate": 7.97668912766345e-05,
"loss": 0.1251,
"step": 9330
},
{
"epoch": 2.5284244721169467,
"grad_norm": 0.6254246830940247,
"learning_rate": 7.931160080131124e-05,
"loss": 0.1014,
"step": 9340
},
{
"epoch": 2.5311315646995127,
"grad_norm": 0.08925973623991013,
"learning_rate": 7.885631032598799e-05,
"loss": 0.0984,
"step": 9350
},
{
"epoch": 2.533838657282079,
"grad_norm": 0.2096097618341446,
"learning_rate": 7.840101985066473e-05,
"loss": 0.1046,
"step": 9360
},
{
"epoch": 2.5365457498646453,
"grad_norm": 0.3621312379837036,
"learning_rate": 7.794572937534147e-05,
"loss": 0.0862,
"step": 9370
},
{
"epoch": 2.539252842447212,
"grad_norm": 0.40978795289993286,
"learning_rate": 7.749043890001821e-05,
"loss": 0.0962,
"step": 9380
},
{
"epoch": 2.541959935029778,
"grad_norm": 0.40507039427757263,
"learning_rate": 7.703514842469496e-05,
"loss": 0.115,
"step": 9390
},
{
"epoch": 2.5446670276123444,
"grad_norm": 0.09742297232151031,
"learning_rate": 7.657985794937169e-05,
"loss": 0.0933,
"step": 9400
},
{
"epoch": 2.5446670276123444,
"eval_loss": 0.1495400369167328,
"eval_runtime": 52.305,
"eval_samples_per_second": 9.559,
"eval_steps_per_second": 0.306,
"step": 9400
},
{
"epoch": 2.5473741201949105,
"grad_norm": 0.24087636172771454,
"learning_rate": 7.612456747404844e-05,
"loss": 0.1054,
"step": 9410
},
{
"epoch": 2.550081212777477,
"grad_norm": 0.25820058584213257,
"learning_rate": 7.56692769987252e-05,
"loss": 0.0947,
"step": 9420
},
{
"epoch": 2.5527883053600435,
"grad_norm": 0.1936085969209671,
"learning_rate": 7.521398652340194e-05,
"loss": 0.0821,
"step": 9430
},
{
"epoch": 2.5554953979426096,
"grad_norm": 0.3259497284889221,
"learning_rate": 7.475869604807868e-05,
"loss": 0.1147,
"step": 9440
},
{
"epoch": 2.558202490525176,
"grad_norm": 0.22895482182502747,
"learning_rate": 7.430340557275542e-05,
"loss": 0.0919,
"step": 9450
},
{
"epoch": 2.560909583107742,
"grad_norm": 0.17221614718437195,
"learning_rate": 7.384811509743217e-05,
"loss": 0.1029,
"step": 9460
},
{
"epoch": 2.5636166756903087,
"grad_norm": 0.46346572041511536,
"learning_rate": 7.33928246221089e-05,
"loss": 0.1073,
"step": 9470
},
{
"epoch": 2.566323768272875,
"grad_norm": 0.5658268332481384,
"learning_rate": 7.293753414678565e-05,
"loss": 0.119,
"step": 9480
},
{
"epoch": 2.5690308608554413,
"grad_norm": 0.25370046496391296,
"learning_rate": 7.248224367146239e-05,
"loss": 0.1091,
"step": 9490
},
{
"epoch": 2.5717379534380074,
"grad_norm": 0.26131343841552734,
"learning_rate": 7.202695319613914e-05,
"loss": 0.0881,
"step": 9500
},
{
"epoch": 2.5717379534380074,
"eval_loss": 0.15294285118579865,
"eval_runtime": 51.764,
"eval_samples_per_second": 9.659,
"eval_steps_per_second": 0.309,
"step": 9500
},
{
"epoch": 2.574445046020574,
"grad_norm": 0.17354480922222137,
"learning_rate": 7.157166272081588e-05,
"loss": 0.1093,
"step": 9510
},
{
"epoch": 2.5771521386031404,
"grad_norm": 0.3243185877799988,
"learning_rate": 7.111637224549262e-05,
"loss": 0.0885,
"step": 9520
},
{
"epoch": 2.5798592311857065,
"grad_norm": 0.3836454451084137,
"learning_rate": 7.066108177016938e-05,
"loss": 0.1073,
"step": 9530
},
{
"epoch": 2.582566323768273,
"grad_norm": 0.3406403660774231,
"learning_rate": 7.02057912948461e-05,
"loss": 0.1115,
"step": 9540
},
{
"epoch": 2.585273416350839,
"grad_norm": 0.2781931161880493,
"learning_rate": 6.975050081952286e-05,
"loss": 0.1074,
"step": 9550
},
{
"epoch": 2.5879805089334056,
"grad_norm": 0.16563768684864044,
"learning_rate": 6.92952103441996e-05,
"loss": 0.1395,
"step": 9560
},
{
"epoch": 2.590687601515972,
"grad_norm": 0.12540295720100403,
"learning_rate": 6.883991986887635e-05,
"loss": 0.1409,
"step": 9570
},
{
"epoch": 2.593394694098538,
"grad_norm": 0.22774340212345123,
"learning_rate": 6.838462939355308e-05,
"loss": 0.1045,
"step": 9580
},
{
"epoch": 2.5961017866811043,
"grad_norm": 0.6437894701957703,
"learning_rate": 6.792933891822983e-05,
"loss": 0.1695,
"step": 9590
},
{
"epoch": 2.5988088792636708,
"grad_norm": 0.08183804899454117,
"learning_rate": 6.747404844290659e-05,
"loss": 0.1047,
"step": 9600
},
{
"epoch": 2.5988088792636708,
"eval_loss": 0.15261416137218475,
"eval_runtime": 51.5572,
"eval_samples_per_second": 9.698,
"eval_steps_per_second": 0.31,
"step": 9600
},
{
"epoch": 2.6015159718462373,
"grad_norm": 0.34598779678344727,
"learning_rate": 6.701875796758333e-05,
"loss": 0.1092,
"step": 9610
},
{
"epoch": 2.6042230644288034,
"grad_norm": 0.36952805519104004,
"learning_rate": 6.656346749226007e-05,
"loss": 0.0929,
"step": 9620
},
{
"epoch": 2.60693015701137,
"grad_norm": 0.09631340950727463,
"learning_rate": 6.61081770169368e-05,
"loss": 0.0918,
"step": 9630
},
{
"epoch": 2.609637249593936,
"grad_norm": 0.3857302963733673,
"learning_rate": 6.565288654161356e-05,
"loss": 0.1054,
"step": 9640
},
{
"epoch": 2.6123443421765025,
"grad_norm": 0.38102293014526367,
"learning_rate": 6.519759606629029e-05,
"loss": 0.117,
"step": 9650
},
{
"epoch": 2.615051434759069,
"grad_norm": 0.18504104018211365,
"learning_rate": 6.474230559096704e-05,
"loss": 0.0875,
"step": 9660
},
{
"epoch": 2.617758527341635,
"grad_norm": 0.12376754730939865,
"learning_rate": 6.428701511564378e-05,
"loss": 0.1076,
"step": 9670
},
{
"epoch": 2.6204656199242016,
"grad_norm": 0.3745812475681305,
"learning_rate": 6.383172464032053e-05,
"loss": 0.0932,
"step": 9680
},
{
"epoch": 2.6231727125067676,
"grad_norm": 0.17206531763076782,
"learning_rate": 6.337643416499727e-05,
"loss": 0.0923,
"step": 9690
},
{
"epoch": 2.625879805089334,
"grad_norm": 0.4507389962673187,
"learning_rate": 6.292114368967401e-05,
"loss": 0.1159,
"step": 9700
},
{
"epoch": 2.625879805089334,
"eval_loss": 0.15273411571979523,
"eval_runtime": 51.4115,
"eval_samples_per_second": 9.725,
"eval_steps_per_second": 0.311,
"step": 9700
},
{
"epoch": 2.6285868976719002,
"grad_norm": 0.22679084539413452,
"learning_rate": 6.246585321435075e-05,
"loss": 0.1191,
"step": 9710
},
{
"epoch": 2.6312939902544668,
"grad_norm": 0.15608219802379608,
"learning_rate": 6.201056273902749e-05,
"loss": 0.113,
"step": 9720
},
{
"epoch": 2.634001082837033,
"grad_norm": 0.20562225580215454,
"learning_rate": 6.155527226370425e-05,
"loss": 0.1299,
"step": 9730
},
{
"epoch": 2.6367081754195993,
"grad_norm": 0.5659207701683044,
"learning_rate": 6.109998178838099e-05,
"loss": 0.1035,
"step": 9740
},
{
"epoch": 2.639415268002166,
"grad_norm": 0.17303211987018585,
"learning_rate": 6.0644691313057734e-05,
"loss": 0.1127,
"step": 9750
},
{
"epoch": 2.642122360584732,
"grad_norm": 0.17225438356399536,
"learning_rate": 6.018940083773448e-05,
"loss": 0.0961,
"step": 9760
},
{
"epoch": 2.6448294531672984,
"grad_norm": 0.2602575123310089,
"learning_rate": 5.973411036241122e-05,
"loss": 0.0971,
"step": 9770
},
{
"epoch": 2.6475365457498645,
"grad_norm": 0.17937231063842773,
"learning_rate": 5.927881988708796e-05,
"loss": 0.09,
"step": 9780
},
{
"epoch": 2.650243638332431,
"grad_norm": 0.35192254185676575,
"learning_rate": 5.882352941176471e-05,
"loss": 0.1198,
"step": 9790
},
{
"epoch": 2.6529507309149976,
"grad_norm": 0.1931726187467575,
"learning_rate": 5.836823893644145e-05,
"loss": 0.0925,
"step": 9800
},
{
"epoch": 2.6529507309149976,
"eval_loss": 0.1525072157382965,
"eval_runtime": 54.8545,
"eval_samples_per_second": 9.115,
"eval_steps_per_second": 0.292,
"step": 9800
},
{
"epoch": 2.6556578234975636,
"grad_norm": 0.24726568162441254,
"learning_rate": 5.7912948461118194e-05,
"loss": 0.1037,
"step": 9810
},
{
"epoch": 2.6583649160801297,
"grad_norm": 0.1900894194841385,
"learning_rate": 5.7457657985794934e-05,
"loss": 0.0859,
"step": 9820
},
{
"epoch": 2.661072008662696,
"grad_norm": 0.15850365161895752,
"learning_rate": 5.700236751047169e-05,
"loss": 0.104,
"step": 9830
},
{
"epoch": 2.6637791012452627,
"grad_norm": 0.3096344769001007,
"learning_rate": 5.654707703514843e-05,
"loss": 0.1046,
"step": 9840
},
{
"epoch": 2.666486193827829,
"grad_norm": 0.16559633612632751,
"learning_rate": 5.609178655982517e-05,
"loss": 0.103,
"step": 9850
},
{
"epoch": 2.6691932864103953,
"grad_norm": 0.19194342195987701,
"learning_rate": 5.5636496084501915e-05,
"loss": 0.0991,
"step": 9860
},
{
"epoch": 2.6719003789929614,
"grad_norm": 0.18227119743824005,
"learning_rate": 5.5181205609178655e-05,
"loss": 0.0961,
"step": 9870
},
{
"epoch": 2.674607471575528,
"grad_norm": 0.2713763117790222,
"learning_rate": 5.47259151338554e-05,
"loss": 0.1147,
"step": 9880
},
{
"epoch": 2.6773145641580944,
"grad_norm": 0.36927032470703125,
"learning_rate": 5.427062465853214e-05,
"loss": 0.1079,
"step": 9890
},
{
"epoch": 2.6800216567406605,
"grad_norm": 0.13315054774284363,
"learning_rate": 5.381533418320889e-05,
"loss": 0.1017,
"step": 9900
},
{
"epoch": 2.6800216567406605,
"eval_loss": 0.15056326985359192,
"eval_runtime": 51.794,
"eval_samples_per_second": 9.654,
"eval_steps_per_second": 0.309,
"step": 9900
},
{
"epoch": 2.6827287493232266,
"grad_norm": 0.24048204720020294,
"learning_rate": 5.336004370788563e-05,
"loss": 0.1147,
"step": 9910
},
{
"epoch": 2.685435841905793,
"grad_norm": 0.20163370668888092,
"learning_rate": 5.2904753232562376e-05,
"loss": 0.0801,
"step": 9920
},
{
"epoch": 2.6881429344883596,
"grad_norm": 0.26496630907058716,
"learning_rate": 5.244946275723912e-05,
"loss": 0.0981,
"step": 9930
},
{
"epoch": 2.6908500270709257,
"grad_norm": 0.176680788397789,
"learning_rate": 5.199417228191586e-05,
"loss": 0.0975,
"step": 9940
},
{
"epoch": 2.693557119653492,
"grad_norm": 0.2778788208961487,
"learning_rate": 5.153888180659261e-05,
"loss": 0.0963,
"step": 9950
},
{
"epoch": 2.6962642122360583,
"grad_norm": 0.31696370244026184,
"learning_rate": 5.108359133126935e-05,
"loss": 0.0943,
"step": 9960
},
{
"epoch": 2.698971304818625,
"grad_norm": 0.16855411231517792,
"learning_rate": 5.0628300855946096e-05,
"loss": 0.1076,
"step": 9970
},
{
"epoch": 2.7016783974011913,
"grad_norm": 0.1627056747674942,
"learning_rate": 5.0173010380622837e-05,
"loss": 0.1268,
"step": 9980
},
{
"epoch": 2.7043854899837574,
"grad_norm": 0.4440973103046417,
"learning_rate": 4.971771990529958e-05,
"loss": 0.1233,
"step": 9990
},
{
"epoch": 2.707092582566324,
"grad_norm": 0.11012829840183258,
"learning_rate": 4.9262429429976323e-05,
"loss": 0.1123,
"step": 10000
},
{
"epoch": 2.707092582566324,
"eval_loss": 0.14893101155757904,
"eval_runtime": 53.2999,
"eval_samples_per_second": 9.381,
"eval_steps_per_second": 0.3,
"step": 10000
},
{
"epoch": 2.70979967514889,
"grad_norm": 0.2912381589412689,
"learning_rate": 4.880713895465307e-05,
"loss": 0.1033,
"step": 10010
},
{
"epoch": 2.7125067677314565,
"grad_norm": 0.15744605660438538,
"learning_rate": 4.835184847932982e-05,
"loss": 0.1073,
"step": 10020
},
{
"epoch": 2.715213860314023,
"grad_norm": 0.1352146863937378,
"learning_rate": 4.789655800400656e-05,
"loss": 0.0889,
"step": 10030
},
{
"epoch": 2.717920952896589,
"grad_norm": 0.30048781633377075,
"learning_rate": 4.7441267528683304e-05,
"loss": 0.1009,
"step": 10040
},
{
"epoch": 2.720628045479155,
"grad_norm": 0.21028059720993042,
"learning_rate": 4.6985977053360044e-05,
"loss": 0.1017,
"step": 10050
},
{
"epoch": 2.7233351380617217,
"grad_norm": 0.3342764973640442,
"learning_rate": 4.653068657803679e-05,
"loss": 0.1127,
"step": 10060
},
{
"epoch": 2.726042230644288,
"grad_norm": 0.23139092326164246,
"learning_rate": 4.607539610271353e-05,
"loss": 0.1009,
"step": 10070
},
{
"epoch": 2.7287493232268543,
"grad_norm": 0.18197013437747955,
"learning_rate": 4.562010562739027e-05,
"loss": 0.0933,
"step": 10080
},
{
"epoch": 2.731456415809421,
"grad_norm": 0.1409190595149994,
"learning_rate": 4.516481515206702e-05,
"loss": 0.0856,
"step": 10090
},
{
"epoch": 2.734163508391987,
"grad_norm": 0.1978885680437088,
"learning_rate": 4.4709524676743765e-05,
"loss": 0.0888,
"step": 10100
},
{
"epoch": 2.734163508391987,
"eval_loss": 0.15170806646347046,
"eval_runtime": 53.4463,
"eval_samples_per_second": 9.355,
"eval_steps_per_second": 0.299,
"step": 10100
},
{
"epoch": 2.7368706009745534,
"grad_norm": 0.18989257514476776,
"learning_rate": 4.425423420142051e-05,
"loss": 0.106,
"step": 10110
},
{
"epoch": 2.73957769355712,
"grad_norm": 0.15426640212535858,
"learning_rate": 4.379894372609725e-05,
"loss": 0.0973,
"step": 10120
},
{
"epoch": 2.742284786139686,
"grad_norm": 0.17095568776130676,
"learning_rate": 4.3343653250774e-05,
"loss": 0.0904,
"step": 10130
},
{
"epoch": 2.744991878722252,
"grad_norm": 0.19205018877983093,
"learning_rate": 4.288836277545074e-05,
"loss": 0.0936,
"step": 10140
},
{
"epoch": 2.7476989713048185,
"grad_norm": 0.20674671232700348,
"learning_rate": 4.243307230012748e-05,
"loss": 0.1101,
"step": 10150
},
{
"epoch": 2.750406063887385,
"grad_norm": 0.1812015026807785,
"learning_rate": 4.1977781824804226e-05,
"loss": 0.1288,
"step": 10160
},
{
"epoch": 2.753113156469951,
"grad_norm": 0.28594598174095154,
"learning_rate": 4.1522491349480966e-05,
"loss": 0.0758,
"step": 10170
},
{
"epoch": 2.7558202490525177,
"grad_norm": 0.4622754752635956,
"learning_rate": 4.106720087415771e-05,
"loss": 0.1008,
"step": 10180
},
{
"epoch": 2.7585273416350837,
"grad_norm": 0.32906344532966614,
"learning_rate": 4.061191039883446e-05,
"loss": 0.1031,
"step": 10190
},
{
"epoch": 2.7612344342176502,
"grad_norm": 0.131916344165802,
"learning_rate": 4.0156619923511206e-05,
"loss": 0.0938,
"step": 10200
},
{
"epoch": 2.7612344342176502,
"eval_loss": 0.14903075993061066,
"eval_runtime": 53.7121,
"eval_samples_per_second": 9.309,
"eval_steps_per_second": 0.298,
"step": 10200
},
{
"epoch": 2.7639415268002168,
"grad_norm": 0.14141401648521423,
"learning_rate": 3.9701329448187946e-05,
"loss": 0.1089,
"step": 10210
},
{
"epoch": 2.766648619382783,
"grad_norm": 0.20000173151493073,
"learning_rate": 3.9246038972864686e-05,
"loss": 0.0864,
"step": 10220
},
{
"epoch": 2.7693557119653494,
"grad_norm": 0.5456509590148926,
"learning_rate": 3.879074849754143e-05,
"loss": 0.0859,
"step": 10230
},
{
"epoch": 2.7720628045479154,
"grad_norm": 0.27678969502449036,
"learning_rate": 3.833545802221817e-05,
"loss": 0.1197,
"step": 10240
},
{
"epoch": 2.774769897130482,
"grad_norm": 0.253469318151474,
"learning_rate": 3.788016754689492e-05,
"loss": 0.0762,
"step": 10250
},
{
"epoch": 2.7774769897130485,
"grad_norm": 0.1845778226852417,
"learning_rate": 3.742487707157166e-05,
"loss": 0.1043,
"step": 10260
},
{
"epoch": 2.7801840822956145,
"grad_norm": 0.27909618616104126,
"learning_rate": 3.696958659624841e-05,
"loss": 0.1042,
"step": 10270
},
{
"epoch": 2.7828911748781806,
"grad_norm": 0.39091622829437256,
"learning_rate": 3.6514296120925154e-05,
"loss": 0.1029,
"step": 10280
},
{
"epoch": 2.785598267460747,
"grad_norm": 0.38871899247169495,
"learning_rate": 3.6059005645601894e-05,
"loss": 0.125,
"step": 10290
},
{
"epoch": 2.7883053600433136,
"grad_norm": 0.1890731155872345,
"learning_rate": 3.560371517027864e-05,
"loss": 0.1166,
"step": 10300
},
{
"epoch": 2.7883053600433136,
"eval_loss": 0.14813275635242462,
"eval_runtime": 53.2518,
"eval_samples_per_second": 9.389,
"eval_steps_per_second": 0.3,
"step": 10300
},
{
"epoch": 2.7910124526258797,
"grad_norm": 0.24631626904010773,
"learning_rate": 3.514842469495538e-05,
"loss": 0.1058,
"step": 10310
},
{
"epoch": 2.7937195452084462,
"grad_norm": 0.23233796656131744,
"learning_rate": 3.469313421963213e-05,
"loss": 0.0929,
"step": 10320
},
{
"epoch": 2.7964266377910123,
"grad_norm": 0.17788082361221313,
"learning_rate": 3.423784374430887e-05,
"loss": 0.1115,
"step": 10330
},
{
"epoch": 2.799133730373579,
"grad_norm": 0.10910706222057343,
"learning_rate": 3.3782553268985615e-05,
"loss": 0.122,
"step": 10340
},
{
"epoch": 2.8018408229561453,
"grad_norm": 0.37555086612701416,
"learning_rate": 3.3327262793662355e-05,
"loss": 0.1072,
"step": 10350
},
{
"epoch": 2.8045479155387114,
"grad_norm": 0.057445164769887924,
"learning_rate": 3.28719723183391e-05,
"loss": 0.0953,
"step": 10360
},
{
"epoch": 2.8072550081212775,
"grad_norm": 0.16641780734062195,
"learning_rate": 3.241668184301585e-05,
"loss": 0.1032,
"step": 10370
},
{
"epoch": 2.809962100703844,
"grad_norm": 0.29210391640663147,
"learning_rate": 3.196139136769259e-05,
"loss": 0.0918,
"step": 10380
},
{
"epoch": 2.8126691932864105,
"grad_norm": 0.23352032899856567,
"learning_rate": 3.1506100892369335e-05,
"loss": 0.0713,
"step": 10390
},
{
"epoch": 2.8153762858689766,
"grad_norm": 0.1475362628698349,
"learning_rate": 3.1050810417046075e-05,
"loss": 0.0904,
"step": 10400
},
{
"epoch": 2.8153762858689766,
"eval_loss": 0.1505148857831955,
"eval_runtime": 52.203,
"eval_samples_per_second": 9.578,
"eval_steps_per_second": 0.306,
"step": 10400
},
{
"epoch": 2.818083378451543,
"grad_norm": 0.4008180797100067,
"learning_rate": 3.059551994172282e-05,
"loss": 0.1166,
"step": 10410
},
{
"epoch": 2.820790471034109,
"grad_norm": 0.11473873257637024,
"learning_rate": 3.0140229466399562e-05,
"loss": 0.0805,
"step": 10420
},
{
"epoch": 2.8234975636166757,
"grad_norm": 0.24119402468204498,
"learning_rate": 2.9684938991076306e-05,
"loss": 0.1163,
"step": 10430
},
{
"epoch": 2.826204656199242,
"grad_norm": 0.37018585205078125,
"learning_rate": 2.9229648515753052e-05,
"loss": 0.1032,
"step": 10440
},
{
"epoch": 2.8289117487818083,
"grad_norm": 0.29082244634628296,
"learning_rate": 2.8774358040429796e-05,
"loss": 0.0882,
"step": 10450
},
{
"epoch": 2.831618841364375,
"grad_norm": 0.18865616619586945,
"learning_rate": 2.831906756510654e-05,
"loss": 0.1011,
"step": 10460
},
{
"epoch": 2.834325933946941,
"grad_norm": 0.1716868132352829,
"learning_rate": 2.7863777089783283e-05,
"loss": 0.0936,
"step": 10470
},
{
"epoch": 2.8370330265295074,
"grad_norm": 0.15654022991657257,
"learning_rate": 2.7408486614460023e-05,
"loss": 0.0926,
"step": 10480
},
{
"epoch": 2.8397401191120735,
"grad_norm": 0.31513598561286926,
"learning_rate": 2.695319613913677e-05,
"loss": 0.1046,
"step": 10490
},
{
"epoch": 2.84244721169464,
"grad_norm": 0.32550135254859924,
"learning_rate": 2.6497905663813513e-05,
"loss": 0.1064,
"step": 10500
},
{
"epoch": 2.84244721169464,
"eval_loss": 0.14921054244041443,
"eval_runtime": 53.2325,
"eval_samples_per_second": 9.393,
"eval_steps_per_second": 0.301,
"step": 10500
},
{
"epoch": 2.845154304277206,
"grad_norm": 0.1897037923336029,
"learning_rate": 2.6042615188490257e-05,
"loss": 0.1141,
"step": 10510
},
{
"epoch": 2.8478613968597726,
"grad_norm": 0.21038039028644562,
"learning_rate": 2.5587324713167e-05,
"loss": 0.1005,
"step": 10520
},
{
"epoch": 2.850568489442339,
"grad_norm": 0.32643935084342957,
"learning_rate": 2.5132034237843747e-05,
"loss": 0.0984,
"step": 10530
},
{
"epoch": 2.853275582024905,
"grad_norm": 0.1864829808473587,
"learning_rate": 2.467674376252049e-05,
"loss": 0.0901,
"step": 10540
},
{
"epoch": 2.8559826746074717,
"grad_norm": 0.2076212465763092,
"learning_rate": 2.4221453287197234e-05,
"loss": 0.0794,
"step": 10550
},
{
"epoch": 2.8586897671900378,
"grad_norm": 0.26182305812835693,
"learning_rate": 2.3766162811873974e-05,
"loss": 0.0986,
"step": 10560
},
{
"epoch": 2.8613968597726043,
"grad_norm": 0.42842212319374084,
"learning_rate": 2.3310872336550717e-05,
"loss": 0.0984,
"step": 10570
},
{
"epoch": 2.864103952355171,
"grad_norm": 0.3149246871471405,
"learning_rate": 2.2855581861227464e-05,
"loss": 0.1205,
"step": 10580
},
{
"epoch": 2.866811044937737,
"grad_norm": 0.3597836494445801,
"learning_rate": 2.2400291385904208e-05,
"loss": 0.1264,
"step": 10590
},
{
"epoch": 2.869518137520303,
"grad_norm": 0.2727559208869934,
"learning_rate": 2.194500091058095e-05,
"loss": 0.1158,
"step": 10600
},
{
"epoch": 2.869518137520303,
"eval_loss": 0.1474287360906601,
"eval_runtime": 52.781,
"eval_samples_per_second": 9.473,
"eval_steps_per_second": 0.303,
"step": 10600
},
{
"epoch": 2.8722252301028695,
"grad_norm": 0.22489579021930695,
"learning_rate": 2.1489710435257695e-05,
"loss": 0.0899,
"step": 10610
},
{
"epoch": 2.874932322685436,
"grad_norm": 0.21127289533615112,
"learning_rate": 2.103441995993444e-05,
"loss": 0.1077,
"step": 10620
},
{
"epoch": 2.877639415268002,
"grad_norm": 0.27591705322265625,
"learning_rate": 2.057912948461118e-05,
"loss": 0.0933,
"step": 10630
},
{
"epoch": 2.8803465078505686,
"grad_norm": 0.3215999901294708,
"learning_rate": 2.0123839009287925e-05,
"loss": 0.1045,
"step": 10640
},
{
"epoch": 2.8830536004331346,
"grad_norm": 0.2435692399740219,
"learning_rate": 1.966854853396467e-05,
"loss": 0.0967,
"step": 10650
},
{
"epoch": 2.885760693015701,
"grad_norm": 0.2958237826824188,
"learning_rate": 1.9213258058641412e-05,
"loss": 0.0843,
"step": 10660
},
{
"epoch": 2.8884677855982677,
"grad_norm": 0.4848542809486389,
"learning_rate": 1.875796758331816e-05,
"loss": 0.1119,
"step": 10670
},
{
"epoch": 2.8911748781808337,
"grad_norm": 0.09771529585123062,
"learning_rate": 1.8302677107994902e-05,
"loss": 0.0851,
"step": 10680
},
{
"epoch": 2.8938819707634003,
"grad_norm": 0.30730119347572327,
"learning_rate": 1.7847386632671646e-05,
"loss": 0.0978,
"step": 10690
},
{
"epoch": 2.8965890633459663,
"grad_norm": 0.16735531389713287,
"learning_rate": 1.7392096157348386e-05,
"loss": 0.1139,
"step": 10700
},
{
"epoch": 2.8965890633459663,
"eval_loss": 0.14749093353748322,
"eval_runtime": 52.8044,
"eval_samples_per_second": 9.469,
"eval_steps_per_second": 0.303,
"step": 10700
},
{
"epoch": 2.899296155928533,
"grad_norm": 0.18200255930423737,
"learning_rate": 1.6936805682025133e-05,
"loss": 0.1289,
"step": 10710
},
{
"epoch": 2.902003248511099,
"grad_norm": 0.2688741683959961,
"learning_rate": 1.6481515206701876e-05,
"loss": 0.077,
"step": 10720
},
{
"epoch": 2.9047103410936654,
"grad_norm": 0.34718960523605347,
"learning_rate": 1.602622473137862e-05,
"loss": 0.136,
"step": 10730
},
{
"epoch": 2.9074174336762315,
"grad_norm": 0.21658600866794586,
"learning_rate": 1.5570934256055363e-05,
"loss": 0.1305,
"step": 10740
},
{
"epoch": 2.910124526258798,
"grad_norm": 0.31314098834991455,
"learning_rate": 1.5115643780732108e-05,
"loss": 0.1157,
"step": 10750
},
{
"epoch": 2.9128316188413645,
"grad_norm": 0.10926242917776108,
"learning_rate": 1.466035330540885e-05,
"loss": 0.096,
"step": 10760
},
{
"epoch": 2.9155387114239306,
"grad_norm": 0.3475625216960907,
"learning_rate": 1.4205062830085595e-05,
"loss": 0.1047,
"step": 10770
},
{
"epoch": 2.918245804006497,
"grad_norm": 0.2349124401807785,
"learning_rate": 1.3749772354762338e-05,
"loss": 0.1017,
"step": 10780
},
{
"epoch": 2.920952896589063,
"grad_norm": 0.29219192266464233,
"learning_rate": 1.3294481879439084e-05,
"loss": 0.1327,
"step": 10790
},
{
"epoch": 2.9236599891716297,
"grad_norm": 0.26028120517730713,
"learning_rate": 1.2839191404115825e-05,
"loss": 0.0921,
"step": 10800
},
{
"epoch": 2.9236599891716297,
"eval_loss": 0.1457306444644928,
"eval_runtime": 53.5264,
"eval_samples_per_second": 9.341,
"eval_steps_per_second": 0.299,
"step": 10800
},
{
"epoch": 2.9263670817541962,
"grad_norm": 0.2346615195274353,
"learning_rate": 1.238390092879257e-05,
"loss": 0.1017,
"step": 10810
},
{
"epoch": 2.9290741743367623,
"grad_norm": 0.1553301364183426,
"learning_rate": 1.1928610453469314e-05,
"loss": 0.1024,
"step": 10820
},
{
"epoch": 2.9317812669193284,
"grad_norm": 0.18650998175144196,
"learning_rate": 1.1473319978146057e-05,
"loss": 0.1278,
"step": 10830
},
{
"epoch": 2.934488359501895,
"grad_norm": 0.3397215008735657,
"learning_rate": 1.1018029502822801e-05,
"loss": 0.1161,
"step": 10840
},
{
"epoch": 2.9371954520844614,
"grad_norm": 0.3774276077747345,
"learning_rate": 1.0562739027499544e-05,
"loss": 0.1111,
"step": 10850
},
{
"epoch": 2.9399025446670275,
"grad_norm": 0.06833741813898087,
"learning_rate": 1.010744855217629e-05,
"loss": 0.1067,
"step": 10860
},
{
"epoch": 2.942609637249594,
"grad_norm": 0.11081808060407639,
"learning_rate": 9.652158076853031e-06,
"loss": 0.1156,
"step": 10870
},
{
"epoch": 2.94531672983216,
"grad_norm": 0.21258531510829926,
"learning_rate": 9.196867601529776e-06,
"loss": 0.1178,
"step": 10880
},
{
"epoch": 2.9480238224147266,
"grad_norm": 0.4802851676940918,
"learning_rate": 8.74157712620652e-06,
"loss": 0.1103,
"step": 10890
},
{
"epoch": 2.950730914997293,
"grad_norm": 0.3009449541568756,
"learning_rate": 8.286286650883265e-06,
"loss": 0.0918,
"step": 10900
},
{
"epoch": 2.950730914997293,
"eval_loss": 0.1471521258354187,
"eval_runtime": 52.5879,
"eval_samples_per_second": 9.508,
"eval_steps_per_second": 0.304,
"step": 10900
},
{
"epoch": 2.953438007579859,
"grad_norm": 0.19883263111114502,
"learning_rate": 7.830996175560007e-06,
"loss": 0.0901,
"step": 10910
},
{
"epoch": 2.9561451001624257,
"grad_norm": 0.16847321391105652,
"learning_rate": 7.375705700236751e-06,
"loss": 0.1249,
"step": 10920
},
{
"epoch": 2.958852192744992,
"grad_norm": 0.4061388373374939,
"learning_rate": 6.920415224913495e-06,
"loss": 0.1278,
"step": 10930
},
{
"epoch": 2.9615592853275583,
"grad_norm": 0.22170685231685638,
"learning_rate": 6.465124749590239e-06,
"loss": 0.0946,
"step": 10940
},
{
"epoch": 2.9642663779101244,
"grad_norm": 0.41052505373954773,
"learning_rate": 6.009834274266983e-06,
"loss": 0.1146,
"step": 10950
},
{
"epoch": 2.966973470492691,
"grad_norm": 0.3749537765979767,
"learning_rate": 5.554543798943726e-06,
"loss": 0.1297,
"step": 10960
},
{
"epoch": 2.969680563075257,
"grad_norm": 0.13162532448768616,
"learning_rate": 5.09925332362047e-06,
"loss": 0.1492,
"step": 10970
},
{
"epoch": 2.9723876556578235,
"grad_norm": 0.32849496603012085,
"learning_rate": 4.6439628482972135e-06,
"loss": 0.1038,
"step": 10980
},
{
"epoch": 2.97509474824039,
"grad_norm": 0.15441325306892395,
"learning_rate": 4.188672372973958e-06,
"loss": 0.0905,
"step": 10990
},
{
"epoch": 2.977801840822956,
"grad_norm": 0.14825201034545898,
"learning_rate": 3.7333818976507013e-06,
"loss": 0.0941,
"step": 11000
},
{
"epoch": 2.977801840822956,
"eval_loss": 0.1465151160955429,
"eval_runtime": 51.5309,
"eval_samples_per_second": 9.703,
"eval_steps_per_second": 0.31,
"step": 11000
},
{
"epoch": 2.9805089334055226,
"grad_norm": 0.5358628630638123,
"learning_rate": 3.278091422327445e-06,
"loss": 0.118,
"step": 11010
},
{
"epoch": 2.9832160259880887,
"grad_norm": 0.27287015318870544,
"learning_rate": 2.8228009470041886e-06,
"loss": 0.1072,
"step": 11020
},
{
"epoch": 2.985923118570655,
"grad_norm": 0.17518705129623413,
"learning_rate": 2.3675104716809325e-06,
"loss": 0.1033,
"step": 11030
},
{
"epoch": 2.9886302111532217,
"grad_norm": 0.27911001443862915,
"learning_rate": 1.9122199963576764e-06,
"loss": 0.0967,
"step": 11040
},
{
"epoch": 2.9913373037357878,
"grad_norm": 0.17551441490650177,
"learning_rate": 1.4569295210344198e-06,
"loss": 0.1081,
"step": 11050
},
{
"epoch": 2.994044396318354,
"grad_norm": 0.2103782594203949,
"learning_rate": 1.0016390457111637e-06,
"loss": 0.1113,
"step": 11060
},
{
"epoch": 2.9967514889009204,
"grad_norm": 0.13261333107948303,
"learning_rate": 5.463485703879075e-07,
"loss": 0.0912,
"step": 11070
},
{
"epoch": 2.999458581483487,
"grad_norm": 0.20483067631721497,
"learning_rate": 9.105809506465124e-08,
"loss": 0.0981,
"step": 11080
}
],
"logging_steps": 10,
"max_steps": 11082,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.041237140368352e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}