Training in progress, step 4800, checkpoint
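The JSON below is the Transformers trainer_state.json saved with this checkpoint. As a quick reference for consuming it, here is a minimal sketch (not part of the checkpoint itself) that assumes the file has been downloaded locally as trainer_state.json; it splits log_history into training-loss and eval-loss entries and prints the best and latest metrics.

# Minimal sketch, assuming trainer_state.json from this checkpoint is in the
# current directory; adjust the path to wherever the checkpoint was downloaded.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss" (logged every 10 steps here);
# evaluation entries carry "eval_loss" (logged every 150 steps).
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
print(f"latest train loss {train_log[-1]['loss']:.4f} at step {train_log[-1]['step']}")
print(f"latest eval loss {eval_log[-1]['eval_loss']:.4f} at step {eval_log[-1]['step']}")

The keys used above (log_history, loss, eval_loss, best_metric, best_model_checkpoint, step) all appear in the file below.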
{
"best_metric": 1.2978554964065552,
"best_model_checkpoint": "./output/checkpoint-3900",
"epoch": 2.3255813953488373,
"eval_steps": 150,
"global_step": 4800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0048449612403100775,
"grad_norm": 33.16083526611328,
"learning_rate": 1.25e-05,
"loss": 3.9099,
"step": 10
},
{
"epoch": 0.009689922480620155,
"grad_norm": 14.680173873901367,
"learning_rate": 2.5e-05,
"loss": 2.7088,
"step": 20
},
{
"epoch": 0.014534883720930232,
"grad_norm": 11.869793891906738,
"learning_rate": 3.75e-05,
"loss": 1.8952,
"step": 30
},
{
"epoch": 0.01937984496124031,
"grad_norm": 9.641575813293457,
"learning_rate": 5e-05,
"loss": 1.6699,
"step": 40
},
{
"epoch": 0.02422480620155039,
"grad_norm": 8.119805335998535,
"learning_rate": 6.25e-05,
"loss": 1.6491,
"step": 50
},
{
"epoch": 0.029069767441860465,
"grad_norm": 8.050493240356445,
"learning_rate": 7.5e-05,
"loss": 1.5062,
"step": 60
},
{
"epoch": 0.03391472868217054,
"grad_norm": 7.072201251983643,
"learning_rate": 8.75e-05,
"loss": 1.535,
"step": 70
},
{
"epoch": 0.03875968992248062,
"grad_norm": 7.6121931076049805,
"learning_rate": 0.0001,
"loss": 1.4697,
"step": 80
},
{
"epoch": 0.0436046511627907,
"grad_norm": 6.530114650726318,
"learning_rate": 0.00011250000000000001,
"loss": 1.5404,
"step": 90
},
{
"epoch": 0.04844961240310078,
"grad_norm": 7.183197975158691,
"learning_rate": 0.000125,
"loss": 1.5182,
"step": 100
},
{
"epoch": 0.05329457364341085,
"grad_norm": 6.398809909820557,
"learning_rate": 0.00012499871543489787,
"loss": 1.4754,
"step": 110
},
{
"epoch": 0.05813953488372093,
"grad_norm": 5.547942638397217,
"learning_rate": 0.00012499486179239495,
"loss": 1.5071,
"step": 120
},
{
"epoch": 0.06298449612403101,
"grad_norm": 5.425671577453613,
"learning_rate": 0.00012498843923089938,
"loss": 1.4976,
"step": 130
},
{
"epoch": 0.06782945736434108,
"grad_norm": 5.802628993988037,
"learning_rate": 0.0001249794480144175,
"loss": 1.5023,
"step": 140
},
{
"epoch": 0.07267441860465117,
"grad_norm": 6.187611103057861,
"learning_rate": 0.000124967888512543,
"loss": 1.4752,
"step": 150
},
{
"epoch": 0.07267441860465117,
"eval_loss": 1.4769259691238403,
"eval_runtime": 34.5162,
"eval_samples_per_second": 14.486,
"eval_steps_per_second": 14.486,
"step": 150
},
{
"epoch": 0.07751937984496124,
"grad_norm": 5.855692386627197,
"learning_rate": 0.00012495376120044173,
"loss": 1.4765,
"step": 160
},
{
"epoch": 0.08236434108527131,
"grad_norm": 5.552834510803223,
"learning_rate": 0.00012493706665883217,
"loss": 1.4861,
"step": 170
},
{
"epoch": 0.0872093023255814,
"grad_norm": 5.825935363769531,
"learning_rate": 0.00012491780557396154,
"loss": 1.5161,
"step": 180
},
{
"epoch": 0.09205426356589147,
"grad_norm": 5.047133922576904,
"learning_rate": 0.00012489597873757756,
"loss": 1.4709,
"step": 190
},
{
"epoch": 0.09689922480620156,
"grad_norm": 5.497174263000488,
"learning_rate": 0.00012487158704689602,
"loss": 1.5179,
"step": 200
},
{
"epoch": 0.10174418604651163,
"grad_norm": 5.359213352203369,
"learning_rate": 0.0001248446315045638,
"loss": 1.4926,
"step": 210
},
{
"epoch": 0.1065891472868217,
"grad_norm": 4.8698410987854,
"learning_rate": 0.00012481511321861763,
"loss": 1.4917,
"step": 220
},
{
"epoch": 0.11143410852713179,
"grad_norm": 5.268365383148193,
"learning_rate": 0.00012478303340243864,
"loss": 1.4716,
"step": 230
},
{
"epoch": 0.11627906976744186,
"grad_norm": 5.6616129875183105,
"learning_rate": 0.00012474839337470246,
"loss": 1.4198,
"step": 240
},
{
"epoch": 0.12112403100775193,
"grad_norm": 4.87906551361084,
"learning_rate": 0.0001247111945593249,
"loss": 1.4401,
"step": 250
},
{
"epoch": 0.12596899224806202,
"grad_norm": 5.170640468597412,
"learning_rate": 0.00012467143848540359,
"loss": 1.4238,
"step": 260
},
{
"epoch": 0.1308139534883721,
"grad_norm": 5.322551727294922,
"learning_rate": 0.000124629126787155,
"loss": 1.4508,
"step": 270
},
{
"epoch": 0.13565891472868216,
"grad_norm": 5.429434776306152,
"learning_rate": 0.00012458426120384738,
"loss": 1.4353,
"step": 280
},
{
"epoch": 0.14050387596899225,
"grad_norm": 5.199410438537598,
"learning_rate": 0.00012453684357972906,
"loss": 1.461,
"step": 290
},
{
"epoch": 0.14534883720930233,
"grad_norm": 5.171191692352295,
"learning_rate": 0.00012448687586395289,
"loss": 1.4664,
"step": 300
},
{
"epoch": 0.14534883720930233,
"eval_loss": 1.4425313472747803,
"eval_runtime": 36.0012,
"eval_samples_per_second": 13.888,
"eval_steps_per_second": 13.888,
"step": 300
},
{
"epoch": 0.1501937984496124,
"grad_norm": 4.924413681030273,
"learning_rate": 0.00012443436011049593,
"loss": 1.4615,
"step": 310
},
{
"epoch": 0.15503875968992248,
"grad_norm": 4.954301834106445,
"learning_rate": 0.0001243792984780751,
"loss": 1.4509,
"step": 320
},
{
"epoch": 0.15988372093023256,
"grad_norm": 4.462923526763916,
"learning_rate": 0.00012432169323005853,
"loss": 1.4489,
"step": 330
},
{
"epoch": 0.16472868217054262,
"grad_norm": 4.328495502471924,
"learning_rate": 0.00012426154673437223,
"loss": 1.4734,
"step": 340
},
{
"epoch": 0.1695736434108527,
"grad_norm": 4.499818801879883,
"learning_rate": 0.00012419886146340314,
"loss": 1.4282,
"step": 350
},
{
"epoch": 0.1744186046511628,
"grad_norm": 4.927942276000977,
"learning_rate": 0.0001241336399938972,
"loss": 1.4036,
"step": 360
},
{
"epoch": 0.17926356589147288,
"grad_norm": 4.415456295013428,
"learning_rate": 0.00012406588500685355,
"loss": 1.4121,
"step": 370
},
{
"epoch": 0.18410852713178294,
"grad_norm": 4.326357841491699,
"learning_rate": 0.00012399559928741435,
"loss": 1.4743,
"step": 380
},
{
"epoch": 0.18895348837209303,
"grad_norm": 4.8075408935546875,
"learning_rate": 0.00012392278572475023,
"loss": 1.4212,
"step": 390
},
{
"epoch": 0.1937984496124031,
"grad_norm": 5.174283027648926,
"learning_rate": 0.0001238474473119416,
"loss": 1.4244,
"step": 400
},
{
"epoch": 0.19864341085271317,
"grad_norm": 5.053370952606201,
"learning_rate": 0.00012376958714585545,
"loss": 1.4763,
"step": 410
},
{
"epoch": 0.20348837209302326,
"grad_norm": 4.216630458831787,
"learning_rate": 0.0001236892084270183,
"loss": 1.4072,
"step": 420
},
{
"epoch": 0.20833333333333334,
"grad_norm": 4.96584939956665,
"learning_rate": 0.00012360631445948448,
"loss": 1.3984,
"step": 430
},
{
"epoch": 0.2131782945736434,
"grad_norm": 4.817144393920898,
"learning_rate": 0.00012352090865070026,
"loss": 1.3884,
"step": 440
},
{
"epoch": 0.2180232558139535,
"grad_norm": 4.395285129547119,
"learning_rate": 0.00012343299451136397,
"loss": 1.4737,
"step": 450
},
{
"epoch": 0.2180232558139535,
"eval_loss": 1.437117576599121,
"eval_runtime": 34.379,
"eval_samples_per_second": 14.544,
"eval_steps_per_second": 14.544,
"step": 450
},
{
"epoch": 0.22286821705426357,
"grad_norm": 4.261348247528076,
"learning_rate": 0.00012334257565528155,
"loss": 1.4427,
"step": 460
},
{
"epoch": 0.22771317829457363,
"grad_norm": 4.141024589538574,
"learning_rate": 0.000123249655799218,
"loss": 1.345,
"step": 470
},
{
"epoch": 0.23255813953488372,
"grad_norm": 5.018428802490234,
"learning_rate": 0.00012315423876274468,
"loss": 1.3671,
"step": 480
},
{
"epoch": 0.2374031007751938,
"grad_norm": 4.942126274108887,
"learning_rate": 0.0001230563284680822,
"loss": 1.3863,
"step": 490
},
{
"epoch": 0.24224806201550386,
"grad_norm": 4.775027275085449,
"learning_rate": 0.00012295592893993935,
"loss": 1.3574,
"step": 500
},
{
"epoch": 0.24709302325581395,
"grad_norm": 4.757259845733643,
"learning_rate": 0.00012285304430534745,
"loss": 1.3904,
"step": 510
},
{
"epoch": 0.25193798449612403,
"grad_norm": 4.785523414611816,
"learning_rate": 0.00012274767879349083,
"loss": 1.4255,
"step": 520
},
{
"epoch": 0.2567829457364341,
"grad_norm": 4.766105651855469,
"learning_rate": 0.00012263983673553306,
"loss": 1.4344,
"step": 530
},
{
"epoch": 0.2616279069767442,
"grad_norm": 4.429969310760498,
"learning_rate": 0.0001225295225644387,
"loss": 1.3928,
"step": 540
},
{
"epoch": 0.26647286821705424,
"grad_norm": 4.636560440063477,
"learning_rate": 0.0001224167408147913,
"loss": 1.4041,
"step": 550
},
{
"epoch": 0.2713178294573643,
"grad_norm": 4.649174213409424,
"learning_rate": 0.0001223014961226068,
"loss": 1.4014,
"step": 560
},
{
"epoch": 0.2761627906976744,
"grad_norm": 4.894825458526611,
"learning_rate": 0.00012218379322514317,
"loss": 1.4221,
"step": 570
},
{
"epoch": 0.2810077519379845,
"grad_norm": 4.615547180175781,
"learning_rate": 0.00012206363696070545,
"loss": 1.4099,
"step": 580
},
{
"epoch": 0.2858527131782946,
"grad_norm": 5.573257923126221,
"learning_rate": 0.0001219410322684471,
"loss": 1.3943,
"step": 590
},
{
"epoch": 0.29069767441860467,
"grad_norm": 4.57981538772583,
"learning_rate": 0.0001218159841881668,
"loss": 1.356,
"step": 600
},
{
"epoch": 0.29069767441860467,
"eval_loss": 1.4054266214370728,
"eval_runtime": 34.2312,
"eval_samples_per_second": 14.607,
"eval_steps_per_second": 14.607,
"step": 600
},
{
"epoch": 0.29554263565891475,
"grad_norm": 4.497678756713867,
"learning_rate": 0.00012168849786010133,
"loss": 1.4294,
"step": 610
},
{
"epoch": 0.3003875968992248,
"grad_norm": 4.548286437988281,
"learning_rate": 0.00012155857852471433,
"loss": 1.3586,
"step": 620
},
{
"epoch": 0.30523255813953487,
"grad_norm": 4.280961036682129,
"learning_rate": 0.0001214262315224808,
"loss": 1.4012,
"step": 630
},
{
"epoch": 0.31007751937984496,
"grad_norm": 4.456599712371826,
"learning_rate": 0.00012129146229366766,
"loss": 1.3864,
"step": 640
},
{
"epoch": 0.31492248062015504,
"grad_norm": 4.1045026779174805,
"learning_rate": 0.00012115427637811003,
"loss": 1.3577,
"step": 650
},
{
"epoch": 0.31976744186046513,
"grad_norm": 4.406848907470703,
"learning_rate": 0.00012101467941498357,
"loss": 1.4146,
"step": 660
},
{
"epoch": 0.3246124031007752,
"grad_norm": 4.694105625152588,
"learning_rate": 0.0001208726771425727,
"loss": 1.4122,
"step": 670
},
{
"epoch": 0.32945736434108525,
"grad_norm": 4.289068222045898,
"learning_rate": 0.00012072827539803463,
"loss": 1.3991,
"step": 680
},
{
"epoch": 0.33430232558139533,
"grad_norm": 4.505423069000244,
"learning_rate": 0.00012058148011715949,
"loss": 1.3951,
"step": 690
},
{
"epoch": 0.3391472868217054,
"grad_norm": 4.226860523223877,
"learning_rate": 0.00012043229733412636,
"loss": 1.4685,
"step": 700
},
{
"epoch": 0.3439922480620155,
"grad_norm": 4.11680269241333,
"learning_rate": 0.0001202807331812551,
"loss": 1.3788,
"step": 710
},
{
"epoch": 0.3488372093023256,
"grad_norm": 3.9299204349517822,
"learning_rate": 0.00012012679388875441,
"loss": 1.3745,
"step": 720
},
{
"epoch": 0.3536821705426357,
"grad_norm": 4.7261128425598145,
"learning_rate": 0.00011997048578446568,
"loss": 1.4027,
"step": 730
},
{
"epoch": 0.35852713178294576,
"grad_norm": 4.087492942810059,
"learning_rate": 0.00011981181529360282,
"loss": 1.3779,
"step": 740
},
{
"epoch": 0.3633720930232558,
"grad_norm": 3.9707279205322266,
"learning_rate": 0.00011965078893848828,
"loss": 1.3969,
"step": 750
},
{
"epoch": 0.3633720930232558,
"eval_loss": 1.3779653310775757,
"eval_runtime": 35.15,
"eval_samples_per_second": 14.225,
"eval_steps_per_second": 14.225,
"step": 750
},
{
"epoch": 0.3682170542635659,
"grad_norm": 4.034614086151123,
"learning_rate": 0.00011948741333828481,
"loss": 1.3493,
"step": 760
},
{
"epoch": 0.37306201550387597,
"grad_norm": 4.590168476104736,
"learning_rate": 0.00011932169520872344,
"loss": 1.3591,
"step": 770
},
{
"epoch": 0.37790697674418605,
"grad_norm": 3.9696972370147705,
"learning_rate": 0.00011915364136182738,
"loss": 1.3723,
"step": 780
},
{
"epoch": 0.38275193798449614,
"grad_norm": 4.8090362548828125,
"learning_rate": 0.0001189832587056321,
"loss": 1.3493,
"step": 790
},
{
"epoch": 0.3875968992248062,
"grad_norm": 4.091393947601318,
"learning_rate": 0.00011881055424390119,
"loss": 1.3971,
"step": 800
},
{
"epoch": 0.39244186046511625,
"grad_norm": 4.403999328613281,
"learning_rate": 0.00011863553507583869,
"loss": 1.4213,
"step": 810
},
{
"epoch": 0.39728682170542634,
"grad_norm": 4.0738935470581055,
"learning_rate": 0.00011845820839579708,
"loss": 1.408,
"step": 820
},
{
"epoch": 0.4021317829457364,
"grad_norm": 4.7122273445129395,
"learning_rate": 0.00011827858149298162,
"loss": 1.373,
"step": 830
},
{
"epoch": 0.4069767441860465,
"grad_norm": 4.809484958648682,
"learning_rate": 0.00011809666175115075,
"loss": 1.3717,
"step": 840
},
{
"epoch": 0.4118217054263566,
"grad_norm": 4.254739284515381,
"learning_rate": 0.00011791245664831251,
"loss": 1.4034,
"step": 850
},
{
"epoch": 0.4166666666666667,
"grad_norm": 4.524000644683838,
"learning_rate": 0.0001177259737564172,
"loss": 1.4208,
"step": 860
},
{
"epoch": 0.42151162790697677,
"grad_norm": 4.406377792358398,
"learning_rate": 0.00011753722074104613,
"loss": 1.4435,
"step": 870
},
{
"epoch": 0.4263565891472868,
"grad_norm": 4.082479000091553,
"learning_rate": 0.00011734620536109644,
"loss": 1.4212,
"step": 880
},
{
"epoch": 0.4312015503875969,
"grad_norm": 4.172248363494873,
"learning_rate": 0.00011715293546846223,
"loss": 1.3903,
"step": 890
},
{
"epoch": 0.436046511627907,
"grad_norm": 3.921027898788452,
"learning_rate": 0.00011695741900771184,
"loss": 1.3914,
"step": 900
},
{
"epoch": 0.436046511627907,
"eval_loss": 1.3852204084396362,
"eval_runtime": 34.9476,
"eval_samples_per_second": 14.307,
"eval_steps_per_second": 14.307,
"step": 900
},
{
"epoch": 0.44089147286821706,
"grad_norm": 4.309497833251953,
"learning_rate": 0.00011675966401576116,
"loss": 1.3977,
"step": 910
},
{
"epoch": 0.44573643410852715,
"grad_norm": 4.024077415466309,
"learning_rate": 0.00011655967862154335,
"loss": 1.3745,
"step": 920
},
{
"epoch": 0.45058139534883723,
"grad_norm": 4.421174049377441,
"learning_rate": 0.0001163574710456747,
"loss": 1.3927,
"step": 930
},
{
"epoch": 0.45542635658914726,
"grad_norm": 4.4601569175720215,
"learning_rate": 0.00011615304960011663,
"loss": 1.3895,
"step": 940
},
{
"epoch": 0.46027131782945735,
"grad_norm": 4.016902923583984,
"learning_rate": 0.00011594642268783415,
"loss": 1.3343,
"step": 950
},
{
"epoch": 0.46511627906976744,
"grad_norm": 4.68607759475708,
"learning_rate": 0.00011573759880245027,
"loss": 1.4032,
"step": 960
},
{
"epoch": 0.4699612403100775,
"grad_norm": 4.8534979820251465,
"learning_rate": 0.00011552658652789703,
"loss": 1.39,
"step": 970
},
{
"epoch": 0.4748062015503876,
"grad_norm": 4.396952152252197,
"learning_rate": 0.00011531339453806258,
"loss": 1.3895,
"step": 980
},
{
"epoch": 0.4796511627906977,
"grad_norm": 4.624823093414307,
"learning_rate": 0.00011509803159643458,
"loss": 1.3998,
"step": 990
},
{
"epoch": 0.4844961240310077,
"grad_norm": 4.828771114349365,
"learning_rate": 0.00011488050655574003,
"loss": 1.3768,
"step": 1000
},
{
"epoch": 0.4893410852713178,
"grad_norm": 4.6348137855529785,
"learning_rate": 0.00011466082835758141,
"loss": 1.3654,
"step": 1010
},
{
"epoch": 0.4941860465116279,
"grad_norm": 4.274475574493408,
"learning_rate": 0.000114439006032069,
"loss": 1.3733,
"step": 1020
},
{
"epoch": 0.499031007751938,
"grad_norm": 4.305916786193848,
"learning_rate": 0.00011421504869744978,
"loss": 1.3546,
"step": 1030
},
{
"epoch": 0.5038759689922481,
"grad_norm": 3.968743085861206,
"learning_rate": 0.0001139889655597326,
"loss": 1.415,
"step": 1040
},
{
"epoch": 0.5087209302325582,
"grad_norm": 4.156125068664551,
"learning_rate": 0.00011376076591230974,
"loss": 1.3696,
"step": 1050
},
{
"epoch": 0.5087209302325582,
"eval_loss": 1.3692814111709595,
"eval_runtime": 34.9432,
"eval_samples_per_second": 14.309,
"eval_steps_per_second": 14.309,
"step": 1050
},
{
"epoch": 0.5135658914728682,
"grad_norm": 4.3611369132995605,
"learning_rate": 0.00011353045913557492,
"loss": 1.3592,
"step": 1060
},
{
"epoch": 0.5184108527131783,
"grad_norm": 4.626410007476807,
"learning_rate": 0.00011329805469653768,
"loss": 1.4033,
"step": 1070
},
{
"epoch": 0.5232558139534884,
"grad_norm": 4.445812702178955,
"learning_rate": 0.00011306356214843422,
"loss": 1.4163,
"step": 1080
},
{
"epoch": 0.5281007751937985,
"grad_norm": 4.1766252517700195,
"learning_rate": 0.00011282699113033477,
"loss": 1.3933,
"step": 1090
},
{
"epoch": 0.5329457364341085,
"grad_norm": 4.215488910675049,
"learning_rate": 0.00011258835136674729,
"loss": 1.4096,
"step": 1100
},
{
"epoch": 0.5377906976744186,
"grad_norm": 4.028621196746826,
"learning_rate": 0.00011234765266721778,
"loss": 1.3521,
"step": 1110
},
{
"epoch": 0.5426356589147286,
"grad_norm": 4.436669826507568,
"learning_rate": 0.00011210490492592703,
"loss": 1.3729,
"step": 1120
},
{
"epoch": 0.5474806201550387,
"grad_norm": 4.071181297302246,
"learning_rate": 0.0001118601181212839,
"loss": 1.3735,
"step": 1130
},
{
"epoch": 0.5523255813953488,
"grad_norm": 4.056524276733398,
"learning_rate": 0.00011161330231551515,
"loss": 1.3438,
"step": 1140
},
{
"epoch": 0.5571705426356589,
"grad_norm": 4.2103657722473145,
"learning_rate": 0.00011136446765425187,
"loss": 1.4084,
"step": 1150
},
{
"epoch": 0.562015503875969,
"grad_norm": 3.7534801959991455,
"learning_rate": 0.00011111362436611234,
"loss": 1.3764,
"step": 1160
},
{
"epoch": 0.5668604651162791,
"grad_norm": 4.244382858276367,
"learning_rate": 0.00011086078276228167,
"loss": 1.337,
"step": 1170
},
{
"epoch": 0.5717054263565892,
"grad_norm": 4.235080242156982,
"learning_rate": 0.00011060595323608789,
"loss": 1.4298,
"step": 1180
},
{
"epoch": 0.5765503875968992,
"grad_norm": 4.290858268737793,
"learning_rate": 0.00011034914626257467,
"loss": 1.3565,
"step": 1190
},
{
"epoch": 0.5813953488372093,
"grad_norm": 4.229033470153809,
"learning_rate": 0.0001100903723980709,
"loss": 1.3638,
"step": 1200
},
{
"epoch": 0.5813953488372093,
"eval_loss": 1.3742729425430298,
"eval_runtime": 34.3543,
"eval_samples_per_second": 14.554,
"eval_steps_per_second": 14.554,
"step": 1200
},
{
"epoch": 0.5862403100775194,
"grad_norm": 4.066321849822998,
"learning_rate": 0.00010982964227975658,
"loss": 1.3564,
"step": 1210
},
{
"epoch": 0.5910852713178295,
"grad_norm": 4.3556718826293945,
"learning_rate": 0.00010956696662522569,
"loss": 1.4185,
"step": 1220
},
{
"epoch": 0.5959302325581395,
"grad_norm": 4.408959865570068,
"learning_rate": 0.00010930235623204551,
"loss": 1.3674,
"step": 1230
},
{
"epoch": 0.6007751937984496,
"grad_norm": 3.7719295024871826,
"learning_rate": 0.00010903582197731294,
"loss": 1.3014,
"step": 1240
},
{
"epoch": 0.6056201550387597,
"grad_norm": 3.8520030975341797,
"learning_rate": 0.00010876737481720722,
"loss": 1.3433,
"step": 1250
},
{
"epoch": 0.6104651162790697,
"grad_norm": 4.325175762176514,
"learning_rate": 0.0001084970257865397,
"loss": 1.4005,
"step": 1260
},
{
"epoch": 0.6153100775193798,
"grad_norm": 3.8847227096557617,
"learning_rate": 0.00010822478599830008,
"loss": 1.4091,
"step": 1270
},
{
"epoch": 0.6201550387596899,
"grad_norm": 3.9878618717193604,
"learning_rate": 0.00010795066664319983,
"loss": 1.3832,
"step": 1280
},
{
"epoch": 0.625,
"grad_norm": 4.19332218170166,
"learning_rate": 0.00010767467898921197,
"loss": 1.3659,
"step": 1290
},
{
"epoch": 0.6298449612403101,
"grad_norm": 4.531820774078369,
"learning_rate": 0.00010739683438110797,
"loss": 1.3475,
"step": 1300
},
{
"epoch": 0.6346899224806202,
"grad_norm": 3.9032485485076904,
"learning_rate": 0.00010711714423999145,
"loss": 1.3598,
"step": 1310
},
{
"epoch": 0.6395348837209303,
"grad_norm": 4.083787441253662,
"learning_rate": 0.00010683562006282861,
"loss": 1.3636,
"step": 1320
},
{
"epoch": 0.6443798449612403,
"grad_norm": 4.280800819396973,
"learning_rate": 0.00010655227342197574,
"loss": 1.3343,
"step": 1330
},
{
"epoch": 0.6492248062015504,
"grad_norm": 4.18052864074707,
"learning_rate": 0.00010626711596470343,
"loss": 1.3454,
"step": 1340
},
{
"epoch": 0.6540697674418605,
"grad_norm": 4.2066121101379395,
"learning_rate": 0.0001059801594127179,
"loss": 1.3608,
"step": 1350
},
{
"epoch": 0.6540697674418605,
"eval_loss": 1.3640578985214233,
"eval_runtime": 35.1187,
"eval_samples_per_second": 14.237,
"eval_steps_per_second": 14.237,
"step": 1350
},
{
"epoch": 0.6589147286821705,
"grad_norm": 4.734892845153809,
"learning_rate": 0.00010569141556167905,
"loss": 1.3878,
"step": 1360
},
{
"epoch": 0.6637596899224806,
"grad_norm": 4.622696399688721,
"learning_rate": 0.00010540089628071566,
"loss": 1.3813,
"step": 1370
},
{
"epoch": 0.6686046511627907,
"grad_norm": 4.379586219787598,
"learning_rate": 0.00010510861351193747,
"loss": 1.338,
"step": 1380
},
{
"epoch": 0.6734496124031008,
"grad_norm": 4.084921836853027,
"learning_rate": 0.00010481457926994435,
"loss": 1.3616,
"step": 1390
},
{
"epoch": 0.6782945736434108,
"grad_norm": 4.132017612457275,
"learning_rate": 0.0001045188056413323,
"loss": 1.3917,
"step": 1400
},
{
"epoch": 0.6831395348837209,
"grad_norm": 4.240015506744385,
"learning_rate": 0.00010422130478419676,
"loss": 1.3694,
"step": 1410
},
{
"epoch": 0.687984496124031,
"grad_norm": 4.058047771453857,
"learning_rate": 0.00010392208892763269,
"loss": 1.3665,
"step": 1420
},
{
"epoch": 0.6928294573643411,
"grad_norm": 4.3433837890625,
"learning_rate": 0.00010362117037123204,
"loss": 1.3495,
"step": 1430
},
{
"epoch": 0.6976744186046512,
"grad_norm": 4.379434108734131,
"learning_rate": 0.00010331856148457803,
"loss": 1.3792,
"step": 1440
},
{
"epoch": 0.7025193798449613,
"grad_norm": 4.294039726257324,
"learning_rate": 0.00010301427470673678,
"loss": 1.3507,
"step": 1450
},
{
"epoch": 0.7073643410852714,
"grad_norm": 3.8750815391540527,
"learning_rate": 0.00010270832254574588,
"loss": 1.3536,
"step": 1460
},
{
"epoch": 0.7122093023255814,
"grad_norm": 4.3442182540893555,
"learning_rate": 0.00010240071757810036,
"loss": 1.3499,
"step": 1470
},
{
"epoch": 0.7170542635658915,
"grad_norm": 4.361018657684326,
"learning_rate": 0.00010209147244823564,
"loss": 1.3458,
"step": 1480
},
{
"epoch": 0.7218992248062015,
"grad_norm": 4.097925186157227,
"learning_rate": 0.00010178059986800773,
"loss": 1.3349,
"step": 1490
},
{
"epoch": 0.7267441860465116,
"grad_norm": 4.0469841957092285,
"learning_rate": 0.00010146811261617085,
"loss": 1.3563,
"step": 1500
},
{
"epoch": 0.7267441860465116,
"eval_loss": 1.3613923788070679,
"eval_runtime": 35.3447,
"eval_samples_per_second": 14.146,
"eval_steps_per_second": 14.146,
"step": 1500
},
{
"epoch": 0.7315891472868217,
"grad_norm": 4.677872180938721,
"learning_rate": 0.00010115402353785197,
"loss": 1.3768,
"step": 1510
},
{
"epoch": 0.7364341085271318,
"grad_norm": 4.105203628540039,
"learning_rate": 0.00010083834554402292,
"loss": 1.3539,
"step": 1520
},
{
"epoch": 0.7412790697674418,
"grad_norm": 4.328856468200684,
"learning_rate": 0.00010052109161096958,
"loss": 1.3719,
"step": 1530
},
{
"epoch": 0.7461240310077519,
"grad_norm": 3.782973051071167,
"learning_rate": 0.00010020227477975852,
"loss": 1.3682,
"step": 1540
},
{
"epoch": 0.750968992248062,
"grad_norm": 4.47057580947876,
"learning_rate": 9.9881908155701e-05,
"loss": 1.3408,
"step": 1550
},
{
"epoch": 0.7558139534883721,
"grad_norm": 4.271434307098389,
"learning_rate": 9.956000490781411e-05,
"loss": 1.3304,
"step": 1560
},
{
"epoch": 0.7606589147286822,
"grad_norm": 4.192820072174072,
"learning_rate": 9.923657826827957e-05,
"loss": 1.3259,
"step": 1570
},
{
"epoch": 0.7655038759689923,
"grad_norm": 3.813554286956787,
"learning_rate": 9.891164153189976e-05,
"loss": 1.3234,
"step": 1580
},
{
"epoch": 0.7703488372093024,
"grad_norm": 3.9348225593566895,
"learning_rate": 9.858520805555123e-05,
"loss": 1.3185,
"step": 1590
},
{
"epoch": 0.7751937984496124,
"grad_norm": 4.340898513793945,
"learning_rate": 9.825729125763561e-05,
"loss": 1.3518,
"step": 1600
},
{
"epoch": 0.7800387596899225,
"grad_norm": 4.097005844116211,
"learning_rate": 9.792790461752813e-05,
"loss": 1.3641,
"step": 1610
},
{
"epoch": 0.7848837209302325,
"grad_norm": 4.03990364074707,
"learning_rate": 9.759706167502343e-05,
"loss": 1.3761,
"step": 1620
},
{
"epoch": 0.7897286821705426,
"grad_norm": 4.171065330505371,
"learning_rate": 9.726477602977905e-05,
"loss": 1.326,
"step": 1630
},
{
"epoch": 0.7945736434108527,
"grad_norm": 3.930081605911255,
"learning_rate": 9.69310613407564e-05,
"loss": 1.3669,
"step": 1640
},
{
"epoch": 0.7994186046511628,
"grad_norm": 4.125776290893555,
"learning_rate": 9.659593132565929e-05,
"loss": 1.3572,
"step": 1650
},
{
"epoch": 0.7994186046511628,
"eval_loss": 1.3497689962387085,
"eval_runtime": 34.7157,
"eval_samples_per_second": 14.403,
"eval_steps_per_second": 14.403,
"step": 1650
},
{
"epoch": 0.8042635658914729,
"grad_norm": 4.123137950897217,
"learning_rate": 9.625939976037002e-05,
"loss": 1.3696,
"step": 1660
},
{
"epoch": 0.8091085271317829,
"grad_norm": 4.232200622558594,
"learning_rate": 9.59214804783831e-05,
"loss": 1.3692,
"step": 1670
},
{
"epoch": 0.813953488372093,
"grad_norm": 4.422992706298828,
"learning_rate": 9.558218737023671e-05,
"loss": 1.3037,
"step": 1680
},
{
"epoch": 0.8187984496124031,
"grad_norm": 4.27236795425415,
"learning_rate": 9.524153438294159e-05,
"loss": 1.3327,
"step": 1690
},
{
"epoch": 0.8236434108527132,
"grad_norm": 3.908616542816162,
"learning_rate": 9.489953551940783e-05,
"loss": 1.3689,
"step": 1700
},
{
"epoch": 0.8284883720930233,
"grad_norm": 3.9563255310058594,
"learning_rate": 9.455620483786914e-05,
"loss": 1.3409,
"step": 1710
},
{
"epoch": 0.8333333333333334,
"grad_norm": 4.521614074707031,
"learning_rate": 9.421155645130514e-05,
"loss": 1.3727,
"step": 1720
},
{
"epoch": 0.8381782945736435,
"grad_norm": 4.471999645233154,
"learning_rate": 9.38656045268611e-05,
"loss": 1.3606,
"step": 1730
},
{
"epoch": 0.8430232558139535,
"grad_norm": 3.7059531211853027,
"learning_rate": 9.351836328526563e-05,
"loss": 1.3842,
"step": 1740
},
{
"epoch": 0.8478682170542635,
"grad_norm": 4.375723838806152,
"learning_rate": 9.316984700024612e-05,
"loss": 1.3258,
"step": 1750
},
{
"epoch": 0.8527131782945736,
"grad_norm": 4.4300994873046875,
"learning_rate": 9.2820069997942e-05,
"loss": 1.3815,
"step": 1760
},
{
"epoch": 0.8575581395348837,
"grad_norm": 4.2471394538879395,
"learning_rate": 9.246904665631588e-05,
"loss": 1.3335,
"step": 1770
},
{
"epoch": 0.8624031007751938,
"grad_norm": 4.115731239318848,
"learning_rate": 9.211679140456242e-05,
"loss": 1.3587,
"step": 1780
},
{
"epoch": 0.8672480620155039,
"grad_norm": 4.372647762298584,
"learning_rate": 9.176331872251536e-05,
"loss": 1.3888,
"step": 1790
},
{
"epoch": 0.872093023255814,
"grad_norm": 3.9626224040985107,
"learning_rate": 9.140864314005222e-05,
"loss": 1.3868,
"step": 1800
},
{
"epoch": 0.872093023255814,
"eval_loss": 1.3441931009292603,
"eval_runtime": 34.1493,
"eval_samples_per_second": 14.642,
"eval_steps_per_second": 14.642,
"step": 1800
},
{
"epoch": 0.876937984496124,
"grad_norm": 4.511176586151123,
"learning_rate": 9.105277923649698e-05,
"loss": 1.3216,
"step": 1810
},
{
"epoch": 0.8817829457364341,
"grad_norm": 4.154355525970459,
"learning_rate": 9.06957416400209e-05,
"loss": 1.3409,
"step": 1820
},
{
"epoch": 0.8866279069767442,
"grad_norm": 4.167087078094482,
"learning_rate": 9.03375450270412e-05,
"loss": 1.3284,
"step": 1830
},
{
"epoch": 0.8914728682170543,
"grad_norm": 4.253835678100586,
"learning_rate": 8.997820412161764e-05,
"loss": 1.3555,
"step": 1840
},
{
"epoch": 0.8963178294573644,
"grad_norm": 4.122718334197998,
"learning_rate": 8.961773369484738e-05,
"loss": 1.3109,
"step": 1850
},
{
"epoch": 0.9011627906976745,
"grad_norm": 4.078066825866699,
"learning_rate": 8.925614856425786e-05,
"loss": 1.3114,
"step": 1860
},
{
"epoch": 0.9060077519379846,
"grad_norm": 4.012754917144775,
"learning_rate": 8.88934635931975e-05,
"loss": 1.3685,
"step": 1870
},
{
"epoch": 0.9108527131782945,
"grad_norm": 4.192358016967773,
"learning_rate": 8.852969369022494e-05,
"loss": 1.3869,
"step": 1880
},
{
"epoch": 0.9156976744186046,
"grad_norm": 4.08505916595459,
"learning_rate": 8.816485380849613e-05,
"loss": 1.3766,
"step": 1890
},
{
"epoch": 0.9205426356589147,
"grad_norm": 4.077611923217773,
"learning_rate": 8.779895894514961e-05,
"loss": 1.3243,
"step": 1900
},
{
"epoch": 0.9253875968992248,
"grad_norm": 4.200951099395752,
"learning_rate": 8.743202414069011e-05,
"loss": 1.364,
"step": 1910
},
{
"epoch": 0.9302325581395349,
"grad_norm": 3.8946659564971924,
"learning_rate": 8.706406447837023e-05,
"loss": 1.3445,
"step": 1920
},
{
"epoch": 0.935077519379845,
"grad_norm": 4.817138195037842,
"learning_rate": 8.669509508357052e-05,
"loss": 1.3452,
"step": 1930
},
{
"epoch": 0.939922480620155,
"grad_norm": 3.7811405658721924,
"learning_rate": 8.632513112317761e-05,
"loss": 1.2988,
"step": 1940
},
{
"epoch": 0.9447674418604651,
"grad_norm": 4.025355815887451,
"learning_rate": 8.59541878049609e-05,
"loss": 1.2931,
"step": 1950
},
{
"epoch": 0.9447674418604651,
"eval_loss": 1.342638373374939,
"eval_runtime": 34.8923,
"eval_samples_per_second": 14.33,
"eval_steps_per_second": 14.33,
"step": 1950
},
{
"epoch": 0.9496124031007752,
"grad_norm": 4.080333709716797,
"learning_rate": 8.558228037694728e-05,
"loss": 1.3605,
"step": 1960
},
{
"epoch": 0.9544573643410853,
"grad_norm": 4.330687046051025,
"learning_rate": 8.520942412679447e-05,
"loss": 1.358,
"step": 1970
},
{
"epoch": 0.9593023255813954,
"grad_norm": 3.8246426582336426,
"learning_rate": 8.483563438116257e-05,
"loss": 1.3954,
"step": 1980
},
{
"epoch": 0.9641472868217055,
"grad_norm": 4.163506984710693,
"learning_rate": 8.446092650508393e-05,
"loss": 1.3248,
"step": 1990
},
{
"epoch": 0.9689922480620154,
"grad_norm": 4.115441799163818,
"learning_rate": 8.408531590133172e-05,
"loss": 1.336,
"step": 2000
},
{
"epoch": 0.9738372093023255,
"grad_norm": 4.257992744445801,
"learning_rate": 8.370881800978673e-05,
"loss": 1.3192,
"step": 2010
},
{
"epoch": 0.9786821705426356,
"grad_norm": 4.327164649963379,
"learning_rate": 8.333144830680262e-05,
"loss": 1.3805,
"step": 2020
},
{
"epoch": 0.9835271317829457,
"grad_norm": 4.227909564971924,
"learning_rate": 8.29532223045698e-05,
"loss": 1.3663,
"step": 2030
},
{
"epoch": 0.9883720930232558,
"grad_norm": 4.192224502563477,
"learning_rate": 8.257415555047785e-05,
"loss": 1.2889,
"step": 2040
},
{
"epoch": 0.9932170542635659,
"grad_norm": 4.218623161315918,
"learning_rate": 8.21942636264763e-05,
"loss": 1.341,
"step": 2050
},
{
"epoch": 0.998062015503876,
"grad_norm": 4.5620503425598145,
"learning_rate": 8.181356214843422e-05,
"loss": 1.3425,
"step": 2060
},
{
"epoch": 1.002906976744186,
"grad_norm": 3.8904709815979004,
"learning_rate": 8.143206676549826e-05,
"loss": 1.2196,
"step": 2070
},
{
"epoch": 1.0077519379844961,
"grad_norm": 3.8977930545806885,
"learning_rate": 8.10497931594494e-05,
"loss": 1.1792,
"step": 2080
},
{
"epoch": 1.0125968992248062,
"grad_norm": 4.280106544494629,
"learning_rate": 8.066675704405836e-05,
"loss": 1.1339,
"step": 2090
},
{
"epoch": 1.0174418604651163,
"grad_norm": 4.202190399169922,
"learning_rate": 8.028297416443952e-05,
"loss": 1.2182,
"step": 2100
},
{
"epoch": 1.0174418604651163,
"eval_loss": 1.3388093709945679,
"eval_runtime": 35.4199,
"eval_samples_per_second": 14.116,
"eval_steps_per_second": 14.116,
"step": 2100
},
{
"epoch": 1.0222868217054264,
"grad_norm": 4.183404922485352,
"learning_rate": 7.989846029640397e-05,
"loss": 1.1741,
"step": 2110
},
{
"epoch": 1.0271317829457365,
"grad_norm": 3.9871344566345215,
"learning_rate": 7.951323124581069e-05,
"loss": 1.1608,
"step": 2120
},
{
"epoch": 1.0319767441860466,
"grad_norm": 4.37944221496582,
"learning_rate": 7.91273028479172e-05,
"loss": 1.209,
"step": 2130
},
{
"epoch": 1.0368217054263567,
"grad_norm": 4.377804279327393,
"learning_rate": 7.874069096672831e-05,
"loss": 1.193,
"step": 2140
},
{
"epoch": 1.0416666666666667,
"grad_norm": 4.7681074142456055,
"learning_rate": 7.83534114943442e-05,
"loss": 1.1636,
"step": 2150
},
{
"epoch": 1.0465116279069768,
"grad_norm": 4.442102432250977,
"learning_rate": 7.796548035030715e-05,
"loss": 1.1814,
"step": 2160
},
{
"epoch": 1.051356589147287,
"grad_norm": 4.541327476501465,
"learning_rate": 7.757691348094703e-05,
"loss": 1.202,
"step": 2170
},
{
"epoch": 1.056201550387597,
"grad_norm": 4.2842698097229,
"learning_rate": 7.718772685872595e-05,
"loss": 1.2073,
"step": 2180
},
{
"epoch": 1.0610465116279069,
"grad_norm": 4.021533966064453,
"learning_rate": 7.679793648158159e-05,
"loss": 1.21,
"step": 2190
},
{
"epoch": 1.0658914728682172,
"grad_norm": 4.059267520904541,
"learning_rate": 7.640755837226965e-05,
"loss": 1.1831,
"step": 2200
},
{
"epoch": 1.070736434108527,
"grad_norm": 4.384668827056885,
"learning_rate": 7.601660857770522e-05,
"loss": 1.1574,
"step": 2210
},
{
"epoch": 1.0755813953488371,
"grad_norm": 4.469862937927246,
"learning_rate": 7.562510316830308e-05,
"loss": 1.1677,
"step": 2220
},
{
"epoch": 1.0804263565891472,
"grad_norm": 3.912555456161499,
"learning_rate": 7.523305823731723e-05,
"loss": 1.1334,
"step": 2230
},
{
"epoch": 1.0852713178294573,
"grad_norm": 4.685311794281006,
"learning_rate": 7.484048990017919e-05,
"loss": 1.1501,
"step": 2240
},
{
"epoch": 1.0901162790697674,
"grad_norm": 4.790440559387207,
"learning_rate": 7.444741429383578e-05,
"loss": 1.1735,
"step": 2250
},
{
"epoch": 1.0901162790697674,
"eval_loss": 1.3379790782928467,
"eval_runtime": 35.4837,
"eval_samples_per_second": 14.091,
"eval_steps_per_second": 14.091,
"step": 2250
},
{
"epoch": 1.0949612403100775,
"grad_norm": 4.603212833404541,
"learning_rate": 7.405384757608555e-05,
"loss": 1.2212,
"step": 2260
},
{
"epoch": 1.0998062015503876,
"grad_norm": 4.408427715301514,
"learning_rate": 7.36598059249148e-05,
"loss": 1.22,
"step": 2270
},
{
"epoch": 1.1046511627906976,
"grad_norm": 4.194492340087891,
"learning_rate": 7.326530553783243e-05,
"loss": 1.1911,
"step": 2280
},
{
"epoch": 1.1094961240310077,
"grad_norm": 4.090687274932861,
"learning_rate": 7.287036263120425e-05,
"loss": 1.1692,
"step": 2290
},
{
"epoch": 1.1143410852713178,
"grad_norm": 4.440927028656006,
"learning_rate": 7.247499343958621e-05,
"loss": 1.1881,
"step": 2300
},
{
"epoch": 1.119186046511628,
"grad_norm": 4.5461745262146,
"learning_rate": 7.207921421505724e-05,
"loss": 1.1476,
"step": 2310
},
{
"epoch": 1.124031007751938,
"grad_norm": 4.559352874755859,
"learning_rate": 7.168304122655113e-05,
"loss": 1.183,
"step": 2320
},
{
"epoch": 1.128875968992248,
"grad_norm": 4.365304470062256,
"learning_rate": 7.128649075918768e-05,
"loss": 1.1699,
"step": 2330
},
{
"epoch": 1.1337209302325582,
"grad_norm": 4.1360063552856445,
"learning_rate": 7.088957911360347e-05,
"loss": 1.23,
"step": 2340
},
{
"epoch": 1.1385658914728682,
"grad_norm": 4.621219635009766,
"learning_rate": 7.049232260528163e-05,
"loss": 1.2519,
"step": 2350
},
{
"epoch": 1.1434108527131783,
"grad_norm": 3.837965726852417,
"learning_rate": 7.009473756388128e-05,
"loss": 1.1412,
"step": 2360
},
{
"epoch": 1.1482558139534884,
"grad_norm": 4.362300872802734,
"learning_rate": 6.969684033256622e-05,
"loss": 1.156,
"step": 2370
},
{
"epoch": 1.1531007751937985,
"grad_norm": 4.805346488952637,
"learning_rate": 6.92986472673332e-05,
"loss": 1.1887,
"step": 2380
},
{
"epoch": 1.1579457364341086,
"grad_norm": 4.508747100830078,
"learning_rate": 6.890017473633946e-05,
"loss": 1.2109,
"step": 2390
},
{
"epoch": 1.1627906976744187,
"grad_norm": 4.3871026039123535,
"learning_rate": 6.850143911923011e-05,
"loss": 1.2054,
"step": 2400
},
{
"epoch": 1.1627906976744187,
"eval_loss": 1.337631344795227,
"eval_runtime": 34.3514,
"eval_samples_per_second": 14.555,
"eval_steps_per_second": 14.555,
"step": 2400
},
{
"epoch": 1.1676356589147288,
"grad_norm": 4.372729301452637,
"learning_rate": 6.81024568064646e-05,
"loss": 1.1381,
"step": 2410
},
{
"epoch": 1.1724806201550388,
"grad_norm": 4.620536804199219,
"learning_rate": 6.770324419864309e-05,
"loss": 1.1852,
"step": 2420
},
{
"epoch": 1.177325581395349,
"grad_norm": 4.0550312995910645,
"learning_rate": 6.73038177058323e-05,
"loss": 1.2378,
"step": 2430
},
{
"epoch": 1.1821705426356588,
"grad_norm": 4.528865814208984,
"learning_rate": 6.690419374689087e-05,
"loss": 1.2158,
"step": 2440
},
{
"epoch": 1.187015503875969,
"grad_norm": 4.868921756744385,
"learning_rate": 6.650438874879456e-05,
"loss": 1.1556,
"step": 2450
},
{
"epoch": 1.191860465116279,
"grad_norm": 4.525996208190918,
"learning_rate": 6.61044191459609e-05,
"loss": 1.2004,
"step": 2460
},
{
"epoch": 1.196705426356589,
"grad_norm": 4.129622459411621,
"learning_rate": 6.57043013795737e-05,
"loss": 1.1904,
"step": 2470
},
{
"epoch": 1.2015503875968991,
"grad_norm": 4.325841903686523,
"learning_rate": 6.530405189690719e-05,
"loss": 1.1473,
"step": 2480
},
{
"epoch": 1.2063953488372092,
"grad_norm": 4.778893947601318,
"learning_rate": 6.49036871506499e-05,
"loss": 1.1831,
"step": 2490
},
{
"epoch": 1.2112403100775193,
"grad_norm": 5.402483940124512,
"learning_rate": 6.450322359822846e-05,
"loss": 1.1794,
"step": 2500
},
{
"epoch": 1.2160852713178294,
"grad_norm": 4.464822292327881,
"learning_rate": 6.410267770113098e-05,
"loss": 1.1906,
"step": 2510
},
{
"epoch": 1.2209302325581395,
"grad_norm": 4.59386682510376,
"learning_rate": 6.370206592423045e-05,
"loss": 1.2027,
"step": 2520
},
{
"epoch": 1.2257751937984496,
"grad_norm": 4.05305290222168,
"learning_rate": 6.330140473510796e-05,
"loss": 1.1652,
"step": 2530
},
{
"epoch": 1.2306201550387597,
"grad_norm": 4.635653018951416,
"learning_rate": 6.29007106033757e-05,
"loss": 1.2091,
"step": 2540
},
{
"epoch": 1.2354651162790697,
"grad_norm": 4.434372901916504,
"learning_rate": 6.25e-05,
"loss": 1.2082,
"step": 2550
},
{
"epoch": 1.2354651162790697,
"eval_loss": 1.3322062492370605,
"eval_runtime": 34.4713,
"eval_samples_per_second": 14.505,
"eval_steps_per_second": 14.505,
"step": 2550
},
{
"epoch": 1.2403100775193798,
"grad_norm": 4.956961631774902,
"learning_rate": 6.20992893966243e-05,
"loss": 1.1897,
"step": 2560
},
{
"epoch": 1.24515503875969,
"grad_norm": 3.7832202911376953,
"learning_rate": 6.169859526489204e-05,
"loss": 1.1915,
"step": 2570
},
{
"epoch": 1.25,
"grad_norm": 4.197141647338867,
"learning_rate": 6.129793407576955e-05,
"loss": 1.161,
"step": 2580
},
{
"epoch": 1.25484496124031,
"grad_norm": 5.222776889801025,
"learning_rate": 6.089732229886904e-05,
"loss": 1.1943,
"step": 2590
},
{
"epoch": 1.2596899224806202,
"grad_norm": 4.98862361907959,
"learning_rate": 6.049677640177155e-05,
"loss": 1.2317,
"step": 2600
},
{
"epoch": 1.2645348837209303,
"grad_norm": 4.635793209075928,
"learning_rate": 6.00963128493501e-05,
"loss": 1.1987,
"step": 2610
},
{
"epoch": 1.2693798449612403,
"grad_norm": 4.15401029586792,
"learning_rate": 5.969594810309284e-05,
"loss": 1.2063,
"step": 2620
},
{
"epoch": 1.2742248062015504,
"grad_norm": 4.845739841461182,
"learning_rate": 5.929569862042631e-05,
"loss": 1.2207,
"step": 2630
},
{
"epoch": 1.2790697674418605,
"grad_norm": 4.469569206237793,
"learning_rate": 5.889558085403911e-05,
"loss": 1.1866,
"step": 2640
},
{
"epoch": 1.2839147286821706,
"grad_norm": 4.760675430297852,
"learning_rate": 5.849561125120545e-05,
"loss": 1.1932,
"step": 2650
},
{
"epoch": 1.2887596899224807,
"grad_norm": 4.755754470825195,
"learning_rate": 5.809580625310912e-05,
"loss": 1.0994,
"step": 2660
},
{
"epoch": 1.2936046511627908,
"grad_norm": 4.62287712097168,
"learning_rate": 5.769618229416773e-05,
"loss": 1.1789,
"step": 2670
},
{
"epoch": 1.2984496124031009,
"grad_norm": 4.824830055236816,
"learning_rate": 5.7296755801356926e-05,
"loss": 1.2136,
"step": 2680
},
{
"epoch": 1.3032945736434107,
"grad_norm": 4.84926700592041,
"learning_rate": 5.6897543193535414e-05,
"loss": 1.2006,
"step": 2690
},
{
"epoch": 1.308139534883721,
"grad_norm": 4.716812610626221,
"learning_rate": 5.649856088076989e-05,
"loss": 1.161,
"step": 2700
},
{
"epoch": 1.308139534883721,
"eval_loss": 1.3203985691070557,
"eval_runtime": 37.0313,
"eval_samples_per_second": 13.502,
"eval_steps_per_second": 13.502,
"step": 2700
},
{
"epoch": 1.312984496124031,
"grad_norm": 4.2137651443481445,
"learning_rate": 5.609982526366054e-05,
"loss": 1.167,
"step": 2710
},
{
"epoch": 1.3178294573643412,
"grad_norm": 4.377100944519043,
"learning_rate": 5.570135273266683e-05,
"loss": 1.1736,
"step": 2720
},
{
"epoch": 1.322674418604651,
"grad_norm": 4.399763584136963,
"learning_rate": 5.53031596674338e-05,
"loss": 1.2068,
"step": 2730
},
{
"epoch": 1.3275193798449614,
"grad_norm": 4.772261619567871,
"learning_rate": 5.490526243611873e-05,
"loss": 1.1748,
"step": 2740
},
{
"epoch": 1.3323643410852712,
"grad_norm": 4.910473346710205,
"learning_rate": 5.450767739471837e-05,
"loss": 1.1514,
"step": 2750
},
{
"epoch": 1.3372093023255813,
"grad_norm": 4.758593559265137,
"learning_rate": 5.411042088639655e-05,
"loss": 1.1768,
"step": 2760
},
{
"epoch": 1.3420542635658914,
"grad_norm": 4.396140098571777,
"learning_rate": 5.371350924081234e-05,
"loss": 1.2153,
"step": 2770
},
{
"epoch": 1.3468992248062015,
"grad_norm": 4.715628147125244,
"learning_rate": 5.331695877344888e-05,
"loss": 1.1453,
"step": 2780
},
{
"epoch": 1.3517441860465116,
"grad_norm": 4.38018798828125,
"learning_rate": 5.292078578494275e-05,
"loss": 1.1688,
"step": 2790
},
{
"epoch": 1.3565891472868217,
"grad_norm": 4.341307163238525,
"learning_rate": 5.2525006560413816e-05,
"loss": 1.1549,
"step": 2800
},
{
"epoch": 1.3614341085271318,
"grad_norm": 4.443046569824219,
"learning_rate": 5.212963736879578e-05,
"loss": 1.1985,
"step": 2810
},
{
"epoch": 1.3662790697674418,
"grad_norm": 4.29320764541626,
"learning_rate": 5.173469446216757e-05,
"loss": 1.1957,
"step": 2820
},
{
"epoch": 1.371124031007752,
"grad_norm": 4.762570858001709,
"learning_rate": 5.134019407508521e-05,
"loss": 1.1626,
"step": 2830
},
{
"epoch": 1.375968992248062,
"grad_norm": 4.549506664276123,
"learning_rate": 5.0946152423914456e-05,
"loss": 1.1377,
"step": 2840
},
{
"epoch": 1.380813953488372,
"grad_norm": 4.547486782073975,
"learning_rate": 5.0552585706164246e-05,
"loss": 1.1764,
"step": 2850
},
{
"epoch": 1.380813953488372,
"eval_loss": 1.3321481943130493,
"eval_runtime": 37.0386,
"eval_samples_per_second": 13.499,
"eval_steps_per_second": 13.499,
"step": 2850
},
{
"epoch": 1.3856589147286822,
"grad_norm": 4.949843883514404,
"learning_rate": 5.015951009982081e-05,
"loss": 1.1557,
"step": 2860
},
{
"epoch": 1.3905038759689923,
"grad_norm": 5.2824602127075195,
"learning_rate": 4.976694176268278e-05,
"loss": 1.2206,
"step": 2870
},
{
"epoch": 1.3953488372093024,
"grad_norm": 4.623624801635742,
"learning_rate": 4.937489683169692e-05,
"loss": 1.2192,
"step": 2880
},
{
"epoch": 1.4001937984496124,
"grad_norm": 4.3337626457214355,
"learning_rate": 4.8983391422294786e-05,
"loss": 1.1753,
"step": 2890
},
{
"epoch": 1.4050387596899225,
"grad_norm": 5.181596755981445,
"learning_rate": 4.8592441627730355e-05,
"loss": 1.2189,
"step": 2900
},
{
"epoch": 1.4098837209302326,
"grad_norm": 4.476628303527832,
"learning_rate": 4.820206351841842e-05,
"loss": 1.2062,
"step": 2910
},
{
"epoch": 1.4147286821705427,
"grad_norm": 4.46708869934082,
"learning_rate": 4.781227314127405e-05,
"loss": 1.1427,
"step": 2920
},
{
"epoch": 1.4195736434108528,
"grad_norm": 4.566659450531006,
"learning_rate": 4.7423086519052966e-05,
"loss": 1.158,
"step": 2930
},
{
"epoch": 1.4244186046511627,
"grad_norm": 4.646278381347656,
"learning_rate": 4.703451964969287e-05,
"loss": 1.1786,
"step": 2940
},
{
"epoch": 1.429263565891473,
"grad_norm": 4.6226277351379395,
"learning_rate": 4.66465885056558e-05,
"loss": 1.2042,
"step": 2950
},
{
"epoch": 1.4341085271317828,
"grad_norm": 4.581994533538818,
"learning_rate": 4.62593090332717e-05,
"loss": 1.1456,
"step": 2960
},
{
"epoch": 1.4389534883720931,
"grad_norm": 4.573033332824707,
"learning_rate": 4.587269715208281e-05,
"loss": 1.2082,
"step": 2970
},
{
"epoch": 1.443798449612403,
"grad_norm": 4.8347649574279785,
"learning_rate": 4.5486768754189305e-05,
"loss": 1.2103,
"step": 2980
},
{
"epoch": 1.4486434108527133,
"grad_norm": 4.730405330657959,
"learning_rate": 4.510153970359606e-05,
"loss": 1.1868,
"step": 2990
},
{
"epoch": 1.4534883720930232,
"grad_norm": 4.4655561447143555,
"learning_rate": 4.4717025835560476e-05,
"loss": 1.1379,
"step": 3000
},
{
"epoch": 1.4534883720930232,
"eval_loss": 1.3219256401062012,
"eval_runtime": 34.7991,
"eval_samples_per_second": 14.368,
"eval_steps_per_second": 14.368,
"step": 3000
},
{
"epoch": 1.4583333333333333,
"grad_norm": 4.40006160736084,
"learning_rate": 4.433324295594166e-05,
"loss": 1.1494,
"step": 3010
},
{
"epoch": 1.4631782945736433,
"grad_norm": 4.606269836425781,
"learning_rate": 4.3950206840550585e-05,
"loss": 1.1849,
"step": 3020
},
{
"epoch": 1.4680232558139534,
"grad_norm": 4.411858558654785,
"learning_rate": 4.3567933234501746e-05,
"loss": 1.1973,
"step": 3030
},
{
"epoch": 1.4728682170542635,
"grad_norm": 4.655940055847168,
"learning_rate": 4.318643785156579e-05,
"loss": 1.146,
"step": 3040
},
{
"epoch": 1.4777131782945736,
"grad_norm": 4.7272233963012695,
"learning_rate": 4.280573637352371e-05,
"loss": 1.1986,
"step": 3050
},
{
"epoch": 1.4825581395348837,
"grad_norm": 4.558550834655762,
"learning_rate": 4.242584444952216e-05,
"loss": 1.1522,
"step": 3060
},
{
"epoch": 1.4874031007751938,
"grad_norm": 4.483912467956543,
"learning_rate": 4.204677769543019e-05,
"loss": 1.132,
"step": 3070
},
{
"epoch": 1.4922480620155039,
"grad_norm": 4.158421516418457,
"learning_rate": 4.16685516931974e-05,
"loss": 1.1468,
"step": 3080
},
{
"epoch": 1.497093023255814,
"grad_norm": 5.205226898193359,
"learning_rate": 4.1291181990213286e-05,
"loss": 1.238,
"step": 3090
},
{
"epoch": 1.501937984496124,
"grad_norm": 4.967478275299072,
"learning_rate": 4.0914684098668286e-05,
"loss": 1.1865,
"step": 3100
},
{
"epoch": 1.5067829457364341,
"grad_norm": 4.679478645324707,
"learning_rate": 4.053907349491608e-05,
"loss": 1.1713,
"step": 3110
},
{
"epoch": 1.5116279069767442,
"grad_norm": 4.585414886474609,
"learning_rate": 4.016436561883746e-05,
"loss": 1.1352,
"step": 3120
},
{
"epoch": 1.5164728682170543,
"grad_norm": 4.369693279266357,
"learning_rate": 3.979057587320554e-05,
"loss": 1.1918,
"step": 3130
},
{
"epoch": 1.5213178294573644,
"grad_norm": 5.233042240142822,
"learning_rate": 3.941771962305274e-05,
"loss": 1.19,
"step": 3140
},
{
"epoch": 1.5261627906976745,
"grad_norm": 4.907647132873535,
"learning_rate": 3.9045812195039125e-05,
"loss": 1.1569,
"step": 3150
},
{
"epoch": 1.5261627906976745,
"eval_loss": 1.3149200677871704,
"eval_runtime": 34.7411,
"eval_samples_per_second": 14.392,
"eval_steps_per_second": 14.392,
"step": 3150
},
{
"epoch": 1.5310077519379846,
"grad_norm": 4.570740222930908,
"learning_rate": 3.8674868876822395e-05,
"loss": 1.1619,
"step": 3160
},
{
"epoch": 1.5358527131782944,
"grad_norm": 4.761332035064697,
"learning_rate": 3.83049049164295e-05,
"loss": 1.1719,
"step": 3170
},
{
"epoch": 1.5406976744186047,
"grad_norm": 4.39130973815918,
"learning_rate": 3.793593552162978e-05,
"loss": 1.1727,
"step": 3180
},
{
"epoch": 1.5455426356589146,
"grad_norm": 4.595332145690918,
"learning_rate": 3.75679758593099e-05,
"loss": 1.1708,
"step": 3190
},
{
"epoch": 1.550387596899225,
"grad_norm": 4.71079683303833,
"learning_rate": 3.720104105485039e-05,
"loss": 1.1967,
"step": 3200
},
{
"epoch": 1.5552325581395348,
"grad_norm": 4.250856876373291,
"learning_rate": 3.6835146191503885e-05,
"loss": 1.1726,
"step": 3210
},
{
"epoch": 1.560077519379845,
"grad_norm": 4.2891364097595215,
"learning_rate": 3.647030630977508e-05,
"loss": 1.164,
"step": 3220
},
{
"epoch": 1.564922480620155,
"grad_norm": 4.25226354598999,
"learning_rate": 3.6106536406802524e-05,
"loss": 1.1522,
"step": 3230
},
{
"epoch": 1.5697674418604652,
"grad_norm": 4.491365909576416,
"learning_rate": 3.5743851435742176e-05,
"loss": 1.1693,
"step": 3240
},
{
"epoch": 1.574612403100775,
"grad_norm": 4.436107158660889,
"learning_rate": 3.538226630515262e-05,
"loss": 1.2205,
"step": 3250
},
{
"epoch": 1.5794573643410854,
"grad_norm": 4.75892972946167,
"learning_rate": 3.502179587838238e-05,
"loss": 1.1412,
"step": 3260
},
{
"epoch": 1.5843023255813953,
"grad_norm": 4.313481330871582,
"learning_rate": 3.46624549729588e-05,
"loss": 1.1621,
"step": 3270
},
{
"epoch": 1.5891472868217056,
"grad_norm": 4.274176120758057,
"learning_rate": 3.430425835997908e-05,
"loss": 1.1676,
"step": 3280
},
{
"epoch": 1.5939922480620154,
"grad_norm": 4.709112644195557,
"learning_rate": 3.394722076350302e-05,
"loss": 1.1514,
"step": 3290
},
{
"epoch": 1.5988372093023255,
"grad_norm": 4.2664079666137695,
"learning_rate": 3.359135685994781e-05,
"loss": 1.2006,
"step": 3300
},
{
"epoch": 1.5988372093023255,
"eval_loss": 1.308353066444397,
"eval_runtime": 36.1139,
"eval_samples_per_second": 13.845,
"eval_steps_per_second": 13.845,
"step": 3300
},
{
"epoch": 1.6036821705426356,
"grad_norm": 4.822900772094727,
"learning_rate": 3.3236681277484654e-05,
"loss": 1.1643,
"step": 3310
},
{
"epoch": 1.6085271317829457,
"grad_norm": 4.587668418884277,
"learning_rate": 3.2883208595437584e-05,
"loss": 1.1914,
"step": 3320
},
{
"epoch": 1.6133720930232558,
"grad_norm": 4.759604454040527,
"learning_rate": 3.2530953343684136e-05,
"loss": 1.1826,
"step": 3330
},
{
"epoch": 1.6182170542635659,
"grad_norm": 4.67030668258667,
"learning_rate": 3.217993000205799e-05,
"loss": 1.1793,
"step": 3340
},
{
"epoch": 1.623062015503876,
"grad_norm": 4.612665176391602,
"learning_rate": 3.1830152999753903e-05,
"loss": 1.198,
"step": 3350
},
{
"epoch": 1.627906976744186,
"grad_norm": 4.420239448547363,
"learning_rate": 3.148163671473439e-05,
"loss": 1.1513,
"step": 3360
},
{
"epoch": 1.6327519379844961,
"grad_norm": 4.726665496826172,
"learning_rate": 3.113439547313892e-05,
"loss": 1.145,
"step": 3370
},
{
"epoch": 1.6375968992248062,
"grad_norm": 5.277156352996826,
"learning_rate": 3.0788443548694874e-05,
"loss": 1.1698,
"step": 3380
},
{
"epoch": 1.6424418604651163,
"grad_norm": 4.7339630126953125,
"learning_rate": 3.0443795162130876e-05,
"loss": 1.1492,
"step": 3390
},
{
"epoch": 1.6472868217054264,
"grad_norm": 4.68505334854126,
"learning_rate": 3.0100464480592185e-05,
"loss": 1.158,
"step": 3400
},
{
"epoch": 1.6521317829457365,
"grad_norm": 5.142575740814209,
"learning_rate": 2.9758465617058404e-05,
"loss": 1.224,
"step": 3410
},
{
"epoch": 1.6569767441860463,
"grad_norm": 4.73928165435791,
"learning_rate": 2.9417812629763285e-05,
"loss": 1.1897,
"step": 3420
},
{
"epoch": 1.6618217054263567,
"grad_norm": 4.277917385101318,
"learning_rate": 2.9078519521616894e-05,
"loss": 1.1559,
"step": 3430
},
{
"epoch": 1.6666666666666665,
"grad_norm": 4.362334251403809,
"learning_rate": 2.8740600239630002e-05,
"loss": 1.1269,
"step": 3440
},
{
"epoch": 1.6715116279069768,
"grad_norm": 4.715338706970215,
"learning_rate": 2.8404068674340714e-05,
"loss": 1.1224,
"step": 3450
},
{
"epoch": 1.6715116279069768,
"eval_loss": 1.3105417490005493,
"eval_runtime": 36.9202,
"eval_samples_per_second": 13.543,
"eval_steps_per_second": 13.543,
"step": 3450
},
{
"epoch": 1.6763565891472867,
"grad_norm": 4.6475138664245605,
"learning_rate": 2.80689386592436e-05,
"loss": 1.1789,
"step": 3460
},
{
"epoch": 1.681201550387597,
"grad_norm": 4.647360324859619,
"learning_rate": 2.7735223970220955e-05,
"loss": 1.1575,
"step": 3470
},
{
"epoch": 1.6860465116279069,
"grad_norm": 5.539259433746338,
"learning_rate": 2.7402938324976576e-05,
"loss": 1.1573,
"step": 3480
},
{
"epoch": 1.6908914728682172,
"grad_norm": 5.069607257843018,
"learning_rate": 2.70720953824719e-05,
"loss": 1.2175,
"step": 3490
},
{
"epoch": 1.695736434108527,
"grad_norm": 4.86273193359375,
"learning_rate": 2.674270874236441e-05,
"loss": 1.1546,
"step": 3500
},
{
"epoch": 1.7005813953488373,
"grad_norm": 4.686957359313965,
"learning_rate": 2.64147919444488e-05,
"loss": 1.1283,
"step": 3510
},
{
"epoch": 1.7054263565891472,
"grad_norm": 4.801562309265137,
"learning_rate": 2.6088358468100247e-05,
"loss": 1.1535,
"step": 3520
},
{
"epoch": 1.7102713178294575,
"grad_norm": 4.969239711761475,
"learning_rate": 2.5763421731720435e-05,
"loss": 1.1932,
"step": 3530
},
{
"epoch": 1.7151162790697674,
"grad_norm": 4.972625255584717,
"learning_rate": 2.5439995092185892e-05,
"loss": 1.2001,
"step": 3540
},
{
"epoch": 1.7199612403100775,
"grad_norm": 4.53269624710083,
"learning_rate": 2.5118091844299e-05,
"loss": 1.1269,
"step": 3550
},
{
"epoch": 1.7248062015503876,
"grad_norm": 4.459876537322998,
"learning_rate": 2.479772522024147e-05,
"loss": 1.1612,
"step": 3560
},
{
"epoch": 1.7296511627906976,
"grad_norm": 4.202701091766357,
"learning_rate": 2.4478908389030427e-05,
"loss": 1.1336,
"step": 3570
},
{
"epoch": 1.7344961240310077,
"grad_norm": 4.356159210205078,
"learning_rate": 2.41616544559771e-05,
"loss": 1.1813,
"step": 3580
},
{
"epoch": 1.7393410852713178,
"grad_norm": 4.778804779052734,
"learning_rate": 2.3845976462148033e-05,
"loss": 1.1568,
"step": 3590
},
{
"epoch": 1.744186046511628,
"grad_norm": 4.015143871307373,
"learning_rate": 2.3531887383829157e-05,
"loss": 1.1537,
"step": 3600
},
{
"epoch": 1.744186046511628,
"eval_loss": 1.303134560585022,
"eval_runtime": 35.466,
"eval_samples_per_second": 14.098,
"eval_steps_per_second": 14.098,
"step": 3600
},
{
"epoch": 1.749031007751938,
"grad_norm": 4.870272636413574,
"learning_rate": 2.3219400131992273e-05,
"loss": 1.1905,
"step": 3610
},
{
"epoch": 1.753875968992248,
"grad_norm": 4.328778266906738,
"learning_rate": 2.2908527551764404e-05,
"loss": 1.1212,
"step": 3620
},
{
"epoch": 1.7587209302325582,
"grad_norm": 4.691504955291748,
"learning_rate": 2.259928242189966e-05,
"loss": 1.1463,
"step": 3630
},
{
"epoch": 1.7635658914728682,
"grad_norm": 4.509458065032959,
"learning_rate": 2.2291677454254136e-05,
"loss": 1.1534,
"step": 3640
},
{
"epoch": 1.7684108527131783,
"grad_norm": 4.431001663208008,
"learning_rate": 2.1985725293263237e-05,
"loss": 1.1404,
"step": 3650
},
{
"epoch": 1.7732558139534884,
"grad_norm": 4.709799766540527,
"learning_rate": 2.1681438515421953e-05,
"loss": 1.1536,
"step": 3660
},
{
"epoch": 1.7781007751937985,
"grad_norm": 4.597837448120117,
"learning_rate": 2.1378829628767965e-05,
"loss": 1.1385,
"step": 3670
},
{
"epoch": 1.7829457364341086,
"grad_norm": 4.932100296020508,
"learning_rate": 2.1077911072367317e-05,
"loss": 1.112,
"step": 3680
},
{
"epoch": 1.7877906976744184,
"grad_norm": 4.695112705230713,
"learning_rate": 2.077869521580325e-05,
"loss": 1.1437,
"step": 3690
},
{
"epoch": 1.7926356589147288,
"grad_norm": 4.614483833312988,
"learning_rate": 2.0481194358667695e-05,
"loss": 1.1403,
"step": 3700
},
{
"epoch": 1.7974806201550386,
"grad_norm": 4.5487141609191895,
"learning_rate": 2.018542073005567e-05,
"loss": 1.2048,
"step": 3710
},
{
"epoch": 1.802325581395349,
"grad_norm": 4.563633441925049,
"learning_rate": 1.9891386488062538e-05,
"loss": 1.1905,
"step": 3720
},
{
"epoch": 1.8071705426356588,
"grad_norm": 4.88431978225708,
"learning_rate": 1.959910371928436e-05,
"loss": 1.1999,
"step": 3730
},
{
"epoch": 1.812015503875969,
"grad_norm": 4.790411472320557,
"learning_rate": 1.930858443832096e-05,
"loss": 1.1908,
"step": 3740
},
{
"epoch": 1.816860465116279,
"grad_norm": 4.809146404266357,
"learning_rate": 1.90198405872821e-05,
"loss": 1.1942,
"step": 3750
},
{
"epoch": 1.816860465116279,
"eval_loss": 1.3001154661178589,
"eval_runtime": 35.3757,
"eval_samples_per_second": 14.134,
"eval_steps_per_second": 14.134,
"step": 3750
},
{
"epoch": 1.8217054263565893,
"grad_norm": 4.622406959533691,
"learning_rate": 1.8732884035296582e-05,
"loss": 1.171,
"step": 3760
},
{
"epoch": 1.8265503875968991,
"grad_norm": 4.658726692199707,
"learning_rate": 1.844772657802428e-05,
"loss": 1.1856,
"step": 3770
},
{
"epoch": 1.8313953488372094,
"grad_norm": 4.213481903076172,
"learning_rate": 1.8164379937171382e-05,
"loss": 1.1396,
"step": 3780
},
{
"epoch": 1.8362403100775193,
"grad_norm": 4.337812900543213,
"learning_rate": 1.7882855760008547e-05,
"loss": 1.1649,
"step": 3790
},
{
"epoch": 1.8410852713178296,
"grad_norm": 4.894495010375977,
"learning_rate": 1.760316561889203e-05,
"loss": 1.1902,
"step": 3800
},
{
"epoch": 1.8459302325581395,
"grad_norm": 4.742762565612793,
"learning_rate": 1.7325321010788034e-05,
"loss": 1.2039,
"step": 3810
},
{
"epoch": 1.8507751937984496,
"grad_norm": 4.0735859870910645,
"learning_rate": 1.7049333356800167e-05,
"loss": 1.1367,
"step": 3820
},
{
"epoch": 1.8556201550387597,
"grad_norm": 4.980224609375,
"learning_rate": 1.6775214001699914e-05,
"loss": 1.1802,
"step": 3830
},
{
"epoch": 1.8604651162790697,
"grad_norm": 4.273513317108154,
"learning_rate": 1.6502974213460316e-05,
"loss": 1.179,
"step": 3840
},
{
"epoch": 1.8653100775193798,
"grad_norm": 4.441263198852539,
"learning_rate": 1.623262518279279e-05,
"loss": 1.1551,
"step": 3850
},
{
"epoch": 1.87015503875969,
"grad_norm": 4.667087554931641,
"learning_rate": 1.596417802268707e-05,
"loss": 1.142,
"step": 3860
},
{
"epoch": 1.875,
"grad_norm": 4.452362537384033,
"learning_rate": 1.5697643767954488e-05,
"loss": 1.1323,
"step": 3870
},
{
"epoch": 1.87984496124031,
"grad_norm": 4.532679557800293,
"learning_rate": 1.543303337477432e-05,
"loss": 1.1637,
"step": 3880
},
{
"epoch": 1.8846899224806202,
"grad_norm": 4.801875114440918,
"learning_rate": 1.517035772024343e-05,
"loss": 1.1405,
"step": 3890
},
{
"epoch": 1.8895348837209303,
"grad_norm": 4.448420524597168,
"learning_rate": 1.49096276019291e-05,
"loss": 1.1664,
"step": 3900
},
{
"epoch": 1.8895348837209303,
"eval_loss": 1.2978554964065552,
"eval_runtime": 35.0871,
"eval_samples_per_second": 14.25,
"eval_steps_per_second": 14.25,
"step": 3900
},
{
"epoch": 1.8943798449612403,
"grad_norm": 4.431020736694336,
"learning_rate": 1.4650853737425327e-05,
"loss": 1.1702,
"step": 3910
},
{
"epoch": 1.8992248062015504,
"grad_norm": 4.734516143798828,
"learning_rate": 1.4394046763912122e-05,
"loss": 1.1383,
"step": 3920
},
{
"epoch": 1.9040697674418605,
"grad_norm": 4.348439693450928,
"learning_rate": 1.413921723771832e-05,
"loss": 1.1701,
"step": 3930
},
{
"epoch": 1.9089147286821704,
"grad_norm": 4.6349263191223145,
"learning_rate": 1.3886375633887665e-05,
"loss": 1.1877,
"step": 3940
},
{
"epoch": 1.9137596899224807,
"grad_norm": 4.767696857452393,
"learning_rate": 1.3635532345748137e-05,
"loss": 1.1573,
"step": 3950
},
{
"epoch": 1.9186046511627906,
"grad_norm": 4.602748394012451,
"learning_rate": 1.3386697684484853e-05,
"loss": 1.1472,
"step": 3960
},
{
"epoch": 1.9234496124031009,
"grad_norm": 4.704031467437744,
"learning_rate": 1.3139881878716107e-05,
"loss": 1.1425,
"step": 3970
},
{
"epoch": 1.9282945736434107,
"grad_norm": 4.546751022338867,
"learning_rate": 1.2895095074072986e-05,
"loss": 1.1796,
"step": 3980
},
{
"epoch": 1.933139534883721,
"grad_norm": 4.875455379486084,
"learning_rate": 1.2652347332782227e-05,
"loss": 1.1565,
"step": 3990
},
{
"epoch": 1.937984496124031,
"grad_norm": 4.274509429931641,
"learning_rate": 1.2411648633252719e-05,
"loss": 1.1479,
"step": 4000
},
{
"epoch": 1.9428294573643412,
"grad_norm": 5.224791049957275,
"learning_rate": 1.2173008869665241e-05,
"loss": 1.1812,
"step": 4010
},
{
"epoch": 1.947674418604651,
"grad_norm": 4.49781608581543,
"learning_rate": 1.1936437851565791e-05,
"loss": 1.0814,
"step": 4020
},
{
"epoch": 1.9525193798449614,
"grad_norm": 4.98948335647583,
"learning_rate": 1.1701945303462337e-05,
"loss": 1.135,
"step": 4030
},
{
"epoch": 1.9573643410852712,
"grad_norm": 4.684279918670654,
"learning_rate": 1.146954086442508e-05,
"loss": 1.139,
"step": 4040
},
{
"epoch": 1.9622093023255816,
"grad_norm": 4.083446502685547,
"learning_rate": 1.1239234087690252e-05,
"loss": 1.1605,
"step": 4050
},
{
"epoch": 1.9622093023255816,
"eval_loss": 1.3004401922225952,
"eval_runtime": 36.7898,
"eval_samples_per_second": 13.591,
"eval_steps_per_second": 13.591,
"step": 4050
},
{
"epoch": 1.9670542635658914,
"grad_norm": 4.628580093383789,
"learning_rate": 1.1011034440267395e-05,
"loss": 1.1282,
"step": 4060
},
{
"epoch": 1.9718992248062015,
"grad_norm": 4.7327399253845215,
"learning_rate": 1.078495130255023e-05,
"loss": 1.1448,
"step": 4070
},
{
"epoch": 1.9767441860465116,
"grad_norm": 4.666077136993408,
"learning_rate": 1.0560993967931004e-05,
"loss": 1.165,
"step": 4080
},
{
"epoch": 1.9815891472868217,
"grad_norm": 4.282505989074707,
"learning_rate": 1.0339171642418585e-05,
"loss": 1.1501,
"step": 4090
},
{
"epoch": 1.9864341085271318,
"grad_norm": 5.030765056610107,
"learning_rate": 1.0119493444259963e-05,
"loss": 1.1606,
"step": 4100
},
{
"epoch": 1.9912790697674418,
"grad_norm": 4.556647777557373,
"learning_rate": 9.901968403565428e-06,
"loss": 1.1273,
"step": 4110
},
{
"epoch": 1.996124031007752,
"grad_norm": 4.7390289306640625,
"learning_rate": 9.686605461937441e-06,
"loss": 1.1746,
"step": 4120
},
{
"epoch": 2.000968992248062,
"grad_norm": 4.468574523925781,
"learning_rate": 9.473413472102982e-06,
"loss": 1.1559,
"step": 4130
},
{
"epoch": 2.005813953488372,
"grad_norm": 4.097834587097168,
"learning_rate": 9.262401197549744e-06,
"loss": 1.0378,
"step": 4140
},
{
"epoch": 2.010658914728682,
"grad_norm": 4.449402809143066,
"learning_rate": 9.05357731216587e-06,
"loss": 1.0429,
"step": 4150
},
{
"epoch": 2.0155038759689923,
"grad_norm": 4.115217685699463,
"learning_rate": 8.846950399883368e-06,
"loss": 1.0474,
"step": 4160
},
{
"epoch": 2.020348837209302,
"grad_norm": 4.607194423675537,
"learning_rate": 8.64252895432531e-06,
"loss": 1.053,
"step": 4170
},
{
"epoch": 2.0251937984496124,
"grad_norm": 4.8986029624938965,
"learning_rate": 8.440321378456656e-06,
"loss": 1.0119,
"step": 4180
},
{
"epoch": 2.0300387596899223,
"grad_norm": 4.387269973754883,
"learning_rate": 8.240335984238844e-06,
"loss": 0.9652,
"step": 4190
},
{
"epoch": 2.0348837209302326,
"grad_norm": 4.39861536026001,
"learning_rate": 8.042580992288163e-06,
"loss": 1.0401,
"step": 4200
},
{
"epoch": 2.0348837209302326,
"eval_loss": 1.3077232837677002,
"eval_runtime": 36.5162,
"eval_samples_per_second": 13.693,
"eval_steps_per_second": 13.693,
"step": 4200
},
{
"epoch": 2.0397286821705425,
"grad_norm": 4.842596054077148,
"learning_rate": 7.847064531537774e-06,
"loss": 1.0324,
"step": 4210
},
{
"epoch": 2.044573643410853,
"grad_norm": 4.725849628448486,
"learning_rate": 7.653794638903574e-06,
"loss": 1.06,
"step": 4220
},
{
"epoch": 2.0494186046511627,
"grad_norm": 4.722954750061035,
"learning_rate": 7.462779258953875e-06,
"loss": 1.0461,
"step": 4230
},
{
"epoch": 2.054263565891473,
"grad_norm": 4.449377536773682,
"learning_rate": 7.274026243582796e-06,
"loss": 1.0381,
"step": 4240
},
{
"epoch": 2.059108527131783,
"grad_norm": 4.836928367614746,
"learning_rate": 7.087543351687493e-06,
"loss": 1.0541,
"step": 4250
},
{
"epoch": 2.063953488372093,
"grad_norm": 4.825849533081055,
"learning_rate": 6.903338248849269e-06,
"loss": 1.0178,
"step": 4260
},
{
"epoch": 2.068798449612403,
"grad_norm": 4.556478977203369,
"learning_rate": 6.7214185070183925e-06,
"loss": 1.0202,
"step": 4270
},
{
"epoch": 2.0736434108527133,
"grad_norm": 5.168104648590088,
"learning_rate": 6.541791604202936e-06,
"loss": 1.0212,
"step": 4280
},
{
"epoch": 2.078488372093023,
"grad_norm": 4.436131000518799,
"learning_rate": 6.364464924161311e-06,
"loss": 1.035,
"step": 4290
},
{
"epoch": 2.0833333333333335,
"grad_norm": 4.729435443878174,
"learning_rate": 6.1894457560988106e-06,
"loss": 1.0691,
"step": 4300
},
{
"epoch": 2.0881782945736433,
"grad_norm": 4.654105186462402,
"learning_rate": 6.016741294367911e-06,
"loss": 0.9836,
"step": 4310
},
{
"epoch": 2.0930232558139537,
"grad_norm": 4.738863945007324,
"learning_rate": 5.846358638172615e-06,
"loss": 1.059,
"step": 4320
},
{
"epoch": 2.0978682170542635,
"grad_norm": 4.768299102783203,
"learning_rate": 5.678304791276567e-06,
"loss": 1.0208,
"step": 4330
},
{
"epoch": 2.102713178294574,
"grad_norm": 4.950557231903076,
"learning_rate": 5.51258666171519e-06,
"loss": 1.0373,
"step": 4340
},
{
"epoch": 2.1075581395348837,
"grad_norm": 4.744584083557129,
"learning_rate": 5.349211061511726e-06,
"loss": 1.0382,
"step": 4350
},
{
"epoch": 2.1075581395348837,
"eval_loss": 1.318005084991455,
"eval_runtime": 34.8879,
"eval_samples_per_second": 14.332,
"eval_steps_per_second": 14.332,
"step": 4350
},
{
"epoch": 2.112403100775194,
"grad_norm": 4.970248222351074,
"learning_rate": 5.188184706397182e-06,
"loss": 1.0336,
"step": 4360
},
{
"epoch": 2.117248062015504,
"grad_norm": 4.452147483825684,
"learning_rate": 5.029514215534339e-06,
"loss": 1.0783,
"step": 4370
},
{
"epoch": 2.1220930232558137,
"grad_norm": 5.069756507873535,
"learning_rate": 4.873206111245594e-06,
"loss": 1.0183,
"step": 4380
},
{
"epoch": 2.126937984496124,
"grad_norm": 5.139758586883545,
"learning_rate": 4.719266818744912e-06,
"loss": 1.0064,
"step": 4390
},
{
"epoch": 2.1317829457364343,
"grad_norm": 4.728250980377197,
"learning_rate": 4.567702665873648e-06,
"loss": 1.0729,
"step": 4400
},
{
"epoch": 2.136627906976744,
"grad_norm": 4.994308948516846,
"learning_rate": 4.418519882840505e-06,
"loss": 1.0429,
"step": 4410
},
{
"epoch": 2.141472868217054,
"grad_norm": 5.102052211761475,
"learning_rate": 4.271724601965371e-06,
"loss": 0.9935,
"step": 4420
},
{
"epoch": 2.1463178294573644,
"grad_norm": 4.426207065582275,
"learning_rate": 4.127322857427306e-06,
"loss": 1.0107,
"step": 4430
},
{
"epoch": 2.1511627906976742,
"grad_norm": 4.988846302032471,
"learning_rate": 3.985320585016425e-06,
"loss": 1.053,
"step": 4440
},
{
"epoch": 2.1560077519379846,
"grad_norm": 4.7682600021362305,
"learning_rate": 3.845723621889973e-06,
"loss": 1.0547,
"step": 4450
},
{
"epoch": 2.1608527131782944,
"grad_norm": 4.747420787811279,
"learning_rate": 3.7085377063323447e-06,
"loss": 1.0372,
"step": 4460
},
{
"epoch": 2.1656976744186047,
"grad_norm": 5.47692346572876,
"learning_rate": 3.5737684775191887e-06,
"loss": 1.0329,
"step": 4470
},
{
"epoch": 2.1705426356589146,
"grad_norm": 4.682116508483887,
"learning_rate": 3.441421475285679e-06,
"loss": 1.0207,
"step": 4480
},
{
"epoch": 2.175387596899225,
"grad_norm": 4.9538421630859375,
"learning_rate": 3.3115021398986768e-06,
"loss": 1.0312,
"step": 4490
},
{
"epoch": 2.1802325581395348,
"grad_norm": 4.703029155731201,
"learning_rate": 3.18401581183321e-06,
"loss": 1.0013,
"step": 4500
},
{
"epoch": 2.1802325581395348,
"eval_loss": 1.3207753896713257,
"eval_runtime": 34.0398,
"eval_samples_per_second": 14.689,
"eval_steps_per_second": 14.689,
"step": 4500
},
{
"epoch": 2.185077519379845,
"grad_norm": 5.3770599365234375,
"learning_rate": 3.0589677315529044e-06,
"loss": 1.0393,
"step": 4510
},
{
"epoch": 2.189922480620155,
"grad_norm": 5.012081146240234,
"learning_rate": 2.9363630392945513e-06,
"loss": 1.0691,
"step": 4520
},
{
"epoch": 2.1947674418604652,
"grad_norm": 4.9033589363098145,
"learning_rate": 2.816206774856854e-06,
"loss": 1.0394,
"step": 4530
},
{
"epoch": 2.199612403100775,
"grad_norm": 5.1483001708984375,
"learning_rate": 2.6985038773932046e-06,
"loss": 1.0717,
"step": 4540
},
{
"epoch": 2.2044573643410854,
"grad_norm": 4.764675617218018,
"learning_rate": 2.583259185208714e-06,
"loss": 1.0564,
"step": 4550
},
{
"epoch": 2.2093023255813953,
"grad_norm": 5.014820575714111,
"learning_rate": 2.4704774355612943e-06,
"loss": 1.0331,
"step": 4560
},
{
"epoch": 2.2141472868217056,
"grad_norm": 4.7594099044799805,
"learning_rate": 2.3601632644669536e-06,
"loss": 1.0387,
"step": 4570
},
{
"epoch": 2.2189922480620154,
"grad_norm": 4.573930740356445,
"learning_rate": 2.2523212065091723e-06,
"loss": 1.0151,
"step": 4580
},
{
"epoch": 2.2238372093023258,
"grad_norm": 4.81765079498291,
"learning_rate": 2.1469556946525706e-06,
"loss": 1.0812,
"step": 4590
},
{
"epoch": 2.2286821705426356,
"grad_norm": 4.986940383911133,
"learning_rate": 2.0440710600606595e-06,
"loss": 1.008,
"step": 4600
},
{
"epoch": 2.233527131782946,
"grad_norm": 5.306875705718994,
"learning_rate": 1.9436715319177956e-06,
"loss": 0.9869,
"step": 4610
},
{
"epoch": 2.238372093023256,
"grad_norm": 5.1074395179748535,
"learning_rate": 1.8457612372553348e-06,
"loss": 1.056,
"step": 4620
},
{
"epoch": 2.2432170542635657,
"grad_norm": 4.792019367218018,
"learning_rate": 1.75034420078201e-06,
"loss": 1.0536,
"step": 4630
},
{
"epoch": 2.248062015503876,
"grad_norm": 5.631920337677002,
"learning_rate": 1.6574243447184597e-06,
"loss": 1.0099,
"step": 4640
},
{
"epoch": 2.2529069767441863,
"grad_norm": 4.535787105560303,
"learning_rate": 1.567005488636024e-06,
"loss": 1.0242,
"step": 4650
},
{
"epoch": 2.2529069767441863,
"eval_loss": 1.321666955947876,
"eval_runtime": 34.8681,
"eval_samples_per_second": 14.34,
"eval_steps_per_second": 14.34,
"step": 4650
},
{
"epoch": 2.257751937984496,
"grad_norm": 5.02518367767334,
"learning_rate": 1.4790913492997438e-06,
"loss": 1.0151,
"step": 4660
},
{
"epoch": 2.262596899224806,
"grad_norm": 4.680893421173096,
"learning_rate": 1.3936855405155408e-06,
"loss": 1.0531,
"step": 4670
},
{
"epoch": 2.2674418604651163,
"grad_norm": 4.825300216674805,
"learning_rate": 1.3107915729816954e-06,
"loss": 0.963,
"step": 4680
},
{
"epoch": 2.272286821705426,
"grad_norm": 4.554722785949707,
"learning_rate": 1.230412854144547e-06,
"loss": 1.0193,
"step": 4690
},
{
"epoch": 2.2771317829457365,
"grad_norm": 4.723803520202637,
"learning_rate": 1.15255268805841e-06,
"loss": 1.0079,
"step": 4700
},
{
"epoch": 2.2819767441860463,
"grad_norm": 5.480154514312744,
"learning_rate": 1.0772142752497604e-06,
"loss": 1.0737,
"step": 4710
},
{
"epoch": 2.2868217054263567,
"grad_norm": 5.367369651794434,
"learning_rate": 1.004400712585646e-06,
"loss": 1.0406,
"step": 4720
},
{
"epoch": 2.2916666666666665,
"grad_norm": 4.804909706115723,
"learning_rate": 9.341149931464537e-07,
"loss": 1.037,
"step": 4730
},
{
"epoch": 2.296511627906977,
"grad_norm": 5.131532192230225,
"learning_rate": 8.663600061028162e-07,
"loss": 1.0684,
"step": 4740
},
{
"epoch": 2.3013565891472867,
"grad_norm": 4.939748287200928,
"learning_rate": 8.011385365968641e-07,
"loss": 1.0439,
"step": 4750
},
{
"epoch": 2.306201550387597,
"grad_norm": 4.985282897949219,
"learning_rate": 7.384532656277698e-07,
"loss": 1.0066,
"step": 4760
},
{
"epoch": 2.311046511627907,
"grad_norm": 5.235836505889893,
"learning_rate": 6.783067699414891e-07,
"loss": 1.0223,
"step": 4770
},
{
"epoch": 2.315891472868217,
"grad_norm": 5.52247953414917,
"learning_rate": 6.207015219248866e-07,
"loss": 1.0469,
"step": 4780
},
{
"epoch": 2.320736434108527,
"grad_norm": 4.918477535247803,
"learning_rate": 5.656398895040813e-07,
"loss": 1.0399,
"step": 4790
},
{
"epoch": 2.3255813953488373,
"grad_norm": 4.3925628662109375,
"learning_rate": 5.131241360471217e-07,
"loss": 1.0114,
"step": 4800
},
{
"epoch": 2.3255813953488373,
"eval_loss": 1.3223485946655273,
"eval_runtime": 34.8062,
"eval_samples_per_second": 14.365,
"eval_steps_per_second": 14.365,
"step": 4800
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 150,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.570336699071898e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
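
A minimal sketch, not part of the checkpoint itself: assuming the JSON above is saved as trainer_state.json (the usual Hugging Face Trainer state filename), the stdlib-only snippet below parses log_history and reports the evaluation record with the lowest eval_loss. The field names (log_history, eval_loss, step, epoch) are taken from the records above; the filename is an assumption.

import json

# Load the trainer state written alongside a checkpoint (filename assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the records that carry an eval_loss (one per eval_steps interval).
evals = [e for e in state["log_history"] if "eval_loss" in e]

# Pick the evaluation with the lowest loss and report it.
best = min(evals, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']} (epoch {best['epoch']:.3f})")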