{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 108,
"global_step": 216,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004629629629629629,
"grad_norm": 0.4398077726364136,
"learning_rate": 2e-05,
"loss": 1.1885,
"step": 1
},
{
"epoch": 0.004629629629629629,
"eval_loss": 1.181751012802124,
"eval_runtime": 16.5418,
"eval_samples_per_second": 22.005,
"eval_steps_per_second": 2.781,
"step": 1
},
{
"epoch": 0.009259259259259259,
"grad_norm": 0.4034997820854187,
"learning_rate": 4e-05,
"loss": 1.294,
"step": 2
},
{
"epoch": 0.013888888888888888,
"grad_norm": 0.3441363573074341,
"learning_rate": 6e-05,
"loss": 1.1688,
"step": 3
},
{
"epoch": 0.018518518518518517,
"grad_norm": 0.3627238869667053,
"learning_rate": 8e-05,
"loss": 1.1068,
"step": 4
},
{
"epoch": 0.023148148148148147,
"grad_norm": 0.4161143898963928,
"learning_rate": 0.0001,
"loss": 1.2275,
"step": 5
},
{
"epoch": 0.027777777777777776,
"grad_norm": 0.3521893620491028,
"learning_rate": 0.00012,
"loss": 1.0201,
"step": 6
},
{
"epoch": 0.032407407407407406,
"grad_norm": 0.3282751142978668,
"learning_rate": 0.00014,
"loss": 1.0386,
"step": 7
},
{
"epoch": 0.037037037037037035,
"grad_norm": 0.3734138607978821,
"learning_rate": 0.00016,
"loss": 1.1113,
"step": 8
},
{
"epoch": 0.041666666666666664,
"grad_norm": 0.31210270524024963,
"learning_rate": 0.00018,
"loss": 0.8375,
"step": 9
},
{
"epoch": 0.046296296296296294,
"grad_norm": 0.3017318844795227,
"learning_rate": 0.0002,
"loss": 0.7965,
"step": 10
},
{
"epoch": 0.05092592592592592,
"grad_norm": 0.3388109803199768,
"learning_rate": 0.00019998837141446378,
"loss": 0.7386,
"step": 11
},
{
"epoch": 0.05555555555555555,
"grad_norm": 0.3247002959251404,
"learning_rate": 0.00019995348836233516,
"loss": 0.591,
"step": 12
},
{
"epoch": 0.06018518518518518,
"grad_norm": 0.2645561695098877,
"learning_rate": 0.00019989535895642525,
"loss": 0.5455,
"step": 13
},
{
"epoch": 0.06481481481481481,
"grad_norm": 0.26551955938339233,
"learning_rate": 0.00019981399671598939,
"loss": 0.5003,
"step": 14
},
{
"epoch": 0.06944444444444445,
"grad_norm": 0.2658671736717224,
"learning_rate": 0.00019970942056358307,
"loss": 0.5077,
"step": 15
},
{
"epoch": 0.07407407407407407,
"grad_norm": 0.33185991644859314,
"learning_rate": 0.00019958165482066094,
"loss": 0.5111,
"step": 16
},
{
"epoch": 0.0787037037037037,
"grad_norm": 0.327854186296463,
"learning_rate": 0.0001994307292019204,
"loss": 0.4994,
"step": 17
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.2778171896934509,
"learning_rate": 0.0001992566788083908,
"loss": 0.5184,
"step": 18
},
{
"epoch": 0.08796296296296297,
"grad_norm": 0.22942981123924255,
"learning_rate": 0.00019905954411926992,
"loss": 0.4833,
"step": 19
},
{
"epoch": 0.09259259259259259,
"grad_norm": 0.18429657816886902,
"learning_rate": 0.00019883937098250963,
"loss": 0.4244,
"step": 20
},
{
"epoch": 0.09722222222222222,
"grad_norm": 0.20559769868850708,
"learning_rate": 0.000198596210604153,
"loss": 0.4432,
"step": 21
},
{
"epoch": 0.10185185185185185,
"grad_norm": 0.1754508763551712,
"learning_rate": 0.00019833011953642525,
"loss": 0.4271,
"step": 22
},
{
"epoch": 0.10648148148148148,
"grad_norm": 0.150568887591362,
"learning_rate": 0.00019804115966458115,
"loss": 0.443,
"step": 23
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.15353460609912872,
"learning_rate": 0.0001977293981925125,
"loss": 0.4264,
"step": 24
},
{
"epoch": 0.11574074074074074,
"grad_norm": 0.14020614326000214,
"learning_rate": 0.00019739490762711812,
"loss": 0.4575,
"step": 25
},
{
"epoch": 0.12037037037037036,
"grad_norm": 0.14703242480754852,
"learning_rate": 0.00019703776576144105,
"loss": 0.4824,
"step": 26
},
{
"epoch": 0.125,
"grad_norm": 0.171169713139534,
"learning_rate": 0.00019665805565657603,
"loss": 0.4568,
"step": 27
},
{
"epoch": 0.12962962962962962,
"grad_norm": 0.13848260045051575,
"learning_rate": 0.0001962558656223516,
"loss": 0.4729,
"step": 28
},
{
"epoch": 0.13425925925925927,
"grad_norm": 0.11175956577062607,
"learning_rate": 0.00019583128919679215,
"loss": 0.3614,
"step": 29
},
{
"epoch": 0.1388888888888889,
"grad_norm": 0.11471609771251678,
"learning_rate": 0.00019538442512436328,
"loss": 0.3938,
"step": 30
},
{
"epoch": 0.14351851851851852,
"grad_norm": 0.1153385117650032,
"learning_rate": 0.00019491537733300676,
"loss": 0.4317,
"step": 31
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.10725446045398712,
"learning_rate": 0.00019442425490996988,
"loss": 0.4493,
"step": 32
},
{
"epoch": 0.1527777777777778,
"grad_norm": 0.11065792292356491,
"learning_rate": 0.0001939111720764347,
"loss": 0.4004,
"step": 33
},
{
"epoch": 0.1574074074074074,
"grad_norm": 0.12286635488271713,
"learning_rate": 0.00019337624816095358,
"loss": 0.5065,
"step": 34
},
{
"epoch": 0.16203703703703703,
"grad_norm": 0.11002598702907562,
"learning_rate": 0.0001928196075716966,
"loss": 0.4308,
"step": 35
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.09922719746828079,
"learning_rate": 0.00019224137976751795,
"loss": 0.344,
"step": 36
},
{
"epoch": 0.1712962962962963,
"grad_norm": 0.1075102686882019,
"learning_rate": 0.00019164169922784716,
"loss": 0.4705,
"step": 37
},
{
"epoch": 0.17592592592592593,
"grad_norm": 0.11824163794517517,
"learning_rate": 0.00019102070542141328,
"loss": 0.3725,
"step": 38
},
{
"epoch": 0.18055555555555555,
"grad_norm": 0.11450410634279251,
"learning_rate": 0.0001903785427738082,
"loss": 0.3854,
"step": 39
},
{
"epoch": 0.18518518518518517,
"grad_norm": 0.13033916056156158,
"learning_rate": 0.00018971536063389744,
"loss": 0.4519,
"step": 40
},
{
"epoch": 0.18981481481481483,
"grad_norm": 0.12207476049661636,
"learning_rate": 0.00018903131323908578,
"loss": 0.3709,
"step": 41
},
{
"epoch": 0.19444444444444445,
"grad_norm": 0.10579460859298706,
"learning_rate": 0.00018832655967944607,
"loss": 0.3767,
"step": 42
},
{
"epoch": 0.19907407407407407,
"grad_norm": 0.12160316854715347,
"learning_rate": 0.00018760126386071935,
"loss": 0.4176,
"step": 43
},
{
"epoch": 0.2037037037037037,
"grad_norm": 0.11053751409053802,
"learning_rate": 0.0001868555944661949,
"loss": 0.4282,
"step": 44
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.11802718043327332,
"learning_rate": 0.00018608972491747944,
"loss": 0.3983,
"step": 45
},
{
"epoch": 0.21296296296296297,
"grad_norm": 0.1361842304468155,
"learning_rate": 0.00018530383333416418,
"loss": 0.4587,
"step": 46
},
{
"epoch": 0.2175925925925926,
"grad_norm": 0.1138835921883583,
"learning_rate": 0.00018449810249239902,
"loss": 0.4004,
"step": 47
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.11451338976621628,
"learning_rate": 0.0001836727197823842,
"loss": 0.4509,
"step": 48
},
{
"epoch": 0.22685185185185186,
"grad_norm": 0.12116890400648117,
"learning_rate": 0.00018282787716478868,
"loss": 0.4331,
"step": 49
},
{
"epoch": 0.23148148148148148,
"grad_norm": 0.11061301827430725,
"learning_rate": 0.00018196377112610526,
"loss": 0.367,
"step": 50
},
{
"epoch": 0.2361111111111111,
"grad_norm": 0.13139884173870087,
"learning_rate": 0.00018108060263295362,
"loss": 0.4631,
"step": 51
},
{
"epoch": 0.24074074074074073,
"grad_norm": 0.1242774948477745,
"learning_rate": 0.00018017857708534107,
"loss": 0.3843,
"step": 52
},
{
"epoch": 0.24537037037037038,
"grad_norm": 0.12658777832984924,
"learning_rate": 0.00017925790426889235,
"loss": 0.3888,
"step": 53
},
{
"epoch": 0.25,
"grad_norm": 0.12799492478370667,
"learning_rate": 0.00017831879830605937,
"loss": 0.3476,
"step": 54
},
{
"epoch": 0.25462962962962965,
"grad_norm": 0.13426098227500916,
"learning_rate": 0.00017736147760632248,
"loss": 0.4578,
"step": 55
},
{
"epoch": 0.25925925925925924,
"grad_norm": 0.12698565423488617,
"learning_rate": 0.0001763861648153945,
"loss": 0.4511,
"step": 56
},
{
"epoch": 0.2638888888888889,
"grad_norm": 0.10626720637083054,
"learning_rate": 0.00017539308676343973,
"loss": 0.3417,
"step": 57
},
{
"epoch": 0.26851851851851855,
"grad_norm": 0.11828374862670898,
"learning_rate": 0.0001743824744123196,
"loss": 0.431,
"step": 58
},
{
"epoch": 0.27314814814814814,
"grad_norm": 0.12974177300930023,
"learning_rate": 0.00017335456280187752,
"loss": 0.472,
"step": 59
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.11126148700714111,
"learning_rate": 0.00017230959099527512,
"loss": 0.3984,
"step": 60
},
{
"epoch": 0.2824074074074074,
"grad_norm": 0.1262761354446411,
"learning_rate": 0.0001712478020233932,
"loss": 0.409,
"step": 61
},
{
"epoch": 0.28703703703703703,
"grad_norm": 0.12113980203866959,
"learning_rate": 0.00017016944282830933,
"loss": 0.4328,
"step": 62
},
{
"epoch": 0.2916666666666667,
"grad_norm": 0.12315801531076431,
"learning_rate": 0.00016907476420586633,
"loss": 0.389,
"step": 63
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.1181735098361969,
"learning_rate": 0.00016796402074734402,
"loss": 0.4255,
"step": 64
},
{
"epoch": 0.30092592592592593,
"grad_norm": 0.12145352363586426,
"learning_rate": 0.00016683747078024888,
"loss": 0.3954,
"step": 65
},
{
"epoch": 0.3055555555555556,
"grad_norm": 0.11947956681251526,
"learning_rate": 0.00016569537630823383,
"loss": 0.3895,
"step": 66
},
{
"epoch": 0.3101851851851852,
"grad_norm": 0.12213783711194992,
"learning_rate": 0.0001645380029501641,
"loss": 0.4496,
"step": 67
},
{
"epoch": 0.3148148148148148,
"grad_norm": 0.11353302001953125,
"learning_rate": 0.00016336561987834153,
"loss": 0.4209,
"step": 68
},
{
"epoch": 0.3194444444444444,
"grad_norm": 0.10145577788352966,
"learning_rate": 0.00016217849975590272,
"loss": 0.375,
"step": 69
},
{
"epoch": 0.32407407407407407,
"grad_norm": 0.13219210505485535,
"learning_rate": 0.00016097691867340545,
"loss": 0.3499,
"step": 70
},
{
"epoch": 0.3287037037037037,
"grad_norm": 0.12123432010412216,
"learning_rate": 0.00015976115608461758,
"loss": 0.4793,
"step": 71
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.11631368845701218,
"learning_rate": 0.00015853149474152423,
"loss": 0.4143,
"step": 72
},
{
"epoch": 0.33796296296296297,
"grad_norm": 0.13316309452056885,
"learning_rate": 0.00015728822062856758,
"loss": 0.4258,
"step": 73
},
{
"epoch": 0.3425925925925926,
"grad_norm": 0.1209026649594307,
"learning_rate": 0.00015603162289613503,
"loss": 0.4195,
"step": 74
},
{
"epoch": 0.3472222222222222,
"grad_norm": 0.1319185197353363,
"learning_rate": 0.0001547619937933108,
"loss": 0.3667,
"step": 75
},
{
"epoch": 0.35185185185185186,
"grad_norm": 0.11245721578598022,
"learning_rate": 0.00015347962859990744,
"loss": 0.3914,
"step": 76
},
{
"epoch": 0.35648148148148145,
"grad_norm": 0.12136757373809814,
"learning_rate": 0.00015218482555779165,
"loss": 0.4091,
"step": 77
},
{
"epoch": 0.3611111111111111,
"grad_norm": 0.12102342396974564,
"learning_rate": 0.00015087788580152206,
"loss": 0.388,
"step": 78
},
{
"epoch": 0.36574074074074076,
"grad_norm": 0.12198664247989655,
"learning_rate": 0.00014955911328831355,
"loss": 0.4031,
"step": 79
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.12982136011123657,
"learning_rate": 0.00014822881472734562,
"loss": 0.409,
"step": 80
},
{
"epoch": 0.375,
"grad_norm": 0.134304016828537,
"learning_rate": 0.00014688729950843035,
"loss": 0.3815,
"step": 81
},
{
"epoch": 0.37962962962962965,
"grad_norm": 0.11940658837556839,
"learning_rate": 0.0001455348796300571,
"loss": 0.4119,
"step": 82
},
{
"epoch": 0.38425925925925924,
"grad_norm": 0.13899056613445282,
"learning_rate": 0.0001441718696268307,
"loss": 0.485,
"step": 83
},
{
"epoch": 0.3888888888888889,
"grad_norm": 0.12257847934961319,
"learning_rate": 0.0001427985864963193,
"loss": 0.3877,
"step": 84
},
{
"epoch": 0.39351851851851855,
"grad_norm": 0.11885726451873779,
"learning_rate": 0.00014141534962532984,
"loss": 0.4371,
"step": 85
},
{
"epoch": 0.39814814814814814,
"grad_norm": 0.12147360295057297,
"learning_rate": 0.0001400224807156278,
"loss": 0.4201,
"step": 86
},
{
"epoch": 0.4027777777777778,
"grad_norm": 0.1597365140914917,
"learning_rate": 0.0001386203037091183,
"loss": 0.3968,
"step": 87
},
{
"epoch": 0.4074074074074074,
"grad_norm": 0.13029275834560394,
"learning_rate": 0.00013720914471250644,
"loss": 0.3683,
"step": 88
},
{
"epoch": 0.41203703703703703,
"grad_norm": 0.1297105997800827,
"learning_rate": 0.0001357893319214542,
"loss": 0.4314,
"step": 89
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.127646341919899,
"learning_rate": 0.00013436119554425133,
"loss": 0.4263,
"step": 90
},
{
"epoch": 0.4212962962962963,
"grad_norm": 0.16037966310977936,
"learning_rate": 0.00013292506772501819,
"loss": 0.3966,
"step": 91
},
{
"epoch": 0.42592592592592593,
"grad_norm": 0.13694526255130768,
"learning_rate": 0.0001314812824664585,
"loss": 0.4312,
"step": 92
},
{
"epoch": 0.4305555555555556,
"grad_norm": 0.138661727309227,
"learning_rate": 0.0001300301755521798,
"loss": 0.4041,
"step": 93
},
{
"epoch": 0.4351851851851852,
"grad_norm": 0.1248079240322113,
"learning_rate": 0.0001285720844685996,
"loss": 0.4191,
"step": 94
},
{
"epoch": 0.4398148148148148,
"grad_norm": 0.14905443787574768,
"learning_rate": 0.00012710734832645557,
"loss": 0.4246,
"step": 95
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.1143692284822464,
"learning_rate": 0.00012563630778193805,
"loss": 0.392,
"step": 96
},
{
"epoch": 0.44907407407407407,
"grad_norm": 0.13354505598545074,
"learning_rate": 0.00012415930495746302,
"loss": 0.3888,
"step": 97
},
{
"epoch": 0.4537037037037037,
"grad_norm": 0.12897755205631256,
"learning_rate": 0.00012267668336210413,
"loss": 0.4005,
"step": 98
},
{
"epoch": 0.4583333333333333,
"grad_norm": 0.13209104537963867,
"learning_rate": 0.00012118878781170214,
"loss": 0.4388,
"step": 99
},
{
"epoch": 0.46296296296296297,
"grad_norm": 0.1058369129896164,
"learning_rate": 0.00011969596434867063,
"loss": 0.3768,
"step": 100
},
{
"epoch": 0.4675925925925926,
"grad_norm": 0.1227889209985733,
"learning_rate": 0.00011819856016151615,
"loss": 0.4185,
"step": 101
},
{
"epoch": 0.4722222222222222,
"grad_norm": 0.12500321865081787,
"learning_rate": 0.00011669692350409223,
"loss": 0.4384,
"step": 102
},
{
"epoch": 0.47685185185185186,
"grad_norm": 0.11348072439432144,
"learning_rate": 0.00011519140361460509,
"loss": 0.3781,
"step": 103
},
{
"epoch": 0.48148148148148145,
"grad_norm": 0.10950199514627457,
"learning_rate": 0.00011368235063439103,
"loss": 0.4361,
"step": 104
},
{
"epoch": 0.4861111111111111,
"grad_norm": 0.1136554479598999,
"learning_rate": 0.00011217011552648316,
"loss": 0.3969,
"step": 105
},
{
"epoch": 0.49074074074074076,
"grad_norm": 0.11277665942907333,
"learning_rate": 0.00011065504999398762,
"loss": 0.3661,
"step": 106
},
{
"epoch": 0.49537037037037035,
"grad_norm": 0.12983547151088715,
"learning_rate": 0.00010913750639828711,
"loss": 0.4564,
"step": 107
},
{
"epoch": 0.5,
"grad_norm": 0.11856082826852798,
"learning_rate": 0.00010761783767709182,
"loss": 0.3701,
"step": 108
},
{
"epoch": 0.5,
"eval_loss": 0.4069269299507141,
"eval_runtime": 16.2051,
"eval_samples_per_second": 22.462,
"eval_steps_per_second": 2.839,
"step": 108
},
{
"epoch": 0.5046296296296297,
"grad_norm": 0.1263894885778427,
"learning_rate": 0.00010609639726235591,
"loss": 0.3944,
"step": 109
},
{
"epoch": 0.5092592592592593,
"grad_norm": 0.11527308821678162,
"learning_rate": 0.00010457353899807946,
"loss": 0.3595,
"step": 110
},
{
"epoch": 0.5138888888888888,
"grad_norm": 0.13389469683170319,
"learning_rate": 0.00010304961705801415,
"loss": 0.3942,
"step": 111
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.13051699101924896,
"learning_rate": 0.0001015249858632926,
"loss": 0.4191,
"step": 112
},
{
"epoch": 0.5231481481481481,
"grad_norm": 0.11771571636199951,
"learning_rate": 0.0001,
"loss": 0.3562,
"step": 113
},
{
"epoch": 0.5277777777777778,
"grad_norm": 0.12572339177131653,
"learning_rate": 9.847501413670742e-05,
"loss": 0.4627,
"step": 114
},
{
"epoch": 0.5324074074074074,
"grad_norm": 0.11780008673667908,
"learning_rate": 9.695038294198589e-05,
"loss": 0.3913,
"step": 115
},
{
"epoch": 0.5370370370370371,
"grad_norm": 0.12987138330936432,
"learning_rate": 9.542646100192056e-05,
"loss": 0.3927,
"step": 116
},
{
"epoch": 0.5416666666666666,
"grad_norm": 0.11142438650131226,
"learning_rate": 9.390360273764411e-05,
"loss": 0.3878,
"step": 117
},
{
"epoch": 0.5462962962962963,
"grad_norm": 0.12454476207494736,
"learning_rate": 9.238216232290822e-05,
"loss": 0.3821,
"step": 118
},
{
"epoch": 0.5509259259259259,
"grad_norm": 0.1253252476453781,
"learning_rate": 9.08624936017129e-05,
"loss": 0.3993,
"step": 119
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.12194947898387909,
"learning_rate": 8.93449500060124e-05,
"loss": 0.3954,
"step": 120
},
{
"epoch": 0.5601851851851852,
"grad_norm": 0.1179381012916565,
"learning_rate": 8.782988447351685e-05,
"loss": 0.3478,
"step": 121
},
{
"epoch": 0.5648148148148148,
"grad_norm": 0.1392665058374405,
"learning_rate": 8.6317649365609e-05,
"loss": 0.4455,
"step": 122
},
{
"epoch": 0.5694444444444444,
"grad_norm": 0.12414383888244629,
"learning_rate": 8.480859638539492e-05,
"loss": 0.3763,
"step": 123
},
{
"epoch": 0.5740740740740741,
"grad_norm": 0.1223541721701622,
"learning_rate": 8.33030764959078e-05,
"loss": 0.3973,
"step": 124
},
{
"epoch": 0.5787037037037037,
"grad_norm": 0.11729186028242111,
"learning_rate": 8.180143983848387e-05,
"loss": 0.3639,
"step": 125
},
{
"epoch": 0.5833333333333334,
"grad_norm": 0.13269482553005219,
"learning_rate": 8.030403565132942e-05,
"loss": 0.4481,
"step": 126
},
{
"epoch": 0.5879629629629629,
"grad_norm": 0.12841080129146576,
"learning_rate": 7.881121218829787e-05,
"loss": 0.4442,
"step": 127
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.11603926122188568,
"learning_rate": 7.732331663789592e-05,
"loss": 0.447,
"step": 128
},
{
"epoch": 0.5972222222222222,
"grad_norm": 0.12863722443580627,
"learning_rate": 7.584069504253703e-05,
"loss": 0.3837,
"step": 129
},
{
"epoch": 0.6018518518518519,
"grad_norm": 0.1303769052028656,
"learning_rate": 7.436369221806201e-05,
"loss": 0.4251,
"step": 130
},
{
"epoch": 0.6064814814814815,
"grad_norm": 0.12298469245433807,
"learning_rate": 7.289265167354449e-05,
"loss": 0.3873,
"step": 131
},
{
"epoch": 0.6111111111111112,
"grad_norm": 0.13410842418670654,
"learning_rate": 7.142791553140045e-05,
"loss": 0.3767,
"step": 132
},
{
"epoch": 0.6157407407407407,
"grad_norm": 0.13108515739440918,
"learning_rate": 6.996982444782021e-05,
"loss": 0.3785,
"step": 133
},
{
"epoch": 0.6203703703703703,
"grad_norm": 0.12329968065023422,
"learning_rate": 6.851871753354153e-05,
"loss": 0.4202,
"step": 134
},
{
"epoch": 0.625,
"grad_norm": 0.12874025106430054,
"learning_rate": 6.707493227498188e-05,
"loss": 0.4251,
"step": 135
},
{
"epoch": 0.6296296296296297,
"grad_norm": 0.13529813289642334,
"learning_rate": 6.563880445574873e-05,
"loss": 0.4349,
"step": 136
},
{
"epoch": 0.6342592592592593,
"grad_norm": 0.12647311389446259,
"learning_rate": 6.421066807854584e-05,
"loss": 0.3653,
"step": 137
},
{
"epoch": 0.6388888888888888,
"grad_norm": 0.15596190094947815,
"learning_rate": 6.279085528749359e-05,
"loss": 0.4317,
"step": 138
},
{
"epoch": 0.6435185185185185,
"grad_norm": 0.11850515007972717,
"learning_rate": 6.137969629088173e-05,
"loss": 0.344,
"step": 139
},
{
"epoch": 0.6481481481481481,
"grad_norm": 0.12194859981536865,
"learning_rate": 5.9977519284372194e-05,
"loss": 0.3983,
"step": 140
},
{
"epoch": 0.6527777777777778,
"grad_norm": 0.13437777757644653,
"learning_rate": 5.8584650374670135e-05,
"loss": 0.4072,
"step": 141
},
{
"epoch": 0.6574074074074074,
"grad_norm": 0.13930705189704895,
"learning_rate": 5.720141350368072e-05,
"loss": 0.3578,
"step": 142
},
{
"epoch": 0.6620370370370371,
"grad_norm": 0.13413161039352417,
"learning_rate": 5.582813037316927e-05,
"loss": 0.4193,
"step": 143
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.11998701095581055,
"learning_rate": 5.446512036994287e-05,
"loss": 0.383,
"step": 144
},
{
"epoch": 0.6712962962962963,
"grad_norm": 0.14133970439434052,
"learning_rate": 5.3112700491569666e-05,
"loss": 0.421,
"step": 145
},
{
"epoch": 0.6759259259259259,
"grad_norm": 0.13386432826519012,
"learning_rate": 5.177118527265438e-05,
"loss": 0.4164,
"step": 146
},
{
"epoch": 0.6805555555555556,
"grad_norm": 0.12818817794322968,
"learning_rate": 5.044088671168644e-05,
"loss": 0.3719,
"step": 147
},
{
"epoch": 0.6851851851851852,
"grad_norm": 0.13193628191947937,
"learning_rate": 4.912211419847794e-05,
"loss": 0.3729,
"step": 148
},
{
"epoch": 0.6898148148148148,
"grad_norm": 0.1342621147632599,
"learning_rate": 4.7815174442208354e-05,
"loss": 0.4562,
"step": 149
},
{
"epoch": 0.6944444444444444,
"grad_norm": 0.13930144906044006,
"learning_rate": 4.652037140009259e-05,
"loss": 0.4098,
"step": 150
},
{
"epoch": 0.6990740740740741,
"grad_norm": 0.15138937532901764,
"learning_rate": 4.523800620668921e-05,
"loss": 0.4521,
"step": 151
},
{
"epoch": 0.7037037037037037,
"grad_norm": 0.12120551615953445,
"learning_rate": 4.3968377103865024e-05,
"loss": 0.4156,
"step": 152
},
{
"epoch": 0.7083333333333334,
"grad_norm": 0.1313461810350418,
"learning_rate": 4.271177937143245e-05,
"loss": 0.4086,
"step": 153
},
{
"epoch": 0.7129629629629629,
"grad_norm": 0.12950317561626434,
"learning_rate": 4.146850525847579e-05,
"loss": 0.3993,
"step": 154
},
{
"epoch": 0.7175925925925926,
"grad_norm": 0.11331982165575027,
"learning_rate": 4.023884391538244e-05,
"loss": 0.3846,
"step": 155
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.12068940699100494,
"learning_rate": 3.902308132659457e-05,
"loss": 0.356,
"step": 156
},
{
"epoch": 0.7268518518518519,
"grad_norm": 0.12462019920349121,
"learning_rate": 3.7821500244097274e-05,
"loss": 0.4099,
"step": 157
},
{
"epoch": 0.7314814814814815,
"grad_norm": 0.12054712325334549,
"learning_rate": 3.663438012165848e-05,
"loss": 0.4046,
"step": 158
},
{
"epoch": 0.7361111111111112,
"grad_norm": 0.1289607435464859,
"learning_rate": 3.5461997049835914e-05,
"loss": 0.37,
"step": 159
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.14201810956001282,
"learning_rate": 3.430462369176619e-05,
"loss": 0.407,
"step": 160
},
{
"epoch": 0.7453703703703703,
"grad_norm": 0.1232706606388092,
"learning_rate": 3.316252921975116e-05,
"loss": 0.3583,
"step": 161
},
{
"epoch": 0.75,
"grad_norm": 0.12519972026348114,
"learning_rate": 3.203597925265598e-05,
"loss": 0.3278,
"step": 162
},
{
"epoch": 0.7546296296296297,
"grad_norm": 0.12523451447486877,
"learning_rate": 3.092523579413372e-05,
"loss": 0.3919,
"step": 163
},
{
"epoch": 0.7592592592592593,
"grad_norm": 0.12420208007097244,
"learning_rate": 2.98305571716907e-05,
"loss": 0.3761,
"step": 164
},
{
"epoch": 0.7638888888888888,
"grad_norm": 0.13486765325069427,
"learning_rate": 2.875219797660681e-05,
"loss": 0.3933,
"step": 165
},
{
"epoch": 0.7685185185185185,
"grad_norm": 0.1384674608707428,
"learning_rate": 2.769040900472488e-05,
"loss": 0.4366,
"step": 166
},
{
"epoch": 0.7731481481481481,
"grad_norm": 0.1498284786939621,
"learning_rate": 2.6645437198122502e-05,
"loss": 0.4676,
"step": 167
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.1257813721895218,
"learning_rate": 2.5617525587680402e-05,
"loss": 0.3873,
"step": 168
},
{
"epoch": 0.7824074074074074,
"grad_norm": 0.12799781560897827,
"learning_rate": 2.4606913236560282e-05,
"loss": 0.3943,
"step": 169
},
{
"epoch": 0.7870370370370371,
"grad_norm": 0.12439963966608047,
"learning_rate": 2.3613835184605525e-05,
"loss": 0.394,
"step": 170
},
{
"epoch": 0.7916666666666666,
"grad_norm": 0.11842609196901321,
"learning_rate": 2.2638522393677563e-05,
"loss": 0.3348,
"step": 171
},
{
"epoch": 0.7962962962962963,
"grad_norm": 0.14370441436767578,
"learning_rate": 2.1681201693940668e-05,
"loss": 0.4303,
"step": 172
},
{
"epoch": 0.8009259259259259,
"grad_norm": 0.137470081448555,
"learning_rate": 2.074209573110769e-05,
"loss": 0.3785,
"step": 173
},
{
"epoch": 0.8055555555555556,
"grad_norm": 0.1420845240354538,
"learning_rate": 1.982142291465896e-05,
"loss": 0.3943,
"step": 174
},
{
"epoch": 0.8101851851851852,
"grad_norm": 0.15280939638614655,
"learning_rate": 1.891939736704641e-05,
"loss": 0.4234,
"step": 175
},
{
"epoch": 0.8148148148148148,
"grad_norm": 0.12755198776721954,
"learning_rate": 1.8036228873894746e-05,
"loss": 0.3992,
"step": 176
},
{
"epoch": 0.8194444444444444,
"grad_norm": 0.12794767320156097,
"learning_rate": 1.7172122835211337e-05,
"loss": 0.3598,
"step": 177
},
{
"epoch": 0.8240740740740741,
"grad_norm": 0.15256819128990173,
"learning_rate": 1.6327280217615792e-05,
"loss": 0.3803,
"step": 178
},
{
"epoch": 0.8287037037037037,
"grad_norm": 0.1349879652261734,
"learning_rate": 1.5501897507601014e-05,
"loss": 0.4246,
"step": 179
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.13489031791687012,
"learning_rate": 1.4696166665835853e-05,
"loss": 0.3833,
"step": 180
},
{
"epoch": 0.8379629629629629,
"grad_norm": 0.13907630741596222,
"learning_rate": 1.3910275082520573e-05,
"loss": 0.3788,
"step": 181
},
{
"epoch": 0.8425925925925926,
"grad_norm": 0.13649378716945648,
"learning_rate": 1.3144405533805138e-05,
"loss": 0.4687,
"step": 182
},
{
"epoch": 0.8472222222222222,
"grad_norm": 0.1456245630979538,
"learning_rate": 1.2398736139280686e-05,
"loss": 0.3921,
"step": 183
},
{
"epoch": 0.8518518518518519,
"grad_norm": 0.12474421411752701,
"learning_rate": 1.167344032055394e-05,
"loss": 0.4069,
"step": 184
},
{
"epoch": 0.8564814814814815,
"grad_norm": 0.15731000900268555,
"learning_rate": 1.096868676091425e-05,
"loss": 0.4182,
"step": 185
},
{
"epoch": 0.8611111111111112,
"grad_norm": 0.12348570674657822,
"learning_rate": 1.02846393661026e-05,
"loss": 0.2944,
"step": 186
},
{
"epoch": 0.8657407407407407,
"grad_norm": 0.14169996976852417,
"learning_rate": 9.62145722619182e-06,
"loss": 0.4497,
"step": 187
},
{
"epoch": 0.8703703703703703,
"grad_norm": 0.14467938244342804,
"learning_rate": 8.979294578586738e-06,
"loss": 0.3987,
"step": 188
},
{
"epoch": 0.875,
"grad_norm": 0.13816998898983002,
"learning_rate": 8.358300772152849e-06,
"loss": 0.4331,
"step": 189
},
{
"epoch": 0.8796296296296297,
"grad_norm": 0.12907293438911438,
"learning_rate": 7.758620232482084e-06,
"loss": 0.408,
"step": 190
},
{
"epoch": 0.8842592592592593,
"grad_norm": 0.13425540924072266,
"learning_rate": 7.180392428303395e-06,
"loss": 0.41,
"step": 191
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.12629586458206177,
"learning_rate": 6.623751839046455e-06,
"loss": 0.383,
"step": 192
},
{
"epoch": 0.8935185185185185,
"grad_norm": 0.12552037835121155,
"learning_rate": 6.0888279235653214e-06,
"loss": 0.3999,
"step": 193
},
{
"epoch": 0.8981481481481481,
"grad_norm": 0.1186065599322319,
"learning_rate": 5.575745090030138e-06,
"loss": 0.3515,
"step": 194
},
{
"epoch": 0.9027777777777778,
"grad_norm": 0.12220544368028641,
"learning_rate": 5.084622666993244e-06,
"loss": 0.3457,
"step": 195
},
{
"epoch": 0.9074074074074074,
"grad_norm": 0.12332714349031448,
"learning_rate": 4.61557487563673e-06,
"loss": 0.3682,
"step": 196
},
{
"epoch": 0.9120370370370371,
"grad_norm": 0.14207704365253448,
"learning_rate": 4.168710803207865e-06,
"loss": 0.4121,
"step": 197
},
{
"epoch": 0.9166666666666666,
"grad_norm": 0.15227369964122772,
"learning_rate": 3.7441343776484117e-06,
"loss": 0.4389,
"step": 198
},
{
"epoch": 0.9212962962962963,
"grad_norm": 0.11998874694108963,
"learning_rate": 3.3419443434240083e-06,
"loss": 0.387,
"step": 199
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.12720437347888947,
"learning_rate": 2.9622342385589254e-06,
"loss": 0.4549,
"step": 200
},
{
"epoch": 0.9305555555555556,
"grad_norm": 0.11992080509662628,
"learning_rate": 2.6050923728818787e-06,
"loss": 0.3897,
"step": 201
},
{
"epoch": 0.9351851851851852,
"grad_norm": 0.13751375675201416,
"learning_rate": 2.2706018074875045e-06,
"loss": 0.446,
"step": 202
},
{
"epoch": 0.9398148148148148,
"grad_norm": 0.13101902604103088,
"learning_rate": 1.9588403354188325e-06,
"loss": 0.3896,
"step": 203
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.16959664225578308,
"learning_rate": 1.6698804635747579e-06,
"loss": 0.3973,
"step": 204
},
{
"epoch": 0.9490740740740741,
"grad_norm": 0.12849944829940796,
"learning_rate": 1.4037893958469995e-06,
"loss": 0.4292,
"step": 205
},
{
"epoch": 0.9537037037037037,
"grad_norm": 0.1358756721019745,
"learning_rate": 1.160629017490389e-06,
"loss": 0.4263,
"step": 206
},
{
"epoch": 0.9583333333333334,
"grad_norm": 0.13140828907489777,
"learning_rate": 9.404558807301067e-07,
"loss": 0.4185,
"step": 207
},
{
"epoch": 0.9629629629629629,
"grad_norm": 0.11467701196670532,
"learning_rate": 7.433211916092142e-07,
"loss": 0.3713,
"step": 208
},
{
"epoch": 0.9675925925925926,
"grad_norm": 0.12362132221460342,
"learning_rate": 5.69270798079613e-07,
"loss": 0.3453,
"step": 209
},
{
"epoch": 0.9722222222222222,
"grad_norm": 0.12156709283590317,
"learning_rate": 4.1834517933907467e-07,
"loss": 0.3788,
"step": 210
},
{
"epoch": 0.9768518518518519,
"grad_norm": 0.12119793891906738,
"learning_rate": 2.9057943641693785e-07,
"loss": 0.3662,
"step": 211
},
{
"epoch": 0.9814814814814815,
"grad_norm": 0.12225915491580963,
"learning_rate": 1.8600328401061629e-07,
"loss": 0.3854,
"step": 212
},
{
"epoch": 0.9861111111111112,
"grad_norm": 0.1316448301076889,
"learning_rate": 1.0464104357477133e-07,
"loss": 0.3872,
"step": 213
},
{
"epoch": 0.9907407407407407,
"grad_norm": 0.11485154181718826,
"learning_rate": 4.651163766484779e-08,
"loss": 0.3017,
"step": 214
},
{
"epoch": 0.9953703703703703,
"grad_norm": 0.11364009231328964,
"learning_rate": 1.1628585536216374e-08,
"loss": 0.3339,
"step": 215
},
{
"epoch": 1.0,
"grad_norm": 0.13067637383937836,
"learning_rate": 0.0,
"loss": 0.3867,
"step": 216
},
{
"epoch": 1.0,
"eval_loss": 0.39731329679489136,
"eval_runtime": 16.3193,
"eval_samples_per_second": 22.305,
"eval_steps_per_second": 2.819,
"step": 216
}
],
"logging_steps": 1,
"max_steps": 216,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.356226766877491e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}