{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 126,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.015873015873015872,
"grad_norm": 21.6184287842035,
"learning_rate": 9.998445910004082e-06,
"loss": 1.0292,
"step": 1
},
{
"epoch": 0.031746031746031744,
"grad_norm": 8.601431479803024,
"learning_rate": 9.993784606094612e-06,
"loss": 0.6819,
"step": 2
},
{
"epoch": 0.047619047619047616,
"grad_norm": 5.469957179017569,
"learning_rate": 9.986018985905901e-06,
"loss": 0.5248,
"step": 3
},
{
"epoch": 0.06349206349206349,
"grad_norm": 3.6094026681382103,
"learning_rate": 9.975153876827008e-06,
"loss": 0.4705,
"step": 4
},
{
"epoch": 0.07936507936507936,
"grad_norm": 2.7105363679691137,
"learning_rate": 9.961196033000862e-06,
"loss": 0.4175,
"step": 5
},
{
"epoch": 0.09523809523809523,
"grad_norm": 1.7282645378154455,
"learning_rate": 9.944154131125643e-06,
"loss": 0.3389,
"step": 6
},
{
"epoch": 0.1111111111111111,
"grad_norm": 1.4989780380182496,
"learning_rate": 9.924038765061042e-06,
"loss": 0.3454,
"step": 7
},
{
"epoch": 0.12698412698412698,
"grad_norm": 1.814894145492994,
"learning_rate": 9.900862439242719e-06,
"loss": 0.3349,
"step": 8
},
{
"epoch": 0.14285714285714285,
"grad_norm": 1.6168389000920744,
"learning_rate": 9.874639560909118e-06,
"loss": 0.3146,
"step": 9
},
{
"epoch": 0.15873015873015872,
"grad_norm": 1.496177983437817,
"learning_rate": 9.84538643114539e-06,
"loss": 0.3154,
"step": 10
},
{
"epoch": 0.1746031746031746,
"grad_norm": 1.4018373194945533,
"learning_rate": 9.81312123475006e-06,
"loss": 0.3093,
"step": 11
},
{
"epoch": 0.19047619047619047,
"grad_norm": 1.2189366436717317,
"learning_rate": 9.777864028930705e-06,
"loss": 0.2874,
"step": 12
},
{
"epoch": 0.20634920634920634,
"grad_norm": 1.131006477829288,
"learning_rate": 9.73963673083566e-06,
"loss": 0.2495,
"step": 13
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.976593304086101,
"learning_rate": 9.698463103929542e-06,
"loss": 0.2254,
"step": 14
},
{
"epoch": 0.23809523809523808,
"grad_norm": 1.0332208504203466,
"learning_rate": 9.654368743221022e-06,
"loss": 0.2349,
"step": 15
},
{
"epoch": 0.25396825396825395,
"grad_norm": 0.9637515709324546,
"learning_rate": 9.60738105935204e-06,
"loss": 0.2353,
"step": 16
},
{
"epoch": 0.2698412698412698,
"grad_norm": 1.0492641721846097,
"learning_rate": 9.557529261558367e-06,
"loss": 0.2428,
"step": 17
},
{
"epoch": 0.2857142857142857,
"grad_norm": 1.0573797101419928,
"learning_rate": 9.504844339512096e-06,
"loss": 0.2493,
"step": 18
},
{
"epoch": 0.30158730158730157,
"grad_norm": 1.3099835285002472,
"learning_rate": 9.449359044057344e-06,
"loss": 0.2556,
"step": 19
},
{
"epoch": 0.31746031746031744,
"grad_norm": 0.9689615323595119,
"learning_rate": 9.391107866851143e-06,
"loss": 0.2376,
"step": 20
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.9900128832309621,
"learning_rate": 9.330127018922195e-06,
"loss": 0.2202,
"step": 21
},
{
"epoch": 0.3492063492063492,
"grad_norm": 1.2030701134015975,
"learning_rate": 9.266454408160779e-06,
"loss": 0.2927,
"step": 22
},
{
"epoch": 0.36507936507936506,
"grad_norm": 1.225303742007084,
"learning_rate": 9.200129615753858e-06,
"loss": 0.2665,
"step": 23
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.9984086974610314,
"learning_rate": 9.131193871579975e-06,
"loss": 0.253,
"step": 24
},
{
"epoch": 0.3968253968253968,
"grad_norm": 1.0262210869927986,
"learning_rate": 9.059690028579285e-06,
"loss": 0.2289,
"step": 25
},
{
"epoch": 0.4126984126984127,
"grad_norm": 1.0690763277796091,
"learning_rate": 8.985662536114614e-06,
"loss": 0.2427,
"step": 26
},
{
"epoch": 0.42857142857142855,
"grad_norm": 1.2004204516591928,
"learning_rate": 8.90915741234015e-06,
"loss": 0.2758,
"step": 27
},
{
"epoch": 0.4444444444444444,
"grad_norm": 1.0209058239594098,
"learning_rate": 8.83022221559489e-06,
"loss": 0.2045,
"step": 28
},
{
"epoch": 0.4603174603174603,
"grad_norm": 1.004536320221334,
"learning_rate": 8.748906014838672e-06,
"loss": 0.2603,
"step": 29
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.9233789155952414,
"learning_rate": 8.665259359149132e-06,
"loss": 0.2245,
"step": 30
},
{
"epoch": 0.49206349206349204,
"grad_norm": 0.8662558058666597,
"learning_rate": 8.579334246298593e-06,
"loss": 0.2065,
"step": 31
},
{
"epoch": 0.5079365079365079,
"grad_norm": 0.8303475295817994,
"learning_rate": 8.491184090430365e-06,
"loss": 0.1846,
"step": 32
},
{
"epoch": 0.5238095238095238,
"grad_norm": 1.0032917558507295,
"learning_rate": 8.400863688854598e-06,
"loss": 0.2339,
"step": 33
},
{
"epoch": 0.5396825396825397,
"grad_norm": 0.9186657424030213,
"learning_rate": 8.308429187984298e-06,
"loss": 0.2161,
"step": 34
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.95604352072303,
"learning_rate": 8.213938048432697e-06,
"loss": 0.2509,
"step": 35
},
{
"epoch": 0.5714285714285714,
"grad_norm": 1.12366344336072,
"learning_rate": 8.117449009293668e-06,
"loss": 0.2574,
"step": 36
},
{
"epoch": 0.5873015873015873,
"grad_norm": 0.9771083031684482,
"learning_rate": 8.019022051627387e-06,
"loss": 0.224,
"step": 37
},
{
"epoch": 0.6031746031746031,
"grad_norm": 1.0461723224002608,
"learning_rate": 7.918718361173951e-06,
"loss": 0.2369,
"step": 38
},
{
"epoch": 0.6190476190476191,
"grad_norm": 0.9668140733421602,
"learning_rate": 7.81660029031811e-06,
"loss": 0.2349,
"step": 39
},
{
"epoch": 0.6349206349206349,
"grad_norm": 0.7893250018326969,
"learning_rate": 7.712731319328798e-06,
"loss": 0.1863,
"step": 40
},
{
"epoch": 0.6507936507936508,
"grad_norm": 1.1300457267970305,
"learning_rate": 7.607176016897491e-06,
"loss": 0.2401,
"step": 41
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.1202488928160232,
"learning_rate": 7.500000000000001e-06,
"loss": 0.2712,
"step": 42
},
{
"epoch": 0.6825396825396826,
"grad_norm": 0.9622857687136114,
"learning_rate": 7.391269893106592e-06,
"loss": 0.2279,
"step": 43
},
{
"epoch": 0.6984126984126984,
"grad_norm": 0.9453787290513783,
"learning_rate": 7.281053286765816e-06,
"loss": 0.2116,
"step": 44
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.0065212623717958,
"learning_rate": 7.169418695587791e-06,
"loss": 0.2154,
"step": 45
},
{
"epoch": 0.7301587301587301,
"grad_norm": 0.9685709952407934,
"learning_rate": 7.056435515653059e-06,
"loss": 0.2244,
"step": 46
},
{
"epoch": 0.746031746031746,
"grad_norm": 0.8538765534238707,
"learning_rate": 6.942173981373474e-06,
"loss": 0.2099,
"step": 47
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.7662959001987232,
"learning_rate": 6.8267051218319766e-06,
"loss": 0.1816,
"step": 48
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.9699101400665243,
"learning_rate": 6.710100716628345e-06,
"loss": 0.215,
"step": 49
},
{
"epoch": 0.7936507936507936,
"grad_norm": 0.9780481387815342,
"learning_rate": 6.592433251258423e-06,
"loss": 0.2415,
"step": 50
},
{
"epoch": 0.8095238095238095,
"grad_norm": 0.9821592890808564,
"learning_rate": 6.473775872054522e-06,
"loss": 0.2149,
"step": 51
},
{
"epoch": 0.8253968253968254,
"grad_norm": 0.9108391743544639,
"learning_rate": 6.354202340715027e-06,
"loss": 0.2157,
"step": 52
},
{
"epoch": 0.8412698412698413,
"grad_norm": 0.9028880827265745,
"learning_rate": 6.233786988451468e-06,
"loss": 0.1887,
"step": 53
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.907744102945536,
"learning_rate": 6.112604669781572e-06,
"loss": 0.2029,
"step": 54
},
{
"epoch": 0.873015873015873,
"grad_norm": 0.8630697187332674,
"learning_rate": 5.990730715996989e-06,
"loss": 0.1846,
"step": 55
},
{
"epoch": 0.8888888888888888,
"grad_norm": 1.0791698951759934,
"learning_rate": 5.8682408883346535e-06,
"loss": 0.2298,
"step": 56
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.7972956766045064,
"learning_rate": 5.745211330880872e-06,
"loss": 0.1947,
"step": 57
},
{
"epoch": 0.9206349206349206,
"grad_norm": 0.9069175475481852,
"learning_rate": 5.621718523237427e-06,
"loss": 0.2024,
"step": 58
},
{
"epoch": 0.9365079365079365,
"grad_norm": 0.770710993604582,
"learning_rate": 5.497839232979084e-06,
"loss": 0.1757,
"step": 59
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.9683497718490081,
"learning_rate": 5.373650467932122e-06,
"loss": 0.2195,
"step": 60
},
{
"epoch": 0.9682539682539683,
"grad_norm": 0.7462995956460088,
"learning_rate": 5.249229428303486e-06,
"loss": 0.1779,
"step": 61
},
{
"epoch": 0.9841269841269841,
"grad_norm": 0.8904306989698368,
"learning_rate": 5.1246534586903655e-06,
"loss": 0.2167,
"step": 62
},
{
"epoch": 1.0,
"grad_norm": 0.9416099214587527,
"learning_rate": 5e-06,
"loss": 0.2095,
"step": 63
},
{
"epoch": 1.0158730158730158,
"grad_norm": 0.9128850032827356,
"learning_rate": 4.875346541309637e-06,
"loss": 0.1671,
"step": 64
},
{
"epoch": 1.0317460317460316,
"grad_norm": 0.9579574105633983,
"learning_rate": 4.750770571696514e-06,
"loss": 0.1898,
"step": 65
},
{
"epoch": 1.0476190476190477,
"grad_norm": 0.8937640536093561,
"learning_rate": 4.626349532067879e-06,
"loss": 0.1779,
"step": 66
},
{
"epoch": 1.0634920634920635,
"grad_norm": 0.9411391259660539,
"learning_rate": 4.502160767020918e-06,
"loss": 0.1791,
"step": 67
},
{
"epoch": 1.0793650793650793,
"grad_norm": 0.7882163925059721,
"learning_rate": 4.3782814767625755e-06,
"loss": 0.1664,
"step": 68
},
{
"epoch": 1.0952380952380953,
"grad_norm": 0.883636646772819,
"learning_rate": 4.254788669119127e-06,
"loss": 0.1826,
"step": 69
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.9562492274488318,
"learning_rate": 4.131759111665349e-06,
"loss": 0.1703,
"step": 70
},
{
"epoch": 1.126984126984127,
"grad_norm": 0.8441572816576932,
"learning_rate": 4.009269284003014e-06,
"loss": 0.1587,
"step": 71
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.8342750597761643,
"learning_rate": 3.887395330218429e-06,
"loss": 0.157,
"step": 72
},
{
"epoch": 1.1587301587301586,
"grad_norm": 1.0474366065218264,
"learning_rate": 3.7662130115485317e-06,
"loss": 0.2049,
"step": 73
},
{
"epoch": 1.1746031746031746,
"grad_norm": 0.8099146597063264,
"learning_rate": 3.6457976592849753e-06,
"loss": 0.1397,
"step": 74
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.8004305804001169,
"learning_rate": 3.526224127945479e-06,
"loss": 0.1425,
"step": 75
},
{
"epoch": 1.2063492063492063,
"grad_norm": 0.8905469112138509,
"learning_rate": 3.4075667487415785e-06,
"loss": 0.1578,
"step": 76
},
{
"epoch": 1.2222222222222223,
"grad_norm": 1.026845210909547,
"learning_rate": 3.289899283371657e-06,
"loss": 0.1679,
"step": 77
},
{
"epoch": 1.2380952380952381,
"grad_norm": 0.8561078638160003,
"learning_rate": 3.173294878168025e-06,
"loss": 0.1544,
"step": 78
},
{
"epoch": 1.253968253968254,
"grad_norm": 0.9201902864489047,
"learning_rate": 3.057826018626527e-06,
"loss": 0.1602,
"step": 79
},
{
"epoch": 1.2698412698412698,
"grad_norm": 0.9165276372228681,
"learning_rate": 2.9435644843469434e-06,
"loss": 0.1658,
"step": 80
},
{
"epoch": 1.2857142857142856,
"grad_norm": 0.862365519780657,
"learning_rate": 2.83058130441221e-06,
"loss": 0.1463,
"step": 81
},
{
"epoch": 1.3015873015873016,
"grad_norm": 0.9219096818896111,
"learning_rate": 2.718946713234185e-06,
"loss": 0.157,
"step": 82
},
{
"epoch": 1.3174603174603174,
"grad_norm": 0.9403980381086622,
"learning_rate": 2.608730106893411e-06,
"loss": 0.1736,
"step": 83
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.8460838442786127,
"learning_rate": 2.5000000000000015e-06,
"loss": 0.1453,
"step": 84
},
{
"epoch": 1.3492063492063493,
"grad_norm": 0.8394388782021253,
"learning_rate": 2.39282398310251e-06,
"loss": 0.1555,
"step": 85
},
{
"epoch": 1.3650793650793651,
"grad_norm": 0.8151362175464907,
"learning_rate": 2.2872686806712037e-06,
"loss": 0.1438,
"step": 86
},
{
"epoch": 1.380952380952381,
"grad_norm": 0.926558722531077,
"learning_rate": 2.1833997096818897e-06,
"loss": 0.1563,
"step": 87
},
{
"epoch": 1.3968253968253967,
"grad_norm": 0.9579087927493548,
"learning_rate": 2.081281638826052e-06,
"loss": 0.1597,
"step": 88
},
{
"epoch": 1.4126984126984126,
"grad_norm": 0.9007855143103636,
"learning_rate": 1.980977948372612e-06,
"loss": 0.1624,
"step": 89
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.7753212510208433,
"learning_rate": 1.8825509907063328e-06,
"loss": 0.141,
"step": 90
},
{
"epoch": 1.4444444444444444,
"grad_norm": 0.8791853314149657,
"learning_rate": 1.7860619515673034e-06,
"loss": 0.1654,
"step": 91
},
{
"epoch": 1.4603174603174602,
"grad_norm": 0.8359382498845804,
"learning_rate": 1.6915708120157042e-06,
"loss": 0.166,
"step": 92
},
{
"epoch": 1.4761904761904763,
"grad_norm": 0.8129183860856091,
"learning_rate": 1.5991363111454023e-06,
"loss": 0.1585,
"step": 93
},
{
"epoch": 1.492063492063492,
"grad_norm": 0.8382086206728524,
"learning_rate": 1.5088159095696365e-06,
"loss": 0.1446,
"step": 94
},
{
"epoch": 1.507936507936508,
"grad_norm": 0.8446012097737622,
"learning_rate": 1.4206657537014078e-06,
"loss": 0.1591,
"step": 95
},
{
"epoch": 1.5238095238095237,
"grad_norm": 0.7915534044040166,
"learning_rate": 1.3347406408508695e-06,
"loss": 0.1525,
"step": 96
},
{
"epoch": 1.5396825396825395,
"grad_norm": 0.8532541099992556,
"learning_rate": 1.2510939851613285e-06,
"loss": 0.1534,
"step": 97
},
{
"epoch": 1.5555555555555556,
"grad_norm": 1.0093759690937962,
"learning_rate": 1.1697777844051105e-06,
"loss": 0.1911,
"step": 98
},
{
"epoch": 1.5714285714285714,
"grad_norm": 0.8856596980432316,
"learning_rate": 1.0908425876598512e-06,
"loss": 0.1541,
"step": 99
},
{
"epoch": 1.5873015873015874,
"grad_norm": 0.9157111538126257,
"learning_rate": 1.0143374638853892e-06,
"loss": 0.1647,
"step": 100
},
{
"epoch": 1.6031746031746033,
"grad_norm": 0.8719657823296014,
"learning_rate": 9.403099714207175e-07,
"loss": 0.1557,
"step": 101
},
{
"epoch": 1.619047619047619,
"grad_norm": 0.8539622717206936,
"learning_rate": 8.688061284200266e-07,
"loss": 0.1745,
"step": 102
},
{
"epoch": 1.6349206349206349,
"grad_norm": 0.8050549020545622,
"learning_rate": 7.99870384246143e-07,
"loss": 0.1421,
"step": 103
},
{
"epoch": 1.6507936507936507,
"grad_norm": 0.9672771145882599,
"learning_rate": 7.33545591839222e-07,
"loss": 0.1556,
"step": 104
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.8972809389152612,
"learning_rate": 6.698729810778065e-07,
"loss": 0.1558,
"step": 105
},
{
"epoch": 1.6825396825396826,
"grad_norm": 0.920583324215122,
"learning_rate": 6.088921331488568e-07,
"loss": 0.1573,
"step": 106
},
{
"epoch": 1.6984126984126984,
"grad_norm": 0.8736053006300077,
"learning_rate": 5.506409559426573e-07,
"loss": 0.1541,
"step": 107
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.773509461894636,
"learning_rate": 4.951556604879049e-07,
"loss": 0.1422,
"step": 108
},
{
"epoch": 1.7301587301587302,
"grad_norm": 0.763156887567562,
"learning_rate": 4.4247073844163434e-07,
"loss": 0.1238,
"step": 109
},
{
"epoch": 1.746031746031746,
"grad_norm": 0.8125138415143264,
"learning_rate": 3.9261894064796136e-07,
"loss": 0.1511,
"step": 110
},
{
"epoch": 1.7619047619047619,
"grad_norm": 0.9131072141792466,
"learning_rate": 3.4563125677897936e-07,
"loss": 0.1724,
"step": 111
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.7875398231929296,
"learning_rate": 3.015368960704584e-07,
"loss": 0.144,
"step": 112
},
{
"epoch": 1.7936507936507935,
"grad_norm": 0.8289795771528357,
"learning_rate": 2.6036326916434153e-07,
"loss": 0.1495,
"step": 113
},
{
"epoch": 1.8095238095238095,
"grad_norm": 0.8434045488896569,
"learning_rate": 2.2213597106929608e-07,
"loss": 0.146,
"step": 114
},
{
"epoch": 1.8253968253968254,
"grad_norm": 0.931799639711572,
"learning_rate": 1.8687876524993987e-07,
"loss": 0.1474,
"step": 115
},
{
"epoch": 1.8412698412698414,
"grad_norm": 0.8212387515691058,
"learning_rate": 1.5461356885461077e-07,
"loss": 0.1514,
"step": 116
},
{
"epoch": 1.8571428571428572,
"grad_norm": 0.7642930433744584,
"learning_rate": 1.253604390908819e-07,
"loss": 0.1312,
"step": 117
},
{
"epoch": 1.873015873015873,
"grad_norm": 0.7568039557799128,
"learning_rate": 9.913756075728088e-08,
"loss": 0.1318,
"step": 118
},
{
"epoch": 1.8888888888888888,
"grad_norm": 0.775084397116709,
"learning_rate": 7.59612349389599e-08,
"loss": 0.1463,
"step": 119
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.8077950387475199,
"learning_rate": 5.584586887435739e-08,
"loss": 0.1385,
"step": 120
},
{
"epoch": 1.9206349206349205,
"grad_norm": 0.7331523365677751,
"learning_rate": 3.8803966999139686e-08,
"loss": 0.1288,
"step": 121
},
{
"epoch": 1.9365079365079365,
"grad_norm": 0.9088764131379831,
"learning_rate": 2.4846123172992953e-08,
"loss": 0.1555,
"step": 122
},
{
"epoch": 1.9523809523809523,
"grad_norm": 0.7736658967001014,
"learning_rate": 1.3981014094099354e-08,
"loss": 0.1324,
"step": 123
},
{
"epoch": 1.9682539682539684,
"grad_norm": 0.9219586041946046,
"learning_rate": 6.215393905388278e-09,
"loss": 0.1787,
"step": 124
},
{
"epoch": 1.9841269841269842,
"grad_norm": 0.7433182147609894,
"learning_rate": 1.5540899959187727e-09,
"loss": 0.1277,
"step": 125
},
{
"epoch": 2.0,
"grad_norm": 0.7977492578804788,
"learning_rate": 0.0,
"loss": 0.1314,
"step": 126
},
{
"epoch": 2.0,
"step": 126,
"total_flos": 5986647539712.0,
"train_loss": 0.2118229814583347,
"train_runtime": 253.661,
"train_samples_per_second": 3.958,
"train_steps_per_second": 0.497
}
],
"logging_steps": 1,
"max_steps": 126,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 70000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5986647539712.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}