{
"best_metric": 0.46568557620048523,
"best_model_checkpoint": "data/Llama-31-8B_task-3_60-samples_config-3/checkpoint-149",
"epoch": 32.869565217391305,
"eval_steps": 500,
"global_step": 189,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17391304347826086,
"grad_norm": 3.831383466720581,
"learning_rate": 1.3333333333333336e-07,
"loss": 2.9855,
"step": 1
},
{
"epoch": 0.34782608695652173,
"grad_norm": 1.815189242362976,
"learning_rate": 2.666666666666667e-07,
"loss": 2.0019,
"step": 2
},
{
"epoch": 0.6956521739130435,
"grad_norm": 2.1689257621765137,
"learning_rate": 5.333333333333335e-07,
"loss": 2.4489,
"step": 4
},
{
"epoch": 0.8695652173913043,
"eval_loss": 2.470275402069092,
"eval_runtime": 11.1851,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 5
},
{
"epoch": 1.0434782608695652,
"grad_norm": 2.6814849376678467,
"learning_rate": 8.000000000000001e-07,
"loss": 2.449,
"step": 6
},
{
"epoch": 1.391304347826087,
"grad_norm": 3.0052318572998047,
"learning_rate": 1.066666666666667e-06,
"loss": 2.4279,
"step": 8
},
{
"epoch": 1.7391304347826086,
"grad_norm": 3.0235185623168945,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.4891,
"step": 10
},
{
"epoch": 1.9130434782608696,
"eval_loss": 2.4544930458068848,
"eval_runtime": 11.1948,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 11
},
{
"epoch": 2.0869565217391304,
"grad_norm": 2.1276910305023193,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.4166,
"step": 12
},
{
"epoch": 2.4347826086956523,
"grad_norm": 3.4439425468444824,
"learning_rate": 1.8666666666666669e-06,
"loss": 2.7413,
"step": 14
},
{
"epoch": 2.782608695652174,
"grad_norm": 2.9551491737365723,
"learning_rate": 2.133333333333334e-06,
"loss": 2.4697,
"step": 16
},
{
"epoch": 2.9565217391304346,
"eval_loss": 2.4171502590179443,
"eval_runtime": 11.1863,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 17
},
{
"epoch": 3.130434782608696,
"grad_norm": 2.11788010597229,
"learning_rate": 2.4000000000000003e-06,
"loss": 1.9378,
"step": 18
},
{
"epoch": 3.4782608695652173,
"grad_norm": 2.177309274673462,
"learning_rate": 2.666666666666667e-06,
"loss": 2.3009,
"step": 20
},
{
"epoch": 3.8260869565217392,
"grad_norm": 3.9081153869628906,
"learning_rate": 2.9333333333333338e-06,
"loss": 2.5196,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 2.3542613983154297,
"eval_runtime": 11.1853,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 23
},
{
"epoch": 4.173913043478261,
"grad_norm": 2.6722283363342285,
"learning_rate": 3.2000000000000003e-06,
"loss": 2.4047,
"step": 24
},
{
"epoch": 4.521739130434782,
"grad_norm": 2.0837063789367676,
"learning_rate": 3.4666666666666672e-06,
"loss": 2.1966,
"step": 26
},
{
"epoch": 4.869565217391305,
"grad_norm": 3.5176823139190674,
"learning_rate": 3.7333333333333337e-06,
"loss": 2.4481,
"step": 28
},
{
"epoch": 4.869565217391305,
"eval_loss": 2.2785379886627197,
"eval_runtime": 11.1838,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 28
},
{
"epoch": 5.217391304347826,
"grad_norm": 2.1525745391845703,
"learning_rate": 4.000000000000001e-06,
"loss": 2.1104,
"step": 30
},
{
"epoch": 5.565217391304348,
"grad_norm": 3.053701400756836,
"learning_rate": 4.266666666666668e-06,
"loss": 2.1746,
"step": 32
},
{
"epoch": 5.913043478260869,
"grad_norm": 3.4729630947113037,
"learning_rate": 4.533333333333334e-06,
"loss": 2.2925,
"step": 34
},
{
"epoch": 5.913043478260869,
"eval_loss": 2.150887966156006,
"eval_runtime": 11.186,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 34
},
{
"epoch": 6.260869565217392,
"grad_norm": 3.08322811126709,
"learning_rate": 4.800000000000001e-06,
"loss": 2.1084,
"step": 36
},
{
"epoch": 6.608695652173913,
"grad_norm": 2.111081123352051,
"learning_rate": 5.0666666666666676e-06,
"loss": 2.0085,
"step": 38
},
{
"epoch": 6.956521739130435,
"grad_norm": 3.0863735675811768,
"learning_rate": 5.333333333333334e-06,
"loss": 2.0169,
"step": 40
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.9727939367294312,
"eval_runtime": 11.1859,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 40
},
{
"epoch": 7.304347826086957,
"grad_norm": 2.0293078422546387,
"learning_rate": 5.600000000000001e-06,
"loss": 1.9966,
"step": 42
},
{
"epoch": 7.6521739130434785,
"grad_norm": 2.927000045776367,
"learning_rate": 5.8666666666666675e-06,
"loss": 1.9564,
"step": 44
},
{
"epoch": 8.0,
"grad_norm": 1.686829924583435,
"learning_rate": 6.133333333333334e-06,
"loss": 1.6364,
"step": 46
},
{
"epoch": 8.0,
"eval_loss": 1.7440919876098633,
"eval_runtime": 11.1911,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 46
},
{
"epoch": 8.347826086956522,
"grad_norm": 2.4069645404815674,
"learning_rate": 6.4000000000000006e-06,
"loss": 1.7646,
"step": 48
},
{
"epoch": 8.695652173913043,
"grad_norm": 2.646470546722412,
"learning_rate": 6.666666666666667e-06,
"loss": 1.6047,
"step": 50
},
{
"epoch": 8.869565217391305,
"eval_loss": 1.5191912651062012,
"eval_runtime": 11.1878,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 51
},
{
"epoch": 9.043478260869565,
"grad_norm": 2.2305424213409424,
"learning_rate": 6.9333333333333344e-06,
"loss": 1.4515,
"step": 52
},
{
"epoch": 9.391304347826088,
"grad_norm": 2.5355496406555176,
"learning_rate": 7.2000000000000005e-06,
"loss": 1.4093,
"step": 54
},
{
"epoch": 9.73913043478261,
"grad_norm": 2.0611608028411865,
"learning_rate": 7.4666666666666675e-06,
"loss": 1.3126,
"step": 56
},
{
"epoch": 9.91304347826087,
"eval_loss": 1.2352849245071411,
"eval_runtime": 11.1877,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 57
},
{
"epoch": 10.08695652173913,
"grad_norm": 1.9300768375396729,
"learning_rate": 7.733333333333334e-06,
"loss": 1.2497,
"step": 58
},
{
"epoch": 10.434782608695652,
"grad_norm": 1.9045050144195557,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1055,
"step": 60
},
{
"epoch": 10.782608695652174,
"grad_norm": 1.8269885778427124,
"learning_rate": 8.266666666666667e-06,
"loss": 1.0406,
"step": 62
},
{
"epoch": 10.956521739130435,
"eval_loss": 0.9674362540245056,
"eval_runtime": 11.1886,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 63
},
{
"epoch": 11.130434782608695,
"grad_norm": 1.7003505229949951,
"learning_rate": 8.533333333333335e-06,
"loss": 0.9995,
"step": 64
},
{
"epoch": 11.478260869565217,
"grad_norm": 2.517176866531372,
"learning_rate": 8.8e-06,
"loss": 0.7261,
"step": 66
},
{
"epoch": 11.826086956521738,
"grad_norm": 1.7687137126922607,
"learning_rate": 9.066666666666667e-06,
"loss": 0.8254,
"step": 68
},
{
"epoch": 12.0,
"eval_loss": 0.7401664853096008,
"eval_runtime": 11.1831,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 69
},
{
"epoch": 12.173913043478262,
"grad_norm": 1.434043049812317,
"learning_rate": 9.333333333333334e-06,
"loss": 0.6623,
"step": 70
},
{
"epoch": 12.521739130434783,
"grad_norm": 1.1792118549346924,
"learning_rate": 9.600000000000001e-06,
"loss": 0.6543,
"step": 72
},
{
"epoch": 12.869565217391305,
"grad_norm": 1.1398481130599976,
"learning_rate": 9.866666666666668e-06,
"loss": 0.7723,
"step": 74
},
{
"epoch": 12.869565217391305,
"eval_loss": 0.6372489929199219,
"eval_runtime": 11.1866,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 74
},
{
"epoch": 13.217391304347826,
"grad_norm": 1.3324942588806152,
"learning_rate": 9.999945845889795e-06,
"loss": 0.5831,
"step": 76
},
{
"epoch": 13.565217391304348,
"grad_norm": 0.8719578981399536,
"learning_rate": 9.999512620046523e-06,
"loss": 0.4151,
"step": 78
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.5932775735855103,
"learning_rate": 9.99864620589731e-06,
"loss": 0.4327,
"step": 80
},
{
"epoch": 13.91304347826087,
"eval_loss": 0.5750009417533875,
"eval_runtime": 11.1885,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 80
},
{
"epoch": 14.26086956521739,
"grad_norm": 0.9070873260498047,
"learning_rate": 9.99734667851357e-06,
"loss": 0.7031,
"step": 82
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.7875546216964722,
"learning_rate": 9.995614150494293e-06,
"loss": 0.5401,
"step": 84
},
{
"epoch": 14.956521739130435,
"grad_norm": 0.7815837264060974,
"learning_rate": 9.993448771956285e-06,
"loss": 0.279,
"step": 86
},
{
"epoch": 14.956521739130435,
"eval_loss": 0.531262218952179,
"eval_runtime": 11.1884,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 86
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.558903694152832,
"learning_rate": 9.99085073052117e-06,
"loss": 0.5445,
"step": 88
},
{
"epoch": 15.652173913043478,
"grad_norm": 0.8311617374420166,
"learning_rate": 9.987820251299121e-06,
"loss": 0.4926,
"step": 90
},
{
"epoch": 16.0,
"grad_norm": 0.7534677386283875,
"learning_rate": 9.984357596869369e-06,
"loss": 0.4039,
"step": 92
},
{
"epoch": 16.0,
"eval_loss": 0.5046530961990356,
"eval_runtime": 11.1879,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 92
},
{
"epoch": 16.347826086956523,
"grad_norm": 2.769418478012085,
"learning_rate": 9.980463067257437e-06,
"loss": 0.4303,
"step": 94
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.5363882780075073,
"learning_rate": 9.976136999909156e-06,
"loss": 0.5227,
"step": 96
},
{
"epoch": 16.869565217391305,
"eval_loss": 0.48834142088890076,
"eval_runtime": 11.1855,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 97
},
{
"epoch": 17.043478260869566,
"grad_norm": 0.6151431202888489,
"learning_rate": 9.971379769661422e-06,
"loss": 0.3441,
"step": 98
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.7451347708702087,
"learning_rate": 9.966191788709716e-06,
"loss": 0.502,
"step": 100
},
{
"epoch": 17.73913043478261,
"grad_norm": 0.5834351778030396,
"learning_rate": 9.960573506572391e-06,
"loss": 0.3704,
"step": 102
},
{
"epoch": 17.91304347826087,
"eval_loss": 0.4936355650424957,
"eval_runtime": 11.1889,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 103
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.4578331410884857,
"learning_rate": 9.95452541005172e-06,
"loss": 0.2609,
"step": 104
},
{
"epoch": 18.434782608695652,
"grad_norm": 0.4481121003627777,
"learning_rate": 9.948048023191728e-06,
"loss": 0.3326,
"step": 106
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.3353727459907532,
"learning_rate": 9.941141907232766e-06,
"loss": 0.497,
"step": 108
},
{
"epoch": 18.956521739130434,
"eval_loss": 0.49074581265449524,
"eval_runtime": 11.187,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 109
},
{
"epoch": 19.130434782608695,
"grad_norm": 0.4987470507621765,
"learning_rate": 9.933807660562898e-06,
"loss": 0.4857,
"step": 110
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.42145681381225586,
"learning_rate": 9.926045918666045e-06,
"loss": 0.3594,
"step": 112
},
{
"epoch": 19.82608695652174,
"grad_norm": 0.2636556029319763,
"learning_rate": 9.91785735406693e-06,
"loss": 0.4428,
"step": 114
},
{
"epoch": 20.0,
"eval_loss": 0.4828261137008667,
"eval_runtime": 11.1843,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 115
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.42601507902145386,
"learning_rate": 9.909242676272797e-06,
"loss": 0.3907,
"step": 116
},
{
"epoch": 20.52173913043478,
"grad_norm": 0.32994499802589417,
"learning_rate": 9.90020263171194e-06,
"loss": 0.4499,
"step": 118
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.4801340699195862,
"learning_rate": 9.890738003669029e-06,
"loss": 0.3477,
"step": 120
},
{
"epoch": 20.869565217391305,
"eval_loss": 0.482466459274292,
"eval_runtime": 11.187,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 120
},
{
"epoch": 21.217391304347824,
"grad_norm": 0.32882365584373474,
"learning_rate": 9.880849612217238e-06,
"loss": 0.2927,
"step": 122
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.3345174491405487,
"learning_rate": 9.870538314147194e-06,
"loss": 0.3384,
"step": 124
},
{
"epoch": 21.91304347826087,
"grad_norm": 0.4246956408023834,
"learning_rate": 9.859805002892733e-06,
"loss": 0.4628,
"step": 126
},
{
"epoch": 21.91304347826087,
"eval_loss": 0.4857006072998047,
"eval_runtime": 11.1862,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 126
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.35164156556129456,
"learning_rate": 9.84865060845349e-06,
"loss": 0.3811,
"step": 128
},
{
"epoch": 22.608695652173914,
"grad_norm": 0.22983089089393616,
"learning_rate": 9.83707609731432e-06,
"loss": 0.3886,
"step": 130
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.333185076713562,
"learning_rate": 9.825082472361558e-06,
"loss": 0.367,
"step": 132
},
{
"epoch": 22.956521739130434,
"eval_loss": 0.48269978165626526,
"eval_runtime": 11.1827,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 132
},
{
"epoch": 23.304347826086957,
"grad_norm": 0.3328111469745636,
"learning_rate": 9.812670772796113e-06,
"loss": 0.3419,
"step": 134
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.3405808210372925,
"learning_rate": 9.799842074043438e-06,
"loss": 0.4314,
"step": 136
},
{
"epoch": 24.0,
"grad_norm": 0.44388675689697266,
"learning_rate": 9.786597487660336e-06,
"loss": 0.2597,
"step": 138
},
{
"epoch": 24.0,
"eval_loss": 0.4767860472202301,
"eval_runtime": 11.184,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 138
},
{
"epoch": 24.347826086956523,
"grad_norm": 0.18694591522216797,
"learning_rate": 9.77293816123866e-06,
"loss": 0.3109,
"step": 140
},
{
"epoch": 24.695652173913043,
"grad_norm": 0.44868358969688416,
"learning_rate": 9.75886527830587e-06,
"loss": 0.3473,
"step": 142
},
{
"epoch": 24.869565217391305,
"eval_loss": 0.46839889883995056,
"eval_runtime": 11.1867,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 143
},
{
"epoch": 25.043478260869566,
"grad_norm": 0.4045964777469635,
"learning_rate": 9.744380058222483e-06,
"loss": 0.4445,
"step": 144
},
{
"epoch": 25.391304347826086,
"grad_norm": 0.3353895843029022,
"learning_rate": 9.729483756076436e-06,
"loss": 0.3212,
"step": 146
},
{
"epoch": 25.73913043478261,
"grad_norm": 0.3463374078273773,
"learning_rate": 9.714177662574316e-06,
"loss": 0.3795,
"step": 148
},
{
"epoch": 25.91304347826087,
"eval_loss": 0.46568557620048523,
"eval_runtime": 11.1848,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 149
},
{
"epoch": 26.08695652173913,
"grad_norm": 0.5068124532699585,
"learning_rate": 9.698463103929542e-06,
"loss": 0.3142,
"step": 150
},
{
"epoch": 26.434782608695652,
"grad_norm": 0.34328868985176086,
"learning_rate": 9.682341441747446e-06,
"loss": 0.1848,
"step": 152
},
{
"epoch": 26.782608695652176,
"grad_norm": 0.35909274220466614,
"learning_rate": 9.665814072907293e-06,
"loss": 0.437,
"step": 154
},
{
"epoch": 26.956521739130434,
"eval_loss": 0.47061336040496826,
"eval_runtime": 11.1847,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 155
},
{
"epoch": 27.130434782608695,
"grad_norm": 0.4798097312450409,
"learning_rate": 9.648882429441258e-06,
"loss": 0.3427,
"step": 156
},
{
"epoch": 27.47826086956522,
"grad_norm": 0.4749986529350281,
"learning_rate": 9.63154797841033e-06,
"loss": 0.3799,
"step": 158
},
{
"epoch": 27.82608695652174,
"grad_norm": 0.5031474828720093,
"learning_rate": 9.613812221777212e-06,
"loss": 0.3478,
"step": 160
},
{
"epoch": 28.0,
"eval_loss": 0.47599098086357117,
"eval_runtime": 11.1847,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 161
},
{
"epoch": 28.17391304347826,
"grad_norm": 0.38506221771240234,
"learning_rate": 9.595676696276173e-06,
"loss": 0.1651,
"step": 162
},
{
"epoch": 28.52173913043478,
"grad_norm": 0.6532711982727051,
"learning_rate": 9.577142973279896e-06,
"loss": 0.3973,
"step": 164
},
{
"epoch": 28.869565217391305,
"grad_norm": 0.4371902048587799,
"learning_rate": 9.55821265866333e-06,
"loss": 0.254,
"step": 166
},
{
"epoch": 28.869565217391305,
"eval_loss": 0.4744891822338104,
"eval_runtime": 11.1866,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 166
},
{
"epoch": 29.217391304347824,
"grad_norm": 0.4469977915287018,
"learning_rate": 9.538887392664544e-06,
"loss": 0.2313,
"step": 168
},
{
"epoch": 29.565217391304348,
"grad_norm": 0.42242884635925293,
"learning_rate": 9.519168849742603e-06,
"loss": 0.2858,
"step": 170
},
{
"epoch": 29.91304347826087,
"grad_norm": 0.47203147411346436,
"learning_rate": 9.499058738432492e-06,
"loss": 0.3934,
"step": 172
},
{
"epoch": 29.91304347826087,
"eval_loss": 0.48127391934394836,
"eval_runtime": 11.1845,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 172
},
{
"epoch": 30.26086956521739,
"grad_norm": 0.573071300983429,
"learning_rate": 9.478558801197065e-06,
"loss": 0.2361,
"step": 174
},
{
"epoch": 30.608695652173914,
"grad_norm": 0.6047350168228149,
"learning_rate": 9.457670814276083e-06,
"loss": 0.2041,
"step": 176
},
{
"epoch": 30.956521739130434,
"grad_norm": 0.5841593146324158,
"learning_rate": 9.436396587532297e-06,
"loss": 0.3074,
"step": 178
},
{
"epoch": 30.956521739130434,
"eval_loss": 0.4815356731414795,
"eval_runtime": 11.1885,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 178
},
{
"epoch": 31.304347826086957,
"grad_norm": 0.6498507857322693,
"learning_rate": 9.414737964294636e-06,
"loss": 0.3778,
"step": 180
},
{
"epoch": 31.652173913043477,
"grad_norm": 0.2765786945819855,
"learning_rate": 9.392696821198488e-06,
"loss": 0.1384,
"step": 182
},
{
"epoch": 32.0,
"grad_norm": 0.5654636025428772,
"learning_rate": 9.370275068023097e-06,
"loss": 0.3447,
"step": 184
},
{
"epoch": 32.0,
"eval_loss": 0.482303649187088,
"eval_runtime": 11.1841,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 184
},
{
"epoch": 32.34782608695652,
"grad_norm": 0.4734094738960266,
"learning_rate": 9.347474647526095e-06,
"loss": 0.2441,
"step": 186
},
{
"epoch": 32.69565217391305,
"grad_norm": 0.5260018110275269,
"learning_rate": 9.324297535275156e-06,
"loss": 0.225,
"step": 188
},
{
"epoch": 32.869565217391305,
"eval_loss": 0.4865100383758545,
"eval_runtime": 11.1875,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 189
},
{
"epoch": 32.869565217391305,
"step": 189,
"total_flos": 1.2027607575743693e+17,
"train_loss": 0.9406116593767095,
"train_runtime": 4446.8494,
"train_samples_per_second": 1.552,
"train_steps_per_second": 0.169
}
],
"logging_steps": 2,
"max_steps": 750,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2027607575743693e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}