{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 244,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.020491803278688523,
"grad_norm": 0.3614635057701486,
"learning_rate": 2e-05,
"loss": 1.3385,
"step": 5
},
{
"epoch": 0.040983606557377046,
"grad_norm": 0.2489719842782885,
"learning_rate": 4e-05,
"loss": 1.3036,
"step": 10
},
{
"epoch": 0.06147540983606557,
"grad_norm": 0.21147248994437587,
"learning_rate": 6e-05,
"loss": 1.2625,
"step": 15
},
{
"epoch": 0.08196721311475409,
"grad_norm": 0.19373427885281047,
"learning_rate": 8e-05,
"loss": 1.1662,
"step": 20
},
{
"epoch": 0.10245901639344263,
"grad_norm": 0.16443335321911356,
"learning_rate": 0.0001,
"loss": 1.2303,
"step": 25
},
{
"epoch": 0.12295081967213115,
"grad_norm": 0.14174512674576367,
"learning_rate": 9.987144015627809e-05,
"loss": 1.1398,
"step": 30
},
{
"epoch": 0.14344262295081966,
"grad_norm": 0.1381398528306756,
"learning_rate": 9.948642173044905e-05,
"loss": 1.3659,
"step": 35
},
{
"epoch": 0.16393442622950818,
"grad_norm": 19.814446058738557,
"learning_rate": 9.88469246388591e-05,
"loss": 1.2665,
"step": 40
},
{
"epoch": 0.18442622950819673,
"grad_norm": 0.15246937379745587,
"learning_rate": 9.79562374273544e-05,
"loss": 1.2956,
"step": 45
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.11304889245590244,
"learning_rate": 9.681894036028365e-05,
"loss": 1.3647,
"step": 50
},
{
"epoch": 0.22540983606557377,
"grad_norm": 0.11799961373671249,
"learning_rate": 9.544088186697515e-05,
"loss": 1.2491,
"step": 55
},
{
"epoch": 0.2459016393442623,
"grad_norm": 0.0886669617443345,
"learning_rate": 9.382914846681047e-05,
"loss": 1.1422,
"step": 60
},
{
"epoch": 0.26639344262295084,
"grad_norm": 0.13383163861854966,
"learning_rate": 9.19920283275515e-05,
"loss": 1.217,
"step": 65
},
{
"epoch": 0.28688524590163933,
"grad_norm": 0.12664816060876666,
"learning_rate": 8.993896864431826e-05,
"loss": 1.1728,
"step": 70
},
{
"epoch": 0.3073770491803279,
"grad_norm": 0.11280543596286248,
"learning_rate": 8.76805270583919e-05,
"loss": 1.2914,
"step": 75
},
{
"epoch": 0.32786885245901637,
"grad_norm": 0.11457176824246548,
"learning_rate": 8.522831736566607e-05,
"loss": 1.2975,
"step": 80
},
{
"epoch": 0.3483606557377049,
"grad_norm": 0.125952078460287,
"learning_rate": 8.259494979393563e-05,
"loss": 1.2325,
"step": 85
},
{
"epoch": 0.36885245901639346,
"grad_norm": 0.14141125617004555,
"learning_rate": 7.97939661561399e-05,
"loss": 1.3628,
"step": 90
},
{
"epoch": 0.38934426229508196,
"grad_norm": 0.12898427772056165,
"learning_rate": 7.68397702130286e-05,
"loss": 1.3272,
"step": 95
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.12462484863790337,
"learning_rate": 7.374755360335253e-05,
"loss": 1.2025,
"step": 100
},
{
"epoch": 0.430327868852459,
"grad_norm": 0.15405301651774622,
"learning_rate": 7.053321772247545e-05,
"loss": 1.3946,
"step": 105
},
{
"epoch": 0.45081967213114754,
"grad_norm": 0.11850088757256459,
"learning_rate": 6.721329195113801e-05,
"loss": 1.0897,
"step": 110
},
{
"epoch": 0.4713114754098361,
"grad_norm": 0.10161954341550462,
"learning_rate": 6.380484865487347e-05,
"loss": 1.3683,
"step": 115
},
{
"epoch": 0.4918032786885246,
"grad_norm": 0.1338711959037296,
"learning_rate": 6.032541539118187e-05,
"loss": 1.0908,
"step": 120
},
{
"epoch": 0.5122950819672131,
"grad_norm": 0.12226354104013447,
"learning_rate": 5.679288477592815e-05,
"loss": 1.301,
"step": 125
},
{
"epoch": 0.5327868852459017,
"grad_norm": 0.11741161974948867,
"learning_rate": 5.3225422472465824e-05,
"loss": 1.2055,
"step": 130
},
{
"epoch": 0.5532786885245902,
"grad_norm": 0.1220585245683397,
"learning_rate": 4.9641373776643616e-05,
"loss": 1.2117,
"step": 135
},
{
"epoch": 0.5737704918032787,
"grad_norm": 0.1283897077062444,
"learning_rate": 4.605916927807057e-05,
"loss": 1.1798,
"step": 140
},
{
"epoch": 0.5942622950819673,
"grad_norm": 0.13195983815349108,
"learning_rate": 4.249723008276737e-05,
"loss": 1.2929,
"step": 145
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.10033441692536059,
"learning_rate": 3.89738730845858e-05,
"loss": 1.1865,
"step": 150
},
{
"epoch": 0.6352459016393442,
"grad_norm": 0.10526603636165197,
"learning_rate": 3.550721677252839e-05,
"loss": 1.1911,
"step": 155
},
{
"epoch": 0.6557377049180327,
"grad_norm": 0.11769891562161476,
"learning_rate": 3.2115088058343725e-05,
"loss": 1.1442,
"step": 160
},
{
"epoch": 0.6762295081967213,
"grad_norm": 0.09808797452553064,
"learning_rate": 2.8814930603527068e-05,
"loss": 1.2862,
"step": 165
},
{
"epoch": 0.6967213114754098,
"grad_norm": 0.25170420435929075,
"learning_rate": 2.5623715117144336e-05,
"loss": 1.2449,
"step": 170
},
{
"epoch": 0.7172131147540983,
"grad_norm": 0.14162820792284236,
"learning_rate": 2.2557852085764053e-05,
"loss": 1.2093,
"step": 175
},
{
"epoch": 0.7377049180327869,
"grad_norm": 0.09769546802692396,
"learning_rate": 1.963310738427367e-05,
"loss": 1.1962,
"step": 180
},
{
"epoch": 0.7581967213114754,
"grad_norm": 0.10477577716382458,
"learning_rate": 1.6864521201543197e-05,
"loss": 1.1101,
"step": 185
},
{
"epoch": 0.7786885245901639,
"grad_norm": 0.1197892608117282,
"learning_rate": 1.4266330697851954e-05,
"loss": 1.3805,
"step": 190
},
{
"epoch": 0.7991803278688525,
"grad_norm": 0.10767908097089735,
"learning_rate": 1.1851896791804507e-05,
"loss": 1.3342,
"step": 195
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.10976930143838647,
"learning_rate": 9.633635453226376e-06,
"loss": 1.3511,
"step": 200
},
{
"epoch": 0.8401639344262295,
"grad_norm": 0.10305982170288747,
"learning_rate": 7.6229538553584556e-06,
"loss": 1.265,
"step": 205
},
{
"epoch": 0.860655737704918,
"grad_norm": 0.09095323069274693,
"learning_rate": 5.830191714680577e-06,
"loss": 1.2826,
"step": 210
},
{
"epoch": 0.8811475409836066,
"grad_norm": 0.12664820610601704,
"learning_rate": 4.26456812001822e-06,
"loss": 1.2256,
"step": 215
},
{
"epoch": 0.9016393442622951,
"grad_norm": 0.11352528001951419,
"learning_rate": 2.934134124357646e-06,
"loss": 1.2583,
"step": 220
},
{
"epoch": 0.9221311475409836,
"grad_norm": 0.10147400081855255,
"learning_rate": 1.8457313431614498e-06,
"loss": 1.2593,
"step": 225
},
{
"epoch": 0.9426229508196722,
"grad_norm": 0.11813110730985737,
"learning_rate": 1.004956772087895e-06,
"loss": 1.2518,
"step": 230
},
{
"epoch": 0.9631147540983607,
"grad_norm": 0.10427451227791518,
"learning_rate": 4.1613400503550114e-07,
"loss": 1.3182,
"step": 235
},
{
"epoch": 0.9836065573770492,
"grad_norm": 0.10936990134938639,
"learning_rate": 8.229100052074556e-08,
"loss": 1.2333,
"step": 240
},
{
"epoch": 1.0,
"eval_loss": 1.1466403007507324,
"eval_runtime": 27.0287,
"eval_samples_per_second": 11.802,
"eval_steps_per_second": 0.37,
"step": 244
},
{
"epoch": 1.0,
"step": 244,
"total_flos": 84368078012416.0,
"train_loss": 0.0,
"train_runtime": 1.4481,
"train_samples_per_second": 10777.998,
"train_steps_per_second": 168.493
}
],
"logging_steps": 5,
"max_steps": 244,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 84368078012416.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}