{
"best_metric": NaN,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.16454134101192924,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003290826820238585,
"grad_norm": 1.039158821105957,
"learning_rate": 5e-05,
"loss": 1.8771,
"step": 1
},
{
"epoch": 0.003290826820238585,
"eval_loss": NaN,
"eval_runtime": 69.799,
"eval_samples_per_second": 29.327,
"eval_steps_per_second": 3.668,
"step": 1
},
{
"epoch": 0.00658165364047717,
"grad_norm": 1.0197597742080688,
"learning_rate": 0.0001,
"loss": 1.8875,
"step": 2
},
{
"epoch": 0.009872480460715755,
"grad_norm": 0.9108155965805054,
"learning_rate": 9.989294616193017e-05,
"loss": 1.8816,
"step": 3
},
{
"epoch": 0.01316330728095434,
"grad_norm": 0.7009238600730896,
"learning_rate": 9.957224306869053e-05,
"loss": 1.759,
"step": 4
},
{
"epoch": 0.016454134101192924,
"grad_norm": 1.2861307859420776,
"learning_rate": 9.903926402016153e-05,
"loss": 1.7925,
"step": 5
},
{
"epoch": 0.01974496092143151,
"grad_norm": 0.8928821086883545,
"learning_rate": 9.829629131445342e-05,
"loss": 1.7698,
"step": 6
},
{
"epoch": 0.023035787741670095,
"grad_norm": 0.5309975743293762,
"learning_rate": 9.73465064747553e-05,
"loss": 1.7075,
"step": 7
},
{
"epoch": 0.02632661456190868,
"grad_norm": 0.4590210020542145,
"learning_rate": 9.619397662556435e-05,
"loss": 1.6672,
"step": 8
},
{
"epoch": 0.029617441382147263,
"grad_norm": 0.48888978362083435,
"learning_rate": 9.484363707663442e-05,
"loss": 1.6435,
"step": 9
},
{
"epoch": 0.03290826820238585,
"grad_norm": 0.510526716709137,
"learning_rate": 9.330127018922194e-05,
"loss": 1.6777,
"step": 10
},
{
"epoch": 0.03619909502262444,
"grad_norm": 0.5071189403533936,
"learning_rate": 9.157348061512727e-05,
"loss": 1.6886,
"step": 11
},
{
"epoch": 0.03948992184286302,
"grad_norm": 0.528242826461792,
"learning_rate": 8.966766701456177e-05,
"loss": 1.6991,
"step": 12
},
{
"epoch": 0.0427807486631016,
"grad_norm": 0.5126593708992004,
"learning_rate": 8.759199037394887e-05,
"loss": 1.5474,
"step": 13
},
{
"epoch": 0.04607157548334019,
"grad_norm": 0.5633126497268677,
"learning_rate": 8.535533905932738e-05,
"loss": 1.5923,
"step": 14
},
{
"epoch": 0.04936240230357877,
"grad_norm": 0.5548191666603088,
"learning_rate": 8.296729075500344e-05,
"loss": 1.5197,
"step": 15
},
{
"epoch": 0.05265322912381736,
"grad_norm": 0.4316805601119995,
"learning_rate": 8.043807145043604e-05,
"loss": 1.5417,
"step": 16
},
{
"epoch": 0.055944055944055944,
"grad_norm": 0.3394129276275635,
"learning_rate": 7.777851165098012e-05,
"loss": 1.5373,
"step": 17
},
{
"epoch": 0.059234882764294526,
"grad_norm": 0.3246888816356659,
"learning_rate": 7.500000000000001e-05,
"loss": 1.5475,
"step": 18
},
{
"epoch": 0.06252570958453312,
"grad_norm": 0.3448064625263214,
"learning_rate": 7.211443451095007e-05,
"loss": 1.5234,
"step": 19
},
{
"epoch": 0.0658165364047717,
"grad_norm": 0.3767080307006836,
"learning_rate": 6.91341716182545e-05,
"loss": 1.5428,
"step": 20
},
{
"epoch": 0.06910736322501028,
"grad_norm": 0.3699794411659241,
"learning_rate": 6.607197326515808e-05,
"loss": 1.4891,
"step": 21
},
{
"epoch": 0.07239819004524888,
"grad_norm": 0.39573559165000916,
"learning_rate": 6.294095225512603e-05,
"loss": 1.5128,
"step": 22
},
{
"epoch": 0.07568901686548746,
"grad_norm": 0.3921695649623871,
"learning_rate": 5.9754516100806423e-05,
"loss": 1.5184,
"step": 23
},
{
"epoch": 0.07897984368572604,
"grad_norm": 0.43741467595100403,
"learning_rate": 5.6526309611002594e-05,
"loss": 1.5545,
"step": 24
},
{
"epoch": 0.08227067050596462,
"grad_norm": 0.5433678030967712,
"learning_rate": 5.327015646150716e-05,
"loss": 1.5304,
"step": 25
},
{
"epoch": 0.08227067050596462,
"eval_loss": NaN,
"eval_runtime": 69.5401,
"eval_samples_per_second": 29.436,
"eval_steps_per_second": 3.681,
"step": 25
},
{
"epoch": 0.0855614973262032,
"grad_norm": 0.3675021231174469,
"learning_rate": 5e-05,
"loss": 1.4677,
"step": 26
},
{
"epoch": 0.0888523241464418,
"grad_norm": 0.3743444085121155,
"learning_rate": 4.6729843538492847e-05,
"loss": 1.4981,
"step": 27
},
{
"epoch": 0.09214315096668038,
"grad_norm": 0.38730910420417786,
"learning_rate": 4.347369038899744e-05,
"loss": 1.4948,
"step": 28
},
{
"epoch": 0.09543397778691896,
"grad_norm": 0.3946470618247986,
"learning_rate": 4.0245483899193595e-05,
"loss": 1.4812,
"step": 29
},
{
"epoch": 0.09872480460715755,
"grad_norm": 0.3759434223175049,
"learning_rate": 3.705904774487396e-05,
"loss": 1.4756,
"step": 30
},
{
"epoch": 0.10201563142739613,
"grad_norm": 0.36649543046951294,
"learning_rate": 3.392802673484193e-05,
"loss": 1.4756,
"step": 31
},
{
"epoch": 0.10530645824763472,
"grad_norm": 0.397172212600708,
"learning_rate": 3.086582838174551e-05,
"loss": 1.4794,
"step": 32
},
{
"epoch": 0.1085972850678733,
"grad_norm": 0.3425104022026062,
"learning_rate": 2.7885565489049946e-05,
"loss": 1.4798,
"step": 33
},
{
"epoch": 0.11188811188811189,
"grad_norm": 0.37429553270339966,
"learning_rate": 2.500000000000001e-05,
"loss": 1.4882,
"step": 34
},
{
"epoch": 0.11517893870835047,
"grad_norm": 0.4080633521080017,
"learning_rate": 2.2221488349019903e-05,
"loss": 1.4917,
"step": 35
},
{
"epoch": 0.11846976552858905,
"grad_norm": 0.40952998399734497,
"learning_rate": 1.9561928549563968e-05,
"loss": 1.4858,
"step": 36
},
{
"epoch": 0.12176059234882765,
"grad_norm": 0.5022099614143372,
"learning_rate": 1.703270924499656e-05,
"loss": 1.5295,
"step": 37
},
{
"epoch": 0.12505141916906623,
"grad_norm": 0.38732391595840454,
"learning_rate": 1.4644660940672627e-05,
"loss": 1.4484,
"step": 38
},
{
"epoch": 0.12834224598930483,
"grad_norm": 0.3115442097187042,
"learning_rate": 1.2408009626051137e-05,
"loss": 1.4107,
"step": 39
},
{
"epoch": 0.1316330728095434,
"grad_norm": 0.29826825857162476,
"learning_rate": 1.0332332985438248e-05,
"loss": 1.4337,
"step": 40
},
{
"epoch": 0.134923899629782,
"grad_norm": 0.26260218024253845,
"learning_rate": 8.426519384872733e-06,
"loss": 1.4357,
"step": 41
},
{
"epoch": 0.13821472645002056,
"grad_norm": 0.27064716815948486,
"learning_rate": 6.698729810778065e-06,
"loss": 1.4669,
"step": 42
},
{
"epoch": 0.14150555327025915,
"grad_norm": 0.2965468168258667,
"learning_rate": 5.156362923365588e-06,
"loss": 1.476,
"step": 43
},
{
"epoch": 0.14479638009049775,
"grad_norm": 0.2908375859260559,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.4166,
"step": 44
},
{
"epoch": 0.14808720691073632,
"grad_norm": 0.2952311336994171,
"learning_rate": 2.653493525244721e-06,
"loss": 1.4736,
"step": 45
},
{
"epoch": 0.15137803373097491,
"grad_norm": 0.3133932650089264,
"learning_rate": 1.70370868554659e-06,
"loss": 1.4653,
"step": 46
},
{
"epoch": 0.15466886055121348,
"grad_norm": 0.3325834572315216,
"learning_rate": 9.607359798384785e-07,
"loss": 1.5009,
"step": 47
},
{
"epoch": 0.15795968737145208,
"grad_norm": 0.3826364278793335,
"learning_rate": 4.277569313094809e-07,
"loss": 1.4529,
"step": 48
},
{
"epoch": 0.16125051419169067,
"grad_norm": 0.42253777384757996,
"learning_rate": 1.0705383806982606e-07,
"loss": 1.4926,
"step": 49
},
{
"epoch": 0.16454134101192924,
"grad_norm": 0.6342712640762329,
"learning_rate": 0.0,
"loss": 1.5481,
"step": 50
},
{
"epoch": 0.16454134101192924,
"eval_loss": NaN,
"eval_runtime": 69.9353,
"eval_samples_per_second": 29.27,
"eval_steps_per_second": 3.661,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.26462276232151e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
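
The state above logs a NaN eval_loss at every evaluation (steps 1, 25, and 50) while the training loss stays finite, the EarlyStoppingCallback patience counter sits at 1, and should_training_stop is true at max_steps=50. Below is a minimal sketch of how one might load this file and surface that pattern; the checkpoint path is an assumption (the state only names miner_id_24/checkpoint-25 as best_model_checkpoint), not something the log confirms.

```python
import json
import math

# Hypothetical path: this snapshot was written at global_step 50, but only
# miner_id_24/checkpoint-25 is named explicitly in the state above.
STATE_PATH = "miner_id_24/checkpoint-50/trainer_state.json"

with open(STATE_PATH) as f:
    # Python's json module accepts the bare NaN tokens the Trainer writes
    # for best_metric and eval_loss.
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

first, last = train_entries[0], train_entries[-1]
print(f"train loss: step {first['step']} -> {first['loss']:.4f}, "
      f"step {last['step']} -> {last['loss']:.4f}")

for e in eval_entries:
    value = "NaN" if math.isnan(e["eval_loss"]) else f"{e['eval_loss']:.4f}"
    print(f"eval @ step {e['step']}: eval_loss = {value}")

# All three eval_loss values in this run are NaN, so the eval metric never
# improves; the patience counter in stateful_callbacks is already at 1 and
# the run stops once global_step reaches max_steps (50).
```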