{
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 1.8018018018018018,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.036036036036036036,
      "grad_norm": 0.9987035989761353,
      "learning_rate": 5e-05,
      "loss": 5.6149,
      "step": 1
    },
    {
      "epoch": 0.036036036036036036,
      "eval_loss": NaN,
      "eval_runtime": 5.0843,
      "eval_samples_per_second": 36.78,
      "eval_steps_per_second": 4.72,
      "step": 1
    },
    {
      "epoch": 0.07207207207207207,
      "grad_norm": 0.971971333026886,
      "learning_rate": 0.0001,
      "loss": 5.8095,
      "step": 2
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 1.0247091054916382,
      "learning_rate": 9.989294616193017e-05,
      "loss": 6.0093,
      "step": 3
    },
    {
      "epoch": 0.14414414414414414,
      "grad_norm": 1.0774474143981934,
      "learning_rate": 9.957224306869053e-05,
      "loss": 6.1557,
      "step": 4
    },
    {
      "epoch": 0.18018018018018017,
      "grad_norm": 1.101145625114441,
      "learning_rate": 9.903926402016153e-05,
      "loss": 6.0087,
      "step": 5
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 1.0605506896972656,
      "learning_rate": 9.829629131445342e-05,
      "loss": 5.9362,
      "step": 6
    },
    {
      "epoch": 0.25225225225225223,
      "grad_norm": 0.9831973314285278,
      "learning_rate": 9.73465064747553e-05,
      "loss": 5.2861,
      "step": 7
    },
    {
      "epoch": 0.2882882882882883,
      "grad_norm": 1.1115872859954834,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.0562,
      "step": 8
    },
    {
      "epoch": 0.32432432432432434,
      "grad_norm": 1.3432058095932007,
      "learning_rate": 9.484363707663442e-05,
      "loss": 5.1655,
      "step": 9
    },
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 1.3428442478179932,
      "learning_rate": 9.330127018922194e-05,
      "loss": 5.108,
      "step": 10
    },
    {
      "epoch": 0.3963963963963964,
      "grad_norm": 1.2244690656661987,
      "learning_rate": 9.157348061512727e-05,
      "loss": 5.1427,
      "step": 11
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 1.1294629573822021,
      "learning_rate": 8.966766701456177e-05,
      "loss": 5.0746,
      "step": 12
    },
    {
      "epoch": 0.46846846846846846,
      "grad_norm": 1.3526092767715454,
      "learning_rate": 8.759199037394887e-05,
      "loss": 5.231,
      "step": 13
    },
    {
      "epoch": 0.5045045045045045,
      "grad_norm": 1.020145297050476,
      "learning_rate": 8.535533905932738e-05,
      "loss": 4.7132,
      "step": 14
    },
    {
      "epoch": 0.5405405405405406,
      "grad_norm": 1.0296194553375244,
      "learning_rate": 8.296729075500344e-05,
      "loss": 4.6688,
      "step": 15
    },
    {
      "epoch": 0.5765765765765766,
      "grad_norm": 1.1286603212356567,
      "learning_rate": 8.043807145043604e-05,
      "loss": 4.5959,
      "step": 16
    },
    {
      "epoch": 0.6126126126126126,
      "grad_norm": 1.1740059852600098,
      "learning_rate": 7.777851165098012e-05,
      "loss": 4.887,
      "step": 17
    },
    {
      "epoch": 0.6486486486486487,
      "grad_norm": 1.2440125942230225,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.6247,
      "step": 18
    },
    {
      "epoch": 0.6846846846846847,
      "grad_norm": 1.2338988780975342,
      "learning_rate": 7.211443451095007e-05,
      "loss": 4.5548,
      "step": 19
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 1.399675965309143,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.7504,
      "step": 20
    },
    {
      "epoch": 0.7567567567567568,
      "grad_norm": 0.9647018909454346,
      "learning_rate": 6.607197326515808e-05,
      "loss": 4.4062,
      "step": 21
    },
    {
      "epoch": 0.7927927927927928,
      "grad_norm": 0.920181930065155,
      "learning_rate": 6.294095225512603e-05,
      "loss": 4.1608,
      "step": 22
    },
    {
      "epoch": 0.8288288288288288,
      "grad_norm": 1.0398772954940796,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 4.3302,
      "step": 23
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 1.0814138650894165,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 4.4058,
      "step": 24
    },
    {
      "epoch": 0.9009009009009009,
      "grad_norm": 1.162844181060791,
      "learning_rate": 5.327015646150716e-05,
      "loss": 4.5071,
      "step": 25
    },
    {
      "epoch": 0.9009009009009009,
      "eval_loss": NaN,
      "eval_runtime": 5.0767,
      "eval_samples_per_second": 36.835,
      "eval_steps_per_second": 4.728,
      "step": 25
    },
    {
      "epoch": 0.9369369369369369,
      "grad_norm": 1.3344053030014038,
      "learning_rate": 5e-05,
      "loss": 4.515,
      "step": 26
    },
    {
      "epoch": 0.972972972972973,
      "grad_norm": 1.7347127199172974,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 4.757,
      "step": 27
    },
    {
      "epoch": 1.009009009009009,
      "grad_norm": 1.270090103149414,
      "learning_rate": 4.347369038899744e-05,
      "loss": 5.3056,
      "step": 28
    },
    {
      "epoch": 1.045045045045045,
      "grad_norm": 0.9538925290107727,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 4.028,
      "step": 29
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 0.9219748377799988,
      "learning_rate": 3.705904774487396e-05,
      "loss": 3.7613,
      "step": 30
    },
    {
      "epoch": 1.117117117117117,
      "grad_norm": 1.083941102027893,
      "learning_rate": 3.392802673484193e-05,
      "loss": 4.2848,
      "step": 31
    },
    {
      "epoch": 1.1531531531531531,
      "grad_norm": 1.0106741189956665,
      "learning_rate": 3.086582838174551e-05,
      "loss": 3.9071,
      "step": 32
    },
    {
      "epoch": 1.1891891891891893,
      "grad_norm": 1.3047679662704468,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 4.2408,
      "step": 33
    },
    {
      "epoch": 1.2252252252252251,
      "grad_norm": 1.0641274452209473,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.3484,
      "step": 34
    },
    {
      "epoch": 1.2612612612612613,
      "grad_norm": 1.174180269241333,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 4.4811,
      "step": 35
    },
    {
      "epoch": 1.2972972972972974,
      "grad_norm": 0.8928071856498718,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 3.5643,
      "step": 36
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 1.018917202949524,
      "learning_rate": 1.703270924499656e-05,
      "loss": 3.9813,
      "step": 37
    },
    {
      "epoch": 1.3693693693693694,
      "grad_norm": 1.0619010925292969,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 4.1614,
      "step": 38
    },
    {
      "epoch": 1.4054054054054055,
      "grad_norm": 1.1027988195419312,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 4.0984,
      "step": 39
    },
    {
      "epoch": 1.4414414414414414,
      "grad_norm": 1.2321841716766357,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 3.9475,
      "step": 40
    },
    {
      "epoch": 1.4774774774774775,
      "grad_norm": 1.2451386451721191,
      "learning_rate": 8.426519384872733e-06,
      "loss": 3.3466,
      "step": 41
    },
    {
      "epoch": 1.5135135135135136,
      "grad_norm": 1.1862729787826538,
      "learning_rate": 6.698729810778065e-06,
      "loss": 4.5245,
      "step": 42
    },
    {
      "epoch": 1.5495495495495497,
      "grad_norm": 1.0056686401367188,
      "learning_rate": 5.156362923365588e-06,
      "loss": 3.9216,
      "step": 43
    },
    {
      "epoch": 1.5855855855855856,
      "grad_norm": 1.0354795455932617,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 4.0247,
      "step": 44
    },
    {
      "epoch": 1.6216216216216215,
      "grad_norm": 1.0059832334518433,
      "learning_rate": 2.653493525244721e-06,
      "loss": 3.6011,
      "step": 45
    },
    {
      "epoch": 1.6576576576576576,
      "grad_norm": 1.219404935836792,
      "learning_rate": 1.70370868554659e-06,
      "loss": 3.9867,
      "step": 46
    },
    {
      "epoch": 1.6936936936936937,
      "grad_norm": 1.2384576797485352,
      "learning_rate": 9.607359798384785e-07,
      "loss": 3.8457,
      "step": 47
    },
    {
      "epoch": 1.7297297297297298,
      "grad_norm": 1.3424208164215088,
      "learning_rate": 4.277569313094809e-07,
      "loss": 3.3521,
      "step": 48
    },
    {
      "epoch": 1.7657657657657657,
      "grad_norm": 1.2357970476150513,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 4.6568,
      "step": 49
    },
    {
      "epoch": 1.8018018018018018,
      "grad_norm": 0.9953389167785645,
      "learning_rate": 0.0,
      "loss": 3.814,
      "step": 50
    },
    {
      "epoch": 1.8018018018018018,
      "eval_loss": NaN,
      "eval_runtime": 5.0746,
      "eval_samples_per_second": 36.851,
      "eval_steps_per_second": 4.729,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.624057810649088e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}