|
{
  "best_metric": 1.5977141857147217,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.008104056080068074,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016208112160136148,
      "grad_norm": 24.242855072021484,
      "learning_rate": 2e-05,
      "loss": 11.3236,
      "step": 1
    },
    {
      "epoch": 0.00016208112160136148,
      "eval_loss": 2.9382340908050537,
      "eval_runtime": 749.3473,
      "eval_samples_per_second": 13.868,
      "eval_steps_per_second": 1.734,
      "step": 1
    },
    {
      "epoch": 0.00032416224320272295,
      "grad_norm": 24.139141082763672,
      "learning_rate": 4e-05,
      "loss": 11.5612,
      "step": 2
    },
    {
      "epoch": 0.00048624336480408443,
      "grad_norm": 16.70271110534668,
      "learning_rate": 6e-05,
      "loss": 11.2569,
      "step": 3
    },
    {
      "epoch": 0.0006483244864054459,
      "grad_norm": 15.517374038696289,
      "learning_rate": 8e-05,
      "loss": 10.2208,
      "step": 4
    },
    {
      "epoch": 0.0008104056080068074,
      "grad_norm": 14.588680267333984,
      "learning_rate": 0.0001,
      "loss": 9.4875,
      "step": 5
    },
    {
      "epoch": 0.0009724867296081689,
      "grad_norm": 11.657694816589355,
      "learning_rate": 9.987820251299122e-05,
      "loss": 9.1785,
      "step": 6
    },
    {
      "epoch": 0.0011345678512095303,
      "grad_norm": 10.309767723083496,
      "learning_rate": 9.951340343707852e-05,
      "loss": 9.4207,
      "step": 7
    },
    {
      "epoch": 0.0012966489728108918,
      "grad_norm": 8.698164939880371,
      "learning_rate": 9.890738003669029e-05,
      "loss": 8.5038,
      "step": 8
    },
    {
      "epoch": 0.0014587300944122533,
      "grad_norm": 8.156781196594238,
      "learning_rate": 9.806308479691595e-05,
      "loss": 8.7649,
      "step": 9
    },
    {
      "epoch": 0.0016208112160136148,
      "grad_norm": 7.506374835968018,
      "learning_rate": 9.698463103929542e-05,
      "loss": 7.5702,
      "step": 10
    },
    {
      "epoch": 0.0017828923376149762,
      "grad_norm": 6.744434356689453,
      "learning_rate": 9.567727288213005e-05,
      "loss": 7.4313,
      "step": 11
    },
    {
      "epoch": 0.0019449734592163377,
      "grad_norm": 6.5357561111450195,
      "learning_rate": 9.414737964294636e-05,
      "loss": 7.8756,
      "step": 12
    },
    {
      "epoch": 0.002107054580817699,
      "grad_norm": 6.78232479095459,
      "learning_rate": 9.24024048078213e-05,
      "loss": 7.585,
      "step": 13
    },
    {
      "epoch": 0.0022691357024190607,
      "grad_norm": 6.280641555786133,
      "learning_rate": 9.045084971874738e-05,
      "loss": 7.486,
      "step": 14
    },
    {
      "epoch": 0.002431216824020422,
      "grad_norm": 6.4468464851379395,
      "learning_rate": 8.83022221559489e-05,
      "loss": 7.0867,
      "step": 15
    },
    {
      "epoch": 0.0025932979456217836,
      "grad_norm": 5.949858665466309,
      "learning_rate": 8.596699001693255e-05,
      "loss": 7.2818,
      "step": 16
    },
    {
      "epoch": 0.002755379067223145,
      "grad_norm": 6.213530540466309,
      "learning_rate": 8.345653031794292e-05,
      "loss": 6.9205,
      "step": 17
    },
    {
      "epoch": 0.0029174601888245066,
      "grad_norm": 5.894623279571533,
      "learning_rate": 8.07830737662829e-05,
      "loss": 6.8963,
      "step": 18
    },
    {
      "epoch": 0.003079541310425868,
      "grad_norm": 5.9999470710754395,
      "learning_rate": 7.795964517353735e-05,
      "loss": 7.1314,
      "step": 19
    },
    {
      "epoch": 0.0032416224320272295,
      "grad_norm": 5.75547981262207,
      "learning_rate": 7.500000000000001e-05,
      "loss": 6.8284,
      "step": 20
    },
    {
      "epoch": 0.003403703553628591,
      "grad_norm": 5.886826038360596,
      "learning_rate": 7.191855733945387e-05,
      "loss": 6.6926,
      "step": 21
    },
    {
      "epoch": 0.0035657846752299525,
      "grad_norm": 5.543822288513184,
      "learning_rate": 6.873032967079561e-05,
      "loss": 6.6954,
      "step": 22
    },
    {
      "epoch": 0.003727865796831314,
      "grad_norm": 5.991809844970703,
      "learning_rate": 6.545084971874738e-05,
      "loss": 6.8628,
      "step": 23
    },
    {
      "epoch": 0.0038899469184326754,
      "grad_norm": 5.590277671813965,
      "learning_rate": 6.209609477998338e-05,
      "loss": 6.6366,
      "step": 24
    },
    {
      "epoch": 0.004052028040034037,
      "grad_norm": 5.658617973327637,
      "learning_rate": 5.868240888334653e-05,
      "loss": 6.2058,
      "step": 25
    },
    {
      "epoch": 0.004052028040034037,
      "eval_loss": 1.6936973333358765,
      "eval_runtime": 755.9587,
      "eval_samples_per_second": 13.747,
      "eval_steps_per_second": 1.718,
      "step": 25
    },
    {
      "epoch": 0.004214109161635398,
      "grad_norm": 5.783700942993164,
      "learning_rate": 5.522642316338268e-05,
      "loss": 6.5527,
      "step": 26
    },
    {
      "epoch": 0.00437619028323676,
      "grad_norm": 5.581294536590576,
      "learning_rate": 5.174497483512506e-05,
      "loss": 6.161,
      "step": 27
    },
    {
      "epoch": 0.004538271404838121,
      "grad_norm": 5.728160858154297,
      "learning_rate": 4.825502516487497e-05,
      "loss": 6.3117,
      "step": 28
    },
    {
      "epoch": 0.004700352526439483,
      "grad_norm": 5.748680114746094,
      "learning_rate": 4.477357683661734e-05,
      "loss": 6.4574,
      "step": 29
    },
    {
      "epoch": 0.004862433648040844,
      "grad_norm": 5.648220539093018,
      "learning_rate": 4.131759111665349e-05,
      "loss": 6.215,
      "step": 30
    },
    {
      "epoch": 0.005024514769642206,
      "grad_norm": 5.6319379806518555,
      "learning_rate": 3.790390522001662e-05,
      "loss": 5.9622,
      "step": 31
    },
    {
      "epoch": 0.005186595891243567,
      "grad_norm": 5.8718953132629395,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 6.2086,
      "step": 32
    },
    {
      "epoch": 0.005348677012844929,
      "grad_norm": 6.098597049713135,
      "learning_rate": 3.12696703292044e-05,
      "loss": 6.38,
      "step": 33
    },
    {
      "epoch": 0.00551075813444629,
      "grad_norm": 6.240828990936279,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 6.3073,
      "step": 34
    },
    {
      "epoch": 0.005672839256047652,
      "grad_norm": 6.659487247467041,
      "learning_rate": 2.500000000000001e-05,
      "loss": 6.5308,
      "step": 35
    },
    {
      "epoch": 0.005834920377649013,
      "grad_norm": 6.27744197845459,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 6.113,
      "step": 36
    },
    {
      "epoch": 0.005997001499250375,
      "grad_norm": 6.688141345977783,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 6.5809,
      "step": 37
    },
    {
      "epoch": 0.006159082620851736,
      "grad_norm": 7.0126447677612305,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 6.6365,
      "step": 38
    },
    {
      "epoch": 0.006321163742453098,
      "grad_norm": 7.171256065368652,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 6.8393,
      "step": 39
    },
    {
      "epoch": 0.006483244864054459,
      "grad_norm": 8.205148696899414,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 7.0387,
      "step": 40
    },
    {
      "epoch": 0.0066453259856558205,
      "grad_norm": 8.000205993652344,
      "learning_rate": 9.549150281252633e-06,
      "loss": 7.1483,
      "step": 41
    },
    {
      "epoch": 0.006807407107257182,
      "grad_norm": 7.7371697425842285,
      "learning_rate": 7.597595192178702e-06,
      "loss": 6.6281,
      "step": 42
    },
    {
      "epoch": 0.0069694882288585435,
      "grad_norm": 8.671666145324707,
      "learning_rate": 5.852620357053651e-06,
      "loss": 7.1924,
      "step": 43
    },
    {
      "epoch": 0.007131569350459905,
      "grad_norm": 9.656037330627441,
      "learning_rate": 4.322727117869951e-06,
      "loss": 6.8932,
      "step": 44
    },
    {
      "epoch": 0.007293650472061266,
      "grad_norm": 9.278881072998047,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 6.8363,
      "step": 45
    },
    {
      "epoch": 0.007455731593662628,
      "grad_norm": 10.156712532043457,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 7.2396,
      "step": 46
    },
    {
      "epoch": 0.007617812715263989,
      "grad_norm": 11.086377143859863,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 7.0737,
      "step": 47
    },
    {
      "epoch": 0.007779893836865351,
      "grad_norm": 12.269014358520508,
      "learning_rate": 4.865965629214819e-07,
      "loss": 7.1526,
      "step": 48
    },
    {
      "epoch": 0.007941974958466713,
      "grad_norm": 14.667993545532227,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 7.5643,
      "step": 49
    },
    {
      "epoch": 0.008104056080068074,
      "grad_norm": 21.117507934570312,
      "learning_rate": 0.0,
      "loss": 6.5986,
      "step": 50
    },
    {
      "epoch": 0.008104056080068074,
      "eval_loss": 1.5977141857147217,
      "eval_runtime": 756.8752,
      "eval_samples_per_second": 13.73,
      "eval_steps_per_second": 1.716,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.857571679862784e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|