|
{
  "best_metric": 0.3237687349319458,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.005743165632896853,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00011486331265793706,
      "grad_norm": 20.194072723388672,
      "learning_rate": 2e-05,
      "loss": 1.9961,
      "step": 1
    },
    {
      "epoch": 0.00011486331265793706,
      "eval_loss": 0.5248078107833862,
      "eval_runtime": 617.9557,
      "eval_samples_per_second": 23.728,
      "eval_steps_per_second": 2.966,
      "step": 1
    },
    {
      "epoch": 0.00022972662531587412,
      "grad_norm": 17.88739013671875,
      "learning_rate": 4e-05,
      "loss": 2.0858,
      "step": 2
    },
    {
      "epoch": 0.00034458993797381116,
      "grad_norm": 6.636772632598877,
      "learning_rate": 6e-05,
      "loss": 2.0479,
      "step": 3
    },
    {
      "epoch": 0.00045945325063174823,
      "grad_norm": 5.536622047424316,
      "learning_rate": 8e-05,
      "loss": 1.7255,
      "step": 4
    },
    {
      "epoch": 0.0005743165632896852,
      "grad_norm": 5.835659027099609,
      "learning_rate": 0.0001,
      "loss": 1.879,
      "step": 5
    },
    {
      "epoch": 0.0006891798759476223,
      "grad_norm": 3.0433731079101562,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.8946,
      "step": 6
    },
    {
      "epoch": 0.0008040431886055594,
      "grad_norm": 5.9740681648254395,
      "learning_rate": 9.951340343707852e-05,
      "loss": 1.9344,
      "step": 7
    },
    {
      "epoch": 0.0009189065012634965,
      "grad_norm": 1.4726699590682983,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.8448,
      "step": 8
    },
    {
      "epoch": 0.0010337698139214334,
      "grad_norm": 3.3580729961395264,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.1496,
      "step": 9
    },
    {
      "epoch": 0.0011486331265793705,
      "grad_norm": 19.40848159790039,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.032,
      "step": 10
    },
    {
      "epoch": 0.0012634964392373076,
      "grad_norm": 3.5965750217437744,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.6346,
      "step": 11
    },
    {
      "epoch": 0.0013783597518952446,
      "grad_norm": 2.1662826538085938,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.8285,
      "step": 12
    },
    {
      "epoch": 0.0014932230645531817,
      "grad_norm": 2.366093158721924,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.6677,
      "step": 13
    },
    {
      "epoch": 0.0016080863772111188,
      "grad_norm": 1.0059821605682373,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.5528,
      "step": 14
    },
    {
      "epoch": 0.0017229496898690559,
      "grad_norm": 1.226986289024353,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.5359,
      "step": 15
    },
    {
      "epoch": 0.001837813002526993,
      "grad_norm": 1.7851588726043701,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.0056,
      "step": 16
    },
    {
      "epoch": 0.00195267631518493,
      "grad_norm": 1.310687780380249,
      "learning_rate": 8.345653031794292e-05,
      "loss": 1.7473,
      "step": 17
    },
    {
      "epoch": 0.002067539627842867,
      "grad_norm": 1.1058948040008545,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.4829,
      "step": 18
    },
    {
      "epoch": 0.002182402940500804,
      "grad_norm": 1.3350424766540527,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.5503,
      "step": 19
    },
    {
      "epoch": 0.002297266253158741,
      "grad_norm": 1.8176335096359253,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.5147,
      "step": 20
    },
    {
      "epoch": 0.002412129565816678,
      "grad_norm": 1.88288152217865,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.767,
      "step": 21
    },
    {
      "epoch": 0.002526992878474615,
      "grad_norm": 1.9403029680252075,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.6289,
      "step": 22
    },
    {
      "epoch": 0.002641856191132552,
      "grad_norm": 2.1161484718322754,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.6755,
      "step": 23
    },
    {
      "epoch": 0.0027567195037904893,
      "grad_norm": 1.9855263233184814,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.921,
      "step": 24
    },
    {
      "epoch": 0.0028715828164484264,
      "grad_norm": 2.2303965091705322,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.6624,
      "step": 25
    },
    {
      "epoch": 0.0028715828164484264,
      "eval_loss": 0.37796109914779663,
      "eval_runtime": 620.9259,
      "eval_samples_per_second": 23.615,
      "eval_steps_per_second": 2.952,
      "step": 25
    },
    {
      "epoch": 0.0029864461291063634,
      "grad_norm": 2.292692184448242,
      "learning_rate": 5.522642316338268e-05,
      "loss": 1.4062,
      "step": 26
    },
    {
      "epoch": 0.0031013094417643005,
      "grad_norm": 2.8349287509918213,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.679,
      "step": 27
    },
    {
      "epoch": 0.0032161727544222376,
      "grad_norm": 2.254132032394409,
      "learning_rate": 4.825502516487497e-05,
      "loss": 1.5455,
      "step": 28
    },
    {
      "epoch": 0.0033310360670801746,
      "grad_norm": 2.188596248626709,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.591,
      "step": 29
    },
    {
      "epoch": 0.0034458993797381117,
      "grad_norm": 1.725136399269104,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.467,
      "step": 30
    },
    {
      "epoch": 0.003560762692396049,
      "grad_norm": 1.6573512554168701,
      "learning_rate": 3.790390522001662e-05,
      "loss": 1.4838,
      "step": 31
    },
    {
      "epoch": 0.003675626005053986,
      "grad_norm": 2.2952048778533936,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.5725,
      "step": 32
    },
    {
      "epoch": 0.003790489317711923,
      "grad_norm": 1.939889907836914,
      "learning_rate": 3.12696703292044e-05,
      "loss": 1.6289,
      "step": 33
    },
    {
      "epoch": 0.00390535263036986,
      "grad_norm": 1.8188632726669312,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 1.3653,
      "step": 34
    },
    {
      "epoch": 0.004020215943027797,
      "grad_norm": 2.1988892555236816,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.6012,
      "step": 35
    },
    {
      "epoch": 0.004135079255685734,
      "grad_norm": 2.2693634033203125,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 1.5576,
      "step": 36
    },
    {
      "epoch": 0.004249942568343671,
      "grad_norm": 1.932142734527588,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.3468,
      "step": 37
    },
    {
      "epoch": 0.004364805881001608,
      "grad_norm": 3.0825328826904297,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 1.333,
      "step": 38
    },
    {
      "epoch": 0.004479669193659545,
      "grad_norm": 1.783441424369812,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 1.1038,
      "step": 39
    },
    {
      "epoch": 0.004594532506317482,
      "grad_norm": 1.5691484212875366,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.1039,
      "step": 40
    },
    {
      "epoch": 0.0047093958189754195,
      "grad_norm": 1.9904344081878662,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.3097,
      "step": 41
    },
    {
      "epoch": 0.004824259131633356,
      "grad_norm": 2.1154487133026123,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.5107,
      "step": 42
    },
    {
      "epoch": 0.004939122444291294,
      "grad_norm": 1.9345293045043945,
      "learning_rate": 5.852620357053651e-06,
      "loss": 1.3873,
      "step": 43
    },
    {
      "epoch": 0.00505398575694923,
      "grad_norm": 1.7051095962524414,
      "learning_rate": 4.322727117869951e-06,
      "loss": 1.3449,
      "step": 44
    },
    {
      "epoch": 0.005168849069607168,
      "grad_norm": 1.8302801847457886,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.483,
      "step": 45
    },
    {
      "epoch": 0.005283712382265104,
      "grad_norm": 1.9598950147628784,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 1.3838,
      "step": 46
    },
    {
      "epoch": 0.005398575694923042,
      "grad_norm": 1.9354257583618164,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 1.422,
      "step": 47
    },
    {
      "epoch": 0.005513439007580979,
      "grad_norm": 2.225748062133789,
      "learning_rate": 4.865965629214819e-07,
      "loss": 1.5035,
      "step": 48
    },
    {
      "epoch": 0.005628302320238916,
      "grad_norm": 1.9461932182312012,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 1.1787,
      "step": 49
    },
    {
      "epoch": 0.005743165632896853,
      "grad_norm": 1.6582098007202148,
      "learning_rate": 0.0,
      "loss": 1.359,
      "step": 50
    },
    {
      "epoch": 0.005743165632896853,
      "eval_loss": 0.3237687349319458,
      "eval_runtime": 620.993,
      "eval_samples_per_second": 23.612,
      "eval_steps_per_second": 2.952,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.57749502246912e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|