{
  "best_metric": 2.0671772956848145,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.18744142455482662,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0037488284910965324,
      "grad_norm": 0.20465420186519623,
      "learning_rate": 2e-05,
      "loss": 1.8354,
      "step": 1
    },
    {
      "epoch": 0.0037488284910965324,
      "eval_loss": 2.3435218334198,
      "eval_runtime": 7.3235,
      "eval_samples_per_second": 61.446,
      "eval_steps_per_second": 7.783,
      "step": 1
    },
    {
      "epoch": 0.007497656982193065,
      "grad_norm": 0.1741742640733719,
      "learning_rate": 4e-05,
      "loss": 1.954,
      "step": 2
    },
    {
      "epoch": 0.011246485473289597,
      "grad_norm": 0.2319125086069107,
      "learning_rate": 6e-05,
      "loss": 1.957,
      "step": 3
    },
    {
      "epoch": 0.01499531396438613,
      "grad_norm": 0.19860558211803436,
      "learning_rate": 8e-05,
      "loss": 1.9693,
      "step": 4
    },
    {
      "epoch": 0.01874414245548266,
      "grad_norm": 0.19092042744159698,
      "learning_rate": 0.0001,
      "loss": 1.9022,
      "step": 5
    },
    {
      "epoch": 0.022492970946579195,
      "grad_norm": 0.18668273091316223,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.1489,
      "step": 6
    },
    {
      "epoch": 0.026241799437675725,
      "grad_norm": 0.1946374922990799,
      "learning_rate": 9.951340343707852e-05,
      "loss": 1.8344,
      "step": 7
    },
    {
      "epoch": 0.02999062792877226,
      "grad_norm": 0.21194031834602356,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.9127,
      "step": 8
    },
    {
      "epoch": 0.033739456419868794,
      "grad_norm": 0.22352221608161926,
      "learning_rate": 9.806308479691595e-05,
      "loss": 1.9701,
      "step": 9
    },
    {
      "epoch": 0.03748828491096532,
      "grad_norm": 0.24026735126972198,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.0824,
      "step": 10
    },
    {
      "epoch": 0.041237113402061855,
      "grad_norm": 0.2265283763408661,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.9292,
      "step": 11
    },
    {
      "epoch": 0.04498594189315839,
      "grad_norm": 0.25951072573661804,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.036,
      "step": 12
    },
    {
      "epoch": 0.04873477038425492,
      "grad_norm": 0.23826272785663605,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.8531,
      "step": 13
    },
    {
      "epoch": 0.05248359887535145,
      "grad_norm": 0.24685098230838776,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.9693,
      "step": 14
    },
    {
      "epoch": 0.056232427366447985,
      "grad_norm": 0.31856727600097656,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.9089,
      "step": 15
    },
    {
      "epoch": 0.05998125585754452,
      "grad_norm": 0.3416077792644501,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.9659,
      "step": 16
    },
    {
      "epoch": 0.06373008434864105,
      "grad_norm": 0.25727298855781555,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.0328,
      "step": 17
    },
    {
      "epoch": 0.06747891283973759,
      "grad_norm": 0.2617862820625305,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.0191,
      "step": 18
    },
    {
      "epoch": 0.07122774133083412,
      "grad_norm": 0.2551460862159729,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.9836,
      "step": 19
    },
    {
      "epoch": 0.07497656982193064,
      "grad_norm": 0.2661774754524231,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.9065,
      "step": 20
    },
    {
      "epoch": 0.07872539831302718,
      "grad_norm": 0.2693111300468445,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.9384,
      "step": 21
    },
    {
      "epoch": 0.08247422680412371,
      "grad_norm": 0.27645257115364075,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.9451,
      "step": 22
    },
    {
      "epoch": 0.08622305529522024,
      "grad_norm": 0.29583096504211426,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.9816,
      "step": 23
    },
    {
      "epoch": 0.08997188378631678,
      "grad_norm": 0.2951229214668274,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.9707,
      "step": 24
    },
    {
      "epoch": 0.09372071227741331,
      "grad_norm": 0.30265846848487854,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.966,
      "step": 25
    },
    {
      "epoch": 0.09372071227741331,
      "eval_loss": 2.104829788208008,
      "eval_runtime": 7.3882,
      "eval_samples_per_second": 60.908,
      "eval_steps_per_second": 7.715,
      "step": 25
    },
    {
      "epoch": 0.09746954076850985,
      "grad_norm": 0.3987659811973572,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.0137,
      "step": 26
    },
    {
      "epoch": 0.10121836925960637,
      "grad_norm": 0.33826690912246704,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.0594,
      "step": 27
    },
    {
      "epoch": 0.1049671977507029,
      "grad_norm": 0.34245115518569946,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.1526,
      "step": 28
    },
    {
      "epoch": 0.10871602624179943,
      "grad_norm": 0.35702967643737793,
      "learning_rate": 4.477357683661734e-05,
      "loss": 2.0391,
      "step": 29
    },
    {
      "epoch": 0.11246485473289597,
      "grad_norm": 0.36739176511764526,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.0303,
      "step": 30
    },
    {
      "epoch": 0.1162136832239925,
      "grad_norm": 0.3986171782016754,
      "learning_rate": 3.790390522001662e-05,
      "loss": 2.173,
      "step": 31
    },
    {
      "epoch": 0.11996251171508904,
      "grad_norm": 0.419553279876709,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.2194,
      "step": 32
    },
    {
      "epoch": 0.12371134020618557,
      "grad_norm": 0.4767111539840698,
      "learning_rate": 3.12696703292044e-05,
      "loss": 1.956,
      "step": 33
    },
    {
      "epoch": 0.1274601686972821,
      "grad_norm": 0.4549711346626282,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.1381,
      "step": 34
    },
    {
      "epoch": 0.13120899718837864,
      "grad_norm": 0.45940274000167847,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.1526,
      "step": 35
    },
    {
      "epoch": 0.13495782567947517,
      "grad_norm": 0.5444797873497009,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.1269,
      "step": 36
    },
    {
      "epoch": 0.1387066541705717,
      "grad_norm": 0.519831120967865,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 2.1167,
      "step": 37
    },
    {
      "epoch": 0.14245548266166824,
      "grad_norm": 0.5798463225364685,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 2.1214,
      "step": 38
    },
    {
      "epoch": 0.14620431115276475,
      "grad_norm": 0.5918245911598206,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.1993,
      "step": 39
    },
    {
      "epoch": 0.14995313964386128,
      "grad_norm": 0.6637012362480164,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.0433,
      "step": 40
    },
    {
      "epoch": 0.15370196813495782,
      "grad_norm": 0.6506871581077576,
      "learning_rate": 9.549150281252633e-06,
      "loss": 2.1465,
      "step": 41
    },
    {
      "epoch": 0.15745079662605435,
      "grad_norm": 0.6886833310127258,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.2776,
      "step": 42
    },
    {
      "epoch": 0.16119962511715089,
      "grad_norm": 0.7585076093673706,
      "learning_rate": 5.852620357053651e-06,
      "loss": 2.2323,
      "step": 43
    },
    {
      "epoch": 0.16494845360824742,
      "grad_norm": 0.749284029006958,
      "learning_rate": 4.322727117869951e-06,
      "loss": 2.3017,
      "step": 44
    },
    {
      "epoch": 0.16869728209934395,
      "grad_norm": 0.8231189250946045,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.1884,
      "step": 45
    },
    {
      "epoch": 0.1724461105904405,
      "grad_norm": 0.9007522463798523,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 2.2728,
      "step": 46
    },
    {
      "epoch": 0.17619493908153702,
      "grad_norm": 0.9463634490966797,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 2.4172,
      "step": 47
    },
    {
      "epoch": 0.17994376757263356,
      "grad_norm": 1.162490725517273,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.4727,
      "step": 48
    },
    {
      "epoch": 0.1836925960637301,
      "grad_norm": 1.6514424085617065,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 2.3781,
      "step": 49
    },
    {
      "epoch": 0.18744142455482662,
      "grad_norm": 2.209977388381958,
      "learning_rate": 0.0,
      "loss": 2.7399,
      "step": 50
    },
    {
      "epoch": 0.18744142455482662,
      "eval_loss": 2.0671772956848145,
      "eval_runtime": 7.4249,
      "eval_samples_per_second": 60.607,
      "eval_steps_per_second": 7.677,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8520778270113792.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}