{ "best_metric": 1.0106004476547241, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.44052863436123346, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00881057268722467, "grad_norm": 68.2496566772461, "learning_rate": 2e-05, "loss": 6.4572, "step": 1 }, { "epoch": 0.00881057268722467, "eval_loss": 2.527791738510132, "eval_runtime": 6.71, "eval_samples_per_second": 28.614, "eval_steps_per_second": 3.577, "step": 1 }, { "epoch": 0.01762114537444934, "grad_norm": 50.708988189697266, "learning_rate": 4e-05, "loss": 6.4946, "step": 2 }, { "epoch": 0.02643171806167401, "grad_norm": 44.1339225769043, "learning_rate": 6e-05, "loss": 6.527, "step": 3 }, { "epoch": 0.03524229074889868, "grad_norm": 19.318286895751953, "learning_rate": 8e-05, "loss": 4.3438, "step": 4 }, { "epoch": 0.04405286343612335, "grad_norm": 18.93522071838379, "learning_rate": 0.0001, "loss": 4.9276, "step": 5 }, { "epoch": 0.05286343612334802, "grad_norm": 14.795171737670898, "learning_rate": 9.987820251299122e-05, "loss": 4.8183, "step": 6 }, { "epoch": 0.06167400881057269, "grad_norm": 13.332947731018066, "learning_rate": 9.951340343707852e-05, "loss": 4.4671, "step": 7 }, { "epoch": 0.07048458149779736, "grad_norm": 12.206098556518555, "learning_rate": 9.890738003669029e-05, "loss": 4.3656, "step": 8 }, { "epoch": 0.07929515418502203, "grad_norm": 12.272443771362305, "learning_rate": 9.806308479691595e-05, "loss": 3.893, "step": 9 }, { "epoch": 0.0881057268722467, "grad_norm": 12.4472074508667, "learning_rate": 9.698463103929542e-05, "loss": 4.4506, "step": 10 }, { "epoch": 0.09691629955947137, "grad_norm": 12.094071388244629, "learning_rate": 9.567727288213005e-05, "loss": 4.7599, "step": 11 }, { "epoch": 0.10572687224669604, "grad_norm": 12.491962432861328, "learning_rate": 9.414737964294636e-05, "loss": 4.513, "step": 12 }, { "epoch": 0.1145374449339207, "grad_norm": 12.175878524780273, "learning_rate": 9.24024048078213e-05, "loss": 4.2163, "step": 13 }, { "epoch": 0.12334801762114538, "grad_norm": 11.396886825561523, "learning_rate": 9.045084971874738e-05, "loss": 4.1033, "step": 14 }, { "epoch": 0.13215859030837004, "grad_norm": 12.93130874633789, "learning_rate": 8.83022221559489e-05, "loss": 4.5041, "step": 15 }, { "epoch": 0.14096916299559473, "grad_norm": 12.235413551330566, "learning_rate": 8.596699001693255e-05, "loss": 4.4852, "step": 16 }, { "epoch": 0.14977973568281938, "grad_norm": 11.997471809387207, "learning_rate": 8.345653031794292e-05, "loss": 4.1465, "step": 17 }, { "epoch": 0.15859030837004406, "grad_norm": 10.910942077636719, "learning_rate": 8.07830737662829e-05, "loss": 3.9378, "step": 18 }, { "epoch": 0.16740088105726872, "grad_norm": 11.567279815673828, "learning_rate": 7.795964517353735e-05, "loss": 3.9317, "step": 19 }, { "epoch": 0.1762114537444934, "grad_norm": 13.30049991607666, "learning_rate": 7.500000000000001e-05, "loss": 4.3284, "step": 20 }, { "epoch": 0.18502202643171806, "grad_norm": 11.890619277954102, "learning_rate": 7.191855733945387e-05, "loss": 4.4783, "step": 21 }, { "epoch": 0.19383259911894274, "grad_norm": 12.334750175476074, "learning_rate": 6.873032967079561e-05, "loss": 4.1746, "step": 22 }, { "epoch": 0.2026431718061674, "grad_norm": 12.509641647338867, "learning_rate": 6.545084971874738e-05, "loss": 4.2126, "step": 23 }, { "epoch": 0.21145374449339208, "grad_norm": 11.47304630279541, "learning_rate": 6.209609477998338e-05, "loss": 3.6245, 
"step": 24 }, { "epoch": 0.22026431718061673, "grad_norm": 12.029542922973633, "learning_rate": 5.868240888334653e-05, "loss": 4.1595, "step": 25 }, { "epoch": 0.22026431718061673, "eval_loss": 1.1055799722671509, "eval_runtime": 6.9256, "eval_samples_per_second": 27.723, "eval_steps_per_second": 3.465, "step": 25 }, { "epoch": 0.2290748898678414, "grad_norm": 13.63646411895752, "learning_rate": 5.522642316338268e-05, "loss": 4.2333, "step": 26 }, { "epoch": 0.23788546255506607, "grad_norm": 12.906288146972656, "learning_rate": 5.174497483512506e-05, "loss": 3.7754, "step": 27 }, { "epoch": 0.24669603524229075, "grad_norm": 32.98773956298828, "learning_rate": 4.825502516487497e-05, "loss": 5.5734, "step": 28 }, { "epoch": 0.2555066079295154, "grad_norm": 36.30422592163086, "learning_rate": 4.477357683661734e-05, "loss": 5.2083, "step": 29 }, { "epoch": 0.2643171806167401, "grad_norm": 29.99471092224121, "learning_rate": 4.131759111665349e-05, "loss": 5.2787, "step": 30 }, { "epoch": 0.27312775330396477, "grad_norm": 17.82193374633789, "learning_rate": 3.790390522001662e-05, "loss": 4.5795, "step": 31 }, { "epoch": 0.28193832599118945, "grad_norm": 13.2213716506958, "learning_rate": 3.4549150281252636e-05, "loss": 4.3673, "step": 32 }, { "epoch": 0.2907488986784141, "grad_norm": 11.667417526245117, "learning_rate": 3.12696703292044e-05, "loss": 3.9109, "step": 33 }, { "epoch": 0.29955947136563876, "grad_norm": 10.663423538208008, "learning_rate": 2.8081442660546125e-05, "loss": 4.0043, "step": 34 }, { "epoch": 0.30837004405286345, "grad_norm": 10.346181869506836, "learning_rate": 2.500000000000001e-05, "loss": 3.8749, "step": 35 }, { "epoch": 0.31718061674008813, "grad_norm": 10.776055335998535, "learning_rate": 2.2040354826462668e-05, "loss": 3.8847, "step": 36 }, { "epoch": 0.32599118942731276, "grad_norm": 10.248950004577637, "learning_rate": 1.9216926233717085e-05, "loss": 3.7897, "step": 37 }, { "epoch": 0.33480176211453744, "grad_norm": 10.948817253112793, "learning_rate": 1.6543469682057106e-05, "loss": 3.9268, "step": 38 }, { "epoch": 0.3436123348017621, "grad_norm": 10.441291809082031, "learning_rate": 1.4033009983067452e-05, "loss": 3.5599, "step": 39 }, { "epoch": 0.3524229074889868, "grad_norm": 12.858811378479004, "learning_rate": 1.1697777844051105e-05, "loss": 4.607, "step": 40 }, { "epoch": 0.36123348017621143, "grad_norm": 11.29244327545166, "learning_rate": 9.549150281252633e-06, "loss": 4.1881, "step": 41 }, { "epoch": 0.3700440528634361, "grad_norm": 10.727145195007324, "learning_rate": 7.597595192178702e-06, "loss": 4.1046, "step": 42 }, { "epoch": 0.3788546255506608, "grad_norm": 11.454111099243164, "learning_rate": 5.852620357053651e-06, "loss": 4.0834, "step": 43 }, { "epoch": 0.3876651982378855, "grad_norm": 10.730988502502441, "learning_rate": 4.322727117869951e-06, "loss": 3.7658, "step": 44 }, { "epoch": 0.3964757709251101, "grad_norm": 12.551647186279297, "learning_rate": 3.0153689607045845e-06, "loss": 4.5078, "step": 45 }, { "epoch": 0.4052863436123348, "grad_norm": 12.465065956115723, "learning_rate": 1.9369152030840556e-06, "loss": 3.9693, "step": 46 }, { "epoch": 0.41409691629955947, "grad_norm": 11.369115829467773, "learning_rate": 1.0926199633097157e-06, "loss": 3.7502, "step": 47 }, { "epoch": 0.42290748898678415, "grad_norm": 11.680598258972168, "learning_rate": 4.865965629214819e-07, "loss": 3.6231, "step": 48 }, { "epoch": 0.43171806167400884, "grad_norm": 14.236860275268555, "learning_rate": 1.2179748700879012e-07, "loss": 4.1521, "step": 49 }, { 
"epoch": 0.44052863436123346, "grad_norm": 12.475255966186523, "learning_rate": 0.0, "loss": 4.2304, "step": 50 }, { "epoch": 0.44052863436123346, "eval_loss": 1.0106004476547241, "eval_runtime": 6.9593, "eval_samples_per_second": 27.589, "eval_steps_per_second": 3.449, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.57749502246912e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }