{ "best_metric": 1.0094060897827148, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.44052863436123346, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00881057268722467, "grad_norm": 71.52523803710938, "learning_rate": 2e-05, "loss": 6.4572, "step": 1 }, { "epoch": 0.00881057268722467, "eval_loss": 2.527791738510132, "eval_runtime": 6.7645, "eval_samples_per_second": 28.383, "eval_steps_per_second": 3.548, "step": 1 }, { "epoch": 0.01762114537444934, "grad_norm": 52.80047607421875, "learning_rate": 4e-05, "loss": 6.4946, "step": 2 }, { "epoch": 0.02643171806167401, "grad_norm": 45.3133544921875, "learning_rate": 6e-05, "loss": 6.4838, "step": 3 }, { "epoch": 0.03524229074889868, "grad_norm": 19.76211929321289, "learning_rate": 8e-05, "loss": 4.3298, "step": 4 }, { "epoch": 0.04405286343612335, "grad_norm": 19.206777572631836, "learning_rate": 0.0001, "loss": 4.9177, "step": 5 }, { "epoch": 0.05286343612334802, "grad_norm": 14.95755386352539, "learning_rate": 9.987820251299122e-05, "loss": 4.8063, "step": 6 }, { "epoch": 0.06167400881057269, "grad_norm": 13.54239273071289, "learning_rate": 9.951340343707852e-05, "loss": 4.4596, "step": 7 }, { "epoch": 0.07048458149779736, "grad_norm": 12.369343757629395, "learning_rate": 9.890738003669029e-05, "loss": 4.3516, "step": 8 }, { "epoch": 0.07929515418502203, "grad_norm": 12.412100791931152, "learning_rate": 9.806308479691595e-05, "loss": 3.8791, "step": 9 }, { "epoch": 0.0881057268722467, "grad_norm": 12.685561180114746, "learning_rate": 9.698463103929542e-05, "loss": 4.4534, "step": 10 }, { "epoch": 0.09691629955947137, "grad_norm": 12.313078880310059, "learning_rate": 9.567727288213005e-05, "loss": 4.7578, "step": 11 }, { "epoch": 0.10572687224669604, "grad_norm": 12.696328163146973, "learning_rate": 9.414737964294636e-05, "loss": 4.5216, "step": 12 }, { "epoch": 0.1145374449339207, "grad_norm": 12.560774803161621, "learning_rate": 9.24024048078213e-05, "loss": 4.2162, "step": 13 }, { "epoch": 0.12334801762114538, "grad_norm": 11.407295227050781, "learning_rate": 9.045084971874738e-05, "loss": 4.1029, "step": 14 }, { "epoch": 0.13215859030837004, "grad_norm": 13.14664077758789, "learning_rate": 8.83022221559489e-05, "loss": 4.5139, "step": 15 }, { "epoch": 0.14096916299559473, "grad_norm": 12.264524459838867, "learning_rate": 8.596699001693255e-05, "loss": 4.4759, "step": 16 }, { "epoch": 0.14977973568281938, "grad_norm": 12.008437156677246, "learning_rate": 8.345653031794292e-05, "loss": 4.1597, "step": 17 }, { "epoch": 0.15859030837004406, "grad_norm": 11.117318153381348, "learning_rate": 8.07830737662829e-05, "loss": 3.9539, "step": 18 }, { "epoch": 0.16740088105726872, "grad_norm": 11.550792694091797, "learning_rate": 7.795964517353735e-05, "loss": 3.9259, "step": 19 }, { "epoch": 0.1762114537444934, "grad_norm": 13.204764366149902, "learning_rate": 7.500000000000001e-05, "loss": 4.3515, "step": 20 }, { "epoch": 0.18502202643171806, "grad_norm": 11.79154109954834, "learning_rate": 7.191855733945387e-05, "loss": 4.4793, "step": 21 }, { "epoch": 0.19383259911894274, "grad_norm": 12.395256042480469, "learning_rate": 6.873032967079561e-05, "loss": 4.1954, "step": 22 }, { "epoch": 0.2026431718061674, "grad_norm": 12.652040481567383, "learning_rate": 6.545084971874738e-05, "loss": 4.2384, "step": 23 }, { "epoch": 0.21145374449339208, "grad_norm": 11.535292625427246, "learning_rate": 6.209609477998338e-05, "loss": 
3.6493, "step": 24 }, { "epoch": 0.22026431718061673, "grad_norm": 12.167210578918457, "learning_rate": 5.868240888334653e-05, "loss": 4.1739, "step": 25 }, { "epoch": 0.22026431718061673, "eval_loss": 1.0978758335113525, "eval_runtime": 6.9278, "eval_samples_per_second": 27.715, "eval_steps_per_second": 3.464, "step": 25 }, { "epoch": 0.2290748898678414, "grad_norm": 13.215066909790039, "learning_rate": 5.522642316338268e-05, "loss": 4.2612, "step": 26 }, { "epoch": 0.23788546255506607, "grad_norm": 13.42513656616211, "learning_rate": 5.174497483512506e-05, "loss": 3.7723, "step": 27 }, { "epoch": 0.24669603524229075, "grad_norm": 29.157196044921875, "learning_rate": 4.825502516487497e-05, "loss": 5.5152, "step": 28 }, { "epoch": 0.2555066079295154, "grad_norm": 35.22280502319336, "learning_rate": 4.477357683661734e-05, "loss": 5.2248, "step": 29 }, { "epoch": 0.2643171806167401, "grad_norm": 28.826953887939453, "learning_rate": 4.131759111665349e-05, "loss": 5.2727, "step": 30 }, { "epoch": 0.27312775330396477, "grad_norm": 17.708860397338867, "learning_rate": 3.790390522001662e-05, "loss": 4.621, "step": 31 }, { "epoch": 0.28193832599118945, "grad_norm": 12.685564041137695, "learning_rate": 3.4549150281252636e-05, "loss": 4.4044, "step": 32 }, { "epoch": 0.2907488986784141, "grad_norm": 10.034717559814453, "learning_rate": 3.12696703292044e-05, "loss": 3.948, "step": 33 }, { "epoch": 0.29955947136563876, "grad_norm": 10.584023475646973, "learning_rate": 2.8081442660546125e-05, "loss": 4.025, "step": 34 }, { "epoch": 0.30837004405286345, "grad_norm": 10.168135643005371, "learning_rate": 2.500000000000001e-05, "loss": 3.8759, "step": 35 }, { "epoch": 0.31718061674008813, "grad_norm": 10.681046485900879, "learning_rate": 2.2040354826462668e-05, "loss": 3.8895, "step": 36 }, { "epoch": 0.32599118942731276, "grad_norm": 10.135737419128418, "learning_rate": 1.9216926233717085e-05, "loss": 3.7677, "step": 37 }, { "epoch": 0.33480176211453744, "grad_norm": 10.632367134094238, "learning_rate": 1.6543469682057106e-05, "loss": 3.898, "step": 38 }, { "epoch": 0.3436123348017621, "grad_norm": 10.495468139648438, "learning_rate": 1.4033009983067452e-05, "loss": 3.5627, "step": 39 }, { "epoch": 0.3524229074889868, "grad_norm": 12.4844970703125, "learning_rate": 1.1697777844051105e-05, "loss": 4.6167, "step": 40 }, { "epoch": 0.36123348017621143, "grad_norm": 11.088768005371094, "learning_rate": 9.549150281252633e-06, "loss": 4.134, "step": 41 }, { "epoch": 0.3700440528634361, "grad_norm": 10.535295486450195, "learning_rate": 7.597595192178702e-06, "loss": 4.1006, "step": 42 }, { "epoch": 0.3788546255506608, "grad_norm": 11.320088386535645, "learning_rate": 5.852620357053651e-06, "loss": 4.0713, "step": 43 }, { "epoch": 0.3876651982378855, "grad_norm": 10.537118911743164, "learning_rate": 4.322727117869951e-06, "loss": 3.7378, "step": 44 }, { "epoch": 0.3964757709251101, "grad_norm": 12.482701301574707, "learning_rate": 3.0153689607045845e-06, "loss": 4.5002, "step": 45 }, { "epoch": 0.4052863436123348, "grad_norm": 12.518914222717285, "learning_rate": 1.9369152030840556e-06, "loss": 3.9898, "step": 46 }, { "epoch": 0.41409691629955947, "grad_norm": 11.440350532531738, "learning_rate": 1.0926199633097157e-06, "loss": 3.7755, "step": 47 }, { "epoch": 0.42290748898678415, "grad_norm": 11.552277565002441, "learning_rate": 4.865965629214819e-07, "loss": 3.6187, "step": 48 }, { "epoch": 0.43171806167400884, "grad_norm": 13.912178039550781, "learning_rate": 1.2179748700879012e-07, "loss": 4.1552, "step": 49 
}, { "epoch": 0.44052863436123346, "grad_norm": 12.204792022705078, "learning_rate": 0.0, "loss": 4.1909, "step": 50 }, { "epoch": 0.44052863436123346, "eval_loss": 1.0094060897827148, "eval_runtime": 6.9686, "eval_samples_per_second": 27.552, "eval_steps_per_second": 3.444, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.57749502246912e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }