{ "best_metric": 1.550691843032837, "best_model_checkpoint": "miner_id_24/checkpoint-25", "epoch": 0.00207671380807011, "eval_steps": 5, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 8.30685523228044e-05, "grad_norm": 0.32762518525123596, "learning_rate": 2e-05, "loss": 1.3143, "step": 1 }, { "epoch": 8.30685523228044e-05, "eval_loss": 2.673133611679077, "eval_runtime": 710.3871, "eval_samples_per_second": 7.136, "eval_steps_per_second": 3.568, "step": 1 }, { "epoch": 0.0001661371046456088, "grad_norm": 0.3983103036880493, "learning_rate": 4e-05, "loss": 0.9697, "step": 2 }, { "epoch": 0.00024920565696841317, "grad_norm": 0.48327869176864624, "learning_rate": 6e-05, "loss": 1.8776, "step": 3 }, { "epoch": 0.0003322742092912176, "grad_norm": 0.6413305401802063, "learning_rate": 8e-05, "loss": 1.7683, "step": 4 }, { "epoch": 0.000415342761614022, "grad_norm": 0.8627868890762329, "learning_rate": 0.0001, "loss": 1.8095, "step": 5 }, { "epoch": 0.000415342761614022, "eval_loss": 2.658660411834717, "eval_runtime": 710.1991, "eval_samples_per_second": 7.137, "eval_steps_per_second": 3.569, "step": 5 }, { "epoch": 0.0004984113139368263, "grad_norm": 0.4515976905822754, "learning_rate": 0.00012, "loss": 1.573, "step": 6 }, { "epoch": 0.0005814798662596307, "grad_norm": 1.1740977764129639, "learning_rate": 0.00014, "loss": 2.1893, "step": 7 }, { "epoch": 0.0006645484185824351, "grad_norm": 0.3599812984466553, "learning_rate": 0.00016, "loss": 1.1012, "step": 8 }, { "epoch": 0.0007476169709052396, "grad_norm": 0.5296670198440552, "learning_rate": 0.00018, "loss": 0.839, "step": 9 }, { "epoch": 0.000830685523228044, "grad_norm": 1.0727394819259644, "learning_rate": 0.0002, "loss": 1.8014, "step": 10 }, { "epoch": 0.000830685523228044, "eval_loss": 2.3930716514587402, "eval_runtime": 710.191, "eval_samples_per_second": 7.138, "eval_steps_per_second": 3.569, "step": 10 }, { "epoch": 0.0009137540755508484, "grad_norm": 0.43717700242996216, "learning_rate": 0.00019781476007338058, "loss": 1.3097, "step": 11 }, { "epoch": 0.0009968226278736527, "grad_norm": 0.30268394947052, "learning_rate": 0.0001913545457642601, "loss": 0.5942, "step": 12 }, { "epoch": 0.001079891180196457, "grad_norm": 0.6315892934799194, "learning_rate": 0.00018090169943749476, "loss": 0.9464, "step": 13 }, { "epoch": 0.0011629597325192615, "grad_norm": 0.6325842142105103, "learning_rate": 0.00016691306063588583, "loss": 1.3774, "step": 14 }, { "epoch": 0.0012460282848420659, "grad_norm": 0.46130749583244324, "learning_rate": 0.00015000000000000001, "loss": 1.2938, "step": 15 }, { "epoch": 0.0012460282848420659, "eval_loss": 1.8308182954788208, "eval_runtime": 720.8588, "eval_samples_per_second": 7.032, "eval_steps_per_second": 3.517, "step": 15 }, { "epoch": 0.0013290968371648703, "grad_norm": 0.438463419675827, "learning_rate": 0.00013090169943749476, "loss": 1.114, "step": 16 }, { "epoch": 0.0014121653894876747, "grad_norm": 0.6048122048377991, "learning_rate": 0.00011045284632676536, "loss": 1.6473, "step": 17 }, { "epoch": 0.001495233941810479, "grad_norm": 1.1923956871032715, "learning_rate": 8.954715367323468e-05, "loss": 1.4638, "step": 18 }, { "epoch": 0.0015783024941332835, "grad_norm": 0.5673062801361084, "learning_rate": 6.909830056250527e-05, "loss": 0.6572, "step": 19 }, { "epoch": 0.001661371046456088, "grad_norm": 0.7221763730049133, "learning_rate": 5.000000000000002e-05, "loss": 1.3469, "step": 20 }, { "epoch": 
0.001661371046456088, "eval_loss": 1.5932053327560425, "eval_runtime": 718.0656, "eval_samples_per_second": 7.059, "eval_steps_per_second": 3.53, "step": 20 }, { "epoch": 0.0017444395987788923, "grad_norm": 0.8490703701972961, "learning_rate": 3.308693936411421e-05, "loss": 0.8171, "step": 21 }, { "epoch": 0.0018275081511016967, "grad_norm": 0.4198557734489441, "learning_rate": 1.9098300562505266e-05, "loss": 0.4519, "step": 22 }, { "epoch": 0.0019105767034245011, "grad_norm": 0.5982875823974609, "learning_rate": 8.645454235739903e-06, "loss": 0.4833, "step": 23 }, { "epoch": 0.0019936452557473053, "grad_norm": 0.6316483020782471, "learning_rate": 2.1852399266194314e-06, "loss": 0.8351, "step": 24 }, { "epoch": 0.00207671380807011, "grad_norm": 0.4150454103946686, "learning_rate": 0.0, "loss": 0.5263, "step": 25 }, { "epoch": 0.00207671380807011, "eval_loss": 1.550691843032837, "eval_runtime": 718.3958, "eval_samples_per_second": 7.056, "eval_steps_per_second": 3.529, "step": 25 } ], "logging_steps": 1, "max_steps": 25, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 10, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 2, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2808666677575680.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }