{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.007668711656441718, "eval_steps": 4, "global_step": 30, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0002556237218813906, "grad_norm": NaN, "learning_rate": 2.0000000000000003e-06, "loss": 0.0, "step": 1 }, { "epoch": 0.0002556237218813906, "eval_loss": NaN, "eval_runtime": 174.0269, "eval_samples_per_second": 9.47, "eval_steps_per_second": 2.367, "step": 1 }, { "epoch": 0.0005112474437627812, "grad_norm": NaN, "learning_rate": 4.000000000000001e-06, "loss": 0.0, "step": 2 }, { "epoch": 0.0007668711656441718, "grad_norm": NaN, "learning_rate": 6e-06, "loss": 0.0, "step": 3 }, { "epoch": 0.0010224948875255625, "grad_norm": NaN, "learning_rate": 8.000000000000001e-06, "loss": 0.0, "step": 4 }, { "epoch": 0.0010224948875255625, "eval_loss": NaN, "eval_runtime": 173.9954, "eval_samples_per_second": 9.472, "eval_steps_per_second": 2.368, "step": 4 }, { "epoch": 0.001278118609406953, "grad_norm": NaN, "learning_rate": 1e-05, "loss": 0.0, "step": 5 }, { "epoch": 0.0015337423312883436, "grad_norm": NaN, "learning_rate": 9.960573506572391e-06, "loss": 0.0, "step": 6 }, { "epoch": 0.0017893660531697342, "grad_norm": NaN, "learning_rate": 9.842915805643156e-06, "loss": 0.0, "step": 7 }, { "epoch": 0.002044989775051125, "grad_norm": NaN, "learning_rate": 9.648882429441258e-06, "loss": 0.0, "step": 8 }, { "epoch": 0.002044989775051125, "eval_loss": NaN, "eval_runtime": 173.9734, "eval_samples_per_second": 9.473, "eval_steps_per_second": 2.368, "step": 8 }, { "epoch": 0.0023006134969325155, "grad_norm": NaN, "learning_rate": 9.381533400219319e-06, "loss": 0.0, "step": 9 }, { "epoch": 0.002556237218813906, "grad_norm": NaN, "learning_rate": 9.045084971874738e-06, "loss": 0.0, "step": 10 }, { "epoch": 0.0028118609406952966, "grad_norm": NaN, "learning_rate": 8.644843137107058e-06, "loss": 0.0, "step": 11 }, { "epoch": 0.003067484662576687, "grad_norm": NaN, "learning_rate": 8.18711994874345e-06, "loss": 0.0, "step": 12 }, { "epoch": 0.003067484662576687, "eval_loss": NaN, "eval_runtime": 174.0067, "eval_samples_per_second": 9.471, "eval_steps_per_second": 2.368, "step": 12 }, { "epoch": 0.0033231083844580778, "grad_norm": NaN, "learning_rate": 7.679133974894984e-06, "loss": 0.0, "step": 13 }, { "epoch": 0.0035787321063394683, "grad_norm": NaN, "learning_rate": 7.128896457825364e-06, "loss": 0.0, "step": 14 }, { "epoch": 0.003834355828220859, "grad_norm": NaN, "learning_rate": 6.545084971874738e-06, "loss": 0.0, "step": 15 }, { "epoch": 0.00408997955010225, "grad_norm": NaN, "learning_rate": 5.936906572928625e-06, "loss": 0.0, "step": 16 }, { "epoch": 0.00408997955010225, "eval_loss": NaN, "eval_runtime": 174.0854, "eval_samples_per_second": 9.467, "eval_steps_per_second": 2.367, "step": 16 }, { "epoch": 0.0043456032719836404, "grad_norm": NaN, "learning_rate": 5.3139525976465675e-06, "loss": 0.0, "step": 17 }, { "epoch": 0.004601226993865031, "grad_norm": NaN, "learning_rate": 4.686047402353433e-06, "loss": 0.0, "step": 18 }, { "epoch": 0.0048568507157464216, "grad_norm": NaN, "learning_rate": 4.063093427071376e-06, "loss": 0.0, "step": 19 }, { "epoch": 0.005112474437627812, "grad_norm": NaN, "learning_rate": 3.4549150281252635e-06, "loss": 0.0, "step": 20 }, { "epoch": 0.005112474437627812, "eval_loss": NaN, "eval_runtime": 174.2011, "eval_samples_per_second": 9.46, "eval_steps_per_second": 2.365, "step": 20 }, { "epoch": 0.005368098159509203, "grad_norm": NaN, 
"learning_rate": 2.871103542174637e-06, "loss": 0.0, "step": 21 }, { "epoch": 0.005623721881390593, "grad_norm": NaN, "learning_rate": 2.320866025105016e-06, "loss": 0.0, "step": 22 }, { "epoch": 0.005879345603271984, "grad_norm": NaN, "learning_rate": 1.8128800512565514e-06, "loss": 0.0, "step": 23 }, { "epoch": 0.006134969325153374, "grad_norm": NaN, "learning_rate": 1.3551568628929434e-06, "loss": 0.0, "step": 24 }, { "epoch": 0.006134969325153374, "eval_loss": NaN, "eval_runtime": 174.2467, "eval_samples_per_second": 9.458, "eval_steps_per_second": 2.364, "step": 24 }, { "epoch": 0.006390593047034765, "grad_norm": NaN, "learning_rate": 9.549150281252633e-07, "loss": 0.0, "step": 25 }, { "epoch": 0.0066462167689161555, "grad_norm": NaN, "learning_rate": 6.184665997806832e-07, "loss": 0.0, "step": 26 }, { "epoch": 0.006901840490797546, "grad_norm": NaN, "learning_rate": 3.511175705587433e-07, "loss": 0.0, "step": 27 }, { "epoch": 0.007157464212678937, "grad_norm": NaN, "learning_rate": 1.5708419435684463e-07, "loss": 0.0, "step": 28 }, { "epoch": 0.007157464212678937, "eval_loss": NaN, "eval_runtime": 174.0326, "eval_samples_per_second": 9.469, "eval_steps_per_second": 2.367, "step": 28 }, { "epoch": 0.007413087934560327, "grad_norm": NaN, "learning_rate": 3.9426493427611177e-08, "loss": 0.0, "step": 29 }, { "epoch": 0.007668711656441718, "grad_norm": NaN, "learning_rate": 0.0, "loss": 0.0, "step": 30 } ], "logging_steps": 1, "max_steps": 30, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 20, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.054713575374848e+16, "train_batch_size": 4, "trial_name": null, "trial_params": null }