{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 18, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.17, "grad_norm": null, "learning_rate": 0.0, "loss": 8.9144, "step": 1 }, { "epoch": 0.33, "grad_norm": null, "learning_rate": 0.0, "loss": 12.2548, "step": 2 }, { "epoch": 0.5, "grad_norm": null, "learning_rate": 0.0, "loss": 8.0577, "step": 3 }, { "epoch": 0.67, "grad_norm": null, "learning_rate": 0.0, "loss": 10.0668, "step": 4 }, { "epoch": 0.83, "grad_norm": 118.140869140625, "learning_rate": 1.5e-05, "loss": 11.3165, "step": 5 }, { "epoch": 1.0, "grad_norm": 119.7070541381836, "learning_rate": 3e-05, "loss": 8.0135, "step": 6 }, { "epoch": 1.17, "grad_norm": 73.31137084960938, "learning_rate": 2.8125e-05, "loss": 7.0185, "step": 7 }, { "epoch": 1.33, "grad_norm": 48.86985397338867, "learning_rate": 2.625e-05, "loss": 6.366, "step": 8 }, { "epoch": 1.5, "grad_norm": 63.3082389831543, "learning_rate": 2.4375e-05, "loss": 6.9771, "step": 9 }, { "epoch": 1.67, "grad_norm": 60.80664825439453, "learning_rate": 2.25e-05, "loss": 4.8249, "step": 10 }, { "epoch": 1.83, "grad_norm": 48.479217529296875, "learning_rate": 2.0625e-05, "loss": 4.9635, "step": 11 }, { "epoch": 2.0, "grad_norm": 24.786556243896484, "learning_rate": 1.8750000000000002e-05, "loss": 3.3671, "step": 12 }, { "epoch": 2.17, "grad_norm": 16.406631469726562, "learning_rate": 1.6875e-05, "loss": 2.361, "step": 13 }, { "epoch": 2.33, "grad_norm": 15.527493476867676, "learning_rate": 1.5e-05, "loss": 3.1927, "step": 14 }, { "epoch": 2.5, "grad_norm": 28.2950382232666, "learning_rate": 1.3125e-05, "loss": 4.0086, "step": 15 }, { "epoch": 2.67, "grad_norm": 25.018417358398438, "learning_rate": 1.125e-05, "loss": 3.4131, "step": 16 }, { "epoch": 2.83, "grad_norm": 15.129682540893555, "learning_rate": 9.375000000000001e-06, "loss": 2.6968, "step": 17 }, { "epoch": 3.0, 
"grad_norm": 13.875788688659668, "learning_rate": 7.5e-06, "loss": 2.714, "step": 18 } ], "logging_steps": 1, "max_steps": 18, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 6902048951894016.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }