{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.625, "eval_steps": 500, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.025, "grad_norm": 135.99700927734375, "learning_rate": 0.0, "loss": 1.8567, "step": 1 }, { "epoch": 0.05, "grad_norm": 132.19590759277344, "learning_rate": 1e-06, "loss": 1.6451, "step": 2 }, { "epoch": 0.075, "grad_norm": 126.63935852050781, "learning_rate": 1e-06, "loss": 1.6308, "step": 3 }, { "epoch": 0.1, "grad_norm": 132.1169891357422, "learning_rate": 1e-06, "loss": 1.1376, "step": 4 }, { "epoch": 0.125, "grad_norm": 111.94847869873047, "learning_rate": 1e-06, "loss": 0.9303, "step": 5 }, { "epoch": 0.15, "grad_norm": 47.188209533691406, "learning_rate": 1e-06, "loss": 0.5403, "step": 6 }, { "epoch": 0.175, "grad_norm": 32.26974868774414, "learning_rate": 1e-06, "loss": 0.4366, "step": 7 }, { "epoch": 0.2, "grad_norm": 26.274761199951172, "learning_rate": 1e-06, "loss": 0.3587, "step": 8 }, { "epoch": 0.225, "grad_norm": 26.633220672607422, "learning_rate": 1e-06, "loss": 0.3009, "step": 9 }, { "epoch": 0.25, "grad_norm": 19.493844985961914, "learning_rate": 1e-06, "loss": 0.091, "step": 10 }, { "epoch": 0.275, "grad_norm": 9.516935348510742, "learning_rate": 1e-06, "loss": 0.045, "step": 11 }, { "epoch": 0.3, "grad_norm": 5.694634914398193, "learning_rate": 1e-06, "loss": 0.0358, "step": 12 }, { "epoch": 0.325, "grad_norm": 3.269968271255493, "learning_rate": 1e-06, "loss": 0.0218, "step": 13 }, { "epoch": 0.35, "grad_norm": 2.6145660877227783, "learning_rate": 1e-06, "loss": 0.0161, "step": 14 }, { "epoch": 0.375, "grad_norm": 2.1988866329193115, "learning_rate": 1e-06, "loss": 0.0141, "step": 15 }, { "epoch": 0.4, "grad_norm": 1.8373610973358154, "learning_rate": 1e-06, "loss": 0.0061, "step": 16 }, { "epoch": 0.425, "grad_norm": 1.4379240274429321, "learning_rate": 1e-06, "loss": 0.0065, "step": 17 }, { "epoch": 0.45, "grad_norm": 1.604111671447754, "learning_rate": 1e-06, "loss": 0.0062, "step": 18 }, { "epoch": 0.475, "grad_norm": 2.3964450359344482, "learning_rate": 1e-06, "loss": 0.0062, "step": 19 }, { "epoch": 0.5, "grad_norm": 1.2232977151870728, "learning_rate": 1e-06, "loss": 0.0042, "step": 20 }, { "epoch": 0.525, "grad_norm": 2.4161126613616943, "learning_rate": 1e-06, "loss": 0.0044, "step": 21 }, { "epoch": 0.55, "grad_norm": 1.5427428483963013, "learning_rate": 1e-06, "loss": 0.0031, "step": 22 }, { "epoch": 0.575, "grad_norm": 0.6795447468757629, "learning_rate": 1e-06, "loss": 0.0008, "step": 23 }, { "epoch": 0.6, "grad_norm": 0.4597806930541992, "learning_rate": 1e-06, "loss": 0.0009, "step": 24 }, { "epoch": 0.625, "grad_norm": 1.3745927810668945, "learning_rate": 1e-06, "loss": 0.004, "step": 25 } ], "logging_steps": 1.0, "max_steps": 40, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 13389019086848.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }