{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.03712641544458883, "eval_steps": 9, "global_step": 100, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00037126415444588826, "eval_loss": 4.464370250701904, "eval_runtime": 102.0524, "eval_samples_per_second": 44.458, "eval_steps_per_second": 5.566, "step": 1 }, { "epoch": 0.0011137924633376648, "grad_norm": 0.9076333045959473, "learning_rate": 1.5e-05, "loss": 4.3192, "step": 3 }, { "epoch": 0.0022275849266753297, "grad_norm": 0.9957979321479797, "learning_rate": 3e-05, "loss": 4.4682, "step": 6 }, { "epoch": 0.003341377390012994, "grad_norm": 0.9132924675941467, "learning_rate": 4.5e-05, "loss": 4.3804, "step": 9 }, { "epoch": 0.003341377390012994, "eval_loss": 4.441404342651367, "eval_runtime": 102.102, "eval_samples_per_second": 44.436, "eval_steps_per_second": 5.563, "step": 9 }, { "epoch": 0.004455169853350659, "grad_norm": 1.0117743015289307, "learning_rate": 4.993910125649561e-05, "loss": 4.3551, "step": 12 }, { "epoch": 0.005568962316688323, "grad_norm": 0.9917817711830139, "learning_rate": 4.962019382530521e-05, "loss": 4.2708, "step": 15 }, { "epoch": 0.006682754780025988, "grad_norm": 1.0605599880218506, "learning_rate": 4.9031542398457974e-05, "loss": 4.2359, "step": 18 }, { "epoch": 0.006682754780025988, "eval_loss": 4.246806621551514, "eval_runtime": 102.1684, "eval_samples_per_second": 44.407, "eval_steps_per_second": 5.559, "step": 18 }, { "epoch": 0.007796547243363653, "grad_norm": 1.205475926399231, "learning_rate": 4.817959636416969e-05, "loss": 4.1931, "step": 21 }, { "epoch": 0.008910339706701319, "grad_norm": 1.218623399734497, "learning_rate": 4.707368982147318e-05, "loss": 4.0218, "step": 24 }, { "epoch": 0.010024132170038983, "grad_norm": 1.211789846420288, "learning_rate": 4.572593931387604e-05, "loss": 3.6903, "step": 27 }, { "epoch": 0.010024132170038983, "eval_loss": 3.9151904582977295, "eval_runtime": 102.1497, "eval_samples_per_second": 44.415, "eval_steps_per_second": 5.56, "step": 27 }, { "epoch": 0.011137924633376647, "grad_norm": 1.073923110961914, "learning_rate": 4.415111107797445e-05, "loss": 3.8025, "step": 30 }, { "epoch": 0.012251717096714312, "grad_norm": 1.2747162580490112, "learning_rate": 4.2366459261474933e-05, "loss": 3.785, "step": 33 }, { "epoch": 0.013365509560051976, "grad_norm": 1.1728018522262573, "learning_rate": 4.039153688314145e-05, "loss": 3.6833, "step": 36 }, { "epoch": 0.013365509560051976, "eval_loss": 3.583631753921509, "eval_runtime": 102.0645, "eval_samples_per_second": 44.452, "eval_steps_per_second": 5.565, "step": 36 }, { "epoch": 0.014479302023389642, "grad_norm": 1.2889246940612793, "learning_rate": 3.824798160583012e-05, "loss": 3.4318, "step": 39 }, { "epoch": 0.015593094486727306, "grad_norm": 1.2840162515640259, "learning_rate": 3.5959278669726935e-05, "loss": 3.2355, "step": 42 }, { "epoch": 0.016706886950064972, "grad_norm": 1.1799660921096802, "learning_rate": 3.355050358314172e-05, "loss": 3.2583, "step": 45 }, { "epoch": 0.016706886950064972, "eval_loss": 3.3041188716888428, "eval_runtime": 102.1114, "eval_samples_per_second": 44.432, "eval_steps_per_second": 5.563, "step": 45 }, { "epoch": 0.017820679413402638, "grad_norm": 1.1542611122131348, "learning_rate": 3.104804738999169e-05, "loss": 3.3552, "step": 48 }, { "epoch": 0.0189344718767403, "grad_norm": 1.1231194734573364, "learning_rate": 2.8479327524001636e-05, "loss": 3.2313, "step": 51 }, { "epoch": 
0.020048264340077965, "grad_norm": 1.2060445547103882, "learning_rate": 2.587248741756253e-05, "loss": 3.1961, "step": 54 }, { "epoch": 0.020048264340077965, "eval_loss": 3.080096960067749, "eval_runtime": 102.2394, "eval_samples_per_second": 44.376, "eval_steps_per_second": 5.556, "step": 54 }, { "epoch": 0.02116205680341563, "grad_norm": 1.0807676315307617, "learning_rate": 2.3256088156396868e-05, "loss": 3.0027, "step": 57 }, { "epoch": 0.022275849266753293, "grad_norm": 1.1301020383834839, "learning_rate": 2.0658795558326743e-05, "loss": 2.9808, "step": 60 }, { "epoch": 0.02338964173009096, "grad_norm": 0.9327682852745056, "learning_rate": 1.8109066104575023e-05, "loss": 3.0352, "step": 63 }, { "epoch": 0.02338964173009096, "eval_loss": 2.921823501586914, "eval_runtime": 102.3331, "eval_samples_per_second": 44.336, "eval_steps_per_second": 5.551, "step": 63 }, { "epoch": 0.024503434193428625, "grad_norm": 1.1941728591918945, "learning_rate": 1.56348351646022e-05, "loss": 2.8402, "step": 66 }, { "epoch": 0.02561722665676629, "grad_norm": 1.0488781929016113, "learning_rate": 1.3263210930352737e-05, "loss": 2.7516, "step": 69 }, { "epoch": 0.026731019120103953, "grad_norm": 0.9377260804176331, "learning_rate": 1.1020177413231334e-05, "loss": 3.0401, "step": 72 }, { "epoch": 0.026731019120103953, "eval_loss": 2.823646306991577, "eval_runtime": 102.3141, "eval_samples_per_second": 44.344, "eval_steps_per_second": 5.552, "step": 72 }, { "epoch": 0.02784481158344162, "grad_norm": 1.002527117729187, "learning_rate": 8.930309757836517e-06, "loss": 2.8263, "step": 75 }, { "epoch": 0.028958604046779284, "grad_norm": 1.0062321424484253, "learning_rate": 7.016504991533726e-06, "loss": 2.7205, "step": 78 }, { "epoch": 0.03007239651011695, "grad_norm": 0.9706265330314636, "learning_rate": 5.299731159831953e-06, "loss": 2.7307, "step": 81 }, { "epoch": 0.03007239651011695, "eval_loss": 2.7717268466949463, "eval_runtime": 102.3153, "eval_samples_per_second": 44.343, "eval_steps_per_second": 5.551, "step": 81 }, { "epoch": 0.031186188973454612, "grad_norm": 0.9139003157615662, "learning_rate": 3.798797596089351e-06, "loss": 2.7478, "step": 84 }, { "epoch": 0.032299981436792274, "grad_norm": 0.9888792634010315, "learning_rate": 2.5301488425208296e-06, "loss": 2.7967, "step": 87 }, { "epoch": 0.033413773900129944, "grad_norm": 0.8691554665565491, "learning_rate": 1.5076844803522922e-06, "loss": 2.7622, "step": 90 }, { "epoch": 0.033413773900129944, "eval_loss": 2.7512753009796143, "eval_runtime": 102.3046, "eval_samples_per_second": 44.348, "eval_steps_per_second": 5.552, "step": 90 }, { "epoch": 0.034527566363467606, "grad_norm": 0.8410691022872925, "learning_rate": 7.426068431000882e-07, "loss": 2.5786, "step": 93 }, { "epoch": 0.035641358826805275, "grad_norm": 0.9793163537979126, "learning_rate": 2.4329828146074095e-07, "loss": 2.6365, "step": 96 }, { "epoch": 0.03675515129014294, "grad_norm": 0.8763136267662048, "learning_rate": 1.522932452260595e-08, "loss": 2.9456, "step": 99 }, { "epoch": 0.03675515129014294, "eval_loss": 2.747492551803589, "eval_runtime": 102.3186, "eval_samples_per_second": 44.342, "eval_steps_per_second": 5.551, "step": 99 } ], "logging_steps": 3, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 9, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 9932351078400000.0, 
"train_batch_size": 8, "trial_name": null, "trial_params": null }