{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 59, "global_step": 236, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00423728813559322, "eval_loss": 1.7249085903167725, "eval_runtime": 8.4443, "eval_samples_per_second": 11.842, "eval_steps_per_second": 5.921, "step": 1 }, { "epoch": 0.0211864406779661, "grad_norm": 0.34860244393348694, "learning_rate": 1.6666666666666667e-05, "loss": 1.3692, "step": 5 }, { "epoch": 0.0423728813559322, "grad_norm": 0.24737317860126495, "learning_rate": 3.3333333333333335e-05, "loss": 1.7379, "step": 10 }, { "epoch": 0.0635593220338983, "grad_norm": 0.3292003571987152, "learning_rate": 5e-05, "loss": 1.8021, "step": 15 }, { "epoch": 0.0847457627118644, "grad_norm": 0.5094640851020813, "learning_rate": 6.666666666666667e-05, "loss": 1.7655, "step": 20 }, { "epoch": 0.1059322033898305, "grad_norm": 0.5904861688613892, "learning_rate": 8.333333333333334e-05, "loss": 1.7297, "step": 25 }, { "epoch": 0.1271186440677966, "grad_norm": 0.7554088830947876, "learning_rate": 0.0001, "loss": 1.6078, "step": 30 }, { "epoch": 0.1483050847457627, "grad_norm": 0.5578835606575012, "learning_rate": 9.985471028179154e-05, "loss": 1.6425, "step": 35 }, { "epoch": 0.1694915254237288, "grad_norm": 0.6163862943649292, "learning_rate": 9.941968549125481e-05, "loss": 1.5532, "step": 40 }, { "epoch": 0.1906779661016949, "grad_norm": 0.7516969442367554, "learning_rate": 9.869745381355906e-05, "loss": 1.7323, "step": 45 }, { "epoch": 0.211864406779661, "grad_norm": 2.109365940093994, "learning_rate": 9.769221256218164e-05, "loss": 1.5351, "step": 50 }, { "epoch": 0.2330508474576271, "grad_norm": 0.44315698742866516, "learning_rate": 9.64098037858483e-05, "loss": 1.2733, "step": 55 }, { "epoch": 0.25, "eval_loss": 1.4840701818466187, "eval_runtime": 8.6604, "eval_samples_per_second": 11.547, "eval_steps_per_second": 5.773, "step": 59 }, { "epoch": 0.2542372881355932, "grad_norm": 0.3630428910255432, "learning_rate": 9.485768031694872e-05, "loss": 1.5799, "step": 60 }, { "epoch": 0.2754237288135593, "grad_norm": 0.3667657673358917, "learning_rate": 9.304486245873972e-05, "loss": 1.6658, "step": 65 }, { "epoch": 0.2966101694915254, "grad_norm": 0.5239521265029907, "learning_rate": 9.098188556305263e-05, "loss": 1.793, "step": 70 }, { "epoch": 0.3177966101694915, "grad_norm": 0.4729391932487488, "learning_rate": 8.868073880316124e-05, "loss": 1.5419, "step": 75 }, { "epoch": 0.3389830508474576, "grad_norm": 0.46771642565727234, "learning_rate": 8.615479549763756e-05, "loss": 1.7167, "step": 80 }, { "epoch": 0.3601694915254237, "grad_norm": 0.6532709002494812, "learning_rate": 8.341873539012444e-05, "loss": 1.5274, "step": 85 }, { "epoch": 0.3813559322033898, "grad_norm": 0.7305452227592468, "learning_rate": 8.048845933670273e-05, "loss": 1.5357, "step": 90 }, { "epoch": 0.4025423728813559, "grad_norm": 0.9167357683181763, "learning_rate": 7.73809968966554e-05, "loss": 1.3621, "step": 95 }, { "epoch": 0.423728813559322, "grad_norm": 1.8313207626342773, "learning_rate": 7.411440736367281e-05, "loss": 1.4205, "step": 100 }, { "epoch": 0.4449152542372881, "grad_norm": 0.38461220264434814, "learning_rate": 7.070767481266492e-05, "loss": 1.1588, "step": 105 }, { "epoch": 0.4661016949152542, "grad_norm": 0.3785201907157898, "learning_rate": 6.718059777212567e-05, "loss": 1.5231, "step": 110 }, { "epoch": 0.4872881355932203, "grad_norm": 0.4216236472129822, "learning_rate": 
6.355367416322779e-05, "loss": 1.662, "step": 115 }, { "epoch": 0.5, "eval_loss": 1.4526076316833496, "eval_runtime": 8.5518, "eval_samples_per_second": 11.693, "eval_steps_per_second": 5.847, "step": 118 }, { "epoch": 0.5084745762711864, "grad_norm": 0.45605048537254333, "learning_rate": 5.9847982174335316e-05, "loss": 1.7207, "step": 120 }, { "epoch": 0.5296610169491526, "grad_norm": 0.4846043288707733, "learning_rate": 5.608505776324158e-05, "loss": 1.4496, "step": 125 }, { "epoch": 0.5508474576271186, "grad_norm": 0.6062460541725159, "learning_rate": 5.228676949903973e-05, "loss": 1.5234, "step": 130 }, { "epoch": 0.5720338983050848, "grad_norm": 0.5876784324645996, "learning_rate": 4.847519147099294e-05, "loss": 1.4498, "step": 135 }, { "epoch": 0.5932203389830508, "grad_norm": 0.6600533723831177, "learning_rate": 4.46724750030062e-05, "loss": 1.6008, "step": 140 }, { "epoch": 0.614406779661017, "grad_norm": 0.8463802933692932, "learning_rate": 4.0900719919241935e-05, "loss": 1.4843, "step": 145 }, { "epoch": 0.635593220338983, "grad_norm": 1.585313081741333, "learning_rate": 3.7181846109031005e-05, "loss": 1.6269, "step": 150 }, { "epoch": 0.6567796610169492, "grad_norm": 0.3185465931892395, "learning_rate": 3.353746613749094e-05, "loss": 1.1721, "step": 155 }, { "epoch": 0.6779661016949152, "grad_norm": 0.3304313123226166, "learning_rate": 2.9988759642186097e-05, "loss": 1.3918, "step": 160 }, { "epoch": 0.6991525423728814, "grad_norm": 0.35727110505104065, "learning_rate": 2.6556350245784833e-05, "loss": 1.6054, "step": 165 }, { "epoch": 0.7203389830508474, "grad_norm": 0.41722163558006287, "learning_rate": 2.3260185700046294e-05, "loss": 1.612, "step": 170 }, { "epoch": 0.7415254237288136, "grad_norm": 0.44186127185821533, "learning_rate": 2.011942195769122e-05, "loss": 1.3327, "step": 175 }, { "epoch": 0.75, "eval_loss": 1.4384088516235352, "eval_runtime": 8.6854, "eval_samples_per_second": 11.514, "eval_steps_per_second": 5.757, "step": 177 }, { "epoch": 0.7627118644067796, "grad_norm": 0.5537221431732178, "learning_rate": 1.7152311845883095e-05, "loss": 1.4988, "step": 180 }, { "epoch": 0.7838983050847458, "grad_norm": 0.676834762096405, "learning_rate": 1.4376098988303405e-05, "loss": 1.3876, "step": 185 }, { "epoch": 0.8050847457627118, "grad_norm": 0.6886051297187805, "learning_rate": 1.1806917592302762e-05, "loss": 1.6185, "step": 190 }, { "epoch": 0.826271186440678, "grad_norm": 0.9627895951271057, "learning_rate": 9.459698683523204e-06, "loss": 1.4325, "step": 195 }, { "epoch": 0.847457627118644, "grad_norm": 1.889673113822937, "learning_rate": 7.348083332917926e-06, "loss": 1.5114, "step": 200 }, { "epoch": 0.8686440677966102, "grad_norm": 0.32479622960090637, "learning_rate": 5.484343380457125e-06, "loss": 1.3119, "step": 205 }, { "epoch": 0.8898305084745762, "grad_norm": 0.4083038568496704, "learning_rate": 3.879310116241042e-06, "loss": 1.5227, "step": 210 }, { "epoch": 0.9110169491525424, "grad_norm": 0.42401042580604553, "learning_rate": 2.542311333496622e-06, "loss": 1.4954, "step": 215 }, { "epoch": 0.9322033898305084, "grad_norm": 0.542686402797699, "learning_rate": 1.4811171192794627e-06, "loss": 1.5561, "step": 220 }, { "epoch": 0.9533898305084746, "grad_norm": 0.6815149784088135, "learning_rate": 7.018946979234997e-07, "loss": 1.4592, "step": 225 }, { "epoch": 0.9745762711864406, "grad_norm": 0.6367012858390808, "learning_rate": 2.0917258966953733e-07, "loss": 1.5196, "step": 230 }, { "epoch": 0.9957627118644068, "grad_norm": 1.104602575302124, 
"learning_rate": 5.814292768108187e-09, "loss": 1.5794, "step": 235 }, { "epoch": 1.0, "eval_loss": 1.404936671257019, "eval_runtime": 8.6769, "eval_samples_per_second": 11.525, "eval_steps_per_second": 5.762, "step": 236 } ], "logging_steps": 5, "max_steps": 236, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8.267951517990912e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }