{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.9627085377821394, "eval_steps": 500, "global_step": 500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.03925417075564279, "grad_norm": 81.5, "learning_rate": 8.000000000000001e-07, "loss": 0.3818, "step": 10 }, { "epoch": 0.07850834151128558, "grad_norm": 322.0, "learning_rate": 1.6000000000000001e-06, "loss": 0.367, "step": 20 }, { "epoch": 0.11776251226692837, "grad_norm": 784.0, "learning_rate": 2.4000000000000003e-06, "loss": 0.4472, "step": 30 }, { "epoch": 0.15701668302257116, "grad_norm": 350.0, "learning_rate": 3.2000000000000003e-06, "loss": 0.6806, "step": 40 }, { "epoch": 0.19627085377821393, "grad_norm": 186.0, "learning_rate": 4.000000000000001e-06, "loss": 0.9139, "step": 50 }, { "epoch": 0.23552502453385674, "grad_norm": 199.0, "learning_rate": 4.800000000000001e-06, "loss": 0.8205, "step": 60 }, { "epoch": 0.2747791952894995, "grad_norm": 118.0, "learning_rate": 5.600000000000001e-06, "loss": 0.9017, "step": 70 }, { "epoch": 0.3140333660451423, "grad_norm": 616.0, "learning_rate": 6.4000000000000006e-06, "loss": 3.1966, "step": 80 }, { "epoch": 0.35328753680078506, "grad_norm": 692.0, "learning_rate": 7.2000000000000005e-06, "loss": 1.0627, "step": 90 }, { "epoch": 0.39254170755642787, "grad_norm": 75.5, "learning_rate": 8.000000000000001e-06, "loss": 7.3782, "step": 100 }, { "epoch": 0.43179587831207067, "grad_norm": 908.0, "learning_rate": 8.8e-06, "loss": 1.1091, "step": 110 }, { "epoch": 0.47105004906771347, "grad_norm": 61.5, "learning_rate": 9.600000000000001e-06, "loss": 0.9262, "step": 120 }, { "epoch": 0.5103042198233563, "grad_norm": 45.25, "learning_rate": 1.04e-05, "loss": 0.6774, "step": 130 }, { "epoch": 0.549558390578999, "grad_norm": 39.75, "learning_rate": 1.1200000000000001e-05, "loss": 0.8204, "step": 140 }, { "epoch": 0.5888125613346418, "grad_norm": 38.0, "learning_rate": 1.2e-05, "loss": 0.6443, "step": 150 }, { "epoch": 0.6280667320902846, "grad_norm": 31.25, "learning_rate": 1.2800000000000001e-05, "loss": 0.607, "step": 160 }, { "epoch": 0.6673209028459274, "grad_norm": 33.5, "learning_rate": 1.3600000000000002e-05, "loss": 0.674, "step": 170 }, { "epoch": 0.7065750736015701, "grad_norm": 37.5, "learning_rate": 1.4400000000000001e-05, "loss": 0.6806, "step": 180 }, { "epoch": 0.745829244357213, "grad_norm": 36.25, "learning_rate": 1.5200000000000002e-05, "loss": 0.6131, "step": 190 }, { "epoch": 0.7850834151128557, "grad_norm": 43.25, "learning_rate": 1.6000000000000003e-05, "loss": 0.5914, "step": 200 }, { "epoch": 0.8243375858684985, "grad_norm": 27.75, "learning_rate": 1.6800000000000002e-05, "loss": 0.5488, "step": 210 }, { "epoch": 0.8635917566241413, "grad_norm": 47.75, "learning_rate": 1.76e-05, "loss": 0.7107, "step": 220 }, { "epoch": 0.9028459273797841, "grad_norm": 16.375, "learning_rate": 1.8400000000000003e-05, "loss": 1.2491, "step": 230 }, { "epoch": 0.9421000981354269, "grad_norm": 35.5, "learning_rate": 1.9200000000000003e-05, "loss": 0.6448, "step": 240 }, { "epoch": 0.9813542688910697, "grad_norm": 22.875, "learning_rate": 2e-05, "loss": 0.5682, "step": 250 }, { "epoch": 1.0206084396467126, "grad_norm": 32.75, "learning_rate": 1.9999025240093045e-05, "loss": 0.5689, "step": 260 }, { "epoch": 1.0598626104023552, "grad_norm": 28.375, "learning_rate": 1.9996101150403543e-05, "loss": 0.4836, "step": 270 }, { "epoch": 1.099116781157998, "grad_norm": 18.25, "learning_rate": 
1.9991228300988586e-05, "loss": 0.519, "step": 280 }, { "epoch": 1.138370951913641, "grad_norm": 26.125, "learning_rate": 1.9984407641819812e-05, "loss": 0.4941, "step": 290 }, { "epoch": 1.1776251226692835, "grad_norm": 20.5, "learning_rate": 1.9975640502598243e-05, "loss": 0.4484, "step": 300 }, { "epoch": 1.2168792934249264, "grad_norm": 24.625, "learning_rate": 1.9964928592495046e-05, "loss": 0.5169, "step": 310 }, { "epoch": 1.2561334641805693, "grad_norm": 14.125, "learning_rate": 1.9952273999818312e-05, "loss": 0.4417, "step": 320 }, { "epoch": 1.295387634936212, "grad_norm": 26.25, "learning_rate": 1.9937679191605964e-05, "loss": 0.4361, "step": 330 }, { "epoch": 1.3346418056918548, "grad_norm": 23.125, "learning_rate": 1.9921147013144782e-05, "loss": 0.4454, "step": 340 }, { "epoch": 1.3738959764474976, "grad_norm": 23.625, "learning_rate": 1.9902680687415704e-05, "loss": 0.446, "step": 350 }, { "epoch": 1.4131501472031402, "grad_norm": 13.9375, "learning_rate": 1.988228381446553e-05, "loss": 0.4006, "step": 360 }, { "epoch": 1.452404317958783, "grad_norm": 31.5, "learning_rate": 1.985996037070505e-05, "loss": 0.4577, "step": 370 }, { "epoch": 1.491658488714426, "grad_norm": 47.5, "learning_rate": 1.983571470813386e-05, "loss": 0.4207, "step": 380 }, { "epoch": 1.5309126594700686, "grad_norm": 12.3125, "learning_rate": 1.9809551553491918e-05, "loss": 0.4251, "step": 390 }, { "epoch": 1.5701668302257115, "grad_norm": 20.75, "learning_rate": 1.9781476007338058e-05, "loss": 0.3968, "step": 400 }, { "epoch": 1.6094210009813543, "grad_norm": 15.0, "learning_rate": 1.9751493543055634e-05, "loss": 0.4019, "step": 410 }, { "epoch": 1.648675171736997, "grad_norm": 11.5, "learning_rate": 1.9719610005785466e-05, "loss": 0.3894, "step": 420 }, { "epoch": 1.6879293424926398, "grad_norm": 20.375, "learning_rate": 1.9685831611286312e-05, "loss": 0.3923, "step": 430 }, { "epoch": 1.7271835132482827, "grad_norm": 16.75, "learning_rate": 1.9650164944723116e-05, "loss": 0.374, "step": 440 }, { "epoch": 1.7664376840039253, "grad_norm": 21.375, "learning_rate": 1.961261695938319e-05, "loss": 0.402, "step": 450 }, { "epoch": 1.8056918547595682, "grad_norm": 14.25, "learning_rate": 1.9573194975320672e-05, "loss": 0.3646, "step": 460 }, { "epoch": 1.844946025515211, "grad_norm": 15.0625, "learning_rate": 1.9531906677929472e-05, "loss": 0.3451, "step": 470 }, { "epoch": 1.8842001962708537, "grad_norm": 12.75, "learning_rate": 1.9488760116444966e-05, "loss": 0.3373, "step": 480 }, { "epoch": 1.9234543670264965, "grad_norm": 12.5, "learning_rate": 1.944376370237481e-05, "loss": 0.3693, "step": 490 }, { "epoch": 1.9627085377821394, "grad_norm": 2.34375, "learning_rate": 1.9396926207859085e-05, "loss": 0.3595, "step": 500 } ], "logging_steps": 10, "max_steps": 2500, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.91328310788096e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }
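
The JSON above matches the trainer_state.json format that the Hugging Face transformers Trainer writes alongside each checkpoint (here global_step 500 of a 2500-step run, logging every 10 steps). Below is a minimal sketch of how such a file could be loaded and the logged loss and learning rate plotted; the path "checkpoint-500/trainer_state.json" and the use of matplotlib are assumptions for illustration, not part of the original artifact.

# Minimal sketch (assumed filename/location; adjust to your checkpoint directory).
import json

import matplotlib.pyplot as plt

STATE_PATH = "checkpoint-500/trainer_state.json"  # hypothetical path

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history holds one dict per logging event (every `logging_steps` steps);
# eval entries, if any, would lack the "loss" key, so filter on it.
records = [entry for entry in state["log_history"] if "loss" in entry]
steps = [entry["step"] for entry in records]
losses = [entry["loss"] for entry in records]
lrs = [entry["learning_rate"] for entry in records]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
fig.tight_layout()
plt.show()

The learning_rate column itself already indicates the schedule: a linear warmup to 2e-05 over the first 250 steps, followed by what appears to be a cosine decay over the remaining steps.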