{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 33.333333333333336,
  "eval_steps": 50,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 2.6699063777923584,
      "learning_rate": 2.443609022556391e-05,
      "loss": 1.8217,
      "step": 50
    },
    {
      "epoch": 1.1111111111111112,
      "eval_loss": 1.0424705743789673,
      "eval_runtime": 4.0916,
      "eval_samples_per_second": 43.993,
      "eval_steps_per_second": 5.621,
      "step": 50
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.44133517146110535,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.5439,
      "step": 100
    },
    {
      "epoch": 2.2222222222222223,
      "eval_loss": 0.3811015784740448,
      "eval_runtime": 4.0749,
      "eval_samples_per_second": 44.173,
      "eval_steps_per_second": 5.644,
      "step": 100
    },
    {
      "epoch": 3.3333333333333335,
      "grad_norm": 0.540984034538269,
      "learning_rate": 2.3182957393483708e-05,
      "loss": 0.3496,
      "step": 150
    },
    {
      "epoch": 3.3333333333333335,
      "eval_loss": 0.3363141715526581,
      "eval_runtime": 4.089,
      "eval_samples_per_second": 44.02,
      "eval_steps_per_second": 5.625,
      "step": 150
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 0.4714152216911316,
      "learning_rate": 2.255639097744361e-05,
      "loss": 0.3178,
      "step": 200
    },
    {
      "epoch": 4.444444444444445,
      "eval_loss": 0.3166368007659912,
      "eval_runtime": 4.0511,
      "eval_samples_per_second": 44.432,
      "eval_steps_per_second": 5.677,
      "step": 200
    },
    {
      "epoch": 5.555555555555555,
      "grad_norm": 0.5102563500404358,
      "learning_rate": 2.1929824561403507e-05,
      "loss": 0.2971,
      "step": 250
    },
    {
      "epoch": 5.555555555555555,
      "eval_loss": 0.304388165473938,
      "eval_runtime": 4.0541,
      "eval_samples_per_second": 44.399,
      "eval_steps_per_second": 5.673,
      "step": 250
    },
    {
      "epoch": 6.666666666666667,
      "grad_norm": 0.8356263041496277,
      "learning_rate": 2.130325814536341e-05,
      "loss": 0.2825,
      "step": 300
    },
    {
      "epoch": 6.666666666666667,
      "eval_loss": 0.29874512553215027,
      "eval_runtime": 4.0506,
      "eval_samples_per_second": 44.438,
      "eval_steps_per_second": 5.678,
      "step": 300
    },
    {
      "epoch": 7.777777777777778,
      "grad_norm": 0.694858968257904,
      "learning_rate": 2.067669172932331e-05,
      "loss": 0.2714,
      "step": 350
    },
    {
      "epoch": 7.777777777777778,
      "eval_loss": 0.2942558825016022,
      "eval_runtime": 4.074,
      "eval_samples_per_second": 44.182,
      "eval_steps_per_second": 5.646,
      "step": 350
    },
    {
      "epoch": 8.88888888888889,
      "grad_norm": 0.7505309581756592,
      "learning_rate": 2.0050125313283208e-05,
      "loss": 0.2616,
      "step": 400
    },
    {
      "epoch": 8.88888888888889,
      "eval_loss": 0.29128891229629517,
      "eval_runtime": 4.0871,
      "eval_samples_per_second": 44.041,
      "eval_steps_per_second": 5.627,
      "step": 400
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.7603775858879089,
      "learning_rate": 1.942355889724311e-05,
      "loss": 0.2552,
      "step": 450
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.28786608576774597,
      "eval_runtime": 4.052,
      "eval_samples_per_second": 44.422,
      "eval_steps_per_second": 5.676,
      "step": 450
    },
    {
      "epoch": 11.11111111111111,
      "grad_norm": 0.831240177154541,
      "learning_rate": 1.8796992481203007e-05,
      "loss": 0.2472,
      "step": 500
    },
    {
      "epoch": 11.11111111111111,
      "eval_loss": 0.28864821791648865,
      "eval_runtime": 4.0371,
      "eval_samples_per_second": 44.586,
      "eval_steps_per_second": 5.697,
      "step": 500
    },
    {
      "epoch": 12.222222222222221,
      "grad_norm": 0.7524340152740479,
      "learning_rate": 1.8170426065162908e-05,
      "loss": 0.2388,
      "step": 550
    },
    {
      "epoch": 12.222222222222221,
      "eval_loss": 0.28881773352622986,
      "eval_runtime": 4.0933,
      "eval_samples_per_second": 43.974,
      "eval_steps_per_second": 5.619,
      "step": 550
    },
    {
      "epoch": 13.333333333333334,
      "grad_norm": 0.8262473940849304,
      "learning_rate": 1.7543859649122806e-05,
      "loss": 0.2309,
      "step": 600
    },
    {
      "epoch": 13.333333333333334,
      "eval_loss": 0.2914719581604004,
      "eval_runtime": 4.0773,
      "eval_samples_per_second": 44.147,
      "eval_steps_per_second": 5.641,
      "step": 600
    },
    {
      "epoch": 14.444444444444445,
      "grad_norm": 0.8302382826805115,
      "learning_rate": 1.6917293233082707e-05,
      "loss": 0.2263,
      "step": 650
    },
    {
      "epoch": 14.444444444444445,
      "eval_loss": 0.2899852395057678,
      "eval_runtime": 4.0443,
      "eval_samples_per_second": 44.507,
      "eval_steps_per_second": 5.687,
      "step": 650
    },
    {
      "epoch": 15.555555555555555,
      "grad_norm": 0.957399845123291,
      "learning_rate": 1.6290726817042605e-05,
      "loss": 0.2181,
      "step": 700
    },
    {
      "epoch": 15.555555555555555,
      "eval_loss": 0.29406097531318665,
      "eval_runtime": 4.0913,
      "eval_samples_per_second": 43.996,
      "eval_steps_per_second": 5.622,
      "step": 700
    },
    {
      "epoch": 16.666666666666668,
      "grad_norm": 1.0171995162963867,
      "learning_rate": 1.5664160401002506e-05,
      "loss": 0.2115,
      "step": 750
    },
    {
      "epoch": 16.666666666666668,
      "eval_loss": 0.2935585081577301,
      "eval_runtime": 4.0514,
      "eval_samples_per_second": 44.429,
      "eval_steps_per_second": 5.677,
      "step": 750
    },
    {
      "epoch": 17.77777777777778,
      "grad_norm": 0.9579535722732544,
      "learning_rate": 1.5037593984962406e-05,
      "loss": 0.2056,
      "step": 800
    },
    {
      "epoch": 17.77777777777778,
      "eval_loss": 0.2986494302749634,
      "eval_runtime": 4.0426,
      "eval_samples_per_second": 44.526,
      "eval_steps_per_second": 5.689,
      "step": 800
    },
    {
      "epoch": 18.88888888888889,
      "grad_norm": 1.0283973217010498,
      "learning_rate": 1.4411027568922305e-05,
      "loss": 0.1983,
      "step": 850
    },
    {
      "epoch": 18.88888888888889,
      "eval_loss": 0.30257824063301086,
      "eval_runtime": 4.0596,
      "eval_samples_per_second": 44.339,
      "eval_steps_per_second": 5.666,
      "step": 850
    },
    {
      "epoch": 20.0,
      "grad_norm": 1.068368673324585,
      "learning_rate": 1.3784461152882205e-05,
      "loss": 0.1915,
      "step": 900
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.30601659417152405,
      "eval_runtime": 4.0465,
      "eval_samples_per_second": 44.483,
      "eval_steps_per_second": 5.684,
      "step": 900
    },
    {
      "epoch": 21.11111111111111,
      "grad_norm": 0.987966001033783,
      "learning_rate": 1.3157894736842106e-05,
      "loss": 0.1834,
      "step": 950
    },
    {
      "epoch": 21.11111111111111,
      "eval_loss": 0.3163508176803589,
      "eval_runtime": 4.0462,
      "eval_samples_per_second": 44.486,
      "eval_steps_per_second": 5.684,
      "step": 950
    },
    {
      "epoch": 22.22222222222222,
      "grad_norm": 1.046950101852417,
      "learning_rate": 1.2531328320802006e-05,
      "loss": 0.1773,
      "step": 1000
    },
    {
      "epoch": 22.22222222222222,
      "eval_loss": 0.32225680351257324,
      "eval_runtime": 4.0814,
      "eval_samples_per_second": 44.102,
      "eval_steps_per_second": 5.635,
      "step": 1000
    },
    {
      "epoch": 23.333333333333332,
      "grad_norm": 1.2292746305465698,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 0.1707,
      "step": 1050
    },
    {
      "epoch": 23.333333333333332,
      "eval_loss": 0.3270200490951538,
      "eval_runtime": 4.0696,
      "eval_samples_per_second": 44.23,
      "eval_steps_per_second": 5.652,
      "step": 1050
    },
    {
      "epoch": 24.444444444444443,
      "grad_norm": 1.107651948928833,
      "learning_rate": 1.1278195488721805e-05,
      "loss": 0.1641,
      "step": 1100
    },
    {
      "epoch": 24.444444444444443,
      "eval_loss": 0.3326013684272766,
      "eval_runtime": 4.0592,
      "eval_samples_per_second": 44.343,
      "eval_steps_per_second": 5.666,
      "step": 1100
    },
    {
      "epoch": 25.555555555555557,
      "grad_norm": 1.3203223943710327,
      "learning_rate": 1.0651629072681704e-05,
      "loss": 0.1575,
      "step": 1150
    },
    {
      "epoch": 25.555555555555557,
      "eval_loss": 0.33997443318367004,
      "eval_runtime": 4.0804,
      "eval_samples_per_second": 44.113,
      "eval_steps_per_second": 5.637,
      "step": 1150
    },
    {
      "epoch": 26.666666666666668,
      "grad_norm": 1.2884823083877563,
      "learning_rate": 1.0025062656641604e-05,
      "loss": 0.1517,
      "step": 1200
    },
    {
      "epoch": 26.666666666666668,
      "eval_loss": 0.3428906202316284,
      "eval_runtime": 4.0803,
      "eval_samples_per_second": 44.114,
      "eval_steps_per_second": 5.637,
      "step": 1200
    },
    {
      "epoch": 27.77777777777778,
      "grad_norm": 1.4833685159683228,
      "learning_rate": 9.398496240601503e-06,
      "loss": 0.1442,
      "step": 1250
    },
    {
      "epoch": 27.77777777777778,
      "eval_loss": 0.35523876547813416,
      "eval_runtime": 4.0449,
      "eval_samples_per_second": 44.5,
      "eval_steps_per_second": 5.686,
      "step": 1250
    },
    {
      "epoch": 28.88888888888889,
      "grad_norm": 1.4259836673736572,
      "learning_rate": 8.771929824561403e-06,
      "loss": 0.1397,
      "step": 1300
    },
    {
      "epoch": 28.88888888888889,
      "eval_loss": 0.36296555399894714,
      "eval_runtime": 4.0386,
      "eval_samples_per_second": 44.57,
      "eval_steps_per_second": 5.695,
      "step": 1300
    },
    {
      "epoch": 30.0,
      "grad_norm": 1.5832905769348145,
      "learning_rate": 8.145363408521302e-06,
      "loss": 0.1345,
      "step": 1350
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.3718196153640747,
      "eval_runtime": 4.0526,
      "eval_samples_per_second": 44.416,
      "eval_steps_per_second": 5.675,
      "step": 1350
    },
    {
      "epoch": 31.11111111111111,
      "grad_norm": 1.5873230695724487,
      "learning_rate": 7.518796992481203e-06,
      "loss": 0.1289,
      "step": 1400
    },
    {
      "epoch": 31.11111111111111,
      "eval_loss": 0.38369277119636536,
      "eval_runtime": 4.0471,
      "eval_samples_per_second": 44.476,
      "eval_steps_per_second": 5.683,
      "step": 1400
    },
    {
      "epoch": 32.22222222222222,
      "grad_norm": 1.3492352962493896,
      "learning_rate": 6.892230576441102e-06,
      "loss": 0.1234,
      "step": 1450
    },
    {
      "epoch": 32.22222222222222,
      "eval_loss": 0.39153799414634705,
      "eval_runtime": 4.0507,
      "eval_samples_per_second": 44.436,
      "eval_steps_per_second": 5.678,
      "step": 1450
    },
    {
      "epoch": 33.333333333333336,
      "grad_norm": 1.5531470775604248,
      "learning_rate": 6.265664160401003e-06,
      "loss": 0.1194,
      "step": 1500
    },
    {
      "epoch": 33.333333333333336,
      "eval_loss": 0.3885114789009094,
      "eval_runtime": 4.0759,
      "eval_samples_per_second": 44.162,
      "eval_steps_per_second": 5.643,
      "step": 1500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 45,
  "save_steps": 25,
  "total_flos": 3.996628758124954e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}