{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.42598509052183176,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004259850905218318,
      "eval_loss": 1.2758299112319946,
      "eval_runtime": 10.863,
      "eval_samples_per_second": 36.362,
      "eval_steps_per_second": 4.603,
      "step": 1
    },
    {
      "epoch": 0.012779552715654952,
      "grad_norm": 0.8314210176467896,
      "learning_rate": 3e-05,
      "loss": 1.2924,
      "step": 3
    },
    {
      "epoch": 0.025559105431309903,
      "grad_norm": 0.8669793009757996,
      "learning_rate": 6e-05,
      "loss": 1.2805,
      "step": 6
    },
    {
      "epoch": 0.038338658146964855,
      "grad_norm": 1.0953078269958496,
      "learning_rate": 9e-05,
      "loss": 1.175,
      "step": 9
    },
    {
      "epoch": 0.038338658146964855,
      "eval_loss": 1.003463864326477,
      "eval_runtime": 10.8921,
      "eval_samples_per_second": 36.265,
      "eval_steps_per_second": 4.59,
      "step": 9
    },
    {
      "epoch": 0.051118210862619806,
      "grad_norm": 1.2419706583023071,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.017,
      "step": 12
    },
    {
      "epoch": 0.06389776357827476,
      "grad_norm": 0.8883413672447205,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.9621,
      "step": 15
    },
    {
      "epoch": 0.07667731629392971,
      "grad_norm": 1.3165100812911987,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.7864,
      "step": 18
    },
    {
      "epoch": 0.07667731629392971,
      "eval_loss": 0.7199711799621582,
      "eval_runtime": 10.8865,
      "eval_samples_per_second": 36.284,
      "eval_steps_per_second": 4.593,
      "step": 18
    },
    {
      "epoch": 0.08945686900958466,
      "grad_norm": 0.6705540418624878,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.7165,
      "step": 21
    },
    {
      "epoch": 0.10223642172523961,
      "grad_norm": 0.9872334003448486,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.6824,
      "step": 24
    },
    {
      "epoch": 0.11501597444089456,
      "grad_norm": 0.7229445576667786,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.7033,
      "step": 27
    },
    {
      "epoch": 0.11501597444089456,
      "eval_loss": 0.6306272745132446,
      "eval_runtime": 10.9156,
      "eval_samples_per_second": 36.187,
      "eval_steps_per_second": 4.581,
      "step": 27
    },
    {
      "epoch": 0.12779552715654952,
      "grad_norm": 0.7949721217155457,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.6592,
      "step": 30
    },
    {
      "epoch": 0.14057507987220447,
      "grad_norm": 0.5070735812187195,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.6234,
      "step": 33
    },
    {
      "epoch": 0.15335463258785942,
      "grad_norm": 0.6172641515731812,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.6746,
      "step": 36
    },
    {
      "epoch": 0.15335463258785942,
      "eval_loss": 0.6016714572906494,
      "eval_runtime": 10.9227,
      "eval_samples_per_second": 36.163,
      "eval_steps_per_second": 4.578,
      "step": 36
    },
    {
      "epoch": 0.16613418530351437,
      "grad_norm": 0.7518579959869385,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.6258,
      "step": 39
    },
    {
      "epoch": 0.17891373801916932,
      "grad_norm": 0.5821406841278076,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.587,
      "step": 42
    },
    {
      "epoch": 0.19169329073482427,
      "grad_norm": 0.8557337522506714,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.6309,
      "step": 45
    },
    {
      "epoch": 0.19169329073482427,
      "eval_loss": 0.5868030190467834,
      "eval_runtime": 10.9092,
      "eval_samples_per_second": 36.208,
      "eval_steps_per_second": 4.583,
      "step": 45
    },
    {
      "epoch": 0.20447284345047922,
      "grad_norm": 0.6080703735351562,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.5596,
      "step": 48
    },
    {
      "epoch": 0.21725239616613418,
      "grad_norm": 0.6161754727363586,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.6585,
      "step": 51
    },
    {
      "epoch": 0.23003194888178913,
      "grad_norm": 0.682461142539978,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.5783,
      "step": 54
    },
    {
      "epoch": 0.23003194888178913,
      "eval_loss": 0.5760997533798218,
      "eval_runtime": 10.904,
      "eval_samples_per_second": 36.225,
      "eval_steps_per_second": 4.585,
      "step": 54
    },
    {
      "epoch": 0.24281150159744408,
      "grad_norm": 0.7546740174293518,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.6759,
      "step": 57
    },
    {
      "epoch": 0.25559105431309903,
      "grad_norm": 0.7903650403022766,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.6119,
      "step": 60
    },
    {
      "epoch": 0.268370607028754,
      "grad_norm": 0.5551833510398865,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.5778,
      "step": 63
    },
    {
      "epoch": 0.268370607028754,
      "eval_loss": 0.5682376027107239,
      "eval_runtime": 10.9169,
      "eval_samples_per_second": 36.182,
      "eval_steps_per_second": 4.58,
      "step": 63
    },
    {
      "epoch": 0.28115015974440893,
      "grad_norm": 0.6363338828086853,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.5992,
      "step": 66
    },
    {
      "epoch": 0.2939297124600639,
      "grad_norm": 0.60361248254776,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.5925,
      "step": 69
    },
    {
      "epoch": 0.30670926517571884,
      "grad_norm": 0.7037025690078735,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.6792,
      "step": 72
    },
    {
      "epoch": 0.30670926517571884,
      "eval_loss": 0.5637584328651428,
      "eval_runtime": 10.9073,
      "eval_samples_per_second": 36.214,
      "eval_steps_per_second": 4.584,
      "step": 72
    },
    {
      "epoch": 0.3194888178913738,
      "grad_norm": 0.7148558497428894,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.6001,
      "step": 75
    },
    {
      "epoch": 0.33226837060702874,
      "grad_norm": 0.5377858877182007,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.5808,
      "step": 78
    },
    {
      "epoch": 0.3450479233226837,
      "grad_norm": 0.4729587733745575,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.517,
      "step": 81
    },
    {
      "epoch": 0.3450479233226837,
      "eval_loss": 0.5606918931007385,
      "eval_runtime": 10.9225,
      "eval_samples_per_second": 36.164,
      "eval_steps_per_second": 4.578,
      "step": 81
    },
    {
      "epoch": 0.35782747603833864,
      "grad_norm": 0.5885350108146667,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.5907,
      "step": 84
    },
    {
      "epoch": 0.3706070287539936,
      "grad_norm": 0.6870759725570679,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.5172,
      "step": 87
    },
    {
      "epoch": 0.38338658146964855,
      "grad_norm": 0.5195698142051697,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.5392,
      "step": 90
    },
    {
      "epoch": 0.38338658146964855,
      "eval_loss": 0.5600429773330688,
      "eval_runtime": 10.9113,
      "eval_samples_per_second": 36.201,
      "eval_steps_per_second": 4.582,
      "step": 90
    },
    {
      "epoch": 0.3961661341853035,
      "grad_norm": 0.5903887152671814,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.6567,
      "step": 93
    },
    {
      "epoch": 0.40894568690095845,
      "grad_norm": 0.5635388493537903,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.587,
      "step": 96
    },
    {
      "epoch": 0.4217252396166134,
      "grad_norm": 0.5114682912826538,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.5674,
      "step": 99
    },
    {
      "epoch": 0.4217252396166134,
      "eval_loss": 0.5595129132270813,
      "eval_runtime": 10.9155,
      "eval_samples_per_second": 36.187,
      "eval_steps_per_second": 4.581,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.61253767364608e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}