{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 27.586206896551722,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.300048828125,
      "learning_rate": 0.00019876883405951377,
      "loss": 1.0738,
      "step": 10
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 0.272705078125,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.8853,
      "step": 20
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 0.213134765625,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.7883,
      "step": 30
    },
    {
      "epoch": 5.517241379310345,
      "grad_norm": 0.2236328125,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.7353,
      "step": 40
    },
    {
      "epoch": 6.896551724137931,
      "grad_norm": 0.132568359375,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.7072,
      "step": 50
    },
    {
      "epoch": 8.275862068965518,
      "grad_norm": 0.1220703125,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.6986,
      "step": 60
    },
    {
      "epoch": 9.655172413793103,
      "grad_norm": 0.1285400390625,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.6873,
      "step": 70
    },
    {
      "epoch": 11.03448275862069,
      "grad_norm": 0.1348876953125,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.6762,
      "step": 80
    },
    {
      "epoch": 12.413793103448276,
      "grad_norm": 0.1441650390625,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.6668,
      "step": 90
    },
    {
      "epoch": 13.793103448275861,
      "grad_norm": 0.1851806640625,
      "learning_rate": 0.0001,
      "loss": 0.6648,
      "step": 100
    },
    {
      "epoch": 15.172413793103448,
      "grad_norm": 0.1591796875,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.6564,
      "step": 110
    },
    {
      "epoch": 16.551724137931036,
      "grad_norm": 0.1644287109375,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.6511,
      "step": 120
    },
    {
      "epoch": 17.93103448275862,
      "grad_norm": 0.1719970703125,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.6476,
      "step": 130
    },
    {
      "epoch": 19.310344827586206,
      "grad_norm": 0.1708984375,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.6387,
      "step": 140
    },
    {
      "epoch": 20.689655172413794,
      "grad_norm": 0.170166015625,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.6438,
      "step": 150
    },
    {
      "epoch": 22.06896551724138,
      "grad_norm": 0.185302734375,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.6341,
      "step": 160
    },
    {
      "epoch": 23.448275862068964,
      "grad_norm": 0.1927490234375,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.638,
      "step": 170
    },
    {
      "epoch": 24.82758620689655,
      "grad_norm": 0.189453125,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.6323,
      "step": 180
    },
    {
      "epoch": 26.20689655172414,
      "grad_norm": 0.183837890625,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.6338,
      "step": 190
    },
    {
      "epoch": 27.586206896551722,
      "grad_norm": 0.189453125,
      "learning_rate": 0.0,
      "loss": 0.6344,
      "step": 200
    },
    {
      "epoch": 27.586206896551722,
      "step": 200,
      "total_flos": 4.07672658591744e+16,
      "train_loss": 0.6996826219558716,
      "train_runtime": 575.4411,
      "train_samples_per_second": 11.122,
      "train_steps_per_second": 0.348
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 29,
  "save_steps": 500,
  "total_flos": 4.07672658591744e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}