{
  "best_metric": 1.9184151887893677,
  "best_model_checkpoint": "./outputs/checkpoint-1200",
  "epoch": 1.7045454545454546,
  "eval_steps": 100,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 0.0002,
      "loss": 2.4493,
      "step": 100
    },
    {
      "epoch": 0.14,
      "eval_loss": 2.11466121673584,
      "eval_runtime": 56.3914,
      "eval_samples_per_second": 26.458,
      "eval_steps_per_second": 3.316,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002,
      "loss": 2.253,
      "step": 200
    },
    {
      "epoch": 0.28,
      "eval_loss": 2.072767734527588,
      "eval_runtime": 54.1215,
      "eval_samples_per_second": 27.568,
      "eval_steps_per_second": 3.455,
      "step": 200
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002,
      "loss": 2.2227,
      "step": 300
    },
    {
      "epoch": 0.43,
      "eval_loss": 2.044309616088867,
      "eval_runtime": 54.0469,
      "eval_samples_per_second": 27.606,
      "eval_steps_per_second": 3.46,
      "step": 300
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002,
      "loss": 2.1901,
      "step": 400
    },
    {
      "epoch": 0.57,
      "eval_loss": 2.029090642929077,
      "eval_runtime": 54.0385,
      "eval_samples_per_second": 27.61,
      "eval_steps_per_second": 3.46,
      "step": 400
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0002,
      "loss": 2.1699,
      "step": 500
    },
    {
      "epoch": 0.71,
      "eval_loss": 2.004894971847534,
      "eval_runtime": 54.1383,
      "eval_samples_per_second": 27.559,
      "eval_steps_per_second": 3.454,
      "step": 500
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0002,
      "loss": 2.1541,
      "step": 600
    },
    {
      "epoch": 0.85,
      "eval_loss": 1.9869917631149292,
      "eval_runtime": 53.9116,
      "eval_samples_per_second": 27.675,
      "eval_steps_per_second": 3.469,
      "step": 600
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0002,
      "loss": 2.133,
      "step": 700
    },
    {
      "epoch": 0.99,
      "eval_loss": 1.9750773906707764,
      "eval_runtime": 53.9551,
      "eval_samples_per_second": 27.653,
      "eval_steps_per_second": 3.466,
      "step": 700
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0002,
      "loss": 2.1014,
      "step": 800
    },
    {
      "epoch": 1.14,
      "eval_loss": 1.960465431213379,
      "eval_runtime": 54.1302,
      "eval_samples_per_second": 27.563,
      "eval_steps_per_second": 3.455,
      "step": 800
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0002,
      "loss": 2.0742,
      "step": 900
    },
    {
      "epoch": 1.28,
      "eval_loss": 1.9468154907226562,
      "eval_runtime": 75.283,
      "eval_samples_per_second": 19.819,
      "eval_steps_per_second": 2.484,
      "step": 900
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0002,
      "loss": 2.0737,
      "step": 1000
    },
    {
      "epoch": 1.42,
      "eval_loss": 1.9387000799179077,
      "eval_runtime": 75.1482,
      "eval_samples_per_second": 19.854,
      "eval_steps_per_second": 2.488,
      "step": 1000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0002,
      "loss": 2.0801,
      "step": 1100
    },
    {
      "epoch": 1.56,
      "eval_loss": 1.9253034591674805,
      "eval_runtime": 75.0032,
      "eval_samples_per_second": 19.892,
      "eval_steps_per_second": 2.493,
      "step": 1100
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.0002,
      "loss": 2.0589,
      "step": 1200
    },
    {
      "epoch": 1.7,
      "eval_loss": 1.9184151887893677,
      "eval_runtime": 75.1429,
      "eval_samples_per_second": 19.856,
      "eval_steps_per_second": 2.489,
      "step": 1200
    }
  ],
  "logging_steps": 100,
  "max_steps": 2112,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 3.65226999975936e+16,
  "trial_name": null,
  "trial_params": null
}