{
  "best_metric": 1.8730626106262207,
  "best_model_checkpoint": "./outputs/checkpoint-1800",
  "epoch": 2.5568181818181817,
  "eval_steps": 100,
  "global_step": 1800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 0.0002,
      "loss": 2.4493,
      "step": 100
    },
    {
      "epoch": 0.14,
      "eval_loss": 2.11466121673584,
      "eval_runtime": 56.3914,
      "eval_samples_per_second": 26.458,
      "eval_steps_per_second": 3.316,
      "step": 100
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002,
      "loss": 2.253,
      "step": 200
    },
    {
      "epoch": 0.28,
      "eval_loss": 2.072767734527588,
      "eval_runtime": 54.1215,
      "eval_samples_per_second": 27.568,
      "eval_steps_per_second": 3.455,
      "step": 200
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0002,
      "loss": 2.2227,
      "step": 300
    },
    {
      "epoch": 0.43,
      "eval_loss": 2.044309616088867,
      "eval_runtime": 54.0469,
      "eval_samples_per_second": 27.606,
      "eval_steps_per_second": 3.46,
      "step": 300
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002,
      "loss": 2.1901,
      "step": 400
    },
    {
      "epoch": 0.57,
      "eval_loss": 2.029090642929077,
      "eval_runtime": 54.0385,
      "eval_samples_per_second": 27.61,
      "eval_steps_per_second": 3.46,
      "step": 400
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0002,
      "loss": 2.1699,
      "step": 500
    },
    {
      "epoch": 0.71,
      "eval_loss": 2.004894971847534,
      "eval_runtime": 54.1383,
      "eval_samples_per_second": 27.559,
      "eval_steps_per_second": 3.454,
      "step": 500
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0002,
      "loss": 2.1541,
      "step": 600
    },
    {
      "epoch": 0.85,
      "eval_loss": 1.9869917631149292,
      "eval_runtime": 53.9116,
      "eval_samples_per_second": 27.675,
      "eval_steps_per_second": 3.469,
      "step": 600
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0002,
      "loss": 2.133,
      "step": 700
    },
    {
      "epoch": 0.99,
      "eval_loss": 1.9750773906707764,
      "eval_runtime": 53.9551,
      "eval_samples_per_second": 27.653,
      "eval_steps_per_second": 3.466,
      "step": 700
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0002,
      "loss": 2.1014,
      "step": 800
    },
    {
      "epoch": 1.14,
      "eval_loss": 1.960465431213379,
      "eval_runtime": 54.1302,
      "eval_samples_per_second": 27.563,
      "eval_steps_per_second": 3.455,
      "step": 800
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0002,
      "loss": 2.0742,
      "step": 900
    },
    {
      "epoch": 1.28,
      "eval_loss": 1.9468154907226562,
      "eval_runtime": 75.283,
      "eval_samples_per_second": 19.819,
      "eval_steps_per_second": 2.484,
      "step": 900
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0002,
      "loss": 2.0737,
      "step": 1000
    },
    {
      "epoch": 1.42,
      "eval_loss": 1.9387000799179077,
      "eval_runtime": 75.1482,
      "eval_samples_per_second": 19.854,
      "eval_steps_per_second": 2.488,
      "step": 1000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0002,
      "loss": 2.0801,
      "step": 1100
    },
    {
      "epoch": 1.56,
      "eval_loss": 1.9253034591674805,
      "eval_runtime": 75.0032,
      "eval_samples_per_second": 19.892,
      "eval_steps_per_second": 2.493,
      "step": 1100
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.0002,
      "loss": 2.0589,
      "step": 1200
    },
    {
      "epoch": 1.7,
      "eval_loss": 1.9184151887893677,
      "eval_runtime": 75.1429,
      "eval_samples_per_second": 19.856,
      "eval_steps_per_second": 2.489,
      "step": 1200
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0002,
      "loss": 2.042,
      "step": 1300
    },
    {
      "epoch": 1.85,
      "eval_loss": 1.905383586883545,
      "eval_runtime": 75.0775,
      "eval_samples_per_second": 19.873,
      "eval_steps_per_second": 2.491,
      "step": 1300
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0002,
      "loss": 2.0463,
      "step": 1400
    },
    {
      "epoch": 1.99,
      "eval_loss": 1.8999686241149902,
      "eval_runtime": 75.2179,
      "eval_samples_per_second": 19.836,
      "eval_steps_per_second": 2.486,
      "step": 1400
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.0002,
      "loss": 1.9962,
      "step": 1500
    },
    {
      "epoch": 2.13,
      "eval_loss": 1.895570158958435,
      "eval_runtime": 74.7767,
      "eval_samples_per_second": 19.953,
      "eval_steps_per_second": 2.501,
      "step": 1500
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.0002,
      "loss": 2.0009,
      "step": 1600
    },
    {
      "epoch": 2.27,
      "eval_loss": 1.885960340499878,
      "eval_runtime": 75.0366,
      "eval_samples_per_second": 19.884,
      "eval_steps_per_second": 2.492,
      "step": 1600
    },
    {
      "epoch": 2.41,
      "learning_rate": 0.0002,
      "loss": 1.9859,
      "step": 1700
    },
    {
      "epoch": 2.41,
      "eval_loss": 1.8800793886184692,
      "eval_runtime": 78.108,
      "eval_samples_per_second": 19.102,
      "eval_steps_per_second": 2.394,
      "step": 1700
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.0002,
      "loss": 1.977,
      "step": 1800
    },
    {
      "epoch": 2.56,
      "eval_loss": 1.8730626106262207,
      "eval_runtime": 78.7309,
      "eval_samples_per_second": 18.951,
      "eval_steps_per_second": 2.375,
      "step": 1800
    }
  ],
  "logging_steps": 100,
  "max_steps": 2112,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 5.483734889084928e+16,
  "trial_name": null,
  "trial_params": null
}