{
  "best_metric": 2.202601909637451,
  "best_model_checkpoint": "./outputs/checkpoint-1900",
  "epoch": 1.384335154826958,
  "eval_steps": 100,
  "global_step": 1900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7202,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6173269748687744,
      "eval_runtime": 210.4944,
      "eval_samples_per_second": 29.806,
      "eval_steps_per_second": 3.729,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5807,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.566875696182251,
      "eval_runtime": 206.05,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5403,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.531456708908081,
      "eval_runtime": 205.7527,
      "eval_samples_per_second": 30.493,
      "eval_steps_per_second": 3.815,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5135,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.4987542629241943,
      "eval_runtime": 205.7584,
      "eval_samples_per_second": 30.492,
      "eval_steps_per_second": 3.815,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4703,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.4730148315429688,
      "eval_runtime": 205.8153,
      "eval_samples_per_second": 30.484,
      "eval_steps_per_second": 3.814,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4449,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4445066452026367,
      "eval_runtime": 205.9496,
      "eval_samples_per_second": 30.464,
      "eval_steps_per_second": 3.812,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4265,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.421610116958618,
      "eval_runtime": 206.1548,
      "eval_samples_per_second": 30.433,
      "eval_steps_per_second": 3.808,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4059,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.401268720626831,
      "eval_runtime": 206.1413,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3694,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3795337677001953,
      "eval_runtime": 206.144,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3664,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3618950843811035,
      "eval_runtime": 205.9336,
      "eval_samples_per_second": 30.466,
      "eval_steps_per_second": 3.812,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3599,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.338806390762329,
      "eval_runtime": 205.8736,
      "eval_samples_per_second": 30.475,
      "eval_steps_per_second": 3.813,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3162,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.3209476470947266,
      "eval_runtime": 206.0155,
      "eval_samples_per_second": 30.454,
      "eval_steps_per_second": 3.81,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3272,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3050100803375244,
      "eval_runtime": 205.8768,
      "eval_samples_per_second": 30.475,
      "eval_steps_per_second": 3.813,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2787,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.2802278995513916,
      "eval_runtime": 206.0479,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2387,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.263134479522705,
      "eval_runtime": 205.9604,
      "eval_samples_per_second": 30.462,
      "eval_steps_per_second": 3.811,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.2303,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.246272325515747,
      "eval_runtime": 206.1382,
      "eval_samples_per_second": 30.436,
      "eval_steps_per_second": 3.808,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2323,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.231445789337158,
      "eval_runtime": 205.9926,
      "eval_samples_per_second": 30.457,
      "eval_steps_per_second": 3.811,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.193,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2156155109405518,
      "eval_runtime": 217.8526,
      "eval_samples_per_second": 28.799,
      "eval_steps_per_second": 3.603,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1776,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.202601909637451,
      "eval_runtime": 529.3371,
      "eval_samples_per_second": 11.853,
      "eval_steps_per_second": 1.483,
      "step": 1900
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 5.588270155302912e+16,
  "trial_name": null,
  "trial_params": null
}