|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.961038961038961,
  "eval_steps": 500,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 1.4391919003064584,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 2.7465,
      "step": 1
    },
    {
      "epoch": 0.26,
      "grad_norm": 1.01878785429204,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.3521,
      "step": 5
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.7432627531351906,
      "learning_rate": 1.9697969360350098e-05,
      "loss": 2.3746,
      "step": 10
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.6306502028658637,
      "learning_rate": 1.8502171357296144e-05,
      "loss": 2.2849,
      "step": 15
    },
    {
      "epoch": 0.99,
      "eval_loss": 1.2240214347839355,
      "eval_runtime": 38.055,
      "eval_samples_per_second": 9.618,
      "eval_steps_per_second": 0.604,
      "step": 19
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.8323697335437488,
      "learning_rate": 1.650618300204242e-05,
      "loss": 2.41,
      "step": 20
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.8356330876376933,
      "learning_rate": 1.3897858732926794e-05,
      "loss": 2.078,
      "step": 25
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.8229304096804662,
      "learning_rate": 1.092268359463302e-05,
      "loss": 2.0295,
      "step": 30
    },
    {
      "epoch": 1.82,
      "grad_norm": 0.6758717282694323,
      "learning_rate": 7.860669167935028e-06,
      "loss": 1.9826,
      "step": 35
    },
    {
      "epoch": 1.97,
      "eval_loss": 1.252795934677124,
      "eval_runtime": 38.2606,
      "eval_samples_per_second": 9.566,
      "eval_steps_per_second": 0.601,
      "step": 38
    },
    {
      "epoch": 2.08,
      "grad_norm": 1.0361066126350802,
      "learning_rate": 5.000000000000003e-06,
      "loss": 1.9803,
      "step": 40
    },
    {
      "epoch": 2.34,
      "grad_norm": 0.9815477883010376,
      "learning_rate": 2.6099108277934105e-06,
      "loss": 1.8108,
      "step": 45
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.894384535047576,
      "learning_rate": 9.153472818047627e-07,
      "loss": 1.8236,
      "step": 50
    },
    {
      "epoch": 2.86,
      "grad_norm": 0.8145920730047358,
      "learning_rate": 7.579490328064265e-08,
      "loss": 1.7301,
      "step": 55
    },
    {
      "epoch": 2.96,
      "eval_loss": 1.304147720336914,
      "eval_runtime": 38.5373,
      "eval_samples_per_second": 9.497,
      "eval_steps_per_second": 0.597,
      "step": 57
    },
    {
      "epoch": 2.96,
      "step": 57,
      "total_flos": 11882295459840.0,
      "train_loss": 2.079949947825649,
      "train_runtime": 1266.1143,
      "train_samples_per_second": 2.912,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 5,
  "max_steps": 57,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 11882295459840.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|