{
  "best_metric": 2.1192965507507324,
  "best_model_checkpoint": "./outputs/checkpoint-2500",
  "epoch": 1.8214936247723132,
  "eval_steps": 100,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7205,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6170873641967773,
      "eval_runtime": 228.505,
      "eval_samples_per_second": 27.457,
      "eval_steps_per_second": 3.435,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5807,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.5663836002349854,
      "eval_runtime": 210.5353,
      "eval_samples_per_second": 29.8,
      "eval_steps_per_second": 3.729,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5398,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.531459331512451,
      "eval_runtime": 245.5646,
      "eval_samples_per_second": 25.549,
      "eval_steps_per_second": 3.197,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5133,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.497565746307373,
      "eval_runtime": 244.6469,
      "eval_samples_per_second": 25.645,
      "eval_steps_per_second": 3.209,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4697,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.4726672172546387,
      "eval_runtime": 244.9375,
      "eval_samples_per_second": 25.615,
      "eval_steps_per_second": 3.205,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4435,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4451191425323486,
      "eval_runtime": 216.439,
      "eval_samples_per_second": 28.987,
      "eval_steps_per_second": 3.627,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.426,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.4199516773223877,
      "eval_runtime": 206.0859,
      "eval_samples_per_second": 30.444,
      "eval_steps_per_second": 3.809,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4041,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.399696111679077,
      "eval_runtime": 205.7858,
      "eval_samples_per_second": 30.488,
      "eval_steps_per_second": 3.815,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.369,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.380272150039673,
      "eval_runtime": 206.0924,
      "eval_samples_per_second": 30.443,
      "eval_steps_per_second": 3.809,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3663,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.361511707305908,
      "eval_runtime": 206.0953,
      "eval_samples_per_second": 30.442,
      "eval_steps_per_second": 3.809,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3596,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.337871551513672,
      "eval_runtime": 206.2542,
      "eval_samples_per_second": 30.419,
      "eval_steps_per_second": 3.806,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3156,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.318875789642334,
      "eval_runtime": 205.5733,
      "eval_samples_per_second": 30.52,
      "eval_steps_per_second": 3.819,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3267,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3044281005859375,
      "eval_runtime": 206.2612,
      "eval_samples_per_second": 30.418,
      "eval_steps_per_second": 3.806,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2788,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.2812631130218506,
      "eval_runtime": 205.7511,
      "eval_samples_per_second": 30.493,
      "eval_steps_per_second": 3.815,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2397,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.2644264698028564,
      "eval_runtime": 206.0922,
      "eval_samples_per_second": 30.443,
      "eval_steps_per_second": 3.809,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.232,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2463176250457764,
      "eval_runtime": 205.9251,
      "eval_samples_per_second": 30.467,
      "eval_steps_per_second": 3.812,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.231,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.2307546138763428,
      "eval_runtime": 206.164,
      "eval_samples_per_second": 30.432,
      "eval_steps_per_second": 3.808,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.1938,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.214383125305176,
      "eval_runtime": 206.1484,
      "eval_samples_per_second": 30.434,
      "eval_steps_per_second": 3.808,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1772,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.2003238201141357,
      "eval_runtime": 207.0672,
      "eval_samples_per_second": 30.299,
      "eval_steps_per_second": 3.791,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1719,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.1876213550567627,
      "eval_runtime": 206.1329,
      "eval_samples_per_second": 30.437,
      "eval_steps_per_second": 3.808,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.1558,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.173450469970703,
      "eval_runtime": 208.4992,
      "eval_samples_per_second": 30.091,
      "eval_steps_per_second": 3.765,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1384,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.157057762145996,
      "eval_runtime": 206.6067,
      "eval_samples_per_second": 30.367,
      "eval_steps_per_second": 3.799,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1292,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.145142078399658,
      "eval_runtime": 311.6945,
      "eval_samples_per_second": 20.129,
      "eval_steps_per_second": 2.518,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.135,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.132389545440674,
      "eval_runtime": 446.4033,
      "eval_samples_per_second": 14.055,
      "eval_steps_per_second": 1.758,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1029,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1192965507507324,
      "eval_runtime": 563.307,
      "eval_samples_per_second": 11.138,
      "eval_steps_per_second": 1.394,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 7.346343543662592e+16,
  "trial_name": null,
  "trial_params": null
}