{
  "best_metric": 1.9760409593582153,
  "best_model_checkpoint": "./outputs/checkpoint-3900",
  "epoch": 2.841530054644809,
  "eval_steps": 100,
  "global_step": 3900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7194,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.617887258529663,
      "eval_runtime": 205.5207,
      "eval_samples_per_second": 30.527,
      "eval_steps_per_second": 3.82,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5817,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.5672028064727783,
      "eval_runtime": 205.4308,
      "eval_samples_per_second": 30.541,
      "eval_steps_per_second": 3.821,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.54,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.5315635204315186,
      "eval_runtime": 205.1897,
      "eval_samples_per_second": 30.577,
      "eval_steps_per_second": 3.826,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5136,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.4977593421936035,
      "eval_runtime": 204.833,
      "eval_samples_per_second": 30.63,
      "eval_steps_per_second": 3.832,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4701,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.472073554992676,
      "eval_runtime": 205.144,
      "eval_samples_per_second": 30.583,
      "eval_steps_per_second": 3.827,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4445,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4433655738830566,
      "eval_runtime": 204.9978,
      "eval_samples_per_second": 30.605,
      "eval_steps_per_second": 3.829,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4276,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.4212682247161865,
      "eval_runtime": 204.9092,
      "eval_samples_per_second": 30.618,
      "eval_steps_per_second": 3.831,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4062,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.4029204845428467,
      "eval_runtime": 205.0316,
      "eval_samples_per_second": 30.6,
      "eval_steps_per_second": 3.829,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3706,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3802640438079834,
      "eval_runtime": 205.3365,
      "eval_samples_per_second": 30.555,
      "eval_steps_per_second": 3.823,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3666,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.361499547958374,
      "eval_runtime": 205.1533,
      "eval_samples_per_second": 30.582,
      "eval_steps_per_second": 3.826,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3602,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.340102434158325,
      "eval_runtime": 205.0301,
      "eval_samples_per_second": 30.6,
      "eval_steps_per_second": 3.829,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3174,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.3213117122650146,
      "eval_runtime": 205.0977,
      "eval_samples_per_second": 30.59,
      "eval_steps_per_second": 3.827,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3283,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3045783042907715,
      "eval_runtime": 205.3162,
      "eval_samples_per_second": 30.558,
      "eval_steps_per_second": 3.823,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2798,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.280726671218872,
      "eval_runtime": 205.3248,
      "eval_samples_per_second": 30.556,
      "eval_steps_per_second": 3.823,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2397,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.26517915725708,
      "eval_runtime": 205.5768,
      "eval_samples_per_second": 30.519,
      "eval_steps_per_second": 3.819,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.2329,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2466039657592773,
      "eval_runtime": 221.8099,
      "eval_samples_per_second": 28.285,
      "eval_steps_per_second": 3.539,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2321,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.2312474250793457,
      "eval_runtime": 221.2202,
      "eval_samples_per_second": 28.361,
      "eval_steps_per_second": 3.549,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.1942,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2154178619384766,
      "eval_runtime": 205.4645,
      "eval_samples_per_second": 30.536,
      "eval_steps_per_second": 3.821,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1794,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.2015323638916016,
      "eval_runtime": 205.5536,
      "eval_samples_per_second": 30.522,
      "eval_steps_per_second": 3.819,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1733,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.188199758529663,
      "eval_runtime": 205.7545,
      "eval_samples_per_second": 30.493,
      "eval_steps_per_second": 3.815,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.158,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.175708770751953,
      "eval_runtime": 206.3248,
      "eval_samples_per_second": 30.408,
      "eval_steps_per_second": 3.805,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.14,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.158797025680542,
      "eval_runtime": 205.208,
      "eval_samples_per_second": 30.574,
      "eval_steps_per_second": 3.825,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.13,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.147514581680298,
      "eval_runtime": 205.8056,
      "eval_samples_per_second": 30.485,
      "eval_steps_per_second": 3.814,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1364,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.135024309158325,
      "eval_runtime": 272.6766,
      "eval_samples_per_second": 23.009,
      "eval_steps_per_second": 2.879,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1042,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.120384454727173,
      "eval_runtime": 205.6423,
      "eval_samples_per_second": 30.509,
      "eval_steps_per_second": 3.817,
      "step": 2500
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0002,
      "loss": 2.1049,
      "step": 2600
    },
    {
      "epoch": 1.89,
      "eval_loss": 2.1099581718444824,
      "eval_runtime": 212.4604,
      "eval_samples_per_second": 29.53,
      "eval_steps_per_second": 3.695,
      "step": 2600
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002,
      "loss": 2.0887,
      "step": 2700
    },
    {
      "epoch": 1.97,
      "eval_loss": 2.09414005279541,
      "eval_runtime": 205.8822,
      "eval_samples_per_second": 30.474,
      "eval_steps_per_second": 3.813,
      "step": 2700
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002,
      "loss": 2.0444,
      "step": 2800
    },
    {
      "epoch": 2.04,
      "eval_loss": 2.0865635871887207,
      "eval_runtime": 205.997,
      "eval_samples_per_second": 30.457,
      "eval_steps_per_second": 3.811,
      "step": 2800
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 2.0172,
      "step": 2900
    },
    {
      "epoch": 2.11,
      "eval_loss": 2.0748651027679443,
      "eval_runtime": 206.0085,
      "eval_samples_per_second": 30.455,
      "eval_steps_per_second": 3.811,
      "step": 2900
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0002,
      "loss": 2.0389,
      "step": 3000
    },
    {
      "epoch": 2.19,
      "eval_loss": 2.0672781467437744,
      "eval_runtime": 206.2433,
      "eval_samples_per_second": 30.42,
      "eval_steps_per_second": 3.806,
      "step": 3000
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 2.009,
      "step": 3100
    },
    {
      "epoch": 2.26,
      "eval_loss": 2.0537331104278564,
      "eval_runtime": 205.4748,
      "eval_samples_per_second": 30.534,
      "eval_steps_per_second": 3.82,
      "step": 3100
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002,
      "loss": 2.0242,
      "step": 3200
    },
    {
      "epoch": 2.33,
      "eval_loss": 2.0447185039520264,
      "eval_runtime": 205.62,
      "eval_samples_per_second": 30.513,
      "eval_steps_per_second": 3.818,
      "step": 3200
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.0002,
      "loss": 1.9883,
      "step": 3300
    },
    {
      "epoch": 2.4,
      "eval_loss": 2.0326316356658936,
      "eval_runtime": 205.8972,
      "eval_samples_per_second": 30.472,
      "eval_steps_per_second": 3.813,
      "step": 3300
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0002,
      "loss": 1.9854,
      "step": 3400
    },
    {
      "epoch": 2.48,
      "eval_loss": 2.0283098220825195,
      "eval_runtime": 205.6153,
      "eval_samples_per_second": 30.513,
      "eval_steps_per_second": 3.818,
      "step": 3400
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0002,
      "loss": 1.9806,
      "step": 3500
    },
    {
      "epoch": 2.55,
      "eval_loss": 2.0158398151397705,
      "eval_runtime": 206.095,
      "eval_samples_per_second": 30.442,
      "eval_steps_per_second": 3.809,
      "step": 3500
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 1.9797,
      "step": 3600
    },
    {
      "epoch": 2.62,
      "eval_loss": 2.0079221725463867,
      "eval_runtime": 205.9217,
      "eval_samples_per_second": 30.468,
      "eval_steps_per_second": 3.812,
      "step": 3600
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0002,
      "loss": 1.9575,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "eval_loss": 1.996368646621704,
      "eval_runtime": 206.2331,
      "eval_samples_per_second": 30.422,
      "eval_steps_per_second": 3.806,
      "step": 3700
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 1.9523,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "eval_loss": 1.9896615743637085,
      "eval_runtime": 206.6251,
      "eval_samples_per_second": 30.364,
      "eval_steps_per_second": 3.799,
      "step": 3800
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0002,
      "loss": 1.9386,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "eval_loss": 1.9760409593582153,
      "eval_runtime": 206.2709,
      "eval_samples_per_second": 30.416,
      "eval_steps_per_second": 3.806,
      "step": 3900
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.1452801265934336e+17,
  "trial_name": null,
  "trial_params": null
}