{
  "best_metric": 1.9589763879776,
  "best_model_checkpoint": "./outputs/checkpoint-4100",
  "epoch": 2.987249544626594,
  "eval_steps": 100,
  "global_step": 4100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7202,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6173269748687744,
      "eval_runtime": 210.4944,
      "eval_samples_per_second": 29.806,
      "eval_steps_per_second": 3.729,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5807,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.566875696182251,
      "eval_runtime": 206.05,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5403,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.531456708908081,
      "eval_runtime": 205.7527,
      "eval_samples_per_second": 30.493,
      "eval_steps_per_second": 3.815,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5135,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.4987542629241943,
      "eval_runtime": 205.7584,
      "eval_samples_per_second": 30.492,
      "eval_steps_per_second": 3.815,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4703,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.4730148315429688,
      "eval_runtime": 205.8153,
      "eval_samples_per_second": 30.484,
      "eval_steps_per_second": 3.814,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4449,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4445066452026367,
      "eval_runtime": 205.9496,
      "eval_samples_per_second": 30.464,
      "eval_steps_per_second": 3.812,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4265,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.421610116958618,
      "eval_runtime": 206.1548,
      "eval_samples_per_second": 30.433,
      "eval_steps_per_second": 3.808,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4059,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.401268720626831,
      "eval_runtime": 206.1413,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3694,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3795337677001953,
      "eval_runtime": 206.144,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3664,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3618950843811035,
      "eval_runtime": 205.9336,
      "eval_samples_per_second": 30.466,
      "eval_steps_per_second": 3.812,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3599,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.338806390762329,
      "eval_runtime": 205.8736,
      "eval_samples_per_second": 30.475,
      "eval_steps_per_second": 3.813,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3162,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.3209476470947266,
      "eval_runtime": 206.0155,
      "eval_samples_per_second": 30.454,
      "eval_steps_per_second": 3.81,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3272,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3050100803375244,
      "eval_runtime": 205.8768,
      "eval_samples_per_second": 30.475,
      "eval_steps_per_second": 3.813,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2787,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.2802278995513916,
      "eval_runtime": 206.0479,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2387,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.263134479522705,
      "eval_runtime": 205.9604,
      "eval_samples_per_second": 30.462,
      "eval_steps_per_second": 3.811,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.2303,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.246272325515747,
      "eval_runtime": 206.1382,
      "eval_samples_per_second": 30.436,
      "eval_steps_per_second": 3.808,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2323,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.231445789337158,
      "eval_runtime": 205.9926,
      "eval_samples_per_second": 30.457,
      "eval_steps_per_second": 3.811,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.193,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2156155109405518,
      "eval_runtime": 217.8526,
      "eval_samples_per_second": 28.799,
      "eval_steps_per_second": 3.603,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1776,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.202601909637451,
      "eval_runtime": 529.3371,
      "eval_samples_per_second": 11.853,
      "eval_steps_per_second": 1.483,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1728,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.1862785816192627,
      "eval_runtime": 232.6763,
      "eval_samples_per_second": 26.965,
      "eval_steps_per_second": 3.374,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.1562,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.174529552459717,
      "eval_runtime": 205.9758,
      "eval_samples_per_second": 30.46,
      "eval_steps_per_second": 3.811,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1386,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.1577847003936768,
      "eval_runtime": 206.4236,
      "eval_samples_per_second": 30.394,
      "eval_steps_per_second": 3.803,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1296,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.147472858428955,
      "eval_runtime": 206.3739,
      "eval_samples_per_second": 30.401,
      "eval_steps_per_second": 3.804,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1374,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.135169506072998,
      "eval_runtime": 206.2542,
      "eval_samples_per_second": 30.419,
      "eval_steps_per_second": 3.806,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1035,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1208178997039795,
      "eval_runtime": 205.9676,
      "eval_samples_per_second": 30.461,
      "eval_steps_per_second": 3.811,
      "step": 2500
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0002,
      "loss": 2.1034,
      "step": 2600
    },
    {
      "epoch": 1.89,
      "eval_loss": 2.1102426052093506,
      "eval_runtime": 217.9461,
      "eval_samples_per_second": 28.787,
      "eval_steps_per_second": 3.602,
      "step": 2600
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002,
      "loss": 2.0912,
      "step": 2700
    },
    {
      "epoch": 1.97,
      "eval_loss": 2.0947265625,
      "eval_runtime": 732.5061,
      "eval_samples_per_second": 8.565,
      "eval_steps_per_second": 1.072,
      "step": 2700
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002,
      "loss": 2.0451,
      "step": 2800
    },
    {
      "epoch": 2.04,
      "eval_loss": 2.0868542194366455,
      "eval_runtime": 206.5812,
      "eval_samples_per_second": 30.371,
      "eval_steps_per_second": 3.8,
      "step": 2800
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 2.0179,
      "step": 2900
    },
    {
      "epoch": 2.11,
      "eval_loss": 2.074641227722168,
      "eval_runtime": 206.384,
      "eval_samples_per_second": 30.4,
      "eval_steps_per_second": 3.804,
      "step": 2900
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0002,
      "loss": 2.0386,
      "step": 3000
    },
    {
      "epoch": 2.19,
      "eval_loss": 2.0661864280700684,
      "eval_runtime": 206.2191,
      "eval_samples_per_second": 30.424,
      "eval_steps_per_second": 3.807,
      "step": 3000
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 2.0078,
      "step": 3100
    },
    {
      "epoch": 2.26,
      "eval_loss": 2.05454158782959,
      "eval_runtime": 206.3207,
      "eval_samples_per_second": 30.409,
      "eval_steps_per_second": 3.805,
      "step": 3100
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002,
      "loss": 2.0222,
      "step": 3200
    },
    {
      "epoch": 2.33,
      "eval_loss": 2.0429155826568604,
      "eval_runtime": 207.289,
      "eval_samples_per_second": 30.267,
      "eval_steps_per_second": 3.787,
      "step": 3200
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.0002,
      "loss": 1.9896,
      "step": 3300
    },
    {
      "epoch": 2.4,
      "eval_loss": 2.031524896621704,
      "eval_runtime": 254.9126,
      "eval_samples_per_second": 24.612,
      "eval_steps_per_second": 3.079,
      "step": 3300
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0002,
      "loss": 1.9847,
      "step": 3400
    },
    {
      "epoch": 2.48,
      "eval_loss": 2.02608060836792,
      "eval_runtime": 206.1634,
      "eval_samples_per_second": 30.432,
      "eval_steps_per_second": 3.808,
      "step": 3400
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0002,
      "loss": 1.9801,
      "step": 3500
    },
    {
      "epoch": 2.55,
      "eval_loss": 2.013916492462158,
      "eval_runtime": 215.475,
      "eval_samples_per_second": 29.117,
      "eval_steps_per_second": 3.643,
      "step": 3500
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 1.9764,
      "step": 3600
    },
    {
      "epoch": 2.62,
      "eval_loss": 2.0044028759002686,
      "eval_runtime": 208.9999,
      "eval_samples_per_second": 30.019,
      "eval_steps_per_second": 3.756,
      "step": 3600
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0002,
      "loss": 1.9572,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "eval_loss": 1.994431972503662,
      "eval_runtime": 209.7442,
      "eval_samples_per_second": 29.913,
      "eval_steps_per_second": 3.743,
      "step": 3700
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 1.9505,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "eval_loss": 1.9890899658203125,
      "eval_runtime": 225.3887,
      "eval_samples_per_second": 27.836,
      "eval_steps_per_second": 3.483,
      "step": 3800
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0002,
      "loss": 1.9371,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "eval_loss": 1.9771522283554077,
      "eval_runtime": 216.1751,
      "eval_samples_per_second": 29.023,
      "eval_steps_per_second": 3.631,
      "step": 3900
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0002,
      "loss": 1.9399,
      "step": 4000
    },
    {
      "epoch": 2.91,
      "eval_loss": 1.9712703227996826,
      "eval_runtime": 210.5398,
      "eval_samples_per_second": 29.8,
      "eval_steps_per_second": 3.729,
      "step": 4000
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.0002,
      "loss": 1.9391,
      "step": 4100
    },
    {
      "epoch": 2.99,
      "eval_loss": 1.9589763879776,
      "eval_runtime": 209.0465,
      "eval_samples_per_second": 30.012,
      "eval_steps_per_second": 3.755,
      "step": 4100
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.2037527679500288e+17,
  "trial_name": null,
  "trial_params": null
}