{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.607843137254903,
  "eval_steps": 250,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "learning_rate": 1.2254901960784313e-05,
      "loss": 2.8121,
      "step": 250
    },
    {
      "epoch": 0.41,
      "eval_loss": 0.8146461248397827,
      "eval_runtime": 28.4909,
      "eval_samples_per_second": 50.086,
      "eval_steps_per_second": 3.159,
      "step": 250
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.4509803921568626e-05,
      "loss": 0.7537,
      "step": 500
    },
    {
      "epoch": 0.82,
      "eval_loss": 0.7322247624397278,
      "eval_runtime": 28.2904,
      "eval_samples_per_second": 50.441,
      "eval_steps_per_second": 3.181,
      "step": 500
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.9643962848297217e-05,
      "loss": 0.6434,
      "step": 750
    },
    {
      "epoch": 1.23,
      "eval_loss": 0.6870973706245422,
      "eval_runtime": 28.2349,
      "eval_samples_per_second": 50.54,
      "eval_steps_per_second": 3.188,
      "step": 750
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.8998968008255932e-05,
      "loss": 0.6207,
      "step": 1000
    },
    {
      "epoch": 1.63,
      "eval_loss": 0.6630086898803711,
      "eval_runtime": 28.2684,
      "eval_samples_per_second": 50.48,
      "eval_steps_per_second": 3.184,
      "step": 1000
    },
    {
      "epoch": 2.04,
      "learning_rate": 2.8353973168214654e-05,
      "loss": 0.5771,
      "step": 1250
    },
    {
      "epoch": 2.04,
      "eval_loss": 0.6516671776771545,
      "eval_runtime": 28.2447,
      "eval_samples_per_second": 50.523,
      "eval_steps_per_second": 3.186,
      "step": 1250
    },
    {
      "epoch": 2.45,
      "learning_rate": 2.7708978328173376e-05,
      "loss": 0.54,
      "step": 1500
    },
    {
      "epoch": 2.45,
      "eval_loss": 0.6349040269851685,
      "eval_runtime": 28.3054,
      "eval_samples_per_second": 50.414,
      "eval_steps_per_second": 3.18,
      "step": 1500
    },
    {
      "epoch": 2.86,
      "learning_rate": 2.7063983488132095e-05,
      "loss": 0.5286,
      "step": 1750
    },
    {
      "epoch": 2.86,
      "eval_loss": 0.6304510831832886,
      "eval_runtime": 28.2763,
      "eval_samples_per_second": 50.466,
      "eval_steps_per_second": 3.183,
      "step": 1750
    },
    {
      "epoch": 3.27,
      "learning_rate": 2.6418988648090816e-05,
      "loss": 0.4998,
      "step": 2000
    },
    {
      "epoch": 3.27,
      "eval_loss": 0.6260914206504822,
      "eval_runtime": 28.3237,
      "eval_samples_per_second": 50.382,
      "eval_steps_per_second": 3.178,
      "step": 2000
    },
    {
      "epoch": 3.68,
      "learning_rate": 2.577399380804954e-05,
      "loss": 0.4874,
      "step": 2250
    },
    {
      "epoch": 3.68,
      "eval_loss": 0.6250064969062805,
      "eval_runtime": 28.23,
      "eval_samples_per_second": 50.549,
      "eval_steps_per_second": 3.188,
      "step": 2250
    },
    {
      "epoch": 4.08,
      "learning_rate": 2.5128998968008257e-05,
      "loss": 0.4704,
      "step": 2500
    },
    {
      "epoch": 4.08,
      "eval_loss": 0.6252643465995789,
      "eval_runtime": 28.3447,
      "eval_samples_per_second": 50.344,
      "eval_steps_per_second": 3.175,
      "step": 2500
    },
    {
      "epoch": 4.49,
      "learning_rate": 2.4484004127966975e-05,
      "loss": 0.4431,
      "step": 2750
    },
    {
      "epoch": 4.49,
      "eval_loss": 0.6222314238548279,
      "eval_runtime": 28.3136,
      "eval_samples_per_second": 50.4,
      "eval_steps_per_second": 3.179,
      "step": 2750
    },
    {
      "epoch": 4.9,
      "learning_rate": 2.3839009287925697e-05,
      "loss": 0.4502,
      "step": 3000
    },
    {
      "epoch": 4.9,
      "eval_loss": 0.6138860583305359,
      "eval_runtime": 28.2755,
      "eval_samples_per_second": 50.468,
      "eval_steps_per_second": 3.183,
      "step": 3000
    },
    {
      "epoch": 5.31,
      "learning_rate": 2.3194014447884416e-05,
      "loss": 0.4216,
      "step": 3250
    },
    {
      "epoch": 5.31,
      "eval_loss": 0.6158970594406128,
      "eval_runtime": 28.2142,
      "eval_samples_per_second": 50.577,
      "eval_steps_per_second": 3.19,
      "step": 3250
    },
    {
      "epoch": 5.72,
      "learning_rate": 2.2549019607843138e-05,
      "loss": 0.4146,
      "step": 3500
    },
    {
      "epoch": 5.72,
      "eval_loss": 0.6141221523284912,
      "eval_runtime": 28.2223,
      "eval_samples_per_second": 50.563,
      "eval_steps_per_second": 3.189,
      "step": 3500
    },
    {
      "epoch": 6.13,
      "learning_rate": 2.190402476780186e-05,
      "loss": 0.4079,
      "step": 3750
    },
    {
      "epoch": 6.13,
      "eval_loss": 0.6209476590156555,
      "eval_runtime": 28.2459,
      "eval_samples_per_second": 50.521,
      "eval_steps_per_second": 3.186,
      "step": 3750
    },
    {
      "epoch": 6.54,
      "learning_rate": 2.1259029927760578e-05,
      "loss": 0.3882,
      "step": 4000
    },
    {
      "epoch": 6.54,
      "eval_loss": 0.6084998250007629,
      "eval_runtime": 28.195,
      "eval_samples_per_second": 50.612,
      "eval_steps_per_second": 3.192,
      "step": 4000
    },
    {
      "epoch": 6.94,
      "learning_rate": 2.06140350877193e-05,
      "loss": 0.3917,
      "step": 4250
    },
    {
      "epoch": 6.94,
      "eval_loss": 0.6080483198165894,
      "eval_runtime": 28.2021,
      "eval_samples_per_second": 50.599,
      "eval_steps_per_second": 3.191,
      "step": 4250
    },
    {
      "epoch": 7.35,
      "learning_rate": 1.9969040247678018e-05,
      "loss": 0.3654,
      "step": 4500
    },
    {
      "epoch": 7.35,
      "eval_loss": 0.616654634475708,
      "eval_runtime": 28.1521,
      "eval_samples_per_second": 50.689,
      "eval_steps_per_second": 3.197,
      "step": 4500
    },
    {
      "epoch": 7.76,
      "learning_rate": 1.9324045407636737e-05,
      "loss": 0.3672,
      "step": 4750
    },
    {
      "epoch": 7.76,
      "eval_loss": 0.6128713488578796,
      "eval_runtime": 28.1986,
      "eval_samples_per_second": 50.605,
      "eval_steps_per_second": 3.192,
      "step": 4750
    },
    {
      "epoch": 8.17,
      "learning_rate": 1.867905056759546e-05,
      "loss": 0.3562,
      "step": 5000
    },
    {
      "epoch": 8.17,
      "eval_loss": 0.6237680315971375,
      "eval_runtime": 28.209,
      "eval_samples_per_second": 50.587,
      "eval_steps_per_second": 3.19,
      "step": 5000
    },
    {
      "epoch": 8.58,
      "learning_rate": 1.803405572755418e-05,
      "loss": 0.3383,
      "step": 5250
    },
    {
      "epoch": 8.58,
      "eval_loss": 0.6211274266242981,
      "eval_runtime": 28.1959,
      "eval_samples_per_second": 50.61,
      "eval_steps_per_second": 3.192,
      "step": 5250
    },
    {
      "epoch": 8.99,
      "learning_rate": 1.73890608875129e-05,
      "loss": 0.347,
      "step": 5500
    },
    {
      "epoch": 8.99,
      "eval_loss": 0.6140613555908203,
      "eval_runtime": 28.169,
      "eval_samples_per_second": 50.659,
      "eval_steps_per_second": 3.195,
      "step": 5500
    },
    {
      "epoch": 9.4,
      "learning_rate": 1.674406604747162e-05,
      "loss": 0.3209,
      "step": 5750
    },
    {
      "epoch": 9.4,
      "eval_loss": 0.6265636682510376,
      "eval_runtime": 28.1893,
      "eval_samples_per_second": 50.622,
      "eval_steps_per_second": 3.193,
      "step": 5750
    },
    {
      "epoch": 9.8,
      "learning_rate": 1.6099071207430343e-05,
      "loss": 0.3268,
      "step": 6000
    },
    {
      "epoch": 9.8,
      "eval_loss": 0.6181433796882629,
      "eval_runtime": 28.1932,
      "eval_samples_per_second": 50.615,
      "eval_steps_per_second": 3.192,
      "step": 6000
    },
    {
      "epoch": 10.21,
      "learning_rate": 1.545407636738906e-05,
      "loss": 0.3126,
      "step": 6250
    },
    {
      "epoch": 10.21,
      "eval_loss": 0.6234831809997559,
      "eval_runtime": 28.1977,
      "eval_samples_per_second": 50.607,
      "eval_steps_per_second": 3.192,
      "step": 6250
    },
    {
      "epoch": 10.62,
      "learning_rate": 1.4809081527347781e-05,
      "loss": 0.3075,
      "step": 6500
    },
    {
      "epoch": 10.62,
      "eval_loss": 0.6242398023605347,
      "eval_runtime": 28.2355,
      "eval_samples_per_second": 50.539,
      "eval_steps_per_second": 3.187,
      "step": 6500
    },
    {
      "epoch": 11.03,
      "learning_rate": 1.4164086687306502e-05,
      "loss": 0.3113,
      "step": 6750
    },
    {
      "epoch": 11.03,
      "eval_loss": 0.6290929913520813,
      "eval_runtime": 28.2357,
      "eval_samples_per_second": 50.539,
      "eval_steps_per_second": 3.187,
      "step": 6750
    },
    {
      "epoch": 11.44,
      "learning_rate": 1.3519091847265222e-05,
      "loss": 0.2921,
      "step": 7000
    },
    {
      "epoch": 11.44,
      "eval_loss": 0.6291936039924622,
      "eval_runtime": 28.3781,
      "eval_samples_per_second": 50.285,
      "eval_steps_per_second": 3.171,
      "step": 7000
    },
    {
      "epoch": 11.85,
      "learning_rate": 1.2874097007223944e-05,
      "loss": 0.2933,
      "step": 7250
    },
    {
      "epoch": 11.85,
      "eval_loss": 0.6279457807540894,
      "eval_runtime": 28.2812,
      "eval_samples_per_second": 50.457,
      "eval_steps_per_second": 3.182,
      "step": 7250
    },
    {
      "epoch": 12.25,
      "learning_rate": 1.2229102167182664e-05,
      "loss": 0.2808,
      "step": 7500
    },
    {
      "epoch": 12.25,
      "eval_loss": 0.6309792995452881,
      "eval_runtime": 28.2292,
      "eval_samples_per_second": 50.55,
      "eval_steps_per_second": 3.188,
      "step": 7500
    },
    {
      "epoch": 12.66,
      "learning_rate": 1.1584107327141382e-05,
      "loss": 0.2809,
      "step": 7750
    },
    {
      "epoch": 12.66,
      "eval_loss": 0.6356364488601685,
      "eval_runtime": 28.2615,
      "eval_samples_per_second": 50.493,
      "eval_steps_per_second": 3.185,
      "step": 7750
    },
    {
      "epoch": 13.07,
      "learning_rate": 1.0939112487100104e-05,
      "loss": 0.2778,
      "step": 8000
    },
    {
      "epoch": 13.07,
      "eval_loss": 0.6354484558105469,
      "eval_runtime": 28.3136,
      "eval_samples_per_second": 50.4,
      "eval_steps_per_second": 3.179,
      "step": 8000
    },
    {
      "epoch": 13.48,
      "learning_rate": 1.0294117647058824e-05,
      "loss": 0.2707,
      "step": 8250
    },
    {
      "epoch": 13.48,
      "eval_loss": 0.6366216540336609,
      "eval_runtime": 28.3106,
      "eval_samples_per_second": 50.405,
      "eval_steps_per_second": 3.179,
      "step": 8250
    },
    {
      "epoch": 13.89,
      "learning_rate": 9.649122807017543e-06,
      "loss": 0.2644,
      "step": 8500
    },
    {
      "epoch": 13.89,
      "eval_loss": 0.6386236548423767,
      "eval_runtime": 28.3478,
      "eval_samples_per_second": 50.339,
      "eval_steps_per_second": 3.175,
      "step": 8500
    },
    {
      "epoch": 14.3,
      "learning_rate": 9.004127966976265e-06,
      "loss": 0.2574,
      "step": 8750
    },
    {
      "epoch": 14.3,
      "eval_loss": 0.6385951638221741,
      "eval_runtime": 28.3245,
      "eval_samples_per_second": 50.38,
      "eval_steps_per_second": 3.177,
      "step": 8750
    },
    {
      "epoch": 14.71,
      "learning_rate": 8.359133126934985e-06,
      "loss": 0.2564,
      "step": 9000
    },
    {
      "epoch": 14.71,
      "eval_loss": 0.6378169059753418,
      "eval_runtime": 28.3461,
      "eval_samples_per_second": 50.342,
      "eval_steps_per_second": 3.175,
      "step": 9000
    },
    {
      "epoch": 15.11,
      "learning_rate": 7.714138286893705e-06,
      "loss": 0.2555,
      "step": 9250
    },
    {
      "epoch": 15.11,
      "eval_loss": 0.6417073011398315,
      "eval_runtime": 28.3435,
      "eval_samples_per_second": 50.347,
      "eval_steps_per_second": 3.175,
      "step": 9250
    },
    {
      "epoch": 15.52,
      "learning_rate": 7.069143446852426e-06,
      "loss": 0.2478,
      "step": 9500
    },
    {
      "epoch": 15.52,
      "eval_loss": 0.6406723260879517,
      "eval_runtime": 28.3209,
      "eval_samples_per_second": 50.387,
      "eval_steps_per_second": 3.178,
      "step": 9500
    },
    {
      "epoch": 15.93,
      "learning_rate": 6.424148606811145e-06,
      "loss": 0.2504,
      "step": 9750
    },
    {
      "epoch": 15.93,
      "eval_loss": 0.6424481272697449,
      "eval_runtime": 28.3698,
      "eval_samples_per_second": 50.3,
      "eval_steps_per_second": 3.172,
      "step": 9750
    },
    {
      "epoch": 16.34,
      "learning_rate": 5.779153766769866e-06,
      "loss": 0.2437,
      "step": 10000
    },
    {
      "epoch": 16.34,
      "eval_loss": 0.6423542499542236,
      "eval_runtime": 28.3599,
      "eval_samples_per_second": 50.318,
      "eval_steps_per_second": 3.173,
      "step": 10000
    },
    {
      "epoch": 16.75,
      "learning_rate": 5.134158926728587e-06,
      "loss": 0.2399,
      "step": 10250
    },
    {
      "epoch": 16.75,
      "eval_loss": 0.645331621170044,
      "eval_runtime": 28.3144,
      "eval_samples_per_second": 50.398,
      "eval_steps_per_second": 3.179,
      "step": 10250
    },
    {
      "epoch": 17.16,
      "learning_rate": 4.489164086687306e-06,
      "loss": 0.238,
      "step": 10500
    },
    {
      "epoch": 17.16,
      "eval_loss": 0.6478310227394104,
      "eval_runtime": 28.3979,
      "eval_samples_per_second": 50.25,
      "eval_steps_per_second": 3.169,
      "step": 10500
    },
    {
      "epoch": 17.57,
      "learning_rate": 3.844169246646027e-06,
      "loss": 0.2359,
      "step": 10750
    },
    {
      "epoch": 17.57,
      "eval_loss": 0.6469157934188843,
      "eval_runtime": 28.3531,
      "eval_samples_per_second": 50.33,
      "eval_steps_per_second": 3.174,
      "step": 10750
    },
    {
      "epoch": 17.97,
      "learning_rate": 3.1991744066047476e-06,
      "loss": 0.2325,
      "step": 11000
    },
    {
      "epoch": 17.97,
      "eval_loss": 0.6457414031028748,
      "eval_runtime": 28.3364,
      "eval_samples_per_second": 50.359,
      "eval_steps_per_second": 3.176,
      "step": 11000
    },
    {
      "epoch": 18.38,
      "learning_rate": 2.5541795665634673e-06,
      "loss": 0.2294,
      "step": 11250
    },
    {
      "epoch": 18.38,
      "eval_loss": 0.6480973958969116,
      "eval_runtime": 28.308,
      "eval_samples_per_second": 50.41,
      "eval_steps_per_second": 3.179,
      "step": 11250
    },
    {
      "epoch": 18.79,
      "learning_rate": 1.909184726522188e-06,
      "loss": 0.2306,
      "step": 11500
    },
    {
      "epoch": 18.79,
      "eval_loss": 0.6470589637756348,
      "eval_runtime": 28.2173,
      "eval_samples_per_second": 50.572,
      "eval_steps_per_second": 3.19,
      "step": 11500
    },
    {
      "epoch": 19.2,
      "learning_rate": 1.264189886480908e-06,
      "loss": 0.2289,
      "step": 11750
    },
    {
      "epoch": 19.2,
      "eval_loss": 0.6486860513687134,
      "eval_runtime": 28.2611,
      "eval_samples_per_second": 50.493,
      "eval_steps_per_second": 3.185,
      "step": 11750
    },
    {
      "epoch": 19.61,
      "learning_rate": 6.191950464396285e-07,
      "loss": 0.2269,
      "step": 12000
    },
    {
      "epoch": 19.61,
      "eval_loss": 0.6482470631599426,
      "eval_runtime": 28.2376,
      "eval_samples_per_second": 50.535,
      "eval_steps_per_second": 3.187,
      "step": 12000
    }
  ],
  "logging_steps": 250,
  "max_steps": 12240,
  "num_train_epochs": 20,
  "save_steps": 250,
  "total_flos": 5.845452187030733e+16,
  "trial_name": null,
  "trial_params": null
}