{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.4078254326561326,
  "eval_steps": 20,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 5e-07,
      "loss": 1.1046,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 1e-06,
      "loss": 1.0664,
      "step": 20
    },
    {
      "epoch": 0.06,
      "eval_accuracy": 0.44479243019924036,
      "eval_loss": 1.065091609954834,
      "eval_runtime": 50.348,
      "eval_samples_per_second": 596.131,
      "eval_steps_per_second": 1.172,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.5e-06,
      "loss": 1.0672,
      "step": 30
    },
    {
      "epoch": 0.12,
      "learning_rate": 2e-06,
      "loss": 1.0423,
      "step": 40
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.5033650962883988,
      "eval_loss": 1.0188277959823608,
      "eval_runtime": 49.614,
      "eval_samples_per_second": 604.95,
      "eval_steps_per_second": 1.189,
      "step": 40
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5e-06,
      "loss": 1.027,
      "step": 50
    },
    {
      "epoch": 0.18,
      "learning_rate": 3e-06,
      "loss": 1.0137,
      "step": 60
    },
    {
      "epoch": 0.18,
      "eval_accuracy": 0.5279203038581995,
      "eval_loss": 0.9871189594268799,
      "eval_runtime": 49.5201,
      "eval_samples_per_second": 606.098,
      "eval_steps_per_second": 1.191,
      "step": 60
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.5e-06,
      "loss": 1.0005,
      "step": 70
    },
    {
      "epoch": 0.24,
      "learning_rate": 4e-06,
      "loss": 1.0027,
      "step": 80
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.5308189511561271,
      "eval_loss": 0.9888613224029541,
      "eval_runtime": 49.4469,
      "eval_samples_per_second": 606.994,
      "eval_steps_per_second": 1.193,
      "step": 80
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.5e-06,
      "loss": 0.9937,
      "step": 90
    },
    {
      "epoch": 0.3,
      "learning_rate": 5e-06,
      "loss": 0.9914,
      "step": 100
    },
    {
      "epoch": 0.3,
      "eval_accuracy": 0.5307856333710935,
      "eval_loss": 0.9762536883354187,
      "eval_runtime": 49.4804,
      "eval_samples_per_second": 606.583,
      "eval_steps_per_second": 1.192,
      "step": 100
    },
    {
      "epoch": 0.33,
      "learning_rate": 5.5e-06,
      "loss": 0.9884,
      "step": 110
    },
    {
      "epoch": 0.36,
      "learning_rate": 6e-06,
      "loss": 0.9826,
      "step": 120
    },
    {
      "epoch": 0.36,
      "eval_accuracy": 0.5387819017791697,
      "eval_loss": 0.9713281989097595,
      "eval_runtime": 49.4983,
      "eval_samples_per_second": 606.364,
      "eval_steps_per_second": 1.192,
      "step": 120
    },
    {
      "epoch": 0.39,
      "learning_rate": 6.5e-06,
      "loss": 0.9882,
      "step": 130
    },
    {
      "epoch": 0.42,
      "learning_rate": 7e-06,
      "loss": 0.9788,
      "step": 140
    },
    {
      "epoch": 0.42,
      "eval_accuracy": 0.5312520823615646,
      "eval_loss": 0.9766249656677246,
      "eval_runtime": 49.5213,
      "eval_samples_per_second": 606.083,
      "eval_steps_per_second": 1.191,
      "step": 140
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.5e-06,
      "loss": 0.9854,
      "step": 150
    },
    {
      "epoch": 0.48,
      "learning_rate": 8e-06,
      "loss": 0.984,
      "step": 160
    },
    {
      "epoch": 0.48,
      "eval_accuracy": 0.5398147531152129,
      "eval_loss": 0.9589501619338989,
      "eval_runtime": 49.4977,
      "eval_samples_per_second": 606.372,
      "eval_steps_per_second": 1.192,
      "step": 160
    },
    {
      "epoch": 0.51,
      "learning_rate": 8.500000000000002e-06,
      "loss": 0.9795,
      "step": 170
    },
    {
      "epoch": 0.54,
      "learning_rate": 9e-06,
      "loss": 0.9694,
      "step": 180
    },
    {
      "epoch": 0.54,
      "eval_accuracy": 0.5423469047777704,
      "eval_loss": 0.953514814376831,
      "eval_runtime": 49.4511,
      "eval_samples_per_second": 606.943,
      "eval_steps_per_second": 1.193,
      "step": 180
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.5e-06,
      "loss": 0.9757,
      "step": 190
    },
    {
      "epoch": 0.6,
      "learning_rate": 1e-05,
      "loss": 0.9676,
      "step": 200
    },
    {
      "epoch": 0.6,
      "eval_accuracy": 0.567201972412874,
      "eval_loss": 0.9273685812950134,
      "eval_runtime": 49.4122,
      "eval_samples_per_second": 607.421,
      "eval_steps_per_second": 1.194,
      "step": 200
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.0500000000000001e-05,
      "loss": 0.9708,
      "step": 210
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.1e-05,
      "loss": 0.9753,
      "step": 220
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.573598987139335,
      "eval_loss": 0.912590503692627,
      "eval_runtime": 49.2827,
      "eval_samples_per_second": 609.017,
      "eval_steps_per_second": 1.197,
      "step": 220
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.15e-05,
      "loss": 0.9586,
      "step": 230
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.2e-05,
      "loss": 0.9557,
      "step": 240
    },
    {
      "epoch": 0.72,
      "eval_accuracy": 0.5759645498767242,
      "eval_loss": 0.9052607417106628,
      "eval_runtime": 49.4457,
      "eval_samples_per_second": 607.01,
      "eval_steps_per_second": 1.193,
      "step": 240
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.25e-05,
      "loss": 0.9518,
      "step": 250
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.3e-05,
      "loss": 0.9508,
      "step": 260
    },
    {
      "epoch": 0.78,
      "eval_accuracy": 0.5766975411474645,
      "eval_loss": 0.9178985953330994,
      "eval_runtime": 49.3132,
      "eval_samples_per_second": 608.641,
      "eval_steps_per_second": 1.196,
      "step": 260
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.35e-05,
      "loss": 0.9405,
      "step": 270
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4e-05,
      "loss": 0.9355,
      "step": 280
    },
    {
      "epoch": 0.84,
      "eval_accuracy": 0.58915839275005,
      "eval_loss": 0.8937407732009888,
      "eval_runtime": 49.4342,
      "eval_samples_per_second": 607.15,
      "eval_steps_per_second": 1.194,
      "step": 280
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.4500000000000002e-05,
      "loss": 0.9235,
      "step": 290
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.5e-05,
      "loss": 0.9,
      "step": 300
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.613013926834144,
      "eval_loss": 0.8468813300132751,
      "eval_runtime": 49.5046,
      "eval_samples_per_second": 606.287,
      "eval_steps_per_second": 1.192,
      "step": 300
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.55e-05,
      "loss": 0.8857,
      "step": 310
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.6e-05,
      "loss": 0.993,
      "step": 320
    },
    {
      "epoch": 0.96,
      "eval_accuracy": 0.6046511627906976,
      "eval_loss": 0.8615403771400452,
      "eval_runtime": 49.5041,
      "eval_samples_per_second": 606.294,
      "eval_steps_per_second": 1.192,
      "step": 320
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.65e-05,
      "loss": 0.8418,
      "step": 330
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.7000000000000003e-05,
      "loss": 0.8527,
      "step": 340
    },
    {
      "epoch": 1.02,
      "eval_accuracy": 0.6439328313453722,
      "eval_loss": 0.7896137237548828,
      "eval_runtime": 49.4448,
      "eval_samples_per_second": 607.02,
      "eval_steps_per_second": 1.193,
      "step": 340
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.7500000000000002e-05,
      "loss": 0.8982,
      "step": 350
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.8e-05,
      "loss": 0.966,
      "step": 360
    },
    {
      "epoch": 1.08,
      "eval_accuracy": 0.5315852602119011,
      "eval_loss": 1.0123510360717773,
      "eval_runtime": 49.4838,
      "eval_samples_per_second": 606.542,
      "eval_steps_per_second": 1.192,
      "step": 360
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.85e-05,
      "loss": 0.9413,
      "step": 370
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.9e-05,
      "loss": 0.8441,
      "step": 380
    },
    {
      "epoch": 1.14,
      "eval_accuracy": 0.6488638635303525,
      "eval_loss": 0.791083574295044,
      "eval_runtime": 49.4701,
      "eval_samples_per_second": 606.71,
      "eval_steps_per_second": 1.193,
      "step": 380
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.95e-05,
      "loss": 0.8223,
      "step": 390
    },
    {
      "epoch": 1.2,
      "learning_rate": 2e-05,
      "loss": 0.8226,
      "step": 400
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.6699873392416872,
      "eval_loss": 0.7472424507141113,
      "eval_runtime": 49.4938,
      "eval_samples_per_second": 606.419,
      "eval_steps_per_second": 1.192,
      "step": 400
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.05e-05,
      "loss": 0.7924,
      "step": 410
    },
    {
      "epoch": 1.26,
      "learning_rate": 2.1000000000000002e-05,
      "loss": 0.7948,
      "step": 420
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.6580595721996402,
      "eval_loss": 0.7663838863372803,
      "eval_runtime": 49.5064,
      "eval_samples_per_second": 606.265,
      "eval_steps_per_second": 1.192,
      "step": 420
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.1499999999999997e-05,
      "loss": 0.776,
      "step": 430
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.2e-05,
      "loss": 0.7428,
      "step": 440
    },
    {
      "epoch": 1.32,
      "eval_accuracy": 0.6991737189311654,
      "eval_loss": 0.6993714570999146,
      "eval_runtime": 49.4888,
      "eval_samples_per_second": 606.481,
      "eval_steps_per_second": 1.192,
      "step": 440
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.2499999999999998e-05,
      "loss": 0.7512,
      "step": 450
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.3e-05,
      "loss": 0.7109,
      "step": 460
    },
    {
      "epoch": 1.38,
      "eval_accuracy": 0.7283600986206437,
      "eval_loss": 0.6510820984840393,
      "eval_runtime": 49.5999,
      "eval_samples_per_second": 605.122,
      "eval_steps_per_second": 1.19,
      "step": 460
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.3500000000000002e-05,
      "loss": 0.701,
      "step": 470
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.4e-05,
      "loss": 0.6882,
      "step": 480
    },
    {
      "epoch": 1.44,
      "eval_accuracy": 0.7576797494502565,
      "eval_loss": 0.5987845063209534,
      "eval_runtime": 49.5123,
      "eval_samples_per_second": 606.192,
      "eval_steps_per_second": 1.192,
      "step": 480
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.4500000000000003e-05,
      "loss": 0.651,
      "step": 490
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.5e-05,
      "loss": 0.7296,
      "step": 500
    },
    {
      "epoch": 1.5,
      "eval_accuracy": 0.7564136736189778,
      "eval_loss": 0.5992804765701294,
      "eval_runtime": 49.5641,
      "eval_samples_per_second": 605.56,
      "eval_steps_per_second": 1.19,
      "step": 500
    },
    {
      "epoch": 1.53,
      "learning_rate": 2.55e-05,
      "loss": 0.5943,
      "step": 510
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.6e-05,
      "loss": 0.5677,
      "step": 520
    },
    {
      "epoch": 1.57,
      "eval_accuracy": 0.8125874591857133,
      "eval_loss": 0.5067983865737915,
      "eval_runtime": 49.3712,
      "eval_samples_per_second": 607.925,
      "eval_steps_per_second": 1.195,
      "step": 520
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.65e-05,
      "loss": 0.5818,
      "step": 530
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.7e-05,
      "loss": 0.5096,
      "step": 540
    },
    {
      "epoch": 1.63,
      "eval_accuracy": 0.8520023988805224,
      "eval_loss": 0.42726200819015503,
      "eval_runtime": 49.3448,
      "eval_samples_per_second": 608.251,
      "eval_steps_per_second": 1.196,
      "step": 540
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.75e-05,
      "loss": 0.462,
      "step": 550
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.8e-05,
      "loss": 0.4452,
      "step": 560
    },
    {
      "epoch": 1.69,
      "eval_accuracy": 0.8721929766109149,
      "eval_loss": 0.37960025668144226,
      "eval_runtime": 49.2893,
      "eval_samples_per_second": 608.936,
      "eval_steps_per_second": 1.197,
      "step": 560
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.85e-05,
      "loss": 0.4267,
      "step": 570
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.9000000000000004e-05,
      "loss": 0.3836,
      "step": 580
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.8756580262544146,
      "eval_loss": 0.3854842483997345,
      "eval_runtime": 49.5207,
      "eval_samples_per_second": 606.09,
      "eval_steps_per_second": 1.191,
      "step": 580
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.95e-05,
      "loss": 0.3828,
      "step": 590
    },
    {
      "epoch": 1.81,
      "learning_rate": 3e-05,
      "loss": 0.3783,
      "step": 600
    },
    {
      "epoch": 1.81,
      "eval_accuracy": 0.8893849536882789,
      "eval_loss": 0.35855188965797424,
      "eval_runtime": 49.3852,
      "eval_samples_per_second": 607.753,
      "eval_steps_per_second": 1.195,
      "step": 600
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.05e-05,
      "loss": 0.3466,
      "step": 610
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.1e-05,
      "loss": 0.3496,
      "step": 620
    },
    {
      "epoch": 1.87,
      "eval_accuracy": 0.8971813153861531,
      "eval_loss": 0.321043998003006,
      "eval_runtime": 49.4982,
      "eval_samples_per_second": 606.366,
      "eval_steps_per_second": 1.192,
      "step": 620
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.15e-05,
      "loss": 0.3421,
      "step": 630
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.2e-05,
      "loss": 0.3585,
      "step": 640
    },
    {
      "epoch": 1.93,
      "eval_accuracy": 0.9035450123275804,
      "eval_loss": 0.3006099760532379,
      "eval_runtime": 49.5179,
      "eval_samples_per_second": 606.124,
      "eval_steps_per_second": 1.191,
      "step": 640
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.3442,
      "step": 650
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.3e-05,
      "loss": 0.345,
      "step": 660
    },
    {
      "epoch": 1.99,
      "eval_accuracy": 0.9014126740854268,
      "eval_loss": 0.30540063977241516,
      "eval_runtime": 49.5044,
      "eval_samples_per_second": 606.289,
      "eval_steps_per_second": 1.192,
      "step": 660
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.35e-05,
      "loss": 0.3246,
      "step": 670
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.3327,
      "step": 680
    },
    {
      "epoch": 2.05,
      "eval_accuracy": 0.8912507496501633,
      "eval_loss": 0.3174073100090027,
      "eval_runtime": 49.4656,
      "eval_samples_per_second": 606.765,
      "eval_steps_per_second": 1.193,
      "step": 680
    },
    {
      "epoch": 2.08,
      "learning_rate": 3.4500000000000005e-05,
      "loss": 0.3172,
      "step": 690
    },
    {
      "epoch": 2.11,
      "learning_rate": 3.5000000000000004e-05,
      "loss": 0.2962,
      "step": 700
    },
    {
      "epoch": 2.11,
      "eval_accuracy": 0.9122076364363297,
      "eval_loss": 0.2769884169101715,
      "eval_runtime": 49.5225,
      "eval_samples_per_second": 606.068,
      "eval_steps_per_second": 1.191,
      "step": 700
    },
    {
      "epoch": 2.14,
      "learning_rate": 3.5499999999999996e-05,
      "loss": 0.287,
      "step": 710
    },
    {
      "epoch": 2.17,
      "learning_rate": 3.6e-05,
      "loss": 0.3032,
      "step": 720
    },
    {
      "epoch": 2.17,
      "eval_accuracy": 0.9062437529153062,
      "eval_loss": 0.2979203760623932,
      "eval_runtime": 49.2758,
      "eval_samples_per_second": 609.102,
      "eval_steps_per_second": 1.197,
      "step": 720
    },
    {
      "epoch": 2.2,
      "learning_rate": 3.65e-05,
      "loss": 0.3017,
      "step": 730
    },
    {
      "epoch": 2.23,
      "learning_rate": 3.7e-05,
      "loss": 0.27,
      "step": 740
    },
    {
      "epoch": 2.23,
      "eval_accuracy": 0.8997801026187779,
      "eval_loss": 0.29734131693840027,
      "eval_runtime": 49.4882,
      "eval_samples_per_second": 606.488,
      "eval_steps_per_second": 1.192,
      "step": 740
    },
    {
      "epoch": 2.26,
      "learning_rate": 3.75e-05,
      "loss": 0.3042,
      "step": 750
    },
    {
      "epoch": 2.29,
      "learning_rate": 3.8e-05,
      "loss": 0.2912,
      "step": 760
    },
    {
      "epoch": 2.29,
      "eval_accuracy": 0.9221696541613914,
      "eval_loss": 0.24671417474746704,
      "eval_runtime": 49.5036,
      "eval_samples_per_second": 606.299,
      "eval_steps_per_second": 1.192,
      "step": 760
    },
    {
      "epoch": 2.32,
      "learning_rate": 3.85e-05,
      "loss": 0.2609,
      "step": 770
    },
    {
      "epoch": 2.35,
      "learning_rate": 3.9e-05,
      "loss": 0.2412,
      "step": 780
    },
    {
      "epoch": 2.35,
      "eval_accuracy": 0.9113080562404211,
      "eval_loss": 0.2760636806488037,
      "eval_runtime": 49.4644,
      "eval_samples_per_second": 606.78,
      "eval_steps_per_second": 1.193,
      "step": 780
    },
    {
      "epoch": 2.38,
      "learning_rate": 3.95e-05,
      "loss": 0.2755,
      "step": 790
    },
    {
      "epoch": 2.41,
      "learning_rate": 4e-05,
      "loss": 0.2746,
      "step": 800
    },
    {
      "epoch": 2.41,
      "eval_accuracy": 0.9260011994402612,
      "eval_loss": 0.24102580547332764,
      "eval_runtime": 49.464,
      "eval_samples_per_second": 606.785,
      "eval_steps_per_second": 1.193,
      "step": 800
    }
  ],
  "logging_steps": 10,
  "max_steps": 996,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.077670425216942e+17,
  "trial_name": null,
  "trial_params": null
}