{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 15.0,
  "global_step": 15315,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.9347045380346067e-05,
      "loss": 0.1842,
      "step": 500
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8694090760692135e-05,
      "loss": 0.1537,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9574627876281738,
      "eval_loss": 0.13882458209991455,
      "eval_runtime": 3.6569,
      "eval_samples_per_second": 2552.167,
      "eval_steps_per_second": 79.849,
      "step": 1021
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.80411361410382e-05,
      "loss": 0.1102,
      "step": 1500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.7388181521384265e-05,
      "loss": 0.1123,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9614272117614746,
      "eval_loss": 0.12761715054512024,
      "eval_runtime": 3.6293,
      "eval_samples_per_second": 2571.601,
      "eval_steps_per_second": 80.457,
      "step": 2042
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.673522690173033e-05,
      "loss": 0.0822,
      "step": 2500
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.6082272282076395e-05,
      "loss": 0.0811,
      "step": 3000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9639987349510193,
      "eval_loss": 0.130422443151474,
      "eval_runtime": 3.6605,
      "eval_samples_per_second": 2549.638,
      "eval_steps_per_second": 79.77,
      "step": 3063
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.5429317662422463e-05,
      "loss": 0.0602,
      "step": 3500
    },
    {
      "epoch": 3.92,
      "learning_rate": 1.4776363042768528e-05,
      "loss": 0.0479,
      "step": 4000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9649630188941956,
      "eval_loss": 0.17105060815811157,
      "eval_runtime": 3.6351,
      "eval_samples_per_second": 2567.485,
      "eval_steps_per_second": 80.328,
      "step": 4084
    },
    {
      "epoch": 4.41,
      "learning_rate": 1.4123408423114595e-05,
      "loss": 0.032,
      "step": 4500
    },
    {
      "epoch": 4.9,
      "learning_rate": 1.3470453803460662e-05,
      "loss": 0.03,
      "step": 5000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9616414904594421,
      "eval_loss": 0.19338741898536682,
      "eval_runtime": 3.6341,
      "eval_samples_per_second": 2568.197,
      "eval_steps_per_second": 80.351,
      "step": 5105
    },
    {
      "epoch": 5.39,
      "learning_rate": 1.2817499183806728e-05,
      "loss": 0.0243,
      "step": 5500
    },
    {
      "epoch": 5.88,
      "learning_rate": 1.2164544564152792e-05,
      "loss": 0.0172,
      "step": 6000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.962498664855957,
      "eval_loss": 0.23469704389572144,
      "eval_runtime": 3.639,
      "eval_samples_per_second": 2564.695,
      "eval_steps_per_second": 80.241,
      "step": 6126
    },
    {
      "epoch": 6.37,
      "learning_rate": 1.1511589944498858e-05,
      "loss": 0.0139,
      "step": 6500
    },
    {
      "epoch": 6.86,
      "learning_rate": 1.0858635324844923e-05,
      "loss": 0.0102,
      "step": 7000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9654987454414368,
      "eval_loss": 0.27059507369995117,
      "eval_runtime": 3.6202,
      "eval_samples_per_second": 2578.034,
      "eval_steps_per_second": 80.659,
      "step": 7147
    },
    {
      "epoch": 7.35,
      "learning_rate": 1.020568070519099e-05,
      "loss": 0.0131,
      "step": 7500
    },
    {
      "epoch": 7.84,
      "learning_rate": 9.552726085537057e-06,
      "loss": 0.0094,
      "step": 8000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9667845368385315,
      "eval_loss": 0.2712595760822296,
      "eval_runtime": 3.6231,
      "eval_samples_per_second": 2575.978,
      "eval_steps_per_second": 80.594,
      "step": 8168
    },
    {
      "epoch": 8.33,
      "learning_rate": 8.899771465883122e-06,
      "loss": 0.0051,
      "step": 8500
    },
    {
      "epoch": 8.81,
      "learning_rate": 8.246816846229188e-06,
      "loss": 0.0059,
      "step": 9000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9651773571968079,
      "eval_loss": 0.31283998489379883,
      "eval_runtime": 3.6304,
      "eval_samples_per_second": 2570.776,
      "eval_steps_per_second": 80.431,
      "step": 9189
    },
    {
      "epoch": 9.3,
      "learning_rate": 7.593862226575253e-06,
      "loss": 0.0038,
      "step": 9500
    },
    {
      "epoch": 9.79,
      "learning_rate": 6.94090760692132e-06,
      "loss": 0.0045,
      "step": 10000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9661416411399841,
      "eval_loss": 0.31859999895095825,
      "eval_runtime": 3.6286,
      "eval_samples_per_second": 2572.061,
      "eval_steps_per_second": 80.472,
      "step": 10210
    },
    {
      "epoch": 10.28,
      "learning_rate": 6.287952987267385e-06,
      "loss": 0.0008,
      "step": 10500
    },
    {
      "epoch": 10.77,
      "learning_rate": 5.634998367613451e-06,
      "loss": 0.005,
      "step": 11000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9667845368385315,
      "eval_loss": 0.32250431180000305,
      "eval_runtime": 3.623,
      "eval_samples_per_second": 2576.056,
      "eval_steps_per_second": 80.597,
      "step": 11231
    },
    {
      "epoch": 11.26,
      "learning_rate": 4.9820437479595175e-06,
      "loss": 0.0024,
      "step": 11500
    },
    {
      "epoch": 11.75,
      "learning_rate": 4.329089128305583e-06,
      "loss": 0.0031,
      "step": 12000
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9653916358947754,
      "eval_loss": 0.32603543996810913,
      "eval_runtime": 3.6195,
      "eval_samples_per_second": 2578.552,
      "eval_steps_per_second": 80.675,
      "step": 12252
    },
    {
      "epoch": 12.24,
      "learning_rate": 3.676134508651649e-06,
      "loss": 0.0024,
      "step": 12500
    },
    {
      "epoch": 12.73,
      "learning_rate": 3.023179888997715e-06,
      "loss": 0.0016,
      "step": 13000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9679631590843201,
      "eval_loss": 0.33604705333709717,
      "eval_runtime": 3.6293,
      "eval_samples_per_second": 2571.596,
      "eval_steps_per_second": 80.457,
      "step": 13273
    },
    {
      "epoch": 13.22,
      "learning_rate": 2.370225269343781e-06,
      "loss": 0.0029,
      "step": 13500
    },
    {
      "epoch": 13.71,
      "learning_rate": 1.7172706496898467e-06,
      "loss": 0.0016,
      "step": 14000
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9681774377822876,
      "eval_loss": 0.3418048024177551,
      "eval_runtime": 3.6597,
      "eval_samples_per_second": 2550.205,
      "eval_steps_per_second": 79.788,
      "step": 14294
    },
    {
      "epoch": 14.2,
      "learning_rate": 1.0643160300359125e-06,
      "loss": 0.0002,
      "step": 14500
    },
    {
      "epoch": 14.69,
      "learning_rate": 4.1136141038197843e-07,
      "loss": 0.0007,
      "step": 15000
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9683917164802551,
      "eval_loss": 0.33835601806640625,
      "eval_runtime": 3.6312,
      "eval_samples_per_second": 2570.225,
      "eval_steps_per_second": 80.414,
      "step": 15315
    }
  ],
  "max_steps": 15315,
  "num_train_epochs": 15,
  "total_flos": 3.222847324631859e+16,
  "trial_name": null,
  "trial_params": null
}