{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 11.0,
  "global_step": 11231,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 1.9347045380346067e-05,
      "loss": 0.1574,
      "step": 500
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.8694090760692135e-05,
      "loss": 0.1384,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9631415605545044,
      "eval_loss": 0.12948766350746155,
      "eval_runtime": 3.6006,
      "eval_samples_per_second": 2592.076,
      "eval_steps_per_second": 81.098,
      "step": 1021
    },
    {
      "epoch": 1.47,
      "learning_rate": 1.80411361410382e-05,
      "loss": 0.1024,
      "step": 1500
    },
    {
      "epoch": 1.96,
      "learning_rate": 1.7388181521384265e-05,
      "loss": 0.109,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9664630889892578,
      "eval_loss": 0.11389727890491486,
      "eval_runtime": 3.5512,
      "eval_samples_per_second": 2628.103,
      "eval_steps_per_second": 82.225,
      "step": 2042
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.673522690173033e-05,
      "loss": 0.0816,
      "step": 2500
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.6082272282076395e-05,
      "loss": 0.0764,
      "step": 3000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9662488102912903,
      "eval_loss": 0.11298216134309769,
      "eval_runtime": 3.5558,
      "eval_samples_per_second": 2624.692,
      "eval_steps_per_second": 82.118,
      "step": 3063
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.5429317662422463e-05,
      "loss": 0.065,
      "step": 3500
    },
    {
      "epoch": 3.92,
      "learning_rate": 1.4776363042768528e-05,
      "loss": 0.0517,
      "step": 4000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9630343914031982,
      "eval_loss": 0.1652730107307434,
      "eval_runtime": 3.5703,
      "eval_samples_per_second": 2614.08,
      "eval_steps_per_second": 81.786,
      "step": 4084
    },
    {
      "epoch": 4.41,
      "learning_rate": 1.4123408423114595e-05,
      "loss": 0.0348,
      "step": 4500
    },
    {
      "epoch": 4.9,
      "learning_rate": 1.3470453803460662e-05,
      "loss": 0.0385,
      "step": 5000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9632486701011658,
      "eval_loss": 0.18385031819343567,
      "eval_runtime": 3.5894,
      "eval_samples_per_second": 2600.184,
      "eval_steps_per_second": 81.352,
      "step": 5105
    },
    {
      "epoch": 5.39,
      "learning_rate": 1.2817499183806728e-05,
      "loss": 0.0244,
      "step": 5500
    },
    {
      "epoch": 5.88,
      "learning_rate": 1.2164544564152792e-05,
      "loss": 0.0245,
      "step": 6000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9693560600280762,
      "eval_loss": 0.1997281014919281,
      "eval_runtime": 3.7802,
      "eval_samples_per_second": 2468.931,
      "eval_steps_per_second": 77.245,
      "step": 6126
    },
    {
      "epoch": 6.37,
      "learning_rate": 1.1511589944498858e-05,
      "loss": 0.0175,
      "step": 6500
    },
    {
      "epoch": 6.86,
      "learning_rate": 1.0858635324844923e-05,
      "loss": 0.0171,
      "step": 7000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9687131643295288,
      "eval_loss": 0.1954147070646286,
      "eval_runtime": 3.5659,
      "eval_samples_per_second": 2617.281,
      "eval_steps_per_second": 81.886,
      "step": 7147
    },
    {
      "epoch": 7.35,
      "learning_rate": 1.020568070519099e-05,
      "loss": 0.0127,
      "step": 7500
    },
    {
      "epoch": 7.84,
      "learning_rate": 9.552726085537057e-06,
      "loss": 0.0102,
      "step": 8000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9687131643295288,
      "eval_loss": 0.23564255237579346,
      "eval_runtime": 3.5772,
      "eval_samples_per_second": 2609.01,
      "eval_steps_per_second": 81.628,
      "step": 8168
    },
    {
      "epoch": 8.33,
      "learning_rate": 8.899771465883122e-06,
      "loss": 0.0092,
      "step": 8500
    },
    {
      "epoch": 8.81,
      "learning_rate": 8.246816846229188e-06,
      "loss": 0.0078,
      "step": 9000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.966570258140564,
      "eval_loss": 0.2585618495941162,
      "eval_runtime": 3.6436,
      "eval_samples_per_second": 2561.473,
      "eval_steps_per_second": 80.14,
      "step": 9189
    },
    {
      "epoch": 9.3,
      "learning_rate": 7.593862226575253e-06,
      "loss": 0.0061,
      "step": 9500
    },
    {
      "epoch": 9.79,
      "learning_rate": 6.94090760692132e-06,
      "loss": 0.0034,
      "step": 10000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9699989557266235,
      "eval_loss": 0.2755224406719208,
      "eval_runtime": 3.5689,
      "eval_samples_per_second": 2615.084,
      "eval_steps_per_second": 81.818,
      "step": 10210
    },
    {
      "epoch": 10.28,
      "learning_rate": 6.287952987267385e-06,
      "loss": 0.0035,
      "step": 10500
    },
    {
      "epoch": 10.77,
      "learning_rate": 5.634998367613451e-06,
      "loss": 0.0029,
      "step": 11000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9706417918205261,
      "eval_loss": 0.2912203371524811,
      "eval_runtime": 3.5951,
      "eval_samples_per_second": 2596.026,
      "eval_steps_per_second": 81.221,
      "step": 11231
    }
  ],
  "max_steps": 15315,
  "num_train_epochs": 15,
  "total_flos": 2.363421365580595e+16,
  "trial_name": null,
  "trial_params": null
}