{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 12130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.41,
      "learning_rate": 1.9175597691673537e-05,
      "loss": 0.2206,
      "step": 500
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.8351195383347076e-05,
      "loss": 0.1699,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9739486703772418,
      "eval_loss": 0.07855913788080215,
      "eval_runtime": 265.3419,
      "eval_samples_per_second": 292.513,
      "eval_steps_per_second": 4.571,
      "step": 1213
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.752679307502061e-05,
      "loss": 0.118,
      "step": 1500
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.6702390766694146e-05,
      "loss": 0.0991,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9866393527107813,
      "eval_loss": 0.04234647750854492,
      "eval_runtime": 267.207,
      "eval_samples_per_second": 290.471,
      "eval_steps_per_second": 4.54,
      "step": 2426
    },
    {
      "epoch": 2.06,
      "learning_rate": 1.5877988458367685e-05,
      "loss": 0.0979,
      "step": 2500
    },
    {
      "epoch": 2.47,
      "learning_rate": 1.5053586150041222e-05,
      "loss": 0.0573,
      "step": 3000
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.4229183841714757e-05,
      "loss": 0.0653,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9903241599670171,
      "eval_loss": 0.029069583863019943,
      "eval_runtime": 266.0985,
      "eval_samples_per_second": 291.681,
      "eval_steps_per_second": 4.558,
      "step": 3639
    },
    {
      "epoch": 3.3,
      "learning_rate": 1.3404781533388294e-05,
      "loss": 0.0492,
      "step": 4000
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.2580379225061831e-05,
      "loss": 0.0442,
      "step": 4500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9942151102865389,
      "eval_loss": 0.01643509231507778,
      "eval_runtime": 266.3375,
      "eval_samples_per_second": 291.42,
      "eval_steps_per_second": 4.554,
      "step": 4852
    },
    {
      "epoch": 4.12,
      "learning_rate": 1.1755976916735368e-05,
      "loss": 0.0361,
      "step": 5000
    },
    {
      "epoch": 4.53,
      "learning_rate": 1.0931574608408903e-05,
      "loss": 0.0296,
      "step": 5500
    },
    {
      "epoch": 4.95,
      "learning_rate": 1.0107172300082442e-05,
      "loss": 0.0315,
      "step": 6000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9958256029684601,
      "eval_loss": 0.010281954891979694,
      "eval_runtime": 266.335,
      "eval_samples_per_second": 291.422,
      "eval_steps_per_second": 4.554,
      "step": 6065
    },
    {
      "epoch": 5.36,
      "learning_rate": 9.282769991755977e-06,
      "loss": 0.0189,
      "step": 6500
    },
    {
      "epoch": 5.77,
      "learning_rate": 8.458367683429514e-06,
      "loss": 0.0207,
      "step": 7000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9973587920016491,
      "eval_loss": 0.0071565331891179085,
      "eval_runtime": 266.5437,
      "eval_samples_per_second": 291.194,
      "eval_steps_per_second": 4.551,
      "step": 7278
    },
    {
      "epoch": 6.18,
      "learning_rate": 7.633965375103051e-06,
      "loss": 0.0171,
      "step": 7500
    },
    {
      "epoch": 6.6,
      "learning_rate": 6.809563066776587e-06,
      "loss": 0.0133,
      "step": 8000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9979514533085961,
      "eval_loss": 0.004438882227987051,
      "eval_runtime": 266.6565,
      "eval_samples_per_second": 291.071,
      "eval_steps_per_second": 4.549,
      "step": 8491
    },
    {
      "epoch": 7.01,
      "learning_rate": 5.9851607584501235e-06,
      "loss": 0.0152,
      "step": 8500
    },
    {
      "epoch": 7.42,
      "learning_rate": 5.1607584501236605e-06,
      "loss": 0.0082,
      "step": 9000
    },
    {
      "epoch": 7.83,
      "learning_rate": 4.3363561417971975e-06,
      "loss": 0.0109,
      "step": 9500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9985183467326324,
      "eval_loss": 0.0034066857770085335,
      "eval_runtime": 266.0238,
      "eval_samples_per_second": 291.763,
      "eval_steps_per_second": 4.56,
      "step": 9704
    },
    {
      "epoch": 8.24,
      "learning_rate": 3.5119538334707345e-06,
      "loss": 0.0077,
      "step": 10000
    },
    {
      "epoch": 8.66,
      "learning_rate": 2.6875515251442706e-06,
      "loss": 0.0059,
      "step": 10500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9989177489177489,
      "eval_loss": 0.0024579998571425676,
      "eval_runtime": 264.7367,
      "eval_samples_per_second": 293.182,
      "eval_steps_per_second": 4.582,
      "step": 10917
    },
    {
      "epoch": 9.07,
      "learning_rate": 1.8631492168178072e-06,
      "loss": 0.0077,
      "step": 11000
    },
    {
      "epoch": 9.48,
      "learning_rate": 1.038746908491344e-06,
      "loss": 0.0033,
      "step": 11500
    },
    {
      "epoch": 9.89,
      "learning_rate": 2.1434460016488048e-07,
      "loss": 0.0043,
      "step": 12000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9992398474541332,
      "eval_loss": 0.0018210469279438257,
      "eval_runtime": 264.6034,
      "eval_samples_per_second": 293.33,
      "eval_steps_per_second": 4.584,
      "step": 12130
    }
  ],
  "max_steps": 12130,
  "num_train_epochs": 10,
  "total_flos": 1.8083196174016512e+17,
  "trial_name": null,
  "trial_params": null
}