nbme-gpt2 / trainer_state.json
{
  "best_metric": 2.3684260845184326,
  "best_model_checkpoint": "models/checkpoint-303",
  "epoch": 2.9914110429447853,
  "global_step": 303,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.99,
      "eval_accuracy": 0.48085171490501083,
      "eval_loss": 2.563568353652954,
      "eval_runtime": 9.7149,
      "eval_samples_per_second": 59.187,
      "eval_steps_per_second": 7.411,
      "step": 101
    },
    {
      "epoch": 1.99,
      "eval_accuracy": 0.501789281312423,
      "eval_loss": 2.4074697494506836,
      "eval_runtime": 9.6785,
      "eval_samples_per_second": 59.41,
      "eval_steps_per_second": 7.439,
      "step": 202
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.5070321730630286,
      "eval_loss": 2.3684260845184326,
      "eval_runtime": 9.6634,
      "eval_samples_per_second": 59.503,
      "eval_steps_per_second": 7.451,
      "step": 303
    },
    {
      "epoch": 2.99,
      "step": 303,
      "total_flos": 5095194624000000.0,
      "train_loss": 2.700293814781869,
      "train_runtime": 444.9433,
      "train_samples_per_second": 21.974,
      "train_steps_per_second": 0.681
    }
  ],
  "max_steps": 303,
  "num_train_epochs": 3,
  "total_flos": 5095194624000000.0,
  "trial_name": null,
  "trial_params": null
}
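
The state above records a 3-epoch run (303 steps) in which eval_loss falls from 2.56 to 2.37 and eval_accuracy rises from 0.481 to 0.507, with checkpoint-303 saved as the best model. A minimal sketch of reading this file follows, using only the standard-library json module; the local path "trainer_state.json" is an assumption and should point at wherever the downloaded file lives.

import json

# Minimal sketch: summarise the Trainer state file shown above.
# The path "trainer_state.json" is an assumption (a local copy of this file).
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss = {state['best_metric']:.4f})")

# Per-epoch evaluation records carry an "eval_loss" key; the final record
# holds the aggregate training metrics instead, so it is skipped here.
for record in state["log_history"]:
    if "eval_loss" in record:
        print(f"epoch {record['epoch']:.2f}  step {record['step']:3d}  "
              f"eval_loss {record['eval_loss']:.4f}  "
              f"eval_accuracy {record['eval_accuracy']:.4f}")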