t5_base-qg-ap-nopeft / trainer_state.json
{
  "best_metric": 1.2066526412963867,
  "best_model_checkpoint": "/temp/t5_base-qg-ap-nopeft/checkpoint-3232",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 4040,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12,
      "learning_rate": 9.752475247524753e-05,
      "loss": 1.9421,
      "step": 100
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.504950495049505e-05,
      "loss": 1.3367,
      "step": 200
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.257425742574259e-05,
      "loss": 1.2944,
      "step": 300
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.009900990099011e-05,
      "loss": 1.267,
      "step": 400
    },
    {
      "epoch": 0.62,
      "learning_rate": 8.762376237623763e-05,
      "loss": 1.2404,
      "step": 500
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.514851485148515e-05,
      "loss": 1.2277,
      "step": 600
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.267326732673268e-05,
      "loss": 1.2245,
      "step": 700
    },
    {
      "epoch": 0.99,
      "learning_rate": 8.019801980198021e-05,
      "loss": 1.2123,
      "step": 800
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.24958074092865,
      "eval_runtime": 62.4161,
      "eval_samples_per_second": 57.437,
      "eval_steps_per_second": 14.371,
      "step": 808
    },
    {
      "epoch": 1.11,
      "learning_rate": 7.772277227722773e-05,
      "loss": 1.1527,
      "step": 900
    },
    {
      "epoch": 1.24,
      "learning_rate": 7.524752475247526e-05,
      "loss": 1.1486,
      "step": 1000
    },
    {
      "epoch": 1.36,
      "learning_rate": 7.277227722772278e-05,
      "loss": 1.1403,
      "step": 1100
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.02970297029703e-05,
      "loss": 1.1483,
      "step": 1200
    },
    {
      "epoch": 1.61,
      "learning_rate": 6.782178217821783e-05,
      "loss": 1.1423,
      "step": 1300
    },
    {
      "epoch": 1.73,
      "learning_rate": 6.534653465346535e-05,
      "loss": 1.1231,
      "step": 1400
    },
    {
      "epoch": 1.86,
      "learning_rate": 6.287128712871287e-05,
      "loss": 1.1312,
      "step": 1500
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.03960396039604e-05,
      "loss": 1.1329,
      "step": 1600
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.2206790447235107,
      "eval_runtime": 62.4282,
      "eval_samples_per_second": 57.426,
      "eval_steps_per_second": 14.369,
      "step": 1616
    },
    {
      "epoch": 2.1,
      "learning_rate": 5.792079207920792e-05,
      "loss": 1.0849,
      "step": 1700
    },
    {
      "epoch": 2.23,
      "learning_rate": 5.544554455445545e-05,
      "loss": 1.0893,
      "step": 1800
    },
    {
      "epoch": 2.35,
      "learning_rate": 5.2970297029702974e-05,
      "loss": 1.0767,
      "step": 1900
    },
    {
      "epoch": 2.48,
      "learning_rate": 5.0495049504950497e-05,
      "loss": 1.0773,
      "step": 2000
    },
    {
      "epoch": 2.6,
      "learning_rate": 4.801980198019802e-05,
      "loss": 1.0791,
      "step": 2100
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.554455445544555e-05,
      "loss": 1.0749,
      "step": 2200
    },
    {
      "epoch": 2.85,
      "learning_rate": 4.306930693069307e-05,
      "loss": 1.0918,
      "step": 2300
    },
    {
      "epoch": 2.97,
      "learning_rate": 4.05940594059406e-05,
      "loss": 1.0819,
      "step": 2400
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.2097089290618896,
      "eval_runtime": 62.4812,
      "eval_samples_per_second": 57.377,
      "eval_steps_per_second": 14.356,
      "step": 2424
    },
    {
      "epoch": 3.09,
      "learning_rate": 3.811881188118812e-05,
      "loss": 1.0497,
      "step": 2500
    },
    {
      "epoch": 3.22,
      "learning_rate": 3.5643564356435645e-05,
      "loss": 1.0372,
      "step": 2600
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.3168316831683175e-05,
      "loss": 1.0545,
      "step": 2700
    },
    {
      "epoch": 3.47,
      "learning_rate": 3.06930693069307e-05,
      "loss": 1.0511,
      "step": 2800
    },
    {
      "epoch": 3.59,
      "learning_rate": 2.8217821782178216e-05,
      "loss": 1.0473,
      "step": 2900
    },
    {
      "epoch": 3.71,
      "learning_rate": 2.5742574257425746e-05,
      "loss": 1.0321,
      "step": 3000
    },
    {
      "epoch": 3.84,
      "learning_rate": 2.326732673267327e-05,
      "loss": 1.0439,
      "step": 3100
    },
    {
      "epoch": 3.96,
      "learning_rate": 2.079207920792079e-05,
      "loss": 1.0447,
      "step": 3200
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.2066526412963867,
      "eval_runtime": 62.569,
      "eval_samples_per_second": 57.297,
      "eval_steps_per_second": 14.336,
      "step": 3232
    },
    {
      "epoch": 4.08,
      "learning_rate": 1.8316831683168317e-05,
      "loss": 1.0334,
      "step": 3300
    },
    {
      "epoch": 4.21,
      "learning_rate": 1.5841584158415843e-05,
      "loss": 1.0142,
      "step": 3400
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.3366336633663367e-05,
      "loss": 1.0318,
      "step": 3500
    },
    {
      "epoch": 4.46,
      "learning_rate": 1.0891089108910891e-05,
      "loss": 1.0127,
      "step": 3600
    },
    {
      "epoch": 4.58,
      "learning_rate": 8.415841584158417e-06,
      "loss": 1.0219,
      "step": 3700
    },
    {
      "epoch": 4.7,
      "learning_rate": 5.940594059405941e-06,
      "loss": 1.0135,
      "step": 3800
    },
    {
      "epoch": 4.83,
      "learning_rate": 3.4653465346534657e-06,
      "loss": 1.0245,
      "step": 3900
    },
    {
      "epoch": 4.95,
      "learning_rate": 9.900990099009902e-07,
      "loss": 1.0244,
      "step": 4000
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.2074495553970337,
      "eval_runtime": 62.6556,
      "eval_samples_per_second": 57.218,
      "eval_steps_per_second": 14.316,
      "step": 4040
    },
    {
      "epoch": 5.0,
      "step": 4040,
      "total_flos": 1.574277938675712e+17,
      "train_loss": 1.1255827186131242,
      "train_runtime": 15011.7488,
      "train_samples_per_second": 17.221,
      "train_steps_per_second": 0.269
    }
  ],
  "logging_steps": 100,
  "max_steps": 4040,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.574277938675712e+17,
  "trial_name": null,
  "trial_params": null
}
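
For quick inspection, the state above can be parsed with standard json tooling. The sketch below is a minimal example and not part of the repository: it assumes the file has been saved locally as trainer_state.json (the path is an assumption) and prints each epoch's eval_loss next to the recorded best checkpoint.

import json

# Minimal sketch (assumption: the file above is saved locally as "trainer_state.json").
with open("trainer_state.json") as f:
    state = json.load(f)

# Step-level logging entries carry "loss"; end-of-epoch evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss = {state['best_metric']:.4f})")
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss = {e['eval_loss']:.4f} at step {e['step']}")
print(f"last logged training loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")

As a side note, the learning_rate values logged every 100 steps are consistent with a linear decay to zero over max_steps = 4040 from a peak of about 1e-4; the peak itself is inferred from the logged values and is not stored in this file.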