{
"best_metric": 2.036170721054077,
"best_model_checkpoint": "/temp/t5_base-qg-aas-oficial/checkpoint-5544",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 6930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 9.855699855699856e-05,
"loss": 3.6582,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 9.711399711399713e-05,
"loss": 2.5239,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 9.567099567099568e-05,
"loss": 2.3885,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 9.422799422799424e-05,
"loss": 2.3317,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 9.278499278499279e-05,
"loss": 2.3093,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 9.134199134199136e-05,
"loss": 2.329,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 8.98989898989899e-05,
"loss": 2.2779,
"step": 700
},
{
"epoch": 0.58,
"learning_rate": 8.845598845598845e-05,
"loss": 2.3146,
"step": 800
},
{
"epoch": 0.65,
"learning_rate": 8.701298701298701e-05,
"loss": 2.223,
"step": 900
},
{
"epoch": 0.72,
"learning_rate": 8.556998556998557e-05,
"loss": 2.1984,
"step": 1000
},
{
"epoch": 0.79,
"learning_rate": 8.412698412698413e-05,
"loss": 2.2265,
"step": 1100
},
{
"epoch": 0.87,
"learning_rate": 8.268398268398268e-05,
"loss": 2.2022,
"step": 1200
},
{
"epoch": 0.94,
"learning_rate": 8.124098124098124e-05,
"loss": 2.1854,
"step": 1300
},
{
"epoch": 1.0,
"eval_loss": 2.091059923171997,
"eval_runtime": 24.4996,
"eval_samples_per_second": 258.249,
"eval_steps_per_second": 4.041,
"step": 1386
},
{
"epoch": 1.01,
"learning_rate": 7.97979797979798e-05,
"loss": 2.1502,
"step": 1400
},
{
"epoch": 1.08,
"learning_rate": 7.835497835497836e-05,
"loss": 2.0936,
"step": 1500
},
{
"epoch": 1.15,
"learning_rate": 7.691197691197691e-05,
"loss": 2.0818,
"step": 1600
},
{
"epoch": 1.23,
"learning_rate": 7.546897546897548e-05,
"loss": 2.082,
"step": 1700
},
{
"epoch": 1.3,
"learning_rate": 7.402597402597404e-05,
"loss": 2.0505,
"step": 1800
},
{
"epoch": 1.37,
"learning_rate": 7.258297258297259e-05,
"loss": 2.0912,
"step": 1900
},
{
"epoch": 1.44,
"learning_rate": 7.113997113997114e-05,
"loss": 2.0529,
"step": 2000
},
{
"epoch": 1.52,
"learning_rate": 6.96969696969697e-05,
"loss": 2.061,
"step": 2100
},
{
"epoch": 1.59,
"learning_rate": 6.825396825396825e-05,
"loss": 2.0558,
"step": 2200
},
{
"epoch": 1.66,
"learning_rate": 6.681096681096681e-05,
"loss": 2.053,
"step": 2300
},
{
"epoch": 1.73,
"learning_rate": 6.536796536796536e-05,
"loss": 2.057,
"step": 2400
},
{
"epoch": 1.8,
"learning_rate": 6.392496392496393e-05,
"loss": 2.0676,
"step": 2500
},
{
"epoch": 1.88,
"learning_rate": 6.248196248196248e-05,
"loss": 2.0586,
"step": 2600
},
{
"epoch": 1.95,
"learning_rate": 6.103896103896104e-05,
"loss": 2.048,
"step": 2700
},
{
"epoch": 2.0,
"eval_loss": 2.0532631874084473,
"eval_runtime": 24.4977,
"eval_samples_per_second": 258.269,
"eval_steps_per_second": 4.041,
"step": 2772
},
{
"epoch": 2.02,
"learning_rate": 5.959595959595959e-05,
"loss": 2.016,
"step": 2800
},
{
"epoch": 2.09,
"learning_rate": 5.815295815295816e-05,
"loss": 1.9649,
"step": 2900
},
{
"epoch": 2.16,
"learning_rate": 5.6709956709956715e-05,
"loss": 1.9517,
"step": 3000
},
{
"epoch": 2.24,
"learning_rate": 5.526695526695527e-05,
"loss": 1.9704,
"step": 3100
},
{
"epoch": 2.31,
"learning_rate": 5.382395382395382e-05,
"loss": 1.9744,
"step": 3200
},
{
"epoch": 2.38,
"learning_rate": 5.2380952380952384e-05,
"loss": 1.9204,
"step": 3300
},
{
"epoch": 2.45,
"learning_rate": 5.093795093795094e-05,
"loss": 1.9549,
"step": 3400
},
{
"epoch": 2.53,
"learning_rate": 4.94949494949495e-05,
"loss": 1.9189,
"step": 3500
},
{
"epoch": 2.6,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.9681,
"step": 3600
},
{
"epoch": 2.67,
"learning_rate": 4.6608946608946615e-05,
"loss": 1.9397,
"step": 3700
},
{
"epoch": 2.74,
"learning_rate": 4.516594516594517e-05,
"loss": 1.9633,
"step": 3800
},
{
"epoch": 2.81,
"learning_rate": 4.3722943722943724e-05,
"loss": 1.9725,
"step": 3900
},
{
"epoch": 2.89,
"learning_rate": 4.227994227994228e-05,
"loss": 1.9389,
"step": 4000
},
{
"epoch": 2.96,
"learning_rate": 4.083694083694084e-05,
"loss": 1.9295,
"step": 4100
},
{
"epoch": 3.0,
"eval_loss": 2.0421714782714844,
"eval_runtime": 24.4812,
"eval_samples_per_second": 258.443,
"eval_steps_per_second": 4.044,
"step": 4158
},
{
"epoch": 3.03,
"learning_rate": 3.939393939393939e-05,
"loss": 1.9131,
"step": 4200
},
{
"epoch": 3.1,
"learning_rate": 3.7950937950937954e-05,
"loss": 1.8774,
"step": 4300
},
{
"epoch": 3.17,
"learning_rate": 3.650793650793651e-05,
"loss": 1.8652,
"step": 4400
},
{
"epoch": 3.25,
"learning_rate": 3.506493506493507e-05,
"loss": 1.8847,
"step": 4500
},
{
"epoch": 3.32,
"learning_rate": 3.3621933621933624e-05,
"loss": 1.882,
"step": 4600
},
{
"epoch": 3.39,
"learning_rate": 3.217893217893218e-05,
"loss": 1.8934,
"step": 4700
},
{
"epoch": 3.46,
"learning_rate": 3.073593073593073e-05,
"loss": 1.8797,
"step": 4800
},
{
"epoch": 3.54,
"learning_rate": 2.9292929292929294e-05,
"loss": 1.841,
"step": 4900
},
{
"epoch": 3.61,
"learning_rate": 2.7849927849927855e-05,
"loss": 1.8802,
"step": 5000
},
{
"epoch": 3.68,
"learning_rate": 2.640692640692641e-05,
"loss": 1.8624,
"step": 5100
},
{
"epoch": 3.75,
"learning_rate": 2.4963924963924963e-05,
"loss": 1.8767,
"step": 5200
},
{
"epoch": 3.82,
"learning_rate": 2.352092352092352e-05,
"loss": 1.8797,
"step": 5300
},
{
"epoch": 3.9,
"learning_rate": 2.207792207792208e-05,
"loss": 1.8858,
"step": 5400
},
{
"epoch": 3.97,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.9142,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 2.036170721054077,
"eval_runtime": 24.6535,
"eval_samples_per_second": 256.637,
"eval_steps_per_second": 4.016,
"step": 5544
},
{
"epoch": 4.04,
"learning_rate": 1.919191919191919e-05,
"loss": 1.823,
"step": 5600
},
{
"epoch": 4.11,
"learning_rate": 1.7748917748917752e-05,
"loss": 1.8206,
"step": 5700
},
{
"epoch": 4.18,
"learning_rate": 1.630591630591631e-05,
"loss": 1.8403,
"step": 5800
},
{
"epoch": 4.26,
"learning_rate": 1.4862914862914865e-05,
"loss": 1.829,
"step": 5900
},
{
"epoch": 4.33,
"learning_rate": 1.3419913419913421e-05,
"loss": 1.8324,
"step": 6000
},
{
"epoch": 4.4,
"learning_rate": 1.1976911976911977e-05,
"loss": 1.8527,
"step": 6100
},
{
"epoch": 4.47,
"learning_rate": 1.0533910533910535e-05,
"loss": 1.8468,
"step": 6200
},
{
"epoch": 4.55,
"learning_rate": 9.090909090909091e-06,
"loss": 1.834,
"step": 6300
},
{
"epoch": 4.62,
"learning_rate": 7.647907647907649e-06,
"loss": 1.8307,
"step": 6400
},
{
"epoch": 4.69,
"learning_rate": 6.204906204906205e-06,
"loss": 1.8416,
"step": 6500
},
{
"epoch": 4.76,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.8078,
"step": 6600
},
{
"epoch": 4.83,
"learning_rate": 3.318903318903319e-06,
"loss": 1.8596,
"step": 6700
},
{
"epoch": 4.91,
"learning_rate": 1.875901875901876e-06,
"loss": 1.8369,
"step": 6800
},
{
"epoch": 4.98,
"learning_rate": 4.329004329004329e-07,
"loss": 1.8512,
"step": 6900
},
{
"epoch": 5.0,
"eval_loss": 2.0398988723754883,
"eval_runtime": 24.5296,
"eval_samples_per_second": 257.933,
"eval_steps_per_second": 4.036,
"step": 6930
},
{
"epoch": 5.0,
"step": 6930,
"total_flos": 1.349876956004352e+17,
"train_loss": 2.0221760005317897,
"train_runtime": 3428.354,
"train_samples_per_second": 64.658,
"train_steps_per_second": 2.021
}
],
"logging_steps": 100,
"max_steps": 6930,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1.349876956004352e+17,
"trial_name": null,
"trial_params": null
}