Extr-QA-DistilBERT / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 44368,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 1.9776866209880996e-05,
"loss": 1.6544,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 1.9551478543094125e-05,
"loss": 1.4272,
"step": 1000
},
{
"epoch": 0.14,
"learning_rate": 1.932609087630725e-05,
"loss": 1.3783,
"step": 1500
},
{
"epoch": 0.18,
"learning_rate": 1.9100703209520376e-05,
"loss": 1.324,
"step": 2000
},
{
"epoch": 0.23,
"learning_rate": 1.8875315542733504e-05,
"loss": 1.2637,
"step": 2500
},
{
"epoch": 0.27,
"learning_rate": 1.864992787594663e-05,
"loss": 1.2255,
"step": 3000
},
{
"epoch": 0.32,
"learning_rate": 1.8424540209159756e-05,
"loss": 1.211,
"step": 3500
},
{
"epoch": 0.36,
"learning_rate": 1.8199603317706455e-05,
"loss": 1.1977,
"step": 4000
},
{
"epoch": 0.41,
"learning_rate": 1.7974215650919584e-05,
"loss": 1.1879,
"step": 4500
},
{
"epoch": 0.45,
"learning_rate": 1.774882798413271e-05,
"loss": 1.1487,
"step": 5000
},
{
"epoch": 0.5,
"learning_rate": 1.7523440317345835e-05,
"loss": 1.1427,
"step": 5500
},
{
"epoch": 0.54,
"learning_rate": 1.7298052650558964e-05,
"loss": 1.1066,
"step": 6000
},
{
"epoch": 0.59,
"learning_rate": 1.707266498377209e-05,
"loss": 1.1576,
"step": 6500
},
{
"epoch": 0.63,
"learning_rate": 1.6847277316985215e-05,
"loss": 1.0895,
"step": 7000
},
{
"epoch": 0.68,
"learning_rate": 1.662188965019834e-05,
"loss": 1.0893,
"step": 7500
},
{
"epoch": 0.72,
"learning_rate": 1.639650198341147e-05,
"loss": 1.0693,
"step": 8000
},
{
"epoch": 0.77,
"learning_rate": 1.6171114316624595e-05,
"loss": 1.0979,
"step": 8500
},
{
"epoch": 0.81,
"learning_rate": 1.5945726649837724e-05,
"loss": 1.0202,
"step": 9000
},
{
"epoch": 0.86,
"learning_rate": 1.572033898305085e-05,
"loss": 1.0574,
"step": 9500
},
{
"epoch": 0.9,
"learning_rate": 1.549540209159755e-05,
"loss": 0.9914,
"step": 10000
},
{
"epoch": 0.95,
"learning_rate": 1.5270014424810675e-05,
"loss": 1.0377,
"step": 10500
},
{
"epoch": 0.99,
"learning_rate": 1.5045077533357375e-05,
"loss": 0.9989,
"step": 11000
},
{
"epoch": 1.04,
"learning_rate": 1.4820140641904077e-05,
"loss": 0.8036,
"step": 11500
},
{
"epoch": 1.08,
"learning_rate": 1.4594752975117204e-05,
"loss": 0.7445,
"step": 12000
},
{
"epoch": 1.13,
"learning_rate": 1.436936530833033e-05,
"loss": 0.7566,
"step": 12500
},
{
"epoch": 1.17,
"learning_rate": 1.4143977641543456e-05,
"loss": 0.7529,
"step": 13000
},
{
"epoch": 1.22,
"learning_rate": 1.3918589974756582e-05,
"loss": 0.7446,
"step": 13500
},
{
"epoch": 1.26,
"learning_rate": 1.369320230796971e-05,
"loss": 0.7539,
"step": 14000
},
{
"epoch": 1.31,
"learning_rate": 1.3468265416516409e-05,
"loss": 0.7389,
"step": 14500
},
{
"epoch": 1.35,
"learning_rate": 1.3242877749729536e-05,
"loss": 0.7502,
"step": 15000
},
{
"epoch": 1.4,
"learning_rate": 1.3017940858276236e-05,
"loss": 0.7319,
"step": 15500
},
{
"epoch": 1.44,
"learning_rate": 1.2792553191489363e-05,
"loss": 0.7732,
"step": 16000
},
{
"epoch": 1.49,
"learning_rate": 1.2567165524702489e-05,
"loss": 0.7458,
"step": 16500
},
{
"epoch": 1.53,
"learning_rate": 1.2341777857915616e-05,
"loss": 0.7277,
"step": 17000
},
{
"epoch": 1.58,
"learning_rate": 1.2116390191128742e-05,
"loss": 0.7559,
"step": 17500
},
{
"epoch": 1.62,
"learning_rate": 1.1891002524341869e-05,
"loss": 0.7572,
"step": 18000
},
{
"epoch": 1.67,
"learning_rate": 1.1665614857554996e-05,
"loss": 0.7387,
"step": 18500
},
{
"epoch": 1.71,
"learning_rate": 1.1440227190768123e-05,
"loss": 0.7502,
"step": 19000
},
{
"epoch": 1.76,
"learning_rate": 1.1214839523981249e-05,
"loss": 0.7007,
"step": 19500
},
{
"epoch": 1.8,
"learning_rate": 1.0989451857194376e-05,
"loss": 0.7435,
"step": 20000
},
{
"epoch": 1.85,
"learning_rate": 1.0764514965741076e-05,
"loss": 0.7114,
"step": 20500
},
{
"epoch": 1.89,
"learning_rate": 1.0539127298954201e-05,
"loss": 0.7574,
"step": 21000
},
{
"epoch": 1.94,
"learning_rate": 1.0313739632167327e-05,
"loss": 0.7184,
"step": 21500
},
{
"epoch": 1.98,
"learning_rate": 1.0088351965380456e-05,
"loss": 0.6812,
"step": 22000
},
{
"epoch": 2.03,
"learning_rate": 9.862964298593581e-06,
"loss": 0.5916,
"step": 22500
},
{
"epoch": 2.07,
"learning_rate": 9.637576631806709e-06,
"loss": 0.5152,
"step": 23000
},
{
"epoch": 2.12,
"learning_rate": 9.412188965019836e-06,
"loss": 0.4892,
"step": 23500
},
{
"epoch": 2.16,
"learning_rate": 9.187252073566536e-06,
"loss": 0.5003,
"step": 24000
},
{
"epoch": 2.21,
"learning_rate": 8.961864406779663e-06,
"loss": 0.4962,
"step": 24500
},
{
"epoch": 2.25,
"learning_rate": 8.736476739992788e-06,
"loss": 0.4955,
"step": 25000
},
{
"epoch": 2.3,
"learning_rate": 8.511089073205916e-06,
"loss": 0.5183,
"step": 25500
},
{
"epoch": 2.34,
"learning_rate": 8.285701406419041e-06,
"loss": 0.501,
"step": 26000
},
{
"epoch": 2.39,
"learning_rate": 8.060313739632168e-06,
"loss": 0.4932,
"step": 26500
},
{
"epoch": 2.43,
"learning_rate": 7.835376848178868e-06,
"loss": 0.5092,
"step": 27000
},
{
"epoch": 2.48,
"learning_rate": 7.609989181391995e-06,
"loss": 0.5327,
"step": 27500
},
{
"epoch": 2.52,
"learning_rate": 7.384601514605121e-06,
"loss": 0.4913,
"step": 28000
},
{
"epoch": 2.57,
"learning_rate": 7.159213847818248e-06,
"loss": 0.4862,
"step": 28500
},
{
"epoch": 2.61,
"learning_rate": 6.9338261810313745e-06,
"loss": 0.5005,
"step": 29000
},
{
"epoch": 2.66,
"learning_rate": 6.708889289578074e-06,
"loss": 0.498,
"step": 29500
},
{
"epoch": 2.7,
"learning_rate": 6.483952398124775e-06,
"loss": 0.5037,
"step": 30000
},
{
"epoch": 2.75,
"learning_rate": 6.2585647313379015e-06,
"loss": 0.4922,
"step": 30500
},
{
"epoch": 2.79,
"learning_rate": 6.033177064551028e-06,
"loss": 0.4944,
"step": 31000
},
{
"epoch": 2.84,
"learning_rate": 5.807789397764155e-06,
"loss": 0.4795,
"step": 31500
},
{
"epoch": 2.88,
"learning_rate": 5.5824017309772815e-06,
"loss": 0.498,
"step": 32000
},
{
"epoch": 2.93,
"learning_rate": 5.357014064190408e-06,
"loss": 0.4985,
"step": 32500
},
{
"epoch": 2.98,
"learning_rate": 5.131626397403534e-06,
"loss": 0.4915,
"step": 33000
},
{
"epoch": 3.02,
"learning_rate": 4.9062387306166605e-06,
"loss": 0.4185,
"step": 33500
},
{
"epoch": 3.07,
"learning_rate": 4.680851063829788e-06,
"loss": 0.3421,
"step": 34000
},
{
"epoch": 3.11,
"learning_rate": 4.455463397042914e-06,
"loss": 0.353,
"step": 34500
},
{
"epoch": 3.16,
"learning_rate": 4.2300757302560405e-06,
"loss": 0.33,
"step": 35000
},
{
"epoch": 3.2,
"learning_rate": 4.004688063469167e-06,
"loss": 0.3277,
"step": 35500
},
{
"epoch": 3.25,
"learning_rate": 3.779300396682294e-06,
"loss": 0.3456,
"step": 36000
},
{
"epoch": 3.29,
"learning_rate": 3.553912729895421e-06,
"loss": 0.3295,
"step": 36500
},
{
"epoch": 3.34,
"learning_rate": 3.3289758384421207e-06,
"loss": 0.3293,
"step": 37000
},
{
"epoch": 3.38,
"learning_rate": 3.1035881716552475e-06,
"loss": 0.3478,
"step": 37500
},
{
"epoch": 3.43,
"learning_rate": 2.8782005048683735e-06,
"loss": 0.3639,
"step": 38000
},
{
"epoch": 3.47,
"learning_rate": 2.6528128380815007e-06,
"loss": 0.3333,
"step": 38500
},
{
"epoch": 3.52,
"learning_rate": 2.427425171294627e-06,
"loss": 0.3324,
"step": 39000
},
{
"epoch": 3.56,
"learning_rate": 2.2024882798413274e-06,
"loss": 0.3435,
"step": 39500
},
{
"epoch": 3.61,
"learning_rate": 1.9771006130544537e-06,
"loss": 0.3512,
"step": 40000
},
{
"epoch": 3.65,
"learning_rate": 1.7517129462675803e-06,
"loss": 0.3388,
"step": 40500
},
{
"epoch": 3.7,
"learning_rate": 1.526325279480707e-06,
"loss": 0.3421,
"step": 41000
},
{
"epoch": 3.74,
"learning_rate": 1.3009376126938337e-06,
"loss": 0.3648,
"step": 41500
},
{
"epoch": 3.79,
"learning_rate": 1.0760007212405338e-06,
"loss": 0.3271,
"step": 42000
},
{
"epoch": 3.83,
"learning_rate": 8.506130544536603e-07,
"loss": 0.321,
"step": 42500
},
{
"epoch": 3.88,
"learning_rate": 6.256761630003607e-07,
"loss": 0.3432,
"step": 43000
},
{
"epoch": 3.92,
"learning_rate": 4.0028849621348723e-07,
"loss": 0.322,
"step": 43500
},
{
"epoch": 3.97,
"learning_rate": 1.7490082942661378e-07,
"loss": 0.3316,
"step": 44000
}
],
"logging_steps": 500,
"max_steps": 44368,
"num_train_epochs": 4,
"save_steps": 500,
"total_flos": 6.955379978528563e+16,
"trial_name": null,
"trial_params": null
}
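
For reference, here is a minimal sketch of how the log_history above could be read back into Python to inspect the loss curve and learning-rate schedule. The local file path and the use of matplotlib are assumptions for illustration; neither is part of the uploaded file.

import json

import matplotlib.pyplot as plt

# Load the trainer state exported by the Hugging Face Trainer.
# The path below is a placeholder: point it at wherever trainer_state.json was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry in log_history records the training loss and learning rate
# every `logging_steps` (500) optimizer steps.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

print(f"epochs: {state['epoch']}, total steps: {state['global_step']}")
print(f"first logged loss: {losses[0]:.4f}, last logged loss: {losses[-1]:.4f}")

# Plot the training loss against the decaying learning-rate schedule.
fig, ax1 = plt.subplots()
ax1.plot(steps, losses, label="training loss")
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax2 = ax1.twinx()
ax2.plot(steps, lrs, color="tab:orange", label="learning rate")
ax2.set_ylabel("learning rate")
fig.tight_layout()
plt.show()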