common7 / trainer_state.json
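The JSON below is the raw Trainer state logged during fine-tuning: "log_history" mixes per-step training records (epoch, learning_rate, loss, step) with periodic evaluation records (eval_loss, eval_wer, runtime figures). As a convenience only, here is a minimal Python sketch of how such a log could be split and plotted; the local path "trainer_state.json" and the matplotlib dependency are assumptions, not part of the original file.

import json
import matplotlib.pyplot as plt  # assumed plotting dependency, not part of the original file

# Load the exported trainer state (the file path is an assumption).
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# "log_history" mixes two record types: training logs carry "loss",
# evaluation logs carry "eval_loss" and "eval_wer".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

# Plot training loss and eval WER (word error rate) against the global step.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs])
ax1.set_xlabel("step")
ax1.set_ylabel("training loss")
ax2.plot([e["step"] for e in eval_logs], [e["eval_wer"] for e in eval_logs])
ax2.set_xlabel("step")
ax2.set_ylabel("eval WER")
plt.tight_layout()
plt.show()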
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 122.99509001636662,
"global_step": 18696,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 6e-06,
"loss": 27.848,
"step": 10
},
{
"epoch": 0.13,
"learning_rate": 1.1400000000000001e-05,
"loss": 28.8912,
"step": 20
},
{
"epoch": 0.2,
"learning_rate": 1.6800000000000002e-05,
"loss": 30.7944,
"step": 30
},
{
"epoch": 0.26,
"learning_rate": 2.2800000000000002e-05,
"loss": 24.0868,
"step": 40
},
{
"epoch": 0.33,
"learning_rate": 2.88e-05,
"loss": 16.5729,
"step": 50
},
{
"epoch": 0.39,
"learning_rate": 3.48e-05,
"loss": 14.2674,
"step": 60
},
{
"epoch": 0.46,
"learning_rate": 4.08e-05,
"loss": 14.6699,
"step": 70
},
{
"epoch": 0.52,
"learning_rate": 4.6800000000000006e-05,
"loss": 10.5936,
"step": 80
},
{
"epoch": 0.59,
"learning_rate": 5.28e-05,
"loss": 9.7296,
"step": 90
},
{
"epoch": 0.65,
"learning_rate": 5.88e-05,
"loss": 9.0511,
"step": 100
},
{
"epoch": 0.72,
"learning_rate": 5.9952399841332805e-05,
"loss": 9.3894,
"step": 110
},
{
"epoch": 0.79,
"learning_rate": 5.989289964299881e-05,
"loss": 7.1183,
"step": 120
},
{
"epoch": 0.85,
"learning_rate": 5.983339944466482e-05,
"loss": 6.6377,
"step": 130
},
{
"epoch": 0.92,
"learning_rate": 5.9773899246330826e-05,
"loss": 6.5715,
"step": 140
},
{
"epoch": 0.98,
"learning_rate": 5.9714399047996826e-05,
"loss": 6.2473,
"step": 150
},
{
"epoch": 1.05,
"learning_rate": 5.9654898849662834e-05,
"loss": 5.4192,
"step": 160
},
{
"epoch": 1.12,
"learning_rate": 5.959539865132884e-05,
"loss": 4.773,
"step": 170
},
{
"epoch": 1.18,
"learning_rate": 5.953589845299485e-05,
"loss": 4.6221,
"step": 180
},
{
"epoch": 1.25,
"learning_rate": 5.947639825466085e-05,
"loss": 4.446,
"step": 190
},
{
"epoch": 1.31,
"learning_rate": 5.9416898056326856e-05,
"loss": 3.785,
"step": 200
},
{
"epoch": 1.38,
"learning_rate": 5.935739785799286e-05,
"loss": 3.7092,
"step": 210
},
{
"epoch": 1.45,
"learning_rate": 5.929789765965886e-05,
"loss": 3.7962,
"step": 220
},
{
"epoch": 1.51,
"learning_rate": 5.923839746132487e-05,
"loss": 3.5789,
"step": 230
},
{
"epoch": 1.58,
"learning_rate": 5.917889726299088e-05,
"loss": 3.2916,
"step": 240
},
{
"epoch": 1.64,
"learning_rate": 5.9119397064656885e-05,
"loss": 3.2991,
"step": 250
},
{
"epoch": 1.71,
"learning_rate": 5.9059896866322885e-05,
"loss": 3.3718,
"step": 260
},
{
"epoch": 1.77,
"learning_rate": 5.900039666798889e-05,
"loss": 3.2297,
"step": 270
},
{
"epoch": 1.84,
"learning_rate": 5.89408964696549e-05,
"loss": 3.1159,
"step": 280
},
{
"epoch": 1.9,
"learning_rate": 5.888139627132091e-05,
"loss": 3.1361,
"step": 290
},
{
"epoch": 1.97,
"learning_rate": 5.882189607298691e-05,
"loss": 3.2319,
"step": 300
},
{
"epoch": 2.04,
"learning_rate": 5.8762395874652914e-05,
"loss": 3.3508,
"step": 310
},
{
"epoch": 2.1,
"learning_rate": 5.870289567631892e-05,
"loss": 3.0835,
"step": 320
},
{
"epoch": 2.17,
"learning_rate": 5.864339547798493e-05,
"loss": 3.115,
"step": 330
},
{
"epoch": 2.24,
"learning_rate": 5.858389527965093e-05,
"loss": 3.1754,
"step": 340
},
{
"epoch": 2.3,
"learning_rate": 5.8524395081316936e-05,
"loss": 3.0651,
"step": 350
},
{
"epoch": 2.37,
"learning_rate": 5.846489488298294e-05,
"loss": 3.0475,
"step": 360
},
{
"epoch": 2.43,
"learning_rate": 5.840539468464895e-05,
"loss": 3.0804,
"step": 370
},
{
"epoch": 2.5,
"learning_rate": 5.834589448631496e-05,
"loss": 3.1193,
"step": 380
},
{
"epoch": 2.56,
"learning_rate": 5.8286394287980965e-05,
"loss": 3.0054,
"step": 390
},
{
"epoch": 2.63,
"learning_rate": 5.822689408964697e-05,
"loss": 3.005,
"step": 400
},
{
"epoch": 2.69,
"learning_rate": 5.816739389131297e-05,
"loss": 3.0306,
"step": 410
},
{
"epoch": 2.76,
"learning_rate": 5.810789369297898e-05,
"loss": 3.0225,
"step": 420
},
{
"epoch": 2.82,
"learning_rate": 5.804839349464499e-05,
"loss": 2.9728,
"step": 430
},
{
"epoch": 2.89,
"learning_rate": 5.7988893296310994e-05,
"loss": 2.9658,
"step": 440
},
{
"epoch": 2.96,
"learning_rate": 5.7929393097976994e-05,
"loss": 2.9793,
"step": 450
},
{
"epoch": 3.03,
"learning_rate": 5.7869892899643e-05,
"loss": 3.1856,
"step": 460
},
{
"epoch": 3.09,
"learning_rate": 5.781039270130901e-05,
"loss": 2.9505,
"step": 470
},
{
"epoch": 3.16,
"learning_rate": 5.7750892502975016e-05,
"loss": 2.9522,
"step": 480
},
{
"epoch": 3.22,
"learning_rate": 5.7691392304641016e-05,
"loss": 2.9528,
"step": 490
},
{
"epoch": 3.29,
"learning_rate": 5.7631892106307024e-05,
"loss": 2.957,
"step": 500
},
{
"epoch": 3.29,
"eval_loss": 2.95032000541687,
"eval_runtime": 300.7309,
"eval_samples_per_second": 23.642,
"eval_steps_per_second": 1.48,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 3.35,
"learning_rate": 5.757239190797303e-05,
"loss": 2.9297,
"step": 510
},
{
"epoch": 3.42,
"learning_rate": 5.751289170963903e-05,
"loss": 2.9175,
"step": 520
},
{
"epoch": 3.48,
"learning_rate": 5.745339151130504e-05,
"loss": 2.9267,
"step": 530
},
{
"epoch": 3.55,
"learning_rate": 5.7393891312971046e-05,
"loss": 2.9391,
"step": 540
},
{
"epoch": 3.62,
"learning_rate": 5.733439111463705e-05,
"loss": 2.9073,
"step": 550
},
{
"epoch": 3.68,
"learning_rate": 5.727489091630305e-05,
"loss": 2.8946,
"step": 560
},
{
"epoch": 3.75,
"learning_rate": 5.721539071796906e-05,
"loss": 2.9003,
"step": 570
},
{
"epoch": 3.81,
"learning_rate": 5.715589051963507e-05,
"loss": 2.9124,
"step": 580
},
{
"epoch": 3.88,
"learning_rate": 5.7096390321301075e-05,
"loss": 2.8812,
"step": 590
},
{
"epoch": 3.94,
"learning_rate": 5.7036890122967075e-05,
"loss": 2.8715,
"step": 600
},
{
"epoch": 4.01,
"learning_rate": 5.697738992463308e-05,
"loss": 3.0957,
"step": 610
},
{
"epoch": 4.08,
"learning_rate": 5.691788972629909e-05,
"loss": 2.8812,
"step": 620
},
{
"epoch": 4.14,
"learning_rate": 5.6858389527965097e-05,
"loss": 2.8435,
"step": 630
},
{
"epoch": 4.21,
"learning_rate": 5.67988893296311e-05,
"loss": 2.8249,
"step": 640
},
{
"epoch": 4.27,
"learning_rate": 5.6739389131297104e-05,
"loss": 2.8412,
"step": 650
},
{
"epoch": 4.34,
"learning_rate": 5.667988893296311e-05,
"loss": 2.7992,
"step": 660
},
{
"epoch": 4.41,
"learning_rate": 5.662038873462911e-05,
"loss": 2.7854,
"step": 670
},
{
"epoch": 4.47,
"learning_rate": 5.656088853629512e-05,
"loss": 2.7678,
"step": 680
},
{
"epoch": 4.54,
"learning_rate": 5.6501388337961126e-05,
"loss": 2.7767,
"step": 690
},
{
"epoch": 4.6,
"learning_rate": 5.644188813962713e-05,
"loss": 2.6956,
"step": 700
},
{
"epoch": 4.67,
"learning_rate": 5.6382387941293134e-05,
"loss": 2.6563,
"step": 710
},
{
"epoch": 4.73,
"learning_rate": 5.632288774295914e-05,
"loss": 2.6166,
"step": 720
},
{
"epoch": 4.8,
"learning_rate": 5.626338754462515e-05,
"loss": 2.6088,
"step": 730
},
{
"epoch": 4.86,
"learning_rate": 5.6203887346291155e-05,
"loss": 2.4939,
"step": 740
},
{
"epoch": 4.93,
"learning_rate": 5.614438714795716e-05,
"loss": 2.4259,
"step": 750
},
{
"epoch": 5.0,
"learning_rate": 5.608488694962317e-05,
"loss": 2.4021,
"step": 760
},
{
"epoch": 5.07,
"learning_rate": 5.602538675128918e-05,
"loss": 2.4815,
"step": 770
},
{
"epoch": 5.13,
"learning_rate": 5.5965886552955184e-05,
"loss": 2.2209,
"step": 780
},
{
"epoch": 5.2,
"learning_rate": 5.5906386354621184e-05,
"loss": 2.1851,
"step": 790
},
{
"epoch": 5.26,
"learning_rate": 5.584688615628719e-05,
"loss": 2.2135,
"step": 800
},
{
"epoch": 5.33,
"learning_rate": 5.57873859579532e-05,
"loss": 2.0608,
"step": 810
},
{
"epoch": 5.39,
"learning_rate": 5.57278857596192e-05,
"loss": 2.0331,
"step": 820
},
{
"epoch": 5.46,
"learning_rate": 5.5668385561285206e-05,
"loss": 2.0301,
"step": 830
},
{
"epoch": 5.52,
"learning_rate": 5.5608885362951213e-05,
"loss": 2.0197,
"step": 840
},
{
"epoch": 5.59,
"learning_rate": 5.554938516461722e-05,
"loss": 1.9322,
"step": 850
},
{
"epoch": 5.65,
"learning_rate": 5.548988496628322e-05,
"loss": 1.9336,
"step": 860
},
{
"epoch": 5.72,
"learning_rate": 5.543038476794923e-05,
"loss": 1.9208,
"step": 870
},
{
"epoch": 5.79,
"learning_rate": 5.5370884569615235e-05,
"loss": 1.9261,
"step": 880
},
{
"epoch": 5.85,
"learning_rate": 5.531138437128124e-05,
"loss": 1.8571,
"step": 890
},
{
"epoch": 5.92,
"learning_rate": 5.525188417294724e-05,
"loss": 1.8512,
"step": 900
},
{
"epoch": 5.98,
"learning_rate": 5.519238397461325e-05,
"loss": 1.831,
"step": 910
},
{
"epoch": 6.05,
"learning_rate": 5.513288377627926e-05,
"loss": 1.9426,
"step": 920
},
{
"epoch": 6.12,
"learning_rate": 5.5073383577945265e-05,
"loss": 1.7983,
"step": 930
},
{
"epoch": 6.18,
"learning_rate": 5.5013883379611265e-05,
"loss": 1.7945,
"step": 940
},
{
"epoch": 6.25,
"learning_rate": 5.495438318127727e-05,
"loss": 1.8402,
"step": 950
},
{
"epoch": 6.31,
"learning_rate": 5.489488298294328e-05,
"loss": 1.7505,
"step": 960
},
{
"epoch": 6.38,
"learning_rate": 5.483538278460928e-05,
"loss": 1.7542,
"step": 970
},
{
"epoch": 6.45,
"learning_rate": 5.477588258627529e-05,
"loss": 1.7426,
"step": 980
},
{
"epoch": 6.51,
"learning_rate": 5.4716382387941294e-05,
"loss": 1.8086,
"step": 990
},
{
"epoch": 6.58,
"learning_rate": 5.46568821896073e-05,
"loss": 1.7225,
"step": 1000
},
{
"epoch": 6.58,
"eval_loss": 0.8859816789627075,
"eval_runtime": 314.8736,
"eval_samples_per_second": 22.58,
"eval_steps_per_second": 1.413,
"eval_wer": 0.7703087196361172,
"step": 1000
},
{
"epoch": 6.64,
"learning_rate": 5.45973819912733e-05,
"loss": 1.7249,
"step": 1010
},
{
"epoch": 6.71,
"learning_rate": 5.453788179293931e-05,
"loss": 1.7171,
"step": 1020
},
{
"epoch": 6.77,
"learning_rate": 5.4478381594605316e-05,
"loss": 1.7518,
"step": 1030
},
{
"epoch": 6.84,
"learning_rate": 5.441888139627132e-05,
"loss": 1.7065,
"step": 1040
},
{
"epoch": 6.9,
"learning_rate": 5.4359381197937324e-05,
"loss": 1.6863,
"step": 1050
},
{
"epoch": 6.97,
"learning_rate": 5.429988099960333e-05,
"loss": 1.6835,
"step": 1060
},
{
"epoch": 7.04,
"learning_rate": 5.424038080126934e-05,
"loss": 1.8348,
"step": 1070
},
{
"epoch": 7.1,
"learning_rate": 5.4180880602935345e-05,
"loss": 1.6682,
"step": 1080
},
{
"epoch": 7.17,
"learning_rate": 5.4121380404601346e-05,
"loss": 1.6787,
"step": 1090
},
{
"epoch": 7.24,
"learning_rate": 5.406188020626735e-05,
"loss": 1.6797,
"step": 1100
},
{
"epoch": 7.3,
"learning_rate": 5.400238000793336e-05,
"loss": 1.6773,
"step": 1110
},
{
"epoch": 7.37,
"learning_rate": 5.394287980959936e-05,
"loss": 1.631,
"step": 1120
},
{
"epoch": 7.43,
"learning_rate": 5.3883379611265374e-05,
"loss": 1.6511,
"step": 1130
},
{
"epoch": 7.5,
"learning_rate": 5.382387941293138e-05,
"loss": 1.6923,
"step": 1140
},
{
"epoch": 7.56,
"learning_rate": 5.376437921459739e-05,
"loss": 1.6204,
"step": 1150
},
{
"epoch": 7.63,
"learning_rate": 5.370487901626339e-05,
"loss": 1.6243,
"step": 1160
},
{
"epoch": 7.69,
"learning_rate": 5.3645378817929396e-05,
"loss": 1.6422,
"step": 1170
},
{
"epoch": 7.76,
"learning_rate": 5.35858786195954e-05,
"loss": 1.7036,
"step": 1180
},
{
"epoch": 7.82,
"learning_rate": 5.352637842126141e-05,
"loss": 1.5981,
"step": 1190
},
{
"epoch": 7.89,
"learning_rate": 5.346687822292741e-05,
"loss": 1.6271,
"step": 1200
},
{
"epoch": 7.96,
"learning_rate": 5.340737802459342e-05,
"loss": 1.5931,
"step": 1210
},
{
"epoch": 8.03,
"learning_rate": 5.3347877826259425e-05,
"loss": 1.79,
"step": 1220
},
{
"epoch": 8.09,
"learning_rate": 5.328837762792543e-05,
"loss": 1.5735,
"step": 1230
},
{
"epoch": 8.16,
"learning_rate": 5.322887742959143e-05,
"loss": 1.6044,
"step": 1240
},
{
"epoch": 8.22,
"learning_rate": 5.316937723125744e-05,
"loss": 1.5838,
"step": 1250
},
{
"epoch": 8.29,
"learning_rate": 5.310987703292345e-05,
"loss": 1.6321,
"step": 1260
},
{
"epoch": 8.35,
"learning_rate": 5.305037683458945e-05,
"loss": 1.58,
"step": 1270
},
{
"epoch": 8.42,
"learning_rate": 5.2990876636255455e-05,
"loss": 1.5729,
"step": 1280
},
{
"epoch": 8.48,
"learning_rate": 5.293137643792146e-05,
"loss": 1.6058,
"step": 1290
},
{
"epoch": 8.55,
"learning_rate": 5.287187623958747e-05,
"loss": 1.6066,
"step": 1300
},
{
"epoch": 8.62,
"learning_rate": 5.281237604125347e-05,
"loss": 1.5613,
"step": 1310
},
{
"epoch": 8.68,
"learning_rate": 5.275287584291948e-05,
"loss": 1.5781,
"step": 1320
},
{
"epoch": 8.75,
"learning_rate": 5.2693375644585484e-05,
"loss": 1.596,
"step": 1330
},
{
"epoch": 8.81,
"learning_rate": 5.263387544625149e-05,
"loss": 1.5344,
"step": 1340
},
{
"epoch": 8.88,
"learning_rate": 5.257437524791749e-05,
"loss": 1.5456,
"step": 1350
},
{
"epoch": 8.94,
"learning_rate": 5.25148750495835e-05,
"loss": 1.5413,
"step": 1360
},
{
"epoch": 9.01,
"learning_rate": 5.2455374851249506e-05,
"loss": 1.7312,
"step": 1370
},
{
"epoch": 9.08,
"learning_rate": 5.239587465291551e-05,
"loss": 1.5234,
"step": 1380
},
{
"epoch": 9.14,
"learning_rate": 5.2336374454581513e-05,
"loss": 1.5335,
"step": 1390
},
{
"epoch": 9.21,
"learning_rate": 5.227687425624752e-05,
"loss": 1.5345,
"step": 1400
},
{
"epoch": 9.27,
"learning_rate": 5.221737405791353e-05,
"loss": 1.6354,
"step": 1410
},
{
"epoch": 9.34,
"learning_rate": 5.215787385957953e-05,
"loss": 1.5181,
"step": 1420
},
{
"epoch": 9.41,
"learning_rate": 5.2098373661245535e-05,
"loss": 1.5537,
"step": 1430
},
{
"epoch": 9.47,
"learning_rate": 5.203887346291154e-05,
"loss": 1.521,
"step": 1440
},
{
"epoch": 9.54,
"learning_rate": 5.197937326457755e-05,
"loss": 1.5633,
"step": 1450
},
{
"epoch": 9.6,
"learning_rate": 5.191987306624355e-05,
"loss": 1.5192,
"step": 1460
},
{
"epoch": 9.67,
"learning_rate": 5.186037286790956e-05,
"loss": 1.5376,
"step": 1470
},
{
"epoch": 9.73,
"learning_rate": 5.1800872669575565e-05,
"loss": 1.5484,
"step": 1480
},
{
"epoch": 9.8,
"learning_rate": 5.174137247124157e-05,
"loss": 1.5301,
"step": 1490
},
{
"epoch": 9.86,
"learning_rate": 5.168187227290757e-05,
"loss": 1.4907,
"step": 1500
},
{
"epoch": 9.86,
"eval_loss": 0.6554940342903137,
"eval_runtime": 311.8278,
"eval_samples_per_second": 22.801,
"eval_steps_per_second": 1.427,
"eval_wer": 0.6672602342011033,
"step": 1500
},
{
"epoch": 9.93,
"learning_rate": 5.1622372074573586e-05,
"loss": 1.476,
"step": 1510
},
{
"epoch": 10.0,
"learning_rate": 5.156287187623959e-05,
"loss": 1.5746,
"step": 1520
},
{
"epoch": 10.07,
"learning_rate": 5.15033716779056e-05,
"loss": 1.5885,
"step": 1530
},
{
"epoch": 10.13,
"learning_rate": 5.14438714795716e-05,
"loss": 1.4942,
"step": 1540
},
{
"epoch": 10.2,
"learning_rate": 5.138437128123761e-05,
"loss": 1.5069,
"step": 1550
},
{
"epoch": 10.26,
"learning_rate": 5.1324871082903615e-05,
"loss": 1.5527,
"step": 1560
},
{
"epoch": 10.33,
"learning_rate": 5.1265370884569616e-05,
"loss": 1.4691,
"step": 1570
},
{
"epoch": 10.39,
"learning_rate": 5.120587068623562e-05,
"loss": 1.5139,
"step": 1580
},
{
"epoch": 10.46,
"learning_rate": 5.114637048790163e-05,
"loss": 1.4942,
"step": 1590
},
{
"epoch": 10.52,
"learning_rate": 5.108687028956764e-05,
"loss": 1.5169,
"step": 1600
},
{
"epoch": 10.59,
"learning_rate": 5.102737009123364e-05,
"loss": 1.4577,
"step": 1610
},
{
"epoch": 10.65,
"learning_rate": 5.0967869892899645e-05,
"loss": 1.4895,
"step": 1620
},
{
"epoch": 10.72,
"learning_rate": 5.090836969456565e-05,
"loss": 1.4558,
"step": 1630
},
{
"epoch": 10.79,
"learning_rate": 5.084886949623166e-05,
"loss": 1.547,
"step": 1640
},
{
"epoch": 10.85,
"learning_rate": 5.078936929789766e-05,
"loss": 1.4679,
"step": 1650
},
{
"epoch": 10.92,
"learning_rate": 5.072986909956367e-05,
"loss": 1.4676,
"step": 1660
},
{
"epoch": 10.98,
"learning_rate": 5.0670368901229674e-05,
"loss": 1.496,
"step": 1670
},
{
"epoch": 11.05,
"learning_rate": 5.061086870289568e-05,
"loss": 1.6186,
"step": 1680
},
{
"epoch": 11.12,
"learning_rate": 5.055136850456168e-05,
"loss": 1.4611,
"step": 1690
},
{
"epoch": 11.18,
"learning_rate": 5.049186830622769e-05,
"loss": 1.4517,
"step": 1700
},
{
"epoch": 11.25,
"learning_rate": 5.0432368107893696e-05,
"loss": 1.4987,
"step": 1710
},
{
"epoch": 11.31,
"learning_rate": 5.0372867909559696e-05,
"loss": 1.4539,
"step": 1720
},
{
"epoch": 11.38,
"learning_rate": 5.03133677112257e-05,
"loss": 1.4677,
"step": 1730
},
{
"epoch": 11.45,
"learning_rate": 5.025386751289171e-05,
"loss": 1.4523,
"step": 1740
},
{
"epoch": 11.51,
"learning_rate": 5.019436731455772e-05,
"loss": 1.5092,
"step": 1750
},
{
"epoch": 11.58,
"learning_rate": 5.013486711622372e-05,
"loss": 1.4423,
"step": 1760
},
{
"epoch": 11.64,
"learning_rate": 5.0075366917889725e-05,
"loss": 1.4498,
"step": 1770
},
{
"epoch": 11.71,
"learning_rate": 5.001586671955573e-05,
"loss": 1.4076,
"step": 1780
},
{
"epoch": 11.77,
"learning_rate": 4.995636652122174e-05,
"loss": 1.5411,
"step": 1790
},
{
"epoch": 11.84,
"learning_rate": 4.989686632288774e-05,
"loss": 1.4117,
"step": 1800
},
{
"epoch": 11.9,
"learning_rate": 4.983736612455375e-05,
"loss": 1.4459,
"step": 1810
},
{
"epoch": 11.97,
"learning_rate": 4.9777865926219754e-05,
"loss": 1.4549,
"step": 1820
},
{
"epoch": 12.04,
"learning_rate": 4.971836572788576e-05,
"loss": 1.5976,
"step": 1830
},
{
"epoch": 12.1,
"learning_rate": 4.965886552955176e-05,
"loss": 1.4083,
"step": 1840
},
{
"epoch": 12.17,
"learning_rate": 4.959936533121777e-05,
"loss": 1.4588,
"step": 1850
},
{
"epoch": 12.24,
"learning_rate": 4.9539865132883776e-05,
"loss": 1.4425,
"step": 1860
},
{
"epoch": 12.3,
"learning_rate": 4.948036493454978e-05,
"loss": 1.4567,
"step": 1870
},
{
"epoch": 12.37,
"learning_rate": 4.9420864736215784e-05,
"loss": 1.4363,
"step": 1880
},
{
"epoch": 12.43,
"learning_rate": 4.93613645378818e-05,
"loss": 1.4431,
"step": 1890
},
{
"epoch": 12.5,
"learning_rate": 4.9301864339547805e-05,
"loss": 1.4858,
"step": 1900
},
{
"epoch": 12.56,
"learning_rate": 4.9242364141213805e-05,
"loss": 1.4147,
"step": 1910
},
{
"epoch": 12.63,
"learning_rate": 4.918286394287981e-05,
"loss": 1.4268,
"step": 1920
},
{
"epoch": 12.69,
"learning_rate": 4.912336374454582e-05,
"loss": 1.4174,
"step": 1930
},
{
"epoch": 12.76,
"learning_rate": 4.906386354621183e-05,
"loss": 1.4528,
"step": 1940
},
{
"epoch": 12.82,
"learning_rate": 4.900436334787783e-05,
"loss": 1.4058,
"step": 1950
},
{
"epoch": 12.89,
"learning_rate": 4.8944863149543835e-05,
"loss": 1.441,
"step": 1960
},
{
"epoch": 12.96,
"learning_rate": 4.888536295120984e-05,
"loss": 1.3988,
"step": 1970
},
{
"epoch": 13.03,
"learning_rate": 4.882586275287585e-05,
"loss": 1.586,
"step": 1980
},
{
"epoch": 13.09,
"learning_rate": 4.876636255454185e-05,
"loss": 1.3778,
"step": 1990
},
{
"epoch": 13.16,
"learning_rate": 4.8706862356207857e-05,
"loss": 1.4177,
"step": 2000
},
{
"epoch": 13.16,
"eval_loss": 0.5784419775009155,
"eval_runtime": 314.6324,
"eval_samples_per_second": 22.598,
"eval_steps_per_second": 1.414,
"eval_wer": 0.6075679860640666,
"step": 2000
},
{
"epoch": 13.22,
"learning_rate": 4.8647362157873864e-05,
"loss": 1.4404,
"step": 2010
},
{
"epoch": 13.29,
"learning_rate": 4.8587861959539864e-05,
"loss": 1.4586,
"step": 2020
},
{
"epoch": 13.35,
"learning_rate": 4.852836176120587e-05,
"loss": 1.4015,
"step": 2030
},
{
"epoch": 13.42,
"learning_rate": 4.846886156287188e-05,
"loss": 1.4202,
"step": 2040
},
{
"epoch": 13.48,
"learning_rate": 4.8409361364537886e-05,
"loss": 1.4328,
"step": 2050
},
{
"epoch": 13.55,
"learning_rate": 4.8349861166203886e-05,
"loss": 1.4464,
"step": 2060
},
{
"epoch": 13.62,
"learning_rate": 4.829036096786989e-05,
"loss": 1.4048,
"step": 2070
},
{
"epoch": 13.68,
"learning_rate": 4.82308607695359e-05,
"loss": 1.3997,
"step": 2080
},
{
"epoch": 13.75,
"learning_rate": 4.817136057120191e-05,
"loss": 1.446,
"step": 2090
},
{
"epoch": 13.81,
"learning_rate": 4.811186037286791e-05,
"loss": 1.391,
"step": 2100
},
{
"epoch": 13.88,
"learning_rate": 4.8052360174533915e-05,
"loss": 1.4091,
"step": 2110
},
{
"epoch": 13.94,
"learning_rate": 4.799285997619992e-05,
"loss": 1.4267,
"step": 2120
},
{
"epoch": 14.01,
"learning_rate": 4.793335977786593e-05,
"loss": 1.5417,
"step": 2130
},
{
"epoch": 14.08,
"learning_rate": 4.787385957953193e-05,
"loss": 1.3711,
"step": 2140
},
{
"epoch": 14.14,
"learning_rate": 4.781435938119794e-05,
"loss": 1.3764,
"step": 2150
},
{
"epoch": 14.21,
"learning_rate": 4.7754859182863944e-05,
"loss": 1.3796,
"step": 2160
},
{
"epoch": 14.27,
"learning_rate": 4.7695358984529945e-05,
"loss": 1.4684,
"step": 2170
},
{
"epoch": 14.34,
"learning_rate": 4.763585878619595e-05,
"loss": 1.3748,
"step": 2180
},
{
"epoch": 14.41,
"learning_rate": 4.757635858786196e-05,
"loss": 1.3977,
"step": 2190
},
{
"epoch": 14.47,
"learning_rate": 4.7516858389527966e-05,
"loss": 1.4021,
"step": 2200
},
{
"epoch": 14.54,
"learning_rate": 4.745735819119397e-05,
"loss": 1.4422,
"step": 2210
},
{
"epoch": 14.6,
"learning_rate": 4.7397857992859974e-05,
"loss": 1.3541,
"step": 2220
},
{
"epoch": 14.67,
"learning_rate": 4.733835779452598e-05,
"loss": 1.4021,
"step": 2230
},
{
"epoch": 14.73,
"learning_rate": 4.727885759619199e-05,
"loss": 1.4237,
"step": 2240
},
{
"epoch": 14.8,
"learning_rate": 4.721935739785799e-05,
"loss": 1.3847,
"step": 2250
},
{
"epoch": 14.86,
"learning_rate": 4.7159857199523996e-05,
"loss": 1.365,
"step": 2260
},
{
"epoch": 14.93,
"learning_rate": 4.710035700119001e-05,
"loss": 1.3724,
"step": 2270
},
{
"epoch": 15.0,
"learning_rate": 4.704085680285602e-05,
"loss": 1.4679,
"step": 2280
},
{
"epoch": 15.07,
"learning_rate": 4.698135660452202e-05,
"loss": 1.4707,
"step": 2290
},
{
"epoch": 15.13,
"learning_rate": 4.6921856406188024e-05,
"loss": 1.3633,
"step": 2300
},
{
"epoch": 15.2,
"learning_rate": 4.686235620785403e-05,
"loss": 1.3463,
"step": 2310
},
{
"epoch": 15.26,
"learning_rate": 4.680285600952003e-05,
"loss": 1.4541,
"step": 2320
},
{
"epoch": 15.33,
"learning_rate": 4.674335581118604e-05,
"loss": 1.3625,
"step": 2330
},
{
"epoch": 15.39,
"learning_rate": 4.6683855612852046e-05,
"loss": 1.3401,
"step": 2340
},
{
"epoch": 15.46,
"learning_rate": 4.6624355414518054e-05,
"loss": 1.3935,
"step": 2350
},
{
"epoch": 15.52,
"learning_rate": 4.6564855216184054e-05,
"loss": 1.4198,
"step": 2360
},
{
"epoch": 15.59,
"learning_rate": 4.650535501785006e-05,
"loss": 1.3487,
"step": 2370
},
{
"epoch": 15.65,
"learning_rate": 4.644585481951607e-05,
"loss": 1.3766,
"step": 2380
},
{
"epoch": 15.72,
"learning_rate": 4.6386354621182076e-05,
"loss": 1.3678,
"step": 2390
},
{
"epoch": 15.79,
"learning_rate": 4.6326854422848076e-05,
"loss": 1.3904,
"step": 2400
},
{
"epoch": 15.85,
"learning_rate": 4.626735422451408e-05,
"loss": 1.3508,
"step": 2410
},
{
"epoch": 15.92,
"learning_rate": 4.620785402618009e-05,
"loss": 1.383,
"step": 2420
},
{
"epoch": 15.98,
"learning_rate": 4.61483538278461e-05,
"loss": 1.3906,
"step": 2430
},
{
"epoch": 16.05,
"learning_rate": 4.60888536295121e-05,
"loss": 1.4781,
"step": 2440
},
{
"epoch": 16.12,
"learning_rate": 4.6029353431178105e-05,
"loss": 1.3411,
"step": 2450
},
{
"epoch": 16.18,
"learning_rate": 4.596985323284411e-05,
"loss": 1.3709,
"step": 2460
},
{
"epoch": 16.25,
"learning_rate": 4.591035303451011e-05,
"loss": 1.395,
"step": 2470
},
{
"epoch": 16.31,
"learning_rate": 4.585085283617612e-05,
"loss": 1.3344,
"step": 2480
},
{
"epoch": 16.38,
"learning_rate": 4.579135263784213e-05,
"loss": 1.3411,
"step": 2490
},
{
"epoch": 16.45,
"learning_rate": 4.5731852439508134e-05,
"loss": 1.3425,
"step": 2500
},
{
"epoch": 16.45,
"eval_loss": 0.5378673672676086,
"eval_runtime": 315.1097,
"eval_samples_per_second": 22.564,
"eval_steps_per_second": 1.412,
"eval_wer": 0.5718184457563147,
"step": 2500
},
{
"epoch": 16.51,
"learning_rate": 4.5672352241174135e-05,
"loss": 1.3892,
"step": 2510
},
{
"epoch": 16.58,
"learning_rate": 4.561285204284014e-05,
"loss": 1.3285,
"step": 2520
},
{
"epoch": 16.64,
"learning_rate": 4.555335184450615e-05,
"loss": 1.3038,
"step": 2530
},
{
"epoch": 16.71,
"learning_rate": 4.5493851646172156e-05,
"loss": 1.3316,
"step": 2540
},
{
"epoch": 16.77,
"learning_rate": 4.5434351447838157e-05,
"loss": 1.3842,
"step": 2550
},
{
"epoch": 16.84,
"learning_rate": 4.5374851249504164e-05,
"loss": 1.3268,
"step": 2560
},
{
"epoch": 16.9,
"learning_rate": 4.531535105117017e-05,
"loss": 1.3091,
"step": 2570
},
{
"epoch": 16.97,
"learning_rate": 4.525585085283618e-05,
"loss": 1.3346,
"step": 2580
},
{
"epoch": 17.04,
"learning_rate": 4.519635065450218e-05,
"loss": 1.4809,
"step": 2590
},
{
"epoch": 17.1,
"learning_rate": 4.5136850456168186e-05,
"loss": 1.3026,
"step": 2600
},
{
"epoch": 17.17,
"learning_rate": 4.507735025783419e-05,
"loss": 1.332,
"step": 2610
},
{
"epoch": 17.24,
"learning_rate": 4.501785005950019e-05,
"loss": 1.3768,
"step": 2620
},
{
"epoch": 17.3,
"learning_rate": 4.49583498611662e-05,
"loss": 1.3643,
"step": 2630
},
{
"epoch": 17.37,
"learning_rate": 4.489884966283221e-05,
"loss": 1.3337,
"step": 2640
},
{
"epoch": 17.43,
"learning_rate": 4.483934946449822e-05,
"loss": 1.3713,
"step": 2650
},
{
"epoch": 17.5,
"learning_rate": 4.477984926616422e-05,
"loss": 1.3875,
"step": 2660
},
{
"epoch": 17.56,
"learning_rate": 4.472034906783023e-05,
"loss": 1.3109,
"step": 2670
},
{
"epoch": 17.63,
"learning_rate": 4.4660848869496236e-05,
"loss": 1.2984,
"step": 2680
},
{
"epoch": 17.69,
"learning_rate": 4.4601348671162243e-05,
"loss": 1.3055,
"step": 2690
},
{
"epoch": 17.76,
"learning_rate": 4.4541848472828244e-05,
"loss": 1.3925,
"step": 2700
},
{
"epoch": 17.82,
"learning_rate": 4.448234827449425e-05,
"loss": 1.2958,
"step": 2710
},
{
"epoch": 17.89,
"learning_rate": 4.442284807616026e-05,
"loss": 1.3094,
"step": 2720
},
{
"epoch": 17.96,
"learning_rate": 4.4363347877826265e-05,
"loss": 1.3485,
"step": 2730
},
{
"epoch": 18.03,
"learning_rate": 4.4303847679492266e-05,
"loss": 1.4891,
"step": 2740
},
{
"epoch": 18.09,
"learning_rate": 4.424434748115827e-05,
"loss": 1.3315,
"step": 2750
},
{
"epoch": 18.16,
"learning_rate": 4.418484728282428e-05,
"loss": 1.3337,
"step": 2760
},
{
"epoch": 18.22,
"learning_rate": 4.412534708449028e-05,
"loss": 1.3189,
"step": 2770
},
{
"epoch": 18.29,
"learning_rate": 4.406584688615629e-05,
"loss": 1.3324,
"step": 2780
},
{
"epoch": 18.35,
"learning_rate": 4.4006346687822295e-05,
"loss": 1.2899,
"step": 2790
},
{
"epoch": 18.42,
"learning_rate": 4.39468464894883e-05,
"loss": 1.3146,
"step": 2800
},
{
"epoch": 18.48,
"learning_rate": 4.38873462911543e-05,
"loss": 1.3373,
"step": 2810
},
{
"epoch": 18.55,
"learning_rate": 4.382784609282031e-05,
"loss": 1.3389,
"step": 2820
},
{
"epoch": 18.62,
"learning_rate": 4.376834589448632e-05,
"loss": 1.3078,
"step": 2830
},
{
"epoch": 18.68,
"learning_rate": 4.3708845696152324e-05,
"loss": 1.3183,
"step": 2840
},
{
"epoch": 18.75,
"learning_rate": 4.3649345497818324e-05,
"loss": 1.3632,
"step": 2850
},
{
"epoch": 18.81,
"learning_rate": 4.358984529948433e-05,
"loss": 1.2758,
"step": 2860
},
{
"epoch": 18.88,
"learning_rate": 4.353034510115034e-05,
"loss": 1.302,
"step": 2870
},
{
"epoch": 18.94,
"learning_rate": 4.3470844902816346e-05,
"loss": 1.3001,
"step": 2880
},
{
"epoch": 19.01,
"learning_rate": 4.3411344704482346e-05,
"loss": 1.4967,
"step": 2890
},
{
"epoch": 19.08,
"learning_rate": 4.3351844506148354e-05,
"loss": 1.2879,
"step": 2900
},
{
"epoch": 19.14,
"learning_rate": 4.329234430781436e-05,
"loss": 1.3083,
"step": 2910
},
{
"epoch": 19.21,
"learning_rate": 4.323284410948036e-05,
"loss": 1.2781,
"step": 2920
},
{
"epoch": 19.27,
"learning_rate": 4.317334391114637e-05,
"loss": 1.3786,
"step": 2930
},
{
"epoch": 19.34,
"learning_rate": 4.3113843712812376e-05,
"loss": 1.2858,
"step": 2940
},
{
"epoch": 19.41,
"learning_rate": 4.305434351447838e-05,
"loss": 1.3195,
"step": 2950
},
{
"epoch": 19.47,
"learning_rate": 4.299484331614438e-05,
"loss": 1.3097,
"step": 2960
},
{
"epoch": 19.54,
"learning_rate": 4.293534311781039e-05,
"loss": 1.3194,
"step": 2970
},
{
"epoch": 19.6,
"learning_rate": 4.28758429194764e-05,
"loss": 1.2795,
"step": 2980
},
{
"epoch": 19.67,
"learning_rate": 4.2816342721142405e-05,
"loss": 1.3051,
"step": 2990
},
{
"epoch": 19.73,
"learning_rate": 4.2756842522808405e-05,
"loss": 1.33,
"step": 3000
},
{
"epoch": 19.73,
"eval_loss": 0.49618762731552124,
"eval_runtime": 314.2943,
"eval_samples_per_second": 22.622,
"eval_steps_per_second": 1.416,
"eval_wer": 0.5245330494532082,
"step": 3000
},
{
"epoch": 19.8,
"learning_rate": 4.269734232447441e-05,
"loss": 1.3265,
"step": 3010
},
{
"epoch": 19.86,
"learning_rate": 4.263784212614042e-05,
"loss": 1.3148,
"step": 3020
},
{
"epoch": 19.93,
"learning_rate": 4.257834192780643e-05,
"loss": 1.3019,
"step": 3030
},
{
"epoch": 20.0,
"learning_rate": 4.2518841729472434e-05,
"loss": 1.3688,
"step": 3040
},
{
"epoch": 20.07,
"learning_rate": 4.245934153113844e-05,
"loss": 1.357,
"step": 3050
},
{
"epoch": 20.13,
"learning_rate": 4.239984133280445e-05,
"loss": 1.2812,
"step": 3060
},
{
"epoch": 20.2,
"learning_rate": 4.234034113447045e-05,
"loss": 1.2739,
"step": 3070
},
{
"epoch": 20.26,
"learning_rate": 4.2280840936136456e-05,
"loss": 1.3482,
"step": 3080
},
{
"epoch": 20.33,
"learning_rate": 4.222134073780246e-05,
"loss": 1.2779,
"step": 3090
},
{
"epoch": 20.39,
"learning_rate": 4.216184053946847e-05,
"loss": 1.3029,
"step": 3100
},
{
"epoch": 20.46,
"learning_rate": 4.210234034113447e-05,
"loss": 1.3525,
"step": 3110
},
{
"epoch": 20.52,
"learning_rate": 4.204284014280048e-05,
"loss": 1.3344,
"step": 3120
},
{
"epoch": 20.59,
"learning_rate": 4.1983339944466485e-05,
"loss": 1.2693,
"step": 3130
},
{
"epoch": 20.65,
"learning_rate": 4.192383974613249e-05,
"loss": 1.2751,
"step": 3140
},
{
"epoch": 20.72,
"learning_rate": 4.186433954779849e-05,
"loss": 1.2668,
"step": 3150
},
{
"epoch": 20.79,
"learning_rate": 4.18048393494645e-05,
"loss": 1.3379,
"step": 3160
},
{
"epoch": 20.85,
"learning_rate": 4.174533915113051e-05,
"loss": 1.2763,
"step": 3170
},
{
"epoch": 20.92,
"learning_rate": 4.1685838952796514e-05,
"loss": 1.2917,
"step": 3180
},
{
"epoch": 20.98,
"learning_rate": 4.1626338754462514e-05,
"loss": 1.3203,
"step": 3190
},
{
"epoch": 21.05,
"learning_rate": 4.156683855612852e-05,
"loss": 1.3707,
"step": 3200
},
{
"epoch": 21.12,
"learning_rate": 4.150733835779453e-05,
"loss": 1.265,
"step": 3210
},
{
"epoch": 21.18,
"learning_rate": 4.144783815946053e-05,
"loss": 1.268,
"step": 3220
},
{
"epoch": 21.25,
"learning_rate": 4.1388337961126536e-05,
"loss": 1.3269,
"step": 3230
},
{
"epoch": 21.31,
"learning_rate": 4.1328837762792543e-05,
"loss": 1.2943,
"step": 3240
},
{
"epoch": 21.38,
"learning_rate": 4.126933756445855e-05,
"loss": 1.269,
"step": 3250
},
{
"epoch": 21.45,
"learning_rate": 4.120983736612455e-05,
"loss": 1.2659,
"step": 3260
},
{
"epoch": 21.51,
"learning_rate": 4.115033716779056e-05,
"loss": 1.3543,
"step": 3270
},
{
"epoch": 21.58,
"learning_rate": 4.1090836969456565e-05,
"loss": 1.2307,
"step": 3280
},
{
"epoch": 21.64,
"learning_rate": 4.103133677112257e-05,
"loss": 1.2711,
"step": 3290
},
{
"epoch": 21.71,
"learning_rate": 4.097183657278857e-05,
"loss": 1.2897,
"step": 3300
},
{
"epoch": 21.77,
"learning_rate": 4.091233637445458e-05,
"loss": 1.2884,
"step": 3310
},
{
"epoch": 21.84,
"learning_rate": 4.085283617612059e-05,
"loss": 1.2466,
"step": 3320
},
{
"epoch": 21.9,
"learning_rate": 4.0793335977786595e-05,
"loss": 1.2744,
"step": 3330
},
{
"epoch": 21.97,
"learning_rate": 4.0733835779452595e-05,
"loss": 1.2278,
"step": 3340
},
{
"epoch": 22.04,
"learning_rate": 4.06743355811186e-05,
"loss": 1.416,
"step": 3350
},
{
"epoch": 22.1,
"learning_rate": 4.061483538278461e-05,
"loss": 1.2256,
"step": 3360
},
{
"epoch": 22.17,
"learning_rate": 4.055533518445061e-05,
"loss": 1.2733,
"step": 3370
},
{
"epoch": 22.24,
"learning_rate": 4.049583498611662e-05,
"loss": 1.3062,
"step": 3380
},
{
"epoch": 22.3,
"learning_rate": 4.0436334787782624e-05,
"loss": 1.2772,
"step": 3390
},
{
"epoch": 22.37,
"learning_rate": 4.037683458944863e-05,
"loss": 1.2667,
"step": 3400
},
{
"epoch": 22.43,
"learning_rate": 4.031733439111464e-05,
"loss": 1.2501,
"step": 3410
},
{
"epoch": 22.5,
"learning_rate": 4.0257834192780646e-05,
"loss": 1.3233,
"step": 3420
},
{
"epoch": 22.56,
"learning_rate": 4.019833399444665e-05,
"loss": 1.2382,
"step": 3430
},
{
"epoch": 22.63,
"learning_rate": 4.013883379611266e-05,
"loss": 1.2485,
"step": 3440
},
{
"epoch": 22.69,
"learning_rate": 4.007933359777866e-05,
"loss": 1.2442,
"step": 3450
},
{
"epoch": 22.76,
"learning_rate": 4.001983339944467e-05,
"loss": 1.3278,
"step": 3460
},
{
"epoch": 22.82,
"learning_rate": 3.9960333201110675e-05,
"loss": 1.2325,
"step": 3470
},
{
"epoch": 22.89,
"learning_rate": 3.990083300277668e-05,
"loss": 1.276,
"step": 3480
},
{
"epoch": 22.96,
"learning_rate": 3.984133280444268e-05,
"loss": 1.27,
"step": 3490
},
{
"epoch": 23.03,
"learning_rate": 3.978183260610869e-05,
"loss": 1.4378,
"step": 3500
},
{
"epoch": 23.03,
"eval_loss": 0.46987584233283997,
"eval_runtime": 315.8211,
"eval_samples_per_second": 22.513,
"eval_steps_per_second": 1.409,
"eval_wer": 0.5097841865866641,
"step": 3500
},
{
"epoch": 23.09,
"learning_rate": 3.97223324077747e-05,
"loss": 1.2222,
"step": 3510
},
{
"epoch": 23.16,
"learning_rate": 3.96628322094407e-05,
"loss": 1.2432,
"step": 3520
},
{
"epoch": 23.22,
"learning_rate": 3.9603332011106704e-05,
"loss": 1.236,
"step": 3530
},
{
"epoch": 23.29,
"learning_rate": 3.954383181277271e-05,
"loss": 1.2867,
"step": 3540
},
{
"epoch": 23.35,
"learning_rate": 3.948433161443872e-05,
"loss": 1.2229,
"step": 3550
},
{
"epoch": 23.42,
"learning_rate": 3.942483141610472e-05,
"loss": 1.2702,
"step": 3560
},
{
"epoch": 23.48,
"learning_rate": 3.9365331217770726e-05,
"loss": 1.273,
"step": 3570
},
{
"epoch": 23.55,
"learning_rate": 3.930583101943673e-05,
"loss": 1.278,
"step": 3580
},
{
"epoch": 23.62,
"learning_rate": 3.924633082110274e-05,
"loss": 1.2205,
"step": 3590
},
{
"epoch": 23.68,
"learning_rate": 3.918683062276874e-05,
"loss": 1.267,
"step": 3600
},
{
"epoch": 23.75,
"learning_rate": 3.912733042443475e-05,
"loss": 1.2885,
"step": 3610
},
{
"epoch": 23.81,
"learning_rate": 3.9067830226100755e-05,
"loss": 1.2384,
"step": 3620
},
{
"epoch": 23.88,
"learning_rate": 3.900833002776676e-05,
"loss": 1.2373,
"step": 3630
},
{
"epoch": 23.94,
"learning_rate": 3.894882982943276e-05,
"loss": 1.2617,
"step": 3640
},
{
"epoch": 24.01,
"learning_rate": 3.888932963109877e-05,
"loss": 1.412,
"step": 3650
},
{
"epoch": 24.08,
"learning_rate": 3.882982943276478e-05,
"loss": 1.1934,
"step": 3660
},
{
"epoch": 24.14,
"learning_rate": 3.877032923443078e-05,
"loss": 1.2418,
"step": 3670
},
{
"epoch": 24.21,
"learning_rate": 3.8710829036096785e-05,
"loss": 1.2606,
"step": 3680
},
{
"epoch": 24.27,
"learning_rate": 3.865132883776279e-05,
"loss": 1.3288,
"step": 3690
},
{
"epoch": 24.34,
"learning_rate": 3.85918286394288e-05,
"loss": 1.2383,
"step": 3700
},
{
"epoch": 24.41,
"learning_rate": 3.85323284410948e-05,
"loss": 1.2176,
"step": 3710
},
{
"epoch": 24.47,
"learning_rate": 3.847282824276081e-05,
"loss": 1.2255,
"step": 3720
},
{
"epoch": 24.54,
"learning_rate": 3.8413328044426814e-05,
"loss": 1.308,
"step": 3730
},
{
"epoch": 24.6,
"learning_rate": 3.835382784609282e-05,
"loss": 1.2243,
"step": 3740
},
{
"epoch": 24.67,
"learning_rate": 3.829432764775882e-05,
"loss": 1.2591,
"step": 3750
},
{
"epoch": 24.73,
"learning_rate": 3.823482744942483e-05,
"loss": 1.2523,
"step": 3760
},
{
"epoch": 24.8,
"learning_rate": 3.8175327251090836e-05,
"loss": 1.2499,
"step": 3770
},
{
"epoch": 24.86,
"learning_rate": 3.811582705275684e-05,
"loss": 1.2022,
"step": 3780
},
{
"epoch": 24.93,
"learning_rate": 3.805632685442285e-05,
"loss": 1.254,
"step": 3790
},
{
"epoch": 25.0,
"learning_rate": 3.799682665608886e-05,
"loss": 1.3321,
"step": 3800
},
{
"epoch": 25.07,
"learning_rate": 3.7937326457754865e-05,
"loss": 1.3216,
"step": 3810
},
{
"epoch": 25.13,
"learning_rate": 3.7877826259420865e-05,
"loss": 1.2439,
"step": 3820
},
{
"epoch": 25.2,
"learning_rate": 3.781832606108687e-05,
"loss": 1.2297,
"step": 3830
},
{
"epoch": 25.26,
"learning_rate": 3.775882586275288e-05,
"loss": 1.2818,
"step": 3840
},
{
"epoch": 25.33,
"learning_rate": 3.7699325664418887e-05,
"loss": 1.2019,
"step": 3850
},
{
"epoch": 25.39,
"learning_rate": 3.763982546608489e-05,
"loss": 1.2258,
"step": 3860
},
{
"epoch": 25.46,
"learning_rate": 3.7580325267750894e-05,
"loss": 1.2137,
"step": 3870
},
{
"epoch": 25.52,
"learning_rate": 3.75208250694169e-05,
"loss": 1.2879,
"step": 3880
},
{
"epoch": 25.59,
"learning_rate": 3.746132487108291e-05,
"loss": 1.199,
"step": 3890
},
{
"epoch": 25.65,
"learning_rate": 3.740182467274891e-05,
"loss": 1.2393,
"step": 3900
},
{
"epoch": 25.72,
"learning_rate": 3.7342324474414916e-05,
"loss": 1.2505,
"step": 3910
},
{
"epoch": 25.79,
"learning_rate": 3.728282427608092e-05,
"loss": 1.2483,
"step": 3920
},
{
"epoch": 25.85,
"learning_rate": 3.722332407774693e-05,
"loss": 1.1909,
"step": 3930
},
{
"epoch": 25.92,
"learning_rate": 3.716382387941293e-05,
"loss": 1.2198,
"step": 3940
},
{
"epoch": 25.98,
"learning_rate": 3.710432368107894e-05,
"loss": 1.2687,
"step": 3950
},
{
"epoch": 26.05,
"learning_rate": 3.7044823482744945e-05,
"loss": 1.3287,
"step": 3960
},
{
"epoch": 26.12,
"learning_rate": 3.6985323284410946e-05,
"loss": 1.206,
"step": 3970
},
{
"epoch": 26.18,
"learning_rate": 3.692582308607695e-05,
"loss": 1.229,
"step": 3980
},
{
"epoch": 26.25,
"learning_rate": 3.686632288774296e-05,
"loss": 1.2915,
"step": 3990
},
{
"epoch": 26.31,
"learning_rate": 3.680682268940897e-05,
"loss": 1.1894,
"step": 4000
},
{
"epoch": 26.31,
"eval_loss": 0.4526805579662323,
"eval_runtime": 315.4313,
"eval_samples_per_second": 22.541,
"eval_steps_per_second": 1.411,
"eval_wer": 0.48479628375108874,
"step": 4000
},
{
"epoch": 26.38,
"learning_rate": 4.739083673908368e-05,
"loss": 1.1884,
"step": 4010
},
{
"epoch": 26.45,
"learning_rate": 4.7358571735857174e-05,
"loss": 1.2414,
"step": 4020
},
{
"epoch": 26.51,
"learning_rate": 4.7326306732630676e-05,
"loss": 1.2956,
"step": 4030
},
{
"epoch": 26.58,
"learning_rate": 4.729404172940417e-05,
"loss": 1.2162,
"step": 4040
},
{
"epoch": 26.64,
"learning_rate": 4.726177672617767e-05,
"loss": 1.2153,
"step": 4050
},
{
"epoch": 26.71,
"learning_rate": 4.7229511722951175e-05,
"loss": 1.2511,
"step": 4060
},
{
"epoch": 26.77,
"learning_rate": 4.7197246719724676e-05,
"loss": 1.2947,
"step": 4070
},
{
"epoch": 26.84,
"learning_rate": 4.716498171649817e-05,
"loss": 1.1932,
"step": 4080
},
{
"epoch": 26.9,
"learning_rate": 4.713271671327167e-05,
"loss": 1.2163,
"step": 4090
},
{
"epoch": 26.97,
"learning_rate": 4.710045171004517e-05,
"loss": 1.2215,
"step": 4100
},
{
"epoch": 27.04,
"learning_rate": 4.706818670681867e-05,
"loss": 1.3785,
"step": 4110
},
{
"epoch": 27.1,
"learning_rate": 4.703592170359217e-05,
"loss": 1.2023,
"step": 4120
},
{
"epoch": 27.17,
"learning_rate": 4.7003656700365674e-05,
"loss": 1.2092,
"step": 4130
},
{
"epoch": 27.24,
"learning_rate": 4.697139169713917e-05,
"loss": 1.2561,
"step": 4140
},
{
"epoch": 27.3,
"learning_rate": 4.693912669391267e-05,
"loss": 1.231,
"step": 4150
},
{
"epoch": 27.37,
"learning_rate": 4.690686169068617e-05,
"loss": 1.2007,
"step": 4160
},
{
"epoch": 27.43,
"learning_rate": 4.687459668745967e-05,
"loss": 1.2063,
"step": 4170
},
{
"epoch": 27.5,
"learning_rate": 4.684233168423317e-05,
"loss": 1.2924,
"step": 4180
},
{
"epoch": 27.56,
"learning_rate": 4.681006668100667e-05,
"loss": 1.1982,
"step": 4190
},
{
"epoch": 27.63,
"learning_rate": 4.677780167778017e-05,
"loss": 1.1973,
"step": 4200
},
{
"epoch": 27.69,
"learning_rate": 4.674553667455367e-05,
"loss": 1.2303,
"step": 4210
},
{
"epoch": 27.76,
"learning_rate": 4.671327167132717e-05,
"loss": 1.2865,
"step": 4220
},
{
"epoch": 27.82,
"learning_rate": 4.6681006668100665e-05,
"loss": 1.2205,
"step": 4230
},
{
"epoch": 27.89,
"learning_rate": 4.6648741664874167e-05,
"loss": 1.2056,
"step": 4240
},
{
"epoch": 27.96,
"learning_rate": 4.661647666164767e-05,
"loss": 1.1766,
"step": 4250
},
{
"epoch": 28.03,
"learning_rate": 4.658421165842117e-05,
"loss": 1.376,
"step": 4260
},
{
"epoch": 28.09,
"learning_rate": 4.6551946655194665e-05,
"loss": 1.1793,
"step": 4270
},
{
"epoch": 28.16,
"learning_rate": 4.651968165196817e-05,
"loss": 1.2529,
"step": 4280
},
{
"epoch": 28.22,
"learning_rate": 4.648741664874166e-05,
"loss": 1.2434,
"step": 4290
},
{
"epoch": 28.29,
"learning_rate": 4.6455151645515164e-05,
"loss": 1.2635,
"step": 4300
},
{
"epoch": 28.35,
"learning_rate": 4.6422886642288666e-05,
"loss": 1.1739,
"step": 4310
},
{
"epoch": 28.42,
"learning_rate": 4.639062163906217e-05,
"loss": 1.2323,
"step": 4320
},
{
"epoch": 28.48,
"learning_rate": 4.635835663583567e-05,
"loss": 1.2578,
"step": 4330
},
{
"epoch": 28.55,
"learning_rate": 4.6326091632609164e-05,
"loss": 1.2275,
"step": 4340
},
{
"epoch": 28.62,
"learning_rate": 4.6293826629382666e-05,
"loss": 1.2,
"step": 4350
},
{
"epoch": 28.68,
"learning_rate": 4.626156162615616e-05,
"loss": 1.2251,
"step": 4360
},
{
"epoch": 28.75,
"learning_rate": 4.622929662292966e-05,
"loss": 1.2697,
"step": 4370
},
{
"epoch": 28.81,
"learning_rate": 4.6197031619703165e-05,
"loss": 1.1808,
"step": 4380
},
{
"epoch": 28.88,
"learning_rate": 4.616476661647667e-05,
"loss": 1.1927,
"step": 4390
},
{
"epoch": 28.94,
"learning_rate": 4.613250161325016e-05,
"loss": 1.206,
"step": 4400
},
{
"epoch": 29.01,
"learning_rate": 4.6100236610023664e-05,
"loss": 1.366,
"step": 4410
},
{
"epoch": 29.08,
"learning_rate": 4.606797160679716e-05,
"loss": 1.1786,
"step": 4420
},
{
"epoch": 29.14,
"learning_rate": 4.603570660357066e-05,
"loss": 1.2063,
"step": 4430
},
{
"epoch": 29.21,
"learning_rate": 4.600344160034416e-05,
"loss": 1.1814,
"step": 4440
},
{
"epoch": 29.27,
"learning_rate": 4.5971176597117664e-05,
"loss": 1.2752,
"step": 4450
},
{
"epoch": 29.34,
"learning_rate": 4.593891159389116e-05,
"loss": 1.1697,
"step": 4460
},
{
"epoch": 29.41,
"learning_rate": 4.590664659066466e-05,
"loss": 1.2066,
"step": 4470
},
{
"epoch": 29.47,
"learning_rate": 4.5874381587438156e-05,
"loss": 1.2298,
"step": 4480
},
{
"epoch": 29.54,
"learning_rate": 4.584211658421166e-05,
"loss": 1.2676,
"step": 4490
},
{
"epoch": 29.6,
"learning_rate": 4.580985158098516e-05,
"loss": 1.1844,
"step": 4500
},
{
"epoch": 29.6,
"eval_loss": 0.43086153268814087,
"eval_runtime": 323.4912,
"eval_samples_per_second": 21.979,
"eval_steps_per_second": 1.376,
"eval_wer": 0.46511177779928387,
"step": 4500
},
{
"epoch": 29.67,
"learning_rate": 4.577758657775866e-05,
"loss": 1.2059,
"step": 4510
},
{
"epoch": 29.73,
"learning_rate": 4.574532157453216e-05,
"loss": 1.2293,
"step": 4520
},
{
"epoch": 29.8,
"learning_rate": 4.571305657130566e-05,
"loss": 1.2226,
"step": 4530
},
{
"epoch": 29.86,
"learning_rate": 4.568079156807915e-05,
"loss": 1.159,
"step": 4540
},
{
"epoch": 29.93,
"learning_rate": 4.5648526564852655e-05,
"loss": 1.212,
"step": 4550
},
{
"epoch": 30.0,
"learning_rate": 4.561626156162616e-05,
"loss": 1.2842,
"step": 4560
},
{
"epoch": 30.07,
"learning_rate": 4.558399655839966e-05,
"loss": 1.218,
"step": 4570
},
{
"epoch": 30.13,
"learning_rate": 4.555173155517316e-05,
"loss": 1.1917,
"step": 4580
},
{
"epoch": 30.2,
"learning_rate": 4.5519466551946655e-05,
"loss": 1.1997,
"step": 4590
},
{
"epoch": 30.26,
"learning_rate": 4.548720154872016e-05,
"loss": 1.2586,
"step": 4600
},
{
"epoch": 30.33,
"learning_rate": 4.545493654549365e-05,
"loss": 1.1813,
"step": 4610
},
{
"epoch": 30.39,
"learning_rate": 4.5422671542267154e-05,
"loss": 1.1828,
"step": 4620
},
{
"epoch": 30.46,
"learning_rate": 4.5390406539040656e-05,
"loss": 1.1856,
"step": 4630
},
{
"epoch": 30.52,
"learning_rate": 4.535814153581416e-05,
"loss": 1.2412,
"step": 4640
},
{
"epoch": 30.59,
"learning_rate": 4.532587653258765e-05,
"loss": 1.1547,
"step": 4650
},
{
"epoch": 30.65,
"learning_rate": 4.5293611529361155e-05,
"loss": 1.179,
"step": 4660
},
{
"epoch": 30.72,
"learning_rate": 4.526134652613465e-05,
"loss": 1.1997,
"step": 4670
},
{
"epoch": 30.79,
"learning_rate": 4.522908152290815e-05,
"loss": 1.2361,
"step": 4680
},
{
"epoch": 30.85,
"learning_rate": 4.519681651968165e-05,
"loss": 1.1425,
"step": 4690
},
{
"epoch": 30.92,
"learning_rate": 4.5164551516455155e-05,
"loss": 1.1794,
"step": 4700
},
{
"epoch": 30.98,
"learning_rate": 4.513228651322866e-05,
"loss": 1.2061,
"step": 4710
},
{
"epoch": 31.05,
"learning_rate": 4.510002151000215e-05,
"loss": 1.2714,
"step": 4720
},
{
"epoch": 31.12,
"learning_rate": 4.506775650677565e-05,
"loss": 1.1805,
"step": 4730
},
{
"epoch": 31.18,
"learning_rate": 4.503549150354915e-05,
"loss": 1.1843,
"step": 4740
},
{
"epoch": 31.25,
"learning_rate": 4.5006453000645305e-05,
"loss": 1.2111,
"step": 4750
},
{
"epoch": 31.31,
"learning_rate": 4.49741879974188e-05,
"loss": 1.1613,
"step": 4760
},
{
"epoch": 31.38,
"learning_rate": 4.49419229941923e-05,
"loss": 1.1599,
"step": 4770
},
{
"epoch": 31.45,
"learning_rate": 4.49096579909658e-05,
"loss": 1.1621,
"step": 4780
},
{
"epoch": 31.51,
"learning_rate": 4.48773929877393e-05,
"loss": 1.2187,
"step": 4790
},
{
"epoch": 31.58,
"learning_rate": 4.48451279845128e-05,
"loss": 1.1546,
"step": 4800
},
{
"epoch": 31.64,
"learning_rate": 4.48128629812863e-05,
"loss": 1.2005,
"step": 4810
},
{
"epoch": 31.71,
"learning_rate": 4.47805979780598e-05,
"loss": 1.1889,
"step": 4820
},
{
"epoch": 31.77,
"learning_rate": 4.47483329748333e-05,
"loss": 1.2627,
"step": 4830
},
{
"epoch": 31.84,
"learning_rate": 4.4716067971606794e-05,
"loss": 1.1345,
"step": 4840
},
{
"epoch": 31.9,
"learning_rate": 4.4683802968380296e-05,
"loss": 1.1812,
"step": 4850
},
{
"epoch": 31.97,
"learning_rate": 4.46515379651538e-05,
"loss": 1.2073,
"step": 4860
},
{
"epoch": 32.04,
"learning_rate": 4.46192729619273e-05,
"loss": 1.3344,
"step": 4870
},
{
"epoch": 32.1,
"learning_rate": 4.45870079587008e-05,
"loss": 1.1497,
"step": 4880
},
{
"epoch": 32.17,
"learning_rate": 4.45547429554743e-05,
"loss": 1.1874,
"step": 4890
},
{
"epoch": 32.24,
"learning_rate": 4.452247795224779e-05,
"loss": 1.1958,
"step": 4900
},
{
"epoch": 32.3,
"learning_rate": 4.4490212949021294e-05,
"loss": 1.2074,
"step": 4910
},
{
"epoch": 32.37,
"learning_rate": 4.4457947945794795e-05,
"loss": 1.1549,
"step": 4920
},
{
"epoch": 32.43,
"learning_rate": 4.44256829425683e-05,
"loss": 1.1614,
"step": 4930
},
{
"epoch": 32.5,
"learning_rate": 4.43934179393418e-05,
"loss": 1.2384,
"step": 4940
},
{
"epoch": 32.56,
"learning_rate": 4.4361152936115294e-05,
"loss": 1.1331,
"step": 4950
},
{
"epoch": 32.63,
"learning_rate": 4.4328887932888796e-05,
"loss": 1.153,
"step": 4960
},
{
"epoch": 32.69,
"learning_rate": 4.429662292966229e-05,
"loss": 1.1782,
"step": 4970
},
{
"epoch": 32.76,
"learning_rate": 4.426435792643579e-05,
"loss": 1.2534,
"step": 4980
},
{
"epoch": 32.82,
"learning_rate": 4.4232092923209294e-05,
"loss": 1.1444,
"step": 4990
},
{
"epoch": 32.89,
"learning_rate": 4.4199827919982796e-05,
"loss": 1.1795,
"step": 5000
},
{
"epoch": 32.89,
"eval_loss": 0.41311895847320557,
"eval_runtime": 319.5742,
"eval_samples_per_second": 22.248,
"eval_steps_per_second": 1.392,
"eval_wer": 0.45235652762992357,
"step": 5000
},
{
"epoch": 32.96,
"learning_rate": 4.416756291675629e-05,
"loss": 1.1466,
"step": 5010
},
{
"epoch": 33.03,
"learning_rate": 4.413529791352979e-05,
"loss": 1.3266,
"step": 5020
},
{
"epoch": 33.09,
"learning_rate": 4.410303291030329e-05,
"loss": 1.1433,
"step": 5030
},
{
"epoch": 33.16,
"learning_rate": 4.407076790707679e-05,
"loss": 1.1581,
"step": 5040
},
{
"epoch": 33.22,
"learning_rate": 4.403850290385029e-05,
"loss": 1.1718,
"step": 5050
},
{
"epoch": 33.29,
"learning_rate": 4.4006237900623794e-05,
"loss": 1.1991,
"step": 5060
},
{
"epoch": 33.35,
"learning_rate": 4.3973972897397295e-05,
"loss": 1.1464,
"step": 5070
},
{
"epoch": 33.42,
"learning_rate": 4.394170789417079e-05,
"loss": 1.1498,
"step": 5080
},
{
"epoch": 33.48,
"learning_rate": 4.3909442890944285e-05,
"loss": 1.1941,
"step": 5090
},
{
"epoch": 33.55,
"learning_rate": 4.387717788771779e-05,
"loss": 1.2034,
"step": 5100
},
{
"epoch": 33.62,
"learning_rate": 4.384491288449129e-05,
"loss": 1.1222,
"step": 5110
},
{
"epoch": 33.68,
"learning_rate": 4.381264788126479e-05,
"loss": 1.2138,
"step": 5120
},
{
"epoch": 33.75,
"learning_rate": 4.378038287803829e-05,
"loss": 1.2057,
"step": 5130
},
{
"epoch": 33.81,
"learning_rate": 4.374811787481179e-05,
"loss": 1.1263,
"step": 5140
},
{
"epoch": 33.88,
"learning_rate": 4.371585287158529e-05,
"loss": 1.1765,
"step": 5150
},
{
"epoch": 33.94,
"learning_rate": 4.3683587868358785e-05,
"loss": 1.1571,
"step": 5160
},
{
"epoch": 34.01,
"learning_rate": 4.3651322865132286e-05,
"loss": 1.3424,
"step": 5170
},
{
"epoch": 34.08,
"learning_rate": 4.361905786190579e-05,
"loss": 1.1235,
"step": 5180
},
{
"epoch": 34.14,
"learning_rate": 4.358679285867929e-05,
"loss": 1.1396,
"step": 5190
},
{
"epoch": 34.21,
"learning_rate": 4.3554527855452785e-05,
"loss": 1.1382,
"step": 5200
},
{
"epoch": 34.27,
"learning_rate": 4.352226285222629e-05,
"loss": 1.2624,
"step": 5210
},
{
"epoch": 34.34,
"learning_rate": 4.348999784899978e-05,
"loss": 1.1128,
"step": 5220
},
{
"epoch": 34.41,
"learning_rate": 4.3457732845773284e-05,
"loss": 1.153,
"step": 5230
},
{
"epoch": 34.47,
"learning_rate": 4.3425467842546786e-05,
"loss": 1.157,
"step": 5240
},
{
"epoch": 34.54,
"learning_rate": 4.339320283932029e-05,
"loss": 1.2042,
"step": 5250
},
{
"epoch": 34.6,
"learning_rate": 4.336093783609378e-05,
"loss": 1.125,
"step": 5260
},
{
"epoch": 34.67,
"learning_rate": 4.3328672832867284e-05,
"loss": 1.1504,
"step": 5270
},
{
"epoch": 34.73,
"learning_rate": 4.3296407829640786e-05,
"loss": 1.2242,
"step": 5280
},
{
"epoch": 34.8,
"learning_rate": 4.326414282641428e-05,
"loss": 1.1771,
"step": 5290
},
{
"epoch": 34.86,
"learning_rate": 4.323187782318778e-05,
"loss": 1.1526,
"step": 5300
},
{
"epoch": 34.93,
"learning_rate": 4.3199612819961285e-05,
"loss": 1.1573,
"step": 5310
},
{
"epoch": 35.0,
"learning_rate": 4.3167347816734787e-05,
"loss": 1.1895,
"step": 5320
},
{
"epoch": 35.07,
"learning_rate": 4.313508281350828e-05,
"loss": 1.2058,
"step": 5330
},
{
"epoch": 35.13,
"learning_rate": 4.310281781028178e-05,
"loss": 1.1529,
"step": 5340
},
{
"epoch": 35.2,
"learning_rate": 4.307055280705528e-05,
"loss": 1.152,
"step": 5350
},
{
"epoch": 35.26,
"learning_rate": 4.303828780382878e-05,
"loss": 1.1925,
"step": 5360
},
{
"epoch": 35.33,
"learning_rate": 4.300602280060228e-05,
"loss": 1.118,
"step": 5370
},
{
"epoch": 35.39,
"learning_rate": 4.2973757797375784e-05,
"loss": 1.1376,
"step": 5380
},
{
"epoch": 35.46,
"learning_rate": 4.294149279414928e-05,
"loss": 1.1493,
"step": 5390
},
{
"epoch": 35.52,
"learning_rate": 4.290922779092278e-05,
"loss": 1.1937,
"step": 5400
},
{
"epoch": 35.59,
"learning_rate": 4.2876962787696276e-05,
"loss": 1.1275,
"step": 5410
},
{
"epoch": 35.65,
"learning_rate": 4.284469778446978e-05,
"loss": 1.133,
"step": 5420
},
{
"epoch": 35.72,
"learning_rate": 4.281243278124328e-05,
"loss": 1.1574,
"step": 5430
},
{
"epoch": 35.79,
"learning_rate": 4.278016777801678e-05,
"loss": 1.212,
"step": 5440
},
{
"epoch": 35.85,
"learning_rate": 4.2747902774790276e-05,
"loss": 1.0953,
"step": 5450
},
{
"epoch": 35.92,
"learning_rate": 4.271563777156378e-05,
"loss": 1.1724,
"step": 5460
},
{
"epoch": 35.98,
"learning_rate": 4.268337276833728e-05,
"loss": 1.2039,
"step": 5470
},
{
"epoch": 36.05,
"learning_rate": 4.2651107765110775e-05,
"loss": 1.273,
"step": 5480
},
{
"epoch": 36.12,
"learning_rate": 4.261884276188428e-05,
"loss": 1.1196,
"step": 5490
},
{
"epoch": 36.18,
"learning_rate": 4.258657775865778e-05,
"loss": 1.1471,
"step": 5500
},
{
"epoch": 36.18,
"eval_loss": 0.4052400290966034,
"eval_runtime": 317.7832,
"eval_samples_per_second": 22.374,
"eval_steps_per_second": 1.4,
"eval_wer": 0.44349172553953353,
"step": 5500
},
{
"epoch": 36.25,
"learning_rate": 4.255431275543128e-05,
"loss": 1.1871,
"step": 5510
},
{
"epoch": 36.31,
"learning_rate": 4.2522047752204775e-05,
"loss": 1.1086,
"step": 5520
},
{
"epoch": 36.38,
"learning_rate": 4.248978274897828e-05,
"loss": 1.1285,
"step": 5530
},
{
"epoch": 36.45,
"learning_rate": 4.245751774575177e-05,
"loss": 1.1219,
"step": 5540
},
{
"epoch": 36.51,
"learning_rate": 4.2425252742525274e-05,
"loss": 1.2096,
"step": 5550
},
{
"epoch": 36.58,
"learning_rate": 4.2392987739298776e-05,
"loss": 1.0957,
"step": 5560
},
{
"epoch": 36.64,
"learning_rate": 4.236072273607228e-05,
"loss": 1.1271,
"step": 5570
},
{
"epoch": 36.71,
"learning_rate": 4.232845773284577e-05,
"loss": 1.1579,
"step": 5580
},
{
"epoch": 36.77,
"learning_rate": 4.2296192729619274e-05,
"loss": 1.2091,
"step": 5590
},
{
"epoch": 36.84,
"learning_rate": 4.2263927726392776e-05,
"loss": 1.1073,
"step": 5600
},
{
"epoch": 36.9,
"learning_rate": 4.223166272316627e-05,
"loss": 1.1487,
"step": 5610
},
{
"epoch": 36.97,
"learning_rate": 4.219939771993977e-05,
"loss": 1.1298,
"step": 5620
},
{
"epoch": 37.04,
"learning_rate": 4.2167132716713275e-05,
"loss": 1.2871,
"step": 5630
},
{
"epoch": 37.1,
"learning_rate": 4.213486771348677e-05,
"loss": 1.0937,
"step": 5640
},
{
"epoch": 37.17,
"learning_rate": 4.210260271026027e-05,
"loss": 1.1284,
"step": 5650
},
{
"epoch": 37.24,
"learning_rate": 4.2070337707033774e-05,
"loss": 1.1632,
"step": 5660
},
{
"epoch": 37.3,
"learning_rate": 4.203807270380727e-05,
"loss": 1.1688,
"step": 5670
},
{
"epoch": 37.37,
"learning_rate": 4.200580770058077e-05,
"loss": 1.1007,
"step": 5680
},
{
"epoch": 37.43,
"learning_rate": 4.197354269735427e-05,
"loss": 1.1261,
"step": 5690
},
{
"epoch": 37.5,
"learning_rate": 4.1941277694127774e-05,
"loss": 1.1808,
"step": 5700
},
{
"epoch": 37.56,
"learning_rate": 4.190901269090127e-05,
"loss": 1.0999,
"step": 5710
},
{
"epoch": 37.63,
"learning_rate": 4.187674768767477e-05,
"loss": 1.1271,
"step": 5720
},
{
"epoch": 37.69,
"learning_rate": 4.1844482684448266e-05,
"loss": 1.1519,
"step": 5730
},
{
"epoch": 37.76,
"learning_rate": 4.181221768122177e-05,
"loss": 1.1955,
"step": 5740
},
{
"epoch": 37.82,
"learning_rate": 4.177995267799527e-05,
"loss": 1.1145,
"step": 5750
},
{
"epoch": 37.89,
"learning_rate": 4.174768767476877e-05,
"loss": 1.1182,
"step": 5760
},
{
"epoch": 37.96,
"learning_rate": 4.1715422671542266e-05,
"loss": 1.1376,
"step": 5770
},
{
"epoch": 38.03,
"learning_rate": 4.168315766831577e-05,
"loss": 1.3017,
"step": 5780
},
{
"epoch": 38.09,
"learning_rate": 4.165089266508927e-05,
"loss": 1.0855,
"step": 5790
},
{
"epoch": 38.16,
"learning_rate": 4.1618627661862765e-05,
"loss": 1.1092,
"step": 5800
},
{
"epoch": 38.22,
"learning_rate": 4.158636265863627e-05,
"loss": 1.1129,
"step": 5810
},
{
"epoch": 38.29,
"learning_rate": 4.155409765540977e-05,
"loss": 1.1781,
"step": 5820
},
{
"epoch": 38.35,
"learning_rate": 4.1521832652183264e-05,
"loss": 1.1033,
"step": 5830
},
{
"epoch": 38.42,
"learning_rate": 4.1489567648956766e-05,
"loss": 1.1188,
"step": 5840
},
{
"epoch": 38.48,
"learning_rate": 4.145730264573027e-05,
"loss": 1.1868,
"step": 5850
},
{
"epoch": 38.55,
"learning_rate": 4.142503764250376e-05,
"loss": 1.1578,
"step": 5860
},
{
"epoch": 38.62,
"learning_rate": 4.1392772639277264e-05,
"loss": 1.112,
"step": 5870
},
{
"epoch": 38.68,
"learning_rate": 4.1360507636050766e-05,
"loss": 1.1149,
"step": 5880
},
{
"epoch": 38.75,
"learning_rate": 4.132824263282427e-05,
"loss": 1.2071,
"step": 5890
},
{
"epoch": 38.81,
"learning_rate": 4.129597762959776e-05,
"loss": 1.0991,
"step": 5900
},
{
"epoch": 38.88,
"learning_rate": 4.1263712626371265e-05,
"loss": 1.1303,
"step": 5910
},
{
"epoch": 38.94,
"learning_rate": 4.123144762314476e-05,
"loss": 1.1012,
"step": 5920
},
{
"epoch": 39.01,
"learning_rate": 4.119918261991826e-05,
"loss": 1.2124,
"step": 5930
},
{
"epoch": 39.08,
"learning_rate": 4.116691761669176e-05,
"loss": 1.089,
"step": 5940
},
{
"epoch": 39.14,
"learning_rate": 4.1134652613465265e-05,
"loss": 1.0932,
"step": 5950
},
{
"epoch": 39.21,
"learning_rate": 4.110238761023876e-05,
"loss": 1.1506,
"step": 5960
},
{
"epoch": 39.27,
"learning_rate": 4.107012260701226e-05,
"loss": 1.205,
"step": 5970
},
{
"epoch": 39.34,
"learning_rate": 4.1037857603785764e-05,
"loss": 1.0693,
"step": 5980
},
{
"epoch": 39.41,
"learning_rate": 4.100559260055926e-05,
"loss": 1.1025,
"step": 5990
},
{
"epoch": 39.47,
"learning_rate": 4.097332759733276e-05,
"loss": 1.1337,
"step": 6000
},
{
"epoch": 39.47,
"eval_loss": 0.39270928502082825,
"eval_runtime": 319.378,
"eval_samples_per_second": 22.262,
"eval_steps_per_second": 1.393,
"eval_wer": 0.4363495596632149,
"step": 6000
},
{
"epoch": 39.54,
"learning_rate": 4.094106259410626e-05,
"loss": 1.1722,
"step": 6010
},
{
"epoch": 39.6,
"learning_rate": 4.090879759087976e-05,
"loss": 1.0861,
"step": 6020
},
{
"epoch": 39.67,
"learning_rate": 4.087653258765326e-05,
"loss": 1.1247,
"step": 6030
},
{
"epoch": 39.73,
"learning_rate": 4.084426758442676e-05,
"loss": 1.1784,
"step": 6040
},
{
"epoch": 39.8,
"learning_rate": 4.0812002581200256e-05,
"loss": 1.1134,
"step": 6050
},
{
"epoch": 39.86,
"learning_rate": 4.077973757797376e-05,
"loss": 1.1039,
"step": 6060
},
{
"epoch": 39.93,
"learning_rate": 4.074747257474726e-05,
"loss": 1.1316,
"step": 6070
},
{
"epoch": 40.0,
"learning_rate": 4.071520757152076e-05,
"loss": 1.17,
"step": 6080
},
{
"epoch": 40.07,
"learning_rate": 4.068294256829426e-05,
"loss": 1.1694,
"step": 6090
},
{
"epoch": 40.13,
"learning_rate": 4.065067756506776e-05,
"loss": 1.0948,
"step": 6100
},
{
"epoch": 40.2,
"learning_rate": 4.061841256184126e-05,
"loss": 1.1219,
"step": 6110
},
{
"epoch": 40.26,
"learning_rate": 4.0586147558614755e-05,
"loss": 1.1615,
"step": 6120
},
{
"epoch": 40.33,
"learning_rate": 4.055388255538826e-05,
"loss": 1.0687,
"step": 6130
},
{
"epoch": 40.39,
"learning_rate": 4.052161755216176e-05,
"loss": 1.1161,
"step": 6140
},
{
"epoch": 40.46,
"learning_rate": 4.0489352548935254e-05,
"loss": 1.1113,
"step": 6150
},
{
"epoch": 40.52,
"learning_rate": 4.0457087545708756e-05,
"loss": 1.1724,
"step": 6160
},
{
"epoch": 40.59,
"learning_rate": 4.042482254248226e-05,
"loss": 1.0693,
"step": 6170
},
{
"epoch": 40.65,
"learning_rate": 4.039255753925575e-05,
"loss": 1.1136,
"step": 6180
},
{
"epoch": 40.72,
"learning_rate": 4.0360292536029254e-05,
"loss": 1.13,
"step": 6190
},
{
"epoch": 40.79,
"learning_rate": 4.0328027532802756e-05,
"loss": 1.1549,
"step": 6200
},
{
"epoch": 40.85,
"learning_rate": 4.029576252957625e-05,
"loss": 1.058,
"step": 6210
},
{
"epoch": 40.92,
"learning_rate": 4.026349752634975e-05,
"loss": 1.1233,
"step": 6220
},
{
"epoch": 40.98,
"learning_rate": 4.0231232523123255e-05,
"loss": 1.157,
"step": 6230
},
{
"epoch": 41.05,
"learning_rate": 4.019896751989675e-05,
"loss": 1.1953,
"step": 6240
},
{
"epoch": 41.12,
"learning_rate": 4.016670251667025e-05,
"loss": 1.0919,
"step": 6250
},
{
"epoch": 41.18,
"learning_rate": 4.0134437513443754e-05,
"loss": 1.0925,
"step": 6260
},
{
"epoch": 41.25,
"learning_rate": 4.0102172510217255e-05,
"loss": 1.1612,
"step": 6270
},
{
"epoch": 41.31,
"learning_rate": 4.006990750699075e-05,
"loss": 1.0826,
"step": 6280
},
{
"epoch": 41.38,
"learning_rate": 4.003764250376425e-05,
"loss": 1.0994,
"step": 6290
},
{
"epoch": 41.45,
"learning_rate": 4.0005377500537754e-05,
"loss": 1.1424,
"step": 6300
},
{
"epoch": 41.51,
"learning_rate": 3.997311249731125e-05,
"loss": 1.1677,
"step": 6310
},
{
"epoch": 41.58,
"learning_rate": 3.994084749408475e-05,
"loss": 1.0776,
"step": 6320
},
{
"epoch": 41.64,
"learning_rate": 3.990858249085825e-05,
"loss": 1.0904,
"step": 6330
},
{
"epoch": 41.71,
"learning_rate": 3.987631748763175e-05,
"loss": 1.1126,
"step": 6340
},
{
"epoch": 41.77,
"learning_rate": 3.984405248440525e-05,
"loss": 1.1589,
"step": 6350
},
{
"epoch": 41.84,
"learning_rate": 3.981178748117875e-05,
"loss": 1.0549,
"step": 6360
},
{
"epoch": 41.9,
"learning_rate": 3.9779522477952246e-05,
"loss": 1.1058,
"step": 6370
},
{
"epoch": 41.97,
"learning_rate": 3.974725747472575e-05,
"loss": 1.1154,
"step": 6380
},
{
"epoch": 42.04,
"learning_rate": 3.971499247149925e-05,
"loss": 1.2687,
"step": 6390
},
{
"epoch": 42.1,
"learning_rate": 3.9682727468272745e-05,
"loss": 1.0773,
"step": 6400
},
{
"epoch": 42.17,
"learning_rate": 3.965046246504625e-05,
"loss": 1.0969,
"step": 6410
},
{
"epoch": 42.24,
"learning_rate": 3.961819746181975e-05,
"loss": 1.1189,
"step": 6420
},
{
"epoch": 42.3,
"learning_rate": 3.9585932458593244e-05,
"loss": 1.14,
"step": 6430
},
{
"epoch": 42.37,
"learning_rate": 3.9553667455366746e-05,
"loss": 1.0748,
"step": 6440
},
{
"epoch": 42.43,
"learning_rate": 3.952140245214025e-05,
"loss": 1.107,
"step": 6450
},
{
"epoch": 42.5,
"learning_rate": 3.948913744891374e-05,
"loss": 1.1683,
"step": 6460
},
{
"epoch": 42.56,
"learning_rate": 3.9456872445687244e-05,
"loss": 1.0603,
"step": 6470
},
{
"epoch": 42.63,
"learning_rate": 3.9424607442460746e-05,
"loss": 1.081,
"step": 6480
},
{
"epoch": 42.69,
"learning_rate": 3.939234243923425e-05,
"loss": 1.1066,
"step": 6490
},
{
"epoch": 42.76,
"learning_rate": 3.936007743600774e-05,
"loss": 1.1896,
"step": 6500
},
{
"epoch": 42.76,
"eval_loss": 0.38105571269989014,
"eval_runtime": 323.0014,
"eval_samples_per_second": 22.012,
"eval_steps_per_second": 1.378,
"eval_wer": 0.42543307848640277,
"step": 6500
},
{
"epoch": 42.82,
"learning_rate": 3.9327812432781245e-05,
"loss": 1.062,
"step": 6510
},
{
"epoch": 42.89,
"learning_rate": 3.9295547429554747e-05,
"loss": 1.1064,
"step": 6520
},
{
"epoch": 42.96,
"learning_rate": 3.926328242632824e-05,
"loss": 1.0806,
"step": 6530
},
{
"epoch": 43.03,
"learning_rate": 3.9231017423101743e-05,
"loss": 1.25,
"step": 6540
},
{
"epoch": 43.09,
"learning_rate": 3.9198752419875245e-05,
"loss": 1.034,
"step": 6550
},
{
"epoch": 43.16,
"learning_rate": 3.916648741664874e-05,
"loss": 1.0961,
"step": 6560
},
{
"epoch": 43.22,
"learning_rate": 3.913422241342224e-05,
"loss": 1.1108,
"step": 6570
},
{
"epoch": 43.29,
"learning_rate": 3.9101957410195744e-05,
"loss": 1.1584,
"step": 6580
},
{
"epoch": 43.35,
"learning_rate": 3.906969240696924e-05,
"loss": 1.1061,
"step": 6590
},
{
"epoch": 43.42,
"learning_rate": 3.903742740374274e-05,
"loss": 1.0788,
"step": 6600
},
{
"epoch": 43.48,
"learning_rate": 3.900516240051624e-05,
"loss": 1.1452,
"step": 6610
},
{
"epoch": 43.55,
"learning_rate": 3.8972897397289744e-05,
"loss": 1.1088,
"step": 6620
},
{
"epoch": 43.62,
"learning_rate": 3.894063239406324e-05,
"loss": 1.0395,
"step": 6630
},
{
"epoch": 43.68,
"learning_rate": 3.890836739083674e-05,
"loss": 1.104,
"step": 6640
},
{
"epoch": 43.75,
"learning_rate": 3.8876102387610236e-05,
"loss": 1.1578,
"step": 6650
},
{
"epoch": 43.81,
"learning_rate": 3.884383738438374e-05,
"loss": 1.0595,
"step": 6660
},
{
"epoch": 43.88,
"learning_rate": 3.881157238115724e-05,
"loss": 1.0959,
"step": 6670
},
{
"epoch": 43.94,
"learning_rate": 3.877930737793074e-05,
"loss": 1.0656,
"step": 6680
},
{
"epoch": 44.01,
"learning_rate": 3.874704237470424e-05,
"loss": 1.2342,
"step": 6690
},
{
"epoch": 44.08,
"learning_rate": 3.871477737147774e-05,
"loss": 1.0533,
"step": 6700
},
{
"epoch": 44.14,
"learning_rate": 3.868251236825124e-05,
"loss": 1.0577,
"step": 6710
},
{
"epoch": 44.21,
"learning_rate": 3.8650247365024735e-05,
"loss": 1.0793,
"step": 6720
},
{
"epoch": 44.27,
"learning_rate": 3.861798236179824e-05,
"loss": 1.1463,
"step": 6730
},
{
"epoch": 44.34,
"learning_rate": 3.858571735857174e-05,
"loss": 1.0532,
"step": 6740
},
{
"epoch": 44.41,
"learning_rate": 3.8553452355345234e-05,
"loss": 1.0896,
"step": 6750
},
{
"epoch": 44.47,
"learning_rate": 3.8521187352118736e-05,
"loss": 1.0941,
"step": 6760
},
{
"epoch": 44.54,
"learning_rate": 3.848892234889224e-05,
"loss": 1.1166,
"step": 6770
},
{
"epoch": 44.6,
"learning_rate": 3.845665734566573e-05,
"loss": 1.0476,
"step": 6780
},
{
"epoch": 44.67,
"learning_rate": 3.8424392342439234e-05,
"loss": 1.0674,
"step": 6790
},
{
"epoch": 44.73,
"learning_rate": 3.8392127339212736e-05,
"loss": 1.107,
"step": 6800
},
{
"epoch": 44.8,
"learning_rate": 3.835986233598624e-05,
"loss": 1.0972,
"step": 6810
},
{
"epoch": 44.86,
"learning_rate": 3.832759733275973e-05,
"loss": 1.0456,
"step": 6820
},
{
"epoch": 44.93,
"learning_rate": 3.8295332329533235e-05,
"loss": 1.0916,
"step": 6830
},
{
"epoch": 45.0,
"learning_rate": 3.8266293826629385e-05,
"loss": 1.1377,
"step": 6840
},
{
"epoch": 45.07,
"learning_rate": 3.8234028823402886e-05,
"loss": 1.1153,
"step": 6850
},
{
"epoch": 45.13,
"learning_rate": 3.820176382017639e-05,
"loss": 1.0292,
"step": 6860
},
{
"epoch": 45.2,
"learning_rate": 3.816949881694988e-05,
"loss": 1.0737,
"step": 6870
},
{
"epoch": 45.26,
"learning_rate": 3.813723381372338e-05,
"loss": 1.1493,
"step": 6880
},
{
"epoch": 45.33,
"learning_rate": 3.810496881049688e-05,
"loss": 1.0346,
"step": 6890
},
{
"epoch": 45.39,
"learning_rate": 3.807270380727038e-05,
"loss": 1.0626,
"step": 6900
},
{
"epoch": 45.46,
"learning_rate": 3.8040438804043884e-05,
"loss": 1.1238,
"step": 6910
},
{
"epoch": 45.52,
"learning_rate": 3.8008173800817386e-05,
"loss": 1.1701,
"step": 6920
},
{
"epoch": 45.59,
"learning_rate": 3.797590879759088e-05,
"loss": 1.0208,
"step": 6930
},
{
"epoch": 45.65,
"learning_rate": 3.7943643794364376e-05,
"loss": 1.0844,
"step": 6940
},
{
"epoch": 45.72,
"learning_rate": 3.791137879113788e-05,
"loss": 1.0585,
"step": 6950
},
{
"epoch": 45.79,
"learning_rate": 3.787911378791138e-05,
"loss": 1.1088,
"step": 6960
},
{
"epoch": 45.85,
"learning_rate": 3.784684878468488e-05,
"loss": 1.0437,
"step": 6970
},
{
"epoch": 45.92,
"learning_rate": 3.781458378145838e-05,
"loss": 1.0495,
"step": 6980
},
{
"epoch": 45.98,
"learning_rate": 3.778554527855453e-05,
"loss": 1.1288,
"step": 6990
},
{
"epoch": 46.05,
"learning_rate": 3.775328027532803e-05,
"loss": 1.1847,
"step": 7000
},
{
"epoch": 46.05,
"eval_loss": 0.3855203092098236,
"eval_runtime": 320.0475,
"eval_samples_per_second": 22.215,
"eval_steps_per_second": 1.39,
"eval_wer": 0.41294880480015483,
"step": 7000
},
{
"epoch": 46.12,
"learning_rate": 3.772101527210153e-05,
"loss": 1.0635,
"step": 7010
},
{
"epoch": 46.18,
"learning_rate": 3.7688750268875024e-05,
"loss": 1.1075,
"step": 7020
},
{
"epoch": 46.25,
"learning_rate": 3.7656485265648526e-05,
"loss": 1.143,
"step": 7030
},
{
"epoch": 46.31,
"learning_rate": 3.762422026242203e-05,
"loss": 1.0112,
"step": 7040
},
{
"epoch": 46.38,
"learning_rate": 3.759195525919553e-05,
"loss": 1.05,
"step": 7050
},
{
"epoch": 46.45,
"learning_rate": 3.7559690255969025e-05,
"loss": 1.0578,
"step": 7060
},
{
"epoch": 46.51,
"learning_rate": 3.7527425252742527e-05,
"loss": 1.1365,
"step": 7070
},
{
"epoch": 46.58,
"learning_rate": 3.749516024951603e-05,
"loss": 1.0467,
"step": 7080
},
{
"epoch": 46.64,
"learning_rate": 3.7462895246289523e-05,
"loss": 1.0554,
"step": 7090
},
{
"epoch": 46.71,
"learning_rate": 3.7430630243063025e-05,
"loss": 1.0705,
"step": 7100
},
{
"epoch": 46.77,
"learning_rate": 3.739836523983653e-05,
"loss": 1.1119,
"step": 7110
},
{
"epoch": 46.84,
"learning_rate": 3.736610023661002e-05,
"loss": 1.0608,
"step": 7120
},
{
"epoch": 46.9,
"learning_rate": 3.7333835233383524e-05,
"loss": 1.0459,
"step": 7130
},
{
"epoch": 46.97,
"learning_rate": 3.7301570230157026e-05,
"loss": 1.06,
"step": 7140
},
{
"epoch": 47.04,
"learning_rate": 3.726930522693052e-05,
"loss": 1.2005,
"step": 7150
},
{
"epoch": 47.1,
"learning_rate": 3.723704022370402e-05,
"loss": 1.0354,
"step": 7160
},
{
"epoch": 47.17,
"learning_rate": 3.7204775220477524e-05,
"loss": 1.0732,
"step": 7170
},
{
"epoch": 47.24,
"learning_rate": 3.7172510217251026e-05,
"loss": 1.1015,
"step": 7180
},
{
"epoch": 47.3,
"learning_rate": 3.714024521402452e-05,
"loss": 1.0637,
"step": 7190
},
{
"epoch": 47.37,
"learning_rate": 3.710798021079802e-05,
"loss": 1.0185,
"step": 7200
},
{
"epoch": 47.43,
"learning_rate": 3.707571520757152e-05,
"loss": 1.0629,
"step": 7210
},
{
"epoch": 47.5,
"learning_rate": 3.704345020434502e-05,
"loss": 1.1096,
"step": 7220
},
{
"epoch": 47.56,
"learning_rate": 3.701118520111852e-05,
"loss": 1.0534,
"step": 7230
},
{
"epoch": 47.63,
"learning_rate": 3.6978920197892024e-05,
"loss": 1.0467,
"step": 7240
},
{
"epoch": 47.69,
"learning_rate": 3.694665519466552e-05,
"loss": 1.0602,
"step": 7250
},
{
"epoch": 47.76,
"learning_rate": 3.691439019143902e-05,
"loss": 1.1408,
"step": 7260
},
{
"epoch": 47.82,
"learning_rate": 3.6882125188212515e-05,
"loss": 1.0303,
"step": 7270
},
{
"epoch": 47.89,
"learning_rate": 3.684986018498602e-05,
"loss": 1.0803,
"step": 7280
},
{
"epoch": 47.96,
"learning_rate": 3.681759518175952e-05,
"loss": 1.0774,
"step": 7290
},
{
"epoch": 48.03,
"learning_rate": 3.678533017853302e-05,
"loss": 1.2145,
"step": 7300
},
{
"epoch": 48.09,
"learning_rate": 3.675306517530652e-05,
"loss": 1.0268,
"step": 7310
},
{
"epoch": 48.16,
"learning_rate": 3.672080017208002e-05,
"loss": 1.0546,
"step": 7320
},
{
"epoch": 48.22,
"learning_rate": 3.668853516885352e-05,
"loss": 1.0875,
"step": 7330
},
{
"epoch": 48.29,
"learning_rate": 3.6656270165627015e-05,
"loss": 1.1135,
"step": 7340
},
{
"epoch": 48.35,
"learning_rate": 3.6624005162400516e-05,
"loss": 1.05,
"step": 7350
},
{
"epoch": 48.42,
"learning_rate": 3.659174015917402e-05,
"loss": 1.0716,
"step": 7360
},
{
"epoch": 48.48,
"learning_rate": 3.655947515594752e-05,
"loss": 1.0956,
"step": 7370
},
{
"epoch": 48.55,
"learning_rate": 3.6527210152721015e-05,
"loss": 1.0675,
"step": 7380
},
{
"epoch": 48.62,
"learning_rate": 3.649494514949452e-05,
"loss": 1.0263,
"step": 7390
},
{
"epoch": 48.68,
"learning_rate": 3.646268014626801e-05,
"loss": 1.0499,
"step": 7400
},
{
"epoch": 48.75,
"learning_rate": 3.6430415143041514e-05,
"loss": 1.1132,
"step": 7410
},
{
"epoch": 48.81,
"learning_rate": 3.6398150139815016e-05,
"loss": 1.0487,
"step": 7420
},
{
"epoch": 48.88,
"learning_rate": 3.636588513658852e-05,
"loss": 1.0571,
"step": 7430
},
{
"epoch": 48.94,
"learning_rate": 3.633362013336201e-05,
"loss": 1.0477,
"step": 7440
},
{
"epoch": 49.01,
"learning_rate": 3.6301355130135514e-05,
"loss": 1.2352,
"step": 7450
},
{
"epoch": 49.08,
"learning_rate": 3.626909012690901e-05,
"loss": 1.0383,
"step": 7460
},
{
"epoch": 49.14,
"learning_rate": 3.623682512368251e-05,
"loss": 1.03,
"step": 7470
},
{
"epoch": 49.21,
"learning_rate": 3.620456012045601e-05,
"loss": 1.0397,
"step": 7480
},
{
"epoch": 49.27,
"learning_rate": 3.6172295117229515e-05,
"loss": 1.1075,
"step": 7490
},
{
"epoch": 49.34,
"learning_rate": 3.6140030114003016e-05,
"loss": 0.9954,
"step": 7500
},
{
"epoch": 49.34,
"eval_loss": 0.37294888496398926,
"eval_runtime": 321.681,
"eval_samples_per_second": 22.103,
"eval_steps_per_second": 1.383,
"eval_wer": 0.3981418755443724,
"step": 7500
},
{
"epoch": 49.41,
"learning_rate": 3.610776511077651e-05,
"loss": 1.0484,
"step": 7510
},
{
"epoch": 49.47,
"learning_rate": 3.607550010755001e-05,
"loss": 1.0598,
"step": 7520
},
{
"epoch": 49.54,
"learning_rate": 3.604323510432351e-05,
"loss": 1.1166,
"step": 7530
},
{
"epoch": 49.6,
"learning_rate": 3.601097010109701e-05,
"loss": 1.0265,
"step": 7540
},
{
"epoch": 49.67,
"learning_rate": 3.597870509787051e-05,
"loss": 1.0442,
"step": 7550
},
{
"epoch": 49.73,
"learning_rate": 3.5946440094644014e-05,
"loss": 1.0912,
"step": 7560
},
{
"epoch": 49.8,
"learning_rate": 3.591417509141751e-05,
"loss": 1.0775,
"step": 7570
},
{
"epoch": 49.86,
"learning_rate": 3.588191008819101e-05,
"loss": 1.0295,
"step": 7580
},
{
"epoch": 49.93,
"learning_rate": 3.5849645084964506e-05,
"loss": 1.0578,
"step": 7590
},
{
"epoch": 50.0,
"learning_rate": 3.581738008173801e-05,
"loss": 1.1105,
"step": 7600
},
{
"epoch": 50.07,
"learning_rate": 3.578511507851151e-05,
"loss": 1.1128,
"step": 7610
},
{
"epoch": 50.13,
"learning_rate": 3.575285007528501e-05,
"loss": 1.0356,
"step": 7620
},
{
"epoch": 50.2,
"learning_rate": 3.572058507205851e-05,
"loss": 1.0443,
"step": 7630
},
{
"epoch": 50.26,
"learning_rate": 3.568832006883201e-05,
"loss": 1.1233,
"step": 7640
},
{
"epoch": 50.33,
"learning_rate": 3.56560550656055e-05,
"loss": 1.0156,
"step": 7650
},
{
"epoch": 50.39,
"learning_rate": 3.5623790062379005e-05,
"loss": 1.0593,
"step": 7660
},
{
"epoch": 50.46,
"learning_rate": 3.559152505915251e-05,
"loss": 1.0744,
"step": 7670
},
{
"epoch": 50.52,
"learning_rate": 3.555926005592601e-05,
"loss": 1.1256,
"step": 7680
},
{
"epoch": 50.59,
"learning_rate": 3.552699505269951e-05,
"loss": 1.0113,
"step": 7690
},
{
"epoch": 50.65,
"learning_rate": 3.5494730049473005e-05,
"loss": 1.0382,
"step": 7700
},
{
"epoch": 50.72,
"learning_rate": 3.546246504624651e-05,
"loss": 1.0535,
"step": 7710
},
{
"epoch": 50.79,
"learning_rate": 3.543020004302e-05,
"loss": 1.0927,
"step": 7720
},
{
"epoch": 50.85,
"learning_rate": 3.5397935039793504e-05,
"loss": 1.04,
"step": 7730
},
{
"epoch": 50.92,
"learning_rate": 3.5365670036567006e-05,
"loss": 1.0484,
"step": 7740
},
{
"epoch": 50.98,
"learning_rate": 3.533340503334051e-05,
"loss": 1.0908,
"step": 7750
},
{
"epoch": 51.05,
"learning_rate": 3.5301140030114e-05,
"loss": 1.1377,
"step": 7760
},
{
"epoch": 51.12,
"learning_rate": 3.5268875026887504e-05,
"loss": 1.0487,
"step": 7770
},
{
"epoch": 51.18,
"learning_rate": 3.5236610023661e-05,
"loss": 1.0859,
"step": 7780
},
{
"epoch": 51.25,
"learning_rate": 3.52043450204345e-05,
"loss": 1.1054,
"step": 7790
},
{
"epoch": 51.31,
"learning_rate": 3.5172080017208e-05,
"loss": 0.9997,
"step": 7800
},
{
"epoch": 51.38,
"learning_rate": 3.5139815013981505e-05,
"loss": 1.0613,
"step": 7810
},
{
"epoch": 51.45,
"learning_rate": 3.510755001075501e-05,
"loss": 1.0611,
"step": 7820
},
{
"epoch": 51.51,
"learning_rate": 3.50752850075285e-05,
"loss": 1.1271,
"step": 7830
},
{
"epoch": 51.58,
"learning_rate": 3.5043020004302e-05,
"loss": 1.027,
"step": 7840
},
{
"epoch": 51.64,
"learning_rate": 3.50107550010755e-05,
"loss": 1.0344,
"step": 7850
},
{
"epoch": 51.71,
"learning_rate": 3.4978489997849e-05,
"loss": 1.052,
"step": 7860
},
{
"epoch": 51.77,
"learning_rate": 3.49462249946225e-05,
"loss": 1.1153,
"step": 7870
},
{
"epoch": 51.84,
"learning_rate": 3.4913959991396004e-05,
"loss": 1.0125,
"step": 7880
},
{
"epoch": 51.9,
"learning_rate": 3.48816949881695e-05,
"loss": 1.0259,
"step": 7890
},
{
"epoch": 51.97,
"learning_rate": 3.4849429984943e-05,
"loss": 1.0642,
"step": 7900
},
{
"epoch": 52.04,
"learning_rate": 3.4817164981716496e-05,
"loss": 1.1743,
"step": 7910
},
{
"epoch": 52.1,
"learning_rate": 3.478489997849e-05,
"loss": 1.0113,
"step": 7920
},
{
"epoch": 52.17,
"learning_rate": 3.47526349752635e-05,
"loss": 1.0284,
"step": 7930
},
{
"epoch": 52.24,
"learning_rate": 3.4720369972037e-05,
"loss": 1.0396,
"step": 7940
},
{
"epoch": 52.3,
"learning_rate": 3.4688104968810496e-05,
"loss": 1.082,
"step": 7950
},
{
"epoch": 52.37,
"learning_rate": 3.4655839965584e-05,
"loss": 1.0178,
"step": 7960
},
{
"epoch": 52.43,
"learning_rate": 3.462357496235749e-05,
"loss": 1.0592,
"step": 7970
},
{
"epoch": 52.5,
"learning_rate": 3.4591309959130995e-05,
"loss": 1.0798,
"step": 7980
},
{
"epoch": 52.56,
"learning_rate": 3.45590449559045e-05,
"loss": 1.0219,
"step": 7990
},
{
"epoch": 52.63,
"learning_rate": 3.4526779952678e-05,
"loss": 1.0293,
"step": 8000
},
{
"epoch": 52.63,
"eval_loss": 0.36370599269866943,
"eval_runtime": 317.8982,
"eval_samples_per_second": 22.366,
"eval_steps_per_second": 1.4,
"eval_wer": 0.4014323042678796,
"step": 8000
},
{
"epoch": 52.69,
"learning_rate": 3.44945149494515e-05,
"loss": 1.0684,
"step": 8010
},
{
"epoch": 52.76,
"learning_rate": 3.4462249946224996e-05,
"loss": 1.1228,
"step": 8020
},
{
"epoch": 52.82,
"learning_rate": 3.442998494299849e-05,
"loss": 1.0044,
"step": 8030
},
{
"epoch": 52.89,
"learning_rate": 3.439771993977199e-05,
"loss": 1.0101,
"step": 8040
},
{
"epoch": 52.96,
"learning_rate": 3.4365454936545494e-05,
"loss": 1.0626,
"step": 8050
},
{
"epoch": 53.03,
"learning_rate": 3.4333189933318996e-05,
"loss": 1.1676,
"step": 8060
},
{
"epoch": 53.09,
"learning_rate": 3.43009249300925e-05,
"loss": 1.0056,
"step": 8070
},
{
"epoch": 53.16,
"learning_rate": 3.426865992686599e-05,
"loss": 1.0096,
"step": 8080
},
{
"epoch": 53.22,
"learning_rate": 3.4236394923639495e-05,
"loss": 1.061,
"step": 8090
},
{
"epoch": 53.29,
"learning_rate": 3.420412992041299e-05,
"loss": 1.1068,
"step": 8100
},
{
"epoch": 53.35,
"learning_rate": 3.417186491718649e-05,
"loss": 0.997,
"step": 8110
},
{
"epoch": 53.42,
"learning_rate": 3.413959991395999e-05,
"loss": 1.0424,
"step": 8120
},
{
"epoch": 53.48,
"learning_rate": 3.4107334910733495e-05,
"loss": 1.085,
"step": 8130
},
{
"epoch": 53.55,
"learning_rate": 3.4075069907507e-05,
"loss": 1.057,
"step": 8140
},
{
"epoch": 53.62,
"learning_rate": 3.404280490428049e-05,
"loss": 1.0096,
"step": 8150
},
{
"epoch": 53.68,
"learning_rate": 3.401053990105399e-05,
"loss": 1.0426,
"step": 8160
},
{
"epoch": 53.75,
"learning_rate": 3.397827489782749e-05,
"loss": 1.1106,
"step": 8170
},
{
"epoch": 53.81,
"learning_rate": 3.394600989460099e-05,
"loss": 1.0268,
"step": 8180
},
{
"epoch": 53.88,
"learning_rate": 3.391374489137449e-05,
"loss": 1.0431,
"step": 8190
},
{
"epoch": 53.94,
"learning_rate": 3.3881479888147994e-05,
"loss": 1.071,
"step": 8200
},
{
"epoch": 54.01,
"learning_rate": 3.384921488492149e-05,
"loss": 1.1877,
"step": 8210
},
{
"epoch": 54.08,
"learning_rate": 3.3816949881694984e-05,
"loss": 0.9915,
"step": 8220
},
{
"epoch": 54.14,
"learning_rate": 3.3784684878468486e-05,
"loss": 1.0106,
"step": 8230
},
{
"epoch": 54.21,
"learning_rate": 3.375241987524199e-05,
"loss": 1.0178,
"step": 8240
},
{
"epoch": 54.27,
"learning_rate": 3.372015487201549e-05,
"loss": 1.096,
"step": 8250
},
{
"epoch": 54.34,
"learning_rate": 3.368788986878899e-05,
"loss": 1.0004,
"step": 8260
},
{
"epoch": 54.41,
"learning_rate": 3.365562486556249e-05,
"loss": 1.0407,
"step": 8270
},
{
"epoch": 54.47,
"learning_rate": 3.362335986233598e-05,
"loss": 1.0405,
"step": 8280
},
{
"epoch": 54.54,
"learning_rate": 3.3591094859109483e-05,
"loss": 1.0556,
"step": 8290
},
{
"epoch": 54.6,
"learning_rate": 3.3558829855882985e-05,
"loss": 1.012,
"step": 8300
},
{
"epoch": 54.67,
"learning_rate": 3.352656485265649e-05,
"loss": 1.0279,
"step": 8310
},
{
"epoch": 54.73,
"learning_rate": 3.349429984942999e-05,
"loss": 1.0697,
"step": 8320
},
{
"epoch": 54.8,
"learning_rate": 3.346203484620349e-05,
"loss": 1.0495,
"step": 8330
},
{
"epoch": 54.86,
"learning_rate": 3.3429769842976986e-05,
"loss": 1.0157,
"step": 8340
},
{
"epoch": 54.93,
"learning_rate": 3.339750483975048e-05,
"loss": 1.0558,
"step": 8350
},
{
"epoch": 55.0,
"learning_rate": 3.336523983652398e-05,
"loss": 1.1224,
"step": 8360
},
{
"epoch": 55.07,
"learning_rate": 3.3332974833297484e-05,
"loss": 1.0981,
"step": 8370
},
{
"epoch": 55.13,
"learning_rate": 3.3300709830070986e-05,
"loss": 1.0167,
"step": 8380
},
{
"epoch": 55.2,
"learning_rate": 3.326844482684449e-05,
"loss": 1.016,
"step": 8390
},
{
"epoch": 55.26,
"learning_rate": 3.323617982361798e-05,
"loss": 1.0748,
"step": 8400
},
{
"epoch": 55.33,
"learning_rate": 3.320391482039148e-05,
"loss": 0.9908,
"step": 8410
},
{
"epoch": 55.39,
"learning_rate": 3.317164981716498e-05,
"loss": 0.9933,
"step": 8420
},
{
"epoch": 55.46,
"learning_rate": 3.313938481393848e-05,
"loss": 1.031,
"step": 8430
},
{
"epoch": 55.52,
"learning_rate": 3.3107119810711984e-05,
"loss": 1.0889,
"step": 8440
},
{
"epoch": 55.59,
"learning_rate": 3.3074854807485485e-05,
"loss": 0.9891,
"step": 8450
},
{
"epoch": 55.65,
"learning_rate": 3.304258980425898e-05,
"loss": 1.0279,
"step": 8460
},
{
"epoch": 55.72,
"learning_rate": 3.3010324801032475e-05,
"loss": 1.0436,
"step": 8470
},
{
"epoch": 55.79,
"learning_rate": 3.297805979780598e-05,
"loss": 1.097,
"step": 8480
},
{
"epoch": 55.85,
"learning_rate": 3.294579479457948e-05,
"loss": 1.0016,
"step": 8490
},
{
"epoch": 55.92,
"learning_rate": 3.291352979135298e-05,
"loss": 1.0224,
"step": 8500
},
{
"epoch": 55.92,
"eval_loss": 0.35778653621673584,
"eval_runtime": 322.7444,
"eval_samples_per_second": 22.03,
"eval_steps_per_second": 1.379,
"eval_wer": 0.38854156585696314,
"step": 8500
},
{
"epoch": 55.98,
"learning_rate": 3.288126478812648e-05,
"loss": 1.0758,
"step": 8510
},
{
"epoch": 56.05,
"learning_rate": 3.2848999784899985e-05,
"loss": 1.1085,
"step": 8520
},
{
"epoch": 56.12,
"learning_rate": 3.281673478167348e-05,
"loss": 0.9923,
"step": 8530
},
{
"epoch": 56.18,
"learning_rate": 3.2784469778446975e-05,
"loss": 1.0392,
"step": 8540
},
{
"epoch": 56.25,
"learning_rate": 3.2752204775220476e-05,
"loss": 1.0738,
"step": 8550
},
{
"epoch": 56.31,
"learning_rate": 3.271993977199398e-05,
"loss": 1.0161,
"step": 8560
},
{
"epoch": 56.38,
"learning_rate": 3.268767476876748e-05,
"loss": 0.982,
"step": 8570
},
{
"epoch": 56.45,
"learning_rate": 3.265540976554098e-05,
"loss": 1.0302,
"step": 8580
},
{
"epoch": 56.51,
"learning_rate": 3.262314476231448e-05,
"loss": 1.1226,
"step": 8590
},
{
"epoch": 56.58,
"learning_rate": 3.259087975908797e-05,
"loss": 1.0014,
"step": 8600
},
{
"epoch": 56.64,
"learning_rate": 3.2558614755861474e-05,
"loss": 1.0055,
"step": 8610
},
{
"epoch": 56.71,
"learning_rate": 3.2526349752634976e-05,
"loss": 1.0368,
"step": 8620
},
{
"epoch": 56.77,
"learning_rate": 3.249408474940848e-05,
"loss": 1.0788,
"step": 8630
},
{
"epoch": 56.84,
"learning_rate": 3.246181974618198e-05,
"loss": 0.9906,
"step": 8640
},
{
"epoch": 56.9,
"learning_rate": 3.242955474295548e-05,
"loss": 1.0247,
"step": 8650
},
{
"epoch": 56.97,
"learning_rate": 3.239728973972897e-05,
"loss": 1.0547,
"step": 8660
},
{
"epoch": 57.04,
"learning_rate": 3.236502473650247e-05,
"loss": 1.1407,
"step": 8670
},
{
"epoch": 57.1,
"learning_rate": 3.233275973327597e-05,
"loss": 0.9864,
"step": 8680
},
{
"epoch": 57.17,
"learning_rate": 3.2300494730049475e-05,
"loss": 1.0169,
"step": 8690
},
{
"epoch": 57.24,
"learning_rate": 3.2268229726822976e-05,
"loss": 1.0632,
"step": 8700
},
{
"epoch": 57.3,
"learning_rate": 3.223596472359648e-05,
"loss": 1.0245,
"step": 8710
},
{
"epoch": 57.37,
"learning_rate": 3.220369972036997e-05,
"loss": 0.9978,
"step": 8720
},
{
"epoch": 57.43,
"learning_rate": 3.217143471714347e-05,
"loss": 1.0595,
"step": 8730
},
{
"epoch": 57.5,
"learning_rate": 3.213916971391697e-05,
"loss": 1.0714,
"step": 8740
},
{
"epoch": 57.56,
"learning_rate": 3.210690471069047e-05,
"loss": 1.0067,
"step": 8750
},
{
"epoch": 57.63,
"learning_rate": 3.2074639707463974e-05,
"loss": 1.0159,
"step": 8760
},
{
"epoch": 57.69,
"learning_rate": 3.2042374704237476e-05,
"loss": 1.0272,
"step": 8770
},
{
"epoch": 57.76,
"learning_rate": 3.201010970101097e-05,
"loss": 1.1016,
"step": 8780
},
{
"epoch": 57.82,
"learning_rate": 3.1977844697784466e-05,
"loss": 0.9925,
"step": 8790
},
{
"epoch": 57.89,
"learning_rate": 3.194557969455797e-05,
"loss": 1.0156,
"step": 8800
},
{
"epoch": 57.96,
"learning_rate": 3.191331469133147e-05,
"loss": 1.016,
"step": 8810
},
{
"epoch": 58.03,
"learning_rate": 3.188104968810497e-05,
"loss": 1.1536,
"step": 8820
},
{
"epoch": 58.09,
"learning_rate": 3.184878468487847e-05,
"loss": 0.9755,
"step": 8830
},
{
"epoch": 58.16,
"learning_rate": 3.1816519681651975e-05,
"loss": 0.9959,
"step": 8840
},
{
"epoch": 58.22,
"learning_rate": 3.178425467842546e-05,
"loss": 1.0229,
"step": 8850
},
{
"epoch": 58.29,
"learning_rate": 3.1751989675198965e-05,
"loss": 1.0698,
"step": 8860
},
{
"epoch": 58.35,
"learning_rate": 3.171972467197247e-05,
"loss": 0.9774,
"step": 8870
},
{
"epoch": 58.42,
"learning_rate": 3.168745966874597e-05,
"loss": 1.0372,
"step": 8880
},
{
"epoch": 58.48,
"learning_rate": 3.165519466551947e-05,
"loss": 1.0503,
"step": 8890
},
{
"epoch": 58.55,
"learning_rate": 3.162292966229297e-05,
"loss": 1.0177,
"step": 8900
},
{
"epoch": 58.62,
"learning_rate": 3.159066465906647e-05,
"loss": 0.9865,
"step": 8910
},
{
"epoch": 58.68,
"learning_rate": 3.155839965583996e-05,
"loss": 1.0472,
"step": 8920
},
{
"epoch": 58.75,
"learning_rate": 3.1526134652613464e-05,
"loss": 1.0973,
"step": 8930
},
{
"epoch": 58.81,
"learning_rate": 3.1493869649386966e-05,
"loss": 0.991,
"step": 8940
},
{
"epoch": 58.88,
"learning_rate": 3.146160464616047e-05,
"loss": 0.9552,
"step": 8950
},
{
"epoch": 58.94,
"learning_rate": 3.142933964293397e-05,
"loss": 0.9968,
"step": 8960
},
{
"epoch": 59.01,
"learning_rate": 3.1397074639707464e-05,
"loss": 1.1483,
"step": 8970
},
{
"epoch": 59.08,
"learning_rate": 3.136480963648096e-05,
"loss": 1.0066,
"step": 8980
},
{
"epoch": 59.14,
"learning_rate": 3.133254463325446e-05,
"loss": 1.0109,
"step": 8990
},
{
"epoch": 59.21,
"learning_rate": 3.130027963002796e-05,
"loss": 1.012,
"step": 9000
},
{
"epoch": 59.21,
"eval_loss": 0.36289963126182556,
"eval_runtime": 322.3576,
"eval_samples_per_second": 22.056,
"eval_steps_per_second": 1.38,
"eval_wer": 0.3929933223652376,
"step": 9000
},
{
"epoch": 59.27,
"learning_rate": 3.1268014626801465e-05,
"loss": 1.06,
"step": 9010
},
{
"epoch": 59.34,
"learning_rate": 3.123574962357497e-05,
"loss": 0.9861,
"step": 9020
},
{
"epoch": 59.41,
"learning_rate": 3.120348462034847e-05,
"loss": 1.012,
"step": 9030
},
{
"epoch": 59.47,
"learning_rate": 3.117121961712196e-05,
"loss": 1.0072,
"step": 9040
},
{
"epoch": 59.54,
"learning_rate": 3.113895461389546e-05,
"loss": 1.0687,
"step": 9050
},
{
"epoch": 59.6,
"learning_rate": 3.110668961066896e-05,
"loss": 0.9707,
"step": 9060
},
{
"epoch": 59.67,
"learning_rate": 3.107442460744246e-05,
"loss": 1.0095,
"step": 9070
},
{
"epoch": 59.73,
"learning_rate": 3.1042159604215964e-05,
"loss": 1.0323,
"step": 9080
},
{
"epoch": 59.8,
"learning_rate": 3.1009894600989466e-05,
"loss": 1.0193,
"step": 9090
},
{
"epoch": 59.86,
"learning_rate": 3.097762959776296e-05,
"loss": 0.9957,
"step": 9100
},
{
"epoch": 59.93,
"learning_rate": 3.0945364594536456e-05,
"loss": 1.0012,
"step": 9110
},
{
"epoch": 60.0,
"learning_rate": 3.091309959130996e-05,
"loss": 1.1186,
"step": 9120
},
{
"epoch": 60.07,
"learning_rate": 3.088083458808346e-05,
"loss": 1.0609,
"step": 9130
},
{
"epoch": 60.13,
"learning_rate": 3.084856958485696e-05,
"loss": 0.987,
"step": 9140
},
{
"epoch": 60.2,
"learning_rate": 3.081630458163046e-05,
"loss": 0.9959,
"step": 9150
},
{
"epoch": 60.26,
"learning_rate": 3.0784039578403965e-05,
"loss": 1.0595,
"step": 9160
},
{
"epoch": 60.33,
"learning_rate": 3.075177457517745e-05,
"loss": 0.9742,
"step": 9170
},
{
"epoch": 60.39,
"learning_rate": 3.0719509571950955e-05,
"loss": 1.0124,
"step": 9180
},
{
"epoch": 60.46,
"learning_rate": 3.068724456872446e-05,
"loss": 1.0143,
"step": 9190
},
{
"epoch": 60.52,
"learning_rate": 3.065497956549796e-05,
"loss": 1.063,
"step": 9200
},
{
"epoch": 60.59,
"learning_rate": 3.062271456227146e-05,
"loss": 0.9921,
"step": 9210
},
{
"epoch": 60.65,
"learning_rate": 3.059044955904496e-05,
"loss": 0.9886,
"step": 9220
},
{
"epoch": 60.72,
"learning_rate": 3.055818455581845e-05,
"loss": 1.0083,
"step": 9230
},
{
"epoch": 60.79,
"learning_rate": 3.052591955259195e-05,
"loss": 1.0394,
"step": 9240
},
{
"epoch": 60.85,
"learning_rate": 3.0493654549365454e-05,
"loss": 0.9481,
"step": 9250
},
{
"epoch": 60.92,
"learning_rate": 3.0461389546138956e-05,
"loss": 1.0265,
"step": 9260
},
{
"epoch": 60.98,
"learning_rate": 3.0429124542912458e-05,
"loss": 1.046,
"step": 9270
},
{
"epoch": 61.05,
"learning_rate": 3.0396859539685956e-05,
"loss": 1.094,
"step": 9280
},
{
"epoch": 61.12,
"learning_rate": 3.036459453645945e-05,
"loss": 0.9846,
"step": 9290
},
{
"epoch": 61.18,
"learning_rate": 3.0332329533232953e-05,
"loss": 1.0055,
"step": 9300
},
{
"epoch": 61.25,
"learning_rate": 3.030006453000645e-05,
"loss": 1.0646,
"step": 9310
},
{
"epoch": 61.31,
"learning_rate": 3.0267799526779953e-05,
"loss": 0.9717,
"step": 9320
},
{
"epoch": 61.38,
"learning_rate": 3.0235534523553455e-05,
"loss": 0.986,
"step": 9330
},
{
"epoch": 61.45,
"learning_rate": 3.0203269520326954e-05,
"loss": 1.0067,
"step": 9340
},
{
"epoch": 61.51,
"learning_rate": 3.0171004517100455e-05,
"loss": 1.0631,
"step": 9350
},
{
"epoch": 61.58,
"learning_rate": 3.013873951387395e-05,
"loss": 0.9652,
"step": 9360
},
{
"epoch": 61.64,
"learning_rate": 3.010647451064745e-05,
"loss": 0.9762,
"step": 9370
},
{
"epoch": 61.71,
"learning_rate": 3.007420950742095e-05,
"loss": 1.015,
"step": 9380
},
{
"epoch": 61.77,
"learning_rate": 3.0041944504194452e-05,
"loss": 1.064,
"step": 9390
},
{
"epoch": 61.84,
"learning_rate": 3.000967950096795e-05,
"loss": 0.9729,
"step": 9400
},
{
"epoch": 61.9,
"learning_rate": 2.997741449774145e-05,
"loss": 1.0035,
"step": 9410
},
{
"epoch": 61.97,
"learning_rate": 2.994514949451495e-05,
"loss": 0.9994,
"step": 9420
},
{
"epoch": 62.04,
"learning_rate": 2.9912884491288453e-05,
"loss": 1.1234,
"step": 9430
},
{
"epoch": 62.1,
"learning_rate": 2.9880619488061948e-05,
"loss": 0.9815,
"step": 9440
},
{
"epoch": 62.17,
"learning_rate": 2.984835448483545e-05,
"loss": 0.9917,
"step": 9450
},
{
"epoch": 62.24,
"learning_rate": 2.981608948160895e-05,
"loss": 1.0241,
"step": 9460
},
{
"epoch": 62.3,
"learning_rate": 2.9783824478382447e-05,
"loss": 1.0205,
"step": 9470
},
{
"epoch": 62.37,
"learning_rate": 2.975155947515595e-05,
"loss": 0.9658,
"step": 9480
},
{
"epoch": 62.43,
"learning_rate": 2.971929447192945e-05,
"loss": 1.0278,
"step": 9490
},
{
"epoch": 62.5,
"learning_rate": 2.9687029468702945e-05,
"loss": 1.0772,
"step": 9500
},
{
"epoch": 62.5,
"eval_loss": 0.36351296305656433,
"eval_runtime": 322.4995,
"eval_samples_per_second": 22.047,
"eval_steps_per_second": 1.38,
"eval_wer": 0.3905738894803058,
"step": 9500
},
{
"epoch": 62.56,
"learning_rate": 2.9654764465476447e-05,
"loss": 0.9703,
"step": 9510
},
{
"epoch": 62.63,
"learning_rate": 2.962249946224995e-05,
"loss": 0.9938,
"step": 9520
},
{
"epoch": 62.69,
"learning_rate": 2.9590234459023444e-05,
"loss": 0.9653,
"step": 9530
},
{
"epoch": 62.76,
"learning_rate": 2.9557969455796946e-05,
"loss": 1.0557,
"step": 9540
},
{
"epoch": 62.82,
"learning_rate": 2.9525704452570448e-05,
"loss": 0.9614,
"step": 9550
},
{
"epoch": 62.89,
"learning_rate": 2.9493439449343943e-05,
"loss": 0.9751,
"step": 9560
},
{
"epoch": 62.96,
"learning_rate": 2.9461174446117444e-05,
"loss": 0.991,
"step": 9570
},
{
"epoch": 63.03,
"learning_rate": 2.9428909442890946e-05,
"loss": 1.1272,
"step": 9580
},
{
"epoch": 63.09,
"learning_rate": 2.9396644439664445e-05,
"loss": 0.9885,
"step": 9590
},
{
"epoch": 63.16,
"learning_rate": 2.9367605936760594e-05,
"loss": 1.0147,
"step": 9600
},
{
"epoch": 63.22,
"learning_rate": 2.9335340933534096e-05,
"loss": 1.0171,
"step": 9610
},
{
"epoch": 63.29,
"learning_rate": 2.930307593030759e-05,
"loss": 1.0517,
"step": 9620
},
{
"epoch": 63.35,
"learning_rate": 2.9270810927081093e-05,
"loss": 0.969,
"step": 9630
},
{
"epoch": 63.42,
"learning_rate": 2.9238545923854595e-05,
"loss": 0.981,
"step": 9640
},
{
"epoch": 63.48,
"learning_rate": 2.920628092062809e-05,
"loss": 1.0505,
"step": 9650
},
{
"epoch": 63.55,
"learning_rate": 2.917401591740159e-05,
"loss": 1.0056,
"step": 9660
},
{
"epoch": 63.62,
"learning_rate": 2.9141750914175093e-05,
"loss": 0.9903,
"step": 9670
},
{
"epoch": 63.68,
"learning_rate": 2.9109485910948592e-05,
"loss": 1.018,
"step": 9680
},
{
"epoch": 63.75,
"learning_rate": 2.907722090772209e-05,
"loss": 1.0411,
"step": 9690
},
{
"epoch": 63.81,
"learning_rate": 2.9044955904495592e-05,
"loss": 0.9895,
"step": 9700
},
{
"epoch": 63.88,
"learning_rate": 2.901269090126909e-05,
"loss": 0.9717,
"step": 9710
},
{
"epoch": 63.94,
"learning_rate": 2.898042589804259e-05,
"loss": 1.0232,
"step": 9720
},
{
"epoch": 64.01,
"learning_rate": 2.894816089481609e-05,
"loss": 1.1187,
"step": 9730
},
{
"epoch": 64.08,
"learning_rate": 2.8915895891589593e-05,
"loss": 0.9369,
"step": 9740
},
{
"epoch": 64.14,
"learning_rate": 2.8883630888363088e-05,
"loss": 1.0006,
"step": 9750
},
{
"epoch": 64.21,
"learning_rate": 2.885136588513659e-05,
"loss": 0.9803,
"step": 9760
},
{
"epoch": 64.27,
"learning_rate": 2.881910088191009e-05,
"loss": 1.0538,
"step": 9770
},
{
"epoch": 64.34,
"learning_rate": 2.8786835878683586e-05,
"loss": 0.9796,
"step": 9780
},
{
"epoch": 64.41,
"learning_rate": 2.8754570875457088e-05,
"loss": 0.9798,
"step": 9790
},
{
"epoch": 64.47,
"learning_rate": 2.872230587223059e-05,
"loss": 1.0311,
"step": 9800
},
{
"epoch": 64.54,
"learning_rate": 2.8690040869004085e-05,
"loss": 1.0271,
"step": 9810
},
{
"epoch": 64.6,
"learning_rate": 2.8657775865777587e-05,
"loss": 0.9556,
"step": 9820
},
{
"epoch": 64.67,
"learning_rate": 2.862551086255109e-05,
"loss": 0.9889,
"step": 9830
},
{
"epoch": 64.73,
"learning_rate": 2.8593245859324587e-05,
"loss": 1.0261,
"step": 9840
},
{
"epoch": 64.8,
"learning_rate": 2.8560980856098085e-05,
"loss": 0.993,
"step": 9850
},
{
"epoch": 64.86,
"learning_rate": 2.8528715852871587e-05,
"loss": 0.9598,
"step": 9860
},
{
"epoch": 64.93,
"learning_rate": 2.8496450849645086e-05,
"loss": 1.0138,
"step": 9870
},
{
"epoch": 65.0,
"learning_rate": 2.8464185846418584e-05,
"loss": 1.0809,
"step": 9880
},
{
"epoch": 65.07,
"learning_rate": 2.8431920843192086e-05,
"loss": 1.0243,
"step": 9890
},
{
"epoch": 65.13,
"learning_rate": 2.8399655839965584e-05,
"loss": 0.9741,
"step": 9900
},
{
"epoch": 65.2,
"learning_rate": 2.8367390836739083e-05,
"loss": 1.0103,
"step": 9910
},
{
"epoch": 65.26,
"learning_rate": 2.8335125833512585e-05,
"loss": 1.0557,
"step": 9920
},
{
"epoch": 65.33,
"learning_rate": 2.8302860830286086e-05,
"loss": 0.954,
"step": 9930
},
{
"epoch": 65.39,
"learning_rate": 2.827059582705958e-05,
"loss": 0.9687,
"step": 9940
},
{
"epoch": 65.46,
"learning_rate": 2.8238330823833083e-05,
"loss": 1.0184,
"step": 9950
},
{
"epoch": 65.52,
"learning_rate": 2.8206065820606585e-05,
"loss": 1.0414,
"step": 9960
},
{
"epoch": 65.59,
"learning_rate": 2.817380081738008e-05,
"loss": 0.9417,
"step": 9970
},
{
"epoch": 65.65,
"learning_rate": 2.8141535814153582e-05,
"loss": 0.9696,
"step": 9980
},
{
"epoch": 65.72,
"learning_rate": 2.8109270810927084e-05,
"loss": 0.9997,
"step": 9990
},
{
"epoch": 65.79,
"learning_rate": 2.8077005807700582e-05,
"loss": 1.0344,
"step": 10000
},
{
"epoch": 65.79,
"eval_loss": 0.3469240963459015,
"eval_runtime": 323.3838,
"eval_samples_per_second": 21.986,
"eval_steps_per_second": 1.376,
"eval_wer": 0.37708313171392627,
"step": 10000
},
{
"epoch": 65.85,
"learning_rate": 2.804474080447408e-05,
"loss": 0.9707,
"step": 10010
},
{
"epoch": 65.92,
"learning_rate": 2.8012475801247582e-05,
"loss": 1.0091,
"step": 10020
},
{
"epoch": 65.98,
"learning_rate": 2.798021079802108e-05,
"loss": 1.058,
"step": 10030
},
{
"epoch": 66.05,
"learning_rate": 2.794794579479458e-05,
"loss": 1.0737,
"step": 10040
},
{
"epoch": 66.12,
"learning_rate": 2.791568079156808e-05,
"loss": 0.9489,
"step": 10050
},
{
"epoch": 66.18,
"learning_rate": 2.788341578834158e-05,
"loss": 0.9957,
"step": 10060
},
{
"epoch": 66.25,
"learning_rate": 2.7851150785115078e-05,
"loss": 1.037,
"step": 10070
},
{
"epoch": 66.31,
"learning_rate": 2.781888578188858e-05,
"loss": 0.9564,
"step": 10080
},
{
"epoch": 66.38,
"learning_rate": 2.7786620778662078e-05,
"loss": 0.9497,
"step": 10090
},
{
"epoch": 66.45,
"learning_rate": 2.7754355775435576e-05,
"loss": 0.9675,
"step": 10100
},
{
"epoch": 66.51,
"learning_rate": 2.7722090772209078e-05,
"loss": 1.0325,
"step": 10110
},
{
"epoch": 66.58,
"learning_rate": 2.768982576898258e-05,
"loss": 0.9818,
"step": 10120
},
{
"epoch": 66.64,
"learning_rate": 2.7657560765756075e-05,
"loss": 0.9883,
"step": 10130
},
{
"epoch": 66.71,
"learning_rate": 2.7625295762529577e-05,
"loss": 1.0175,
"step": 10140
},
{
"epoch": 66.77,
"learning_rate": 2.759303075930308e-05,
"loss": 1.0583,
"step": 10150
},
{
"epoch": 66.84,
"learning_rate": 2.7560765756076574e-05,
"loss": 0.9348,
"step": 10160
},
{
"epoch": 66.9,
"learning_rate": 2.7528500752850076e-05,
"loss": 0.9898,
"step": 10170
},
{
"epoch": 66.97,
"learning_rate": 2.7496235749623577e-05,
"loss": 0.9972,
"step": 10180
},
{
"epoch": 67.04,
"learning_rate": 2.7463970746397076e-05,
"loss": 1.1137,
"step": 10190
},
{
"epoch": 67.1,
"learning_rate": 2.7431705743170574e-05,
"loss": 0.9396,
"step": 10200
},
{
"epoch": 67.17,
"learning_rate": 2.7399440739944076e-05,
"loss": 0.9777,
"step": 10210
},
{
"epoch": 67.24,
"learning_rate": 2.7367175736717575e-05,
"loss": 1.0345,
"step": 10220
},
{
"epoch": 67.3,
"learning_rate": 2.7334910733491073e-05,
"loss": 1.0047,
"step": 10230
},
{
"epoch": 67.37,
"learning_rate": 2.7302645730264575e-05,
"loss": 0.9188,
"step": 10240
},
{
"epoch": 67.43,
"learning_rate": 2.7270380727038073e-05,
"loss": 0.9834,
"step": 10250
},
{
"epoch": 67.5,
"learning_rate": 2.723811572381157e-05,
"loss": 1.038,
"step": 10260
},
{
"epoch": 67.56,
"learning_rate": 2.7205850720585073e-05,
"loss": 0.9701,
"step": 10270
},
{
"epoch": 67.63,
"learning_rate": 2.7173585717358572e-05,
"loss": 0.99,
"step": 10280
},
{
"epoch": 67.69,
"learning_rate": 2.714132071413207e-05,
"loss": 0.9713,
"step": 10290
},
{
"epoch": 67.76,
"learning_rate": 2.7109055710905572e-05,
"loss": 1.033,
"step": 10300
},
{
"epoch": 67.82,
"learning_rate": 2.707679070767907e-05,
"loss": 0.947,
"step": 10310
},
{
"epoch": 67.89,
"learning_rate": 2.704452570445257e-05,
"loss": 0.9757,
"step": 10320
},
{
"epoch": 67.96,
"learning_rate": 2.701226070122607e-05,
"loss": 0.9942,
"step": 10330
},
{
"epoch": 68.03,
"learning_rate": 2.6979995697999573e-05,
"loss": 1.108,
"step": 10340
},
{
"epoch": 68.09,
"learning_rate": 2.694773069477307e-05,
"loss": 0.947,
"step": 10350
},
{
"epoch": 68.16,
"learning_rate": 2.691546569154657e-05,
"loss": 0.974,
"step": 10360
},
{
"epoch": 68.22,
"learning_rate": 2.688320068832007e-05,
"loss": 0.9951,
"step": 10370
},
{
"epoch": 68.29,
"learning_rate": 2.685093568509357e-05,
"loss": 1.0165,
"step": 10380
},
{
"epoch": 68.35,
"learning_rate": 2.6818670681867068e-05,
"loss": 0.9588,
"step": 10390
},
{
"epoch": 68.42,
"learning_rate": 2.678640567864057e-05,
"loss": 0.9858,
"step": 10400
},
{
"epoch": 68.48,
"learning_rate": 2.6754140675414068e-05,
"loss": 1.0087,
"step": 10410
},
{
"epoch": 68.55,
"learning_rate": 2.6721875672187567e-05,
"loss": 0.9829,
"step": 10420
},
{
"epoch": 68.62,
"learning_rate": 2.668961066896107e-05,
"loss": 0.9589,
"step": 10430
},
{
"epoch": 68.68,
"learning_rate": 2.6657345665734567e-05,
"loss": 0.9937,
"step": 10440
},
{
"epoch": 68.75,
"learning_rate": 2.6625080662508065e-05,
"loss": 1.0606,
"step": 10450
},
{
"epoch": 68.81,
"learning_rate": 2.6592815659281567e-05,
"loss": 0.9556,
"step": 10460
},
{
"epoch": 68.88,
"learning_rate": 2.6560550656055066e-05,
"loss": 0.9569,
"step": 10470
},
{
"epoch": 68.94,
"learning_rate": 2.6528285652828564e-05,
"loss": 0.9859,
"step": 10480
},
{
"epoch": 69.01,
"learning_rate": 2.6496020649602066e-05,
"loss": 1.1064,
"step": 10490
},
{
"epoch": 69.08,
"learning_rate": 2.6463755646375564e-05,
"loss": 0.9457,
"step": 10500
},
{
"epoch": 69.08,
"eval_loss": 0.3435259163379669,
"eval_runtime": 322.1557,
"eval_samples_per_second": 22.07,
"eval_steps_per_second": 1.381,
"eval_wer": 0.37350237104422723,
"step": 10500
},
{
"epoch": 69.14,
"learning_rate": 2.6431490643149066e-05,
"loss": 0.9544,
"step": 10510
},
{
"epoch": 69.21,
"learning_rate": 2.6399225639922565e-05,
"loss": 0.9302,
"step": 10520
},
{
"epoch": 69.27,
"learning_rate": 2.6366960636696066e-05,
"loss": 1.0351,
"step": 10530
},
{
"epoch": 69.34,
"learning_rate": 2.6334695633469565e-05,
"loss": 0.9441,
"step": 10540
},
{
"epoch": 69.41,
"learning_rate": 2.6302430630243063e-05,
"loss": 1.0024,
"step": 10550
},
{
"epoch": 69.47,
"learning_rate": 2.6270165627016565e-05,
"loss": 0.9795,
"step": 10560
},
{
"epoch": 69.54,
"learning_rate": 2.6237900623790063e-05,
"loss": 1.0247,
"step": 10570
},
{
"epoch": 69.6,
"learning_rate": 2.6205635620563562e-05,
"loss": 0.9349,
"step": 10580
},
{
"epoch": 69.67,
"learning_rate": 2.6173370617337064e-05,
"loss": 0.9871,
"step": 10590
},
{
"epoch": 69.73,
"learning_rate": 2.6141105614110562e-05,
"loss": 1.0088,
"step": 10600
},
{
"epoch": 69.8,
"learning_rate": 2.610884061088406e-05,
"loss": 0.9886,
"step": 10610
},
{
"epoch": 69.86,
"learning_rate": 2.6076575607657562e-05,
"loss": 0.9544,
"step": 10620
},
{
"epoch": 69.93,
"learning_rate": 2.604431060443106e-05,
"loss": 0.979,
"step": 10630
},
{
"epoch": 70.0,
"learning_rate": 2.601204560120456e-05,
"loss": 0.9918,
"step": 10640
},
{
"epoch": 70.07,
"learning_rate": 2.597978059797806e-05,
"loss": 1.0281,
"step": 10650
},
{
"epoch": 70.13,
"learning_rate": 2.594751559475156e-05,
"loss": 0.9626,
"step": 10660
},
{
"epoch": 70.2,
"learning_rate": 2.5915250591525058e-05,
"loss": 1.0007,
"step": 10670
},
{
"epoch": 70.26,
"learning_rate": 2.588298558829856e-05,
"loss": 1.0266,
"step": 10680
},
{
"epoch": 70.33,
"learning_rate": 2.5850720585072058e-05,
"loss": 0.9427,
"step": 10690
},
{
"epoch": 70.39,
"learning_rate": 2.581845558184556e-05,
"loss": 0.9498,
"step": 10700
},
{
"epoch": 70.46,
"learning_rate": 2.5786190578619058e-05,
"loss": 0.9637,
"step": 10710
},
{
"epoch": 70.52,
"learning_rate": 2.575392557539256e-05,
"loss": 1.0638,
"step": 10720
},
{
"epoch": 70.59,
"learning_rate": 2.572166057216606e-05,
"loss": 0.9378,
"step": 10730
},
{
"epoch": 70.65,
"learning_rate": 2.5689395568939557e-05,
"loss": 0.9692,
"step": 10740
},
{
"epoch": 70.72,
"learning_rate": 2.565713056571306e-05,
"loss": 0.968,
"step": 10750
},
{
"epoch": 70.79,
"learning_rate": 2.5624865562486557e-05,
"loss": 1.0133,
"step": 10760
},
{
"epoch": 70.85,
"learning_rate": 2.5592600559260056e-05,
"loss": 0.9445,
"step": 10770
},
{
"epoch": 70.92,
"learning_rate": 2.5560335556033557e-05,
"loss": 0.9751,
"step": 10780
},
{
"epoch": 70.98,
"learning_rate": 2.5528070552807056e-05,
"loss": 0.9768,
"step": 10790
},
{
"epoch": 71.05,
"learning_rate": 2.5495805549580554e-05,
"loss": 1.058,
"step": 10800
},
{
"epoch": 71.12,
"learning_rate": 2.5463540546354056e-05,
"loss": 0.9296,
"step": 10810
},
{
"epoch": 71.18,
"learning_rate": 2.5431275543127555e-05,
"loss": 0.9902,
"step": 10820
},
{
"epoch": 71.25,
"learning_rate": 2.5399010539901053e-05,
"loss": 1.012,
"step": 10830
},
{
"epoch": 71.31,
"learning_rate": 2.5366745536674555e-05,
"loss": 0.953,
"step": 10840
},
{
"epoch": 71.38,
"learning_rate": 2.5334480533448053e-05,
"loss": 0.9268,
"step": 10850
},
{
"epoch": 71.45,
"learning_rate": 2.5302215530221555e-05,
"loss": 0.9632,
"step": 10860
},
{
"epoch": 71.51,
"learning_rate": 2.5269950526995053e-05,
"loss": 1.0485,
"step": 10870
},
{
"epoch": 71.58,
"learning_rate": 2.5237685523768552e-05,
"loss": 0.9445,
"step": 10880
},
{
"epoch": 71.64,
"learning_rate": 2.5205420520542054e-05,
"loss": 0.9855,
"step": 10890
},
{
"epoch": 71.71,
"learning_rate": 2.5173155517315552e-05,
"loss": 0.9577,
"step": 10900
},
{
"epoch": 71.77,
"learning_rate": 2.514089051408905e-05,
"loss": 1.0271,
"step": 10910
},
{
"epoch": 71.84,
"learning_rate": 2.5108625510862552e-05,
"loss": 0.9161,
"step": 10920
},
{
"epoch": 71.9,
"learning_rate": 2.507636050763605e-05,
"loss": 0.9792,
"step": 10930
},
{
"epoch": 71.97,
"learning_rate": 2.5044095504409553e-05,
"loss": 0.9772,
"step": 10940
},
{
"epoch": 72.04,
"learning_rate": 2.501183050118305e-05,
"loss": 1.1076,
"step": 10950
},
{
"epoch": 72.1,
"learning_rate": 2.497956549795655e-05,
"loss": 0.9289,
"step": 10960
},
{
"epoch": 72.17,
"learning_rate": 2.494730049473005e-05,
"loss": 0.9363,
"step": 10970
},
{
"epoch": 72.24,
"learning_rate": 2.491503549150355e-05,
"loss": 0.9921,
"step": 10980
},
{
"epoch": 72.3,
"learning_rate": 2.4882770488277048e-05,
"loss": 0.9726,
"step": 10990
},
{
"epoch": 72.37,
"learning_rate": 2.485050548505055e-05,
"loss": 0.9307,
"step": 11000
},
{
"epoch": 72.37,
"eval_loss": 0.3518897294998169,
"eval_runtime": 318.8359,
"eval_samples_per_second": 22.3,
"eval_steps_per_second": 1.396,
"eval_wer": 0.37615406948611246,
"step": 11000
},
{
"epoch": 72.43,
"learning_rate": 2.481824048182405e-05,
"loss": 0.9837,
"step": 11010
},
{
"epoch": 72.5,
"learning_rate": 2.478597547859755e-05,
"loss": 1.0279,
"step": 11020
},
{
"epoch": 72.56,
"learning_rate": 2.475371047537105e-05,
"loss": 0.934,
"step": 11030
},
{
"epoch": 72.63,
"learning_rate": 2.4721445472144547e-05,
"loss": 0.9445,
"step": 11040
},
{
"epoch": 72.69,
"learning_rate": 2.468918046891805e-05,
"loss": 0.9884,
"step": 11050
},
{
"epoch": 72.76,
"learning_rate": 2.4656915465691547e-05,
"loss": 0.9982,
"step": 11060
},
{
"epoch": 72.82,
"learning_rate": 2.4624650462465046e-05,
"loss": 0.9551,
"step": 11070
},
{
"epoch": 72.89,
"learning_rate": 2.4592385459238547e-05,
"loss": 0.9328,
"step": 11080
},
{
"epoch": 72.96,
"learning_rate": 2.4560120456012046e-05,
"loss": 0.9785,
"step": 11090
},
{
"epoch": 73.03,
"learning_rate": 2.4527855452785544e-05,
"loss": 1.1105,
"step": 11100
},
{
"epoch": 73.09,
"learning_rate": 2.4495590449559046e-05,
"loss": 0.9251,
"step": 11110
},
{
"epoch": 73.16,
"learning_rate": 2.4463325446332545e-05,
"loss": 0.9442,
"step": 11120
},
{
"epoch": 73.22,
"learning_rate": 2.4431060443106046e-05,
"loss": 0.9843,
"step": 11130
},
{
"epoch": 73.29,
"learning_rate": 2.4398795439879545e-05,
"loss": 1.0037,
"step": 11140
},
{
"epoch": 73.35,
"learning_rate": 2.4366530436653043e-05,
"loss": 0.921,
"step": 11150
},
{
"epoch": 73.42,
"learning_rate": 2.4334265433426545e-05,
"loss": 0.9825,
"step": 11160
},
{
"epoch": 73.48,
"learning_rate": 2.4302000430200043e-05,
"loss": 0.9837,
"step": 11170
},
{
"epoch": 73.55,
"learning_rate": 2.4269735426973545e-05,
"loss": 0.998,
"step": 11180
},
{
"epoch": 73.62,
"learning_rate": 2.4237470423747044e-05,
"loss": 0.9317,
"step": 11190
},
{
"epoch": 73.68,
"learning_rate": 2.4205205420520542e-05,
"loss": 0.958,
"step": 11200
},
{
"epoch": 73.75,
"learning_rate": 2.4172940417294044e-05,
"loss": 1.0169,
"step": 11210
},
{
"epoch": 73.81,
"learning_rate": 2.4140675414067542e-05,
"loss": 0.9379,
"step": 11220
},
{
"epoch": 73.88,
"learning_rate": 2.410841041084104e-05,
"loss": 0.9534,
"step": 11230
},
{
"epoch": 73.94,
"learning_rate": 2.4076145407614543e-05,
"loss": 0.9791,
"step": 11240
},
{
"epoch": 74.01,
"learning_rate": 2.404388040438804e-05,
"loss": 1.1134,
"step": 11250
},
{
"epoch": 74.08,
"learning_rate": 2.401161540116154e-05,
"loss": 0.9459,
"step": 11260
},
{
"epoch": 74.14,
"learning_rate": 2.397935039793504e-05,
"loss": 0.9346,
"step": 11270
},
{
"epoch": 74.21,
"learning_rate": 2.394708539470854e-05,
"loss": 0.932,
"step": 11280
},
{
"epoch": 74.27,
"learning_rate": 2.3914820391482038e-05,
"loss": 1.0175,
"step": 11290
},
{
"epoch": 74.34,
"learning_rate": 2.388255538825554e-05,
"loss": 0.9262,
"step": 11300
},
{
"epoch": 74.41,
"learning_rate": 2.385029038502904e-05,
"loss": 0.9554,
"step": 11310
},
{
"epoch": 74.47,
"learning_rate": 2.3818025381802537e-05,
"loss": 0.9507,
"step": 11320
},
{
"epoch": 74.54,
"learning_rate": 2.378576037857604e-05,
"loss": 0.9911,
"step": 11330
},
{
"epoch": 74.6,
"learning_rate": 2.3753495375349537e-05,
"loss": 0.9243,
"step": 11340
},
{
"epoch": 74.67,
"learning_rate": 2.372123037212304e-05,
"loss": 0.9713,
"step": 11350
},
{
"epoch": 74.73,
"learning_rate": 2.3688965368896537e-05,
"loss": 1.0012,
"step": 11360
},
{
"epoch": 74.8,
"learning_rate": 2.365670036567004e-05,
"loss": 0.9809,
"step": 11370
},
{
"epoch": 74.86,
"learning_rate": 2.3624435362443537e-05,
"loss": 0.9423,
"step": 11380
},
{
"epoch": 74.93,
"learning_rate": 2.3592170359217036e-05,
"loss": 0.9665,
"step": 11390
},
{
"epoch": 75.0,
"learning_rate": 2.3559905355990538e-05,
"loss": 1.0262,
"step": 11400
},
{
"epoch": 75.07,
"learning_rate": 2.3527640352764036e-05,
"loss": 1.027,
"step": 11410
},
{
"epoch": 75.13,
"learning_rate": 2.3495375349537535e-05,
"loss": 0.9059,
"step": 11420
},
{
"epoch": 75.2,
"learning_rate": 2.3463110346311036e-05,
"loss": 0.9484,
"step": 11430
},
{
"epoch": 75.26,
"learning_rate": 2.3430845343084535e-05,
"loss": 1.012,
"step": 11440
},
{
"epoch": 75.33,
"learning_rate": 2.3398580339858033e-05,
"loss": 0.9146,
"step": 11450
},
{
"epoch": 75.39,
"learning_rate": 2.3366315336631535e-05,
"loss": 0.9199,
"step": 11460
},
{
"epoch": 75.46,
"learning_rate": 2.3334050333405033e-05,
"loss": 0.9703,
"step": 11470
},
{
"epoch": 75.52,
"learning_rate": 2.3301785330178532e-05,
"loss": 1.0307,
"step": 11480
},
{
"epoch": 75.59,
"learning_rate": 2.3269520326952034e-05,
"loss": 0.918,
"step": 11490
},
{
"epoch": 75.65,
"learning_rate": 2.3237255323725532e-05,
"loss": 0.9523,
"step": 11500
},
{
"epoch": 75.65,
"eval_loss": 0.34434974193573,
"eval_runtime": 319.3514,
"eval_samples_per_second": 22.264,
"eval_steps_per_second": 1.393,
"eval_wer": 0.3666118261879415,
"step": 11500
},
{
"epoch": 75.72,
"learning_rate": 2.320499032049903e-05,
"loss": 0.9908,
"step": 11510
},
{
"epoch": 75.79,
"learning_rate": 2.3172725317272532e-05,
"loss": 0.9971,
"step": 11520
},
{
"epoch": 75.85,
"learning_rate": 2.3140460314046034e-05,
"loss": 0.9447,
"step": 11530
},
{
"epoch": 75.92,
"learning_rate": 2.3108195310819533e-05,
"loss": 0.961,
"step": 11540
},
{
"epoch": 75.98,
"learning_rate": 2.307593030759303e-05,
"loss": 0.9799,
"step": 11550
},
{
"epoch": 76.05,
"learning_rate": 2.3043665304366533e-05,
"loss": 1.0397,
"step": 11560
},
{
"epoch": 76.12,
"learning_rate": 2.301140030114003e-05,
"loss": 0.9228,
"step": 11570
},
{
"epoch": 76.18,
"learning_rate": 2.297913529791353e-05,
"loss": 0.9437,
"step": 11580
},
{
"epoch": 76.25,
"learning_rate": 2.294687029468703e-05,
"loss": 1.0054,
"step": 11590
},
{
"epoch": 76.31,
"learning_rate": 2.291460529146053e-05,
"loss": 0.9331,
"step": 11600
},
{
"epoch": 76.38,
"learning_rate": 2.288234028823403e-05,
"loss": 0.9308,
"step": 11610
},
{
"epoch": 76.45,
"learning_rate": 2.285007528500753e-05,
"loss": 0.9729,
"step": 11620
},
{
"epoch": 76.51,
"learning_rate": 2.281781028178103e-05,
"loss": 1.0308,
"step": 11630
},
{
"epoch": 76.58,
"learning_rate": 2.2785545278554527e-05,
"loss": 0.9252,
"step": 11640
},
{
"epoch": 76.64,
"learning_rate": 2.275328027532803e-05,
"loss": 0.9327,
"step": 11650
},
{
"epoch": 76.71,
"learning_rate": 2.2721015272101527e-05,
"loss": 0.9551,
"step": 11660
},
{
"epoch": 76.77,
"learning_rate": 2.2688750268875026e-05,
"loss": 1.0287,
"step": 11670
},
{
"epoch": 76.84,
"learning_rate": 2.2656485265648527e-05,
"loss": 0.916,
"step": 11680
},
{
"epoch": 76.9,
"learning_rate": 2.262422026242203e-05,
"loss": 0.9475,
"step": 11690
},
{
"epoch": 76.97,
"learning_rate": 2.2591955259195524e-05,
"loss": 0.9593,
"step": 11700
},
{
"epoch": 77.04,
"learning_rate": 2.2559690255969026e-05,
"loss": 1.0743,
"step": 11710
},
{
"epoch": 77.1,
"learning_rate": 2.2527425252742528e-05,
"loss": 0.8937,
"step": 11720
},
{
"epoch": 77.17,
"learning_rate": 2.2495160249516026e-05,
"loss": 0.9349,
"step": 11730
},
{
"epoch": 77.24,
"learning_rate": 2.2462895246289525e-05,
"loss": 0.961,
"step": 11740
},
{
"epoch": 77.3,
"learning_rate": 2.2430630243063027e-05,
"loss": 0.9697,
"step": 11750
},
{
"epoch": 77.37,
"learning_rate": 2.2398365239836525e-05,
"loss": 0.9366,
"step": 11760
},
{
"epoch": 77.43,
"learning_rate": 2.2366100236610023e-05,
"loss": 0.9625,
"step": 11770
},
{
"epoch": 77.5,
"learning_rate": 2.2333835233383525e-05,
"loss": 1.0514,
"step": 11780
},
{
"epoch": 77.56,
"learning_rate": 2.2301570230157024e-05,
"loss": 0.9365,
"step": 11790
},
{
"epoch": 77.63,
"learning_rate": 2.2269305226930522e-05,
"loss": 0.9364,
"step": 11800
},
{
"epoch": 77.69,
"learning_rate": 2.2237040223704024e-05,
"loss": 0.9634,
"step": 11810
},
{
"epoch": 77.76,
"learning_rate": 2.2204775220477522e-05,
"loss": 0.9954,
"step": 11820
},
{
"epoch": 77.82,
"learning_rate": 2.217251021725102e-05,
"loss": 0.9266,
"step": 11830
},
{
"epoch": 77.89,
"learning_rate": 2.2140245214024523e-05,
"loss": 0.9328,
"step": 11840
},
{
"epoch": 77.96,
"learning_rate": 2.210798021079802e-05,
"loss": 0.9468,
"step": 11850
},
{
"epoch": 78.03,
"learning_rate": 2.207571520757152e-05,
"loss": 1.0674,
"step": 11860
},
{
"epoch": 78.09,
"learning_rate": 2.204345020434502e-05,
"loss": 0.9287,
"step": 11870
},
{
"epoch": 78.16,
"learning_rate": 2.2011185201118523e-05,
"loss": 0.9528,
"step": 11880
},
{
"epoch": 78.22,
"learning_rate": 2.1978920197892018e-05,
"loss": 0.9728,
"step": 11890
},
{
"epoch": 78.29,
"learning_rate": 2.194665519466552e-05,
"loss": 1.0022,
"step": 11900
},
{
"epoch": 78.35,
"learning_rate": 2.1914390191439022e-05,
"loss": 0.9094,
"step": 11910
},
{
"epoch": 78.42,
"learning_rate": 2.1882125188212517e-05,
"loss": 0.9332,
"step": 11920
},
{
"epoch": 78.48,
"learning_rate": 2.184986018498602e-05,
"loss": 0.9625,
"step": 11930
},
{
"epoch": 78.55,
"learning_rate": 2.181759518175952e-05,
"loss": 0.9592,
"step": 11940
},
{
"epoch": 78.62,
"learning_rate": 2.178533017853302e-05,
"loss": 0.9387,
"step": 11950
},
{
"epoch": 78.68,
"learning_rate": 2.1753065175306517e-05,
"loss": 0.9453,
"step": 11960
},
{
"epoch": 78.75,
"learning_rate": 2.172402667240267e-05,
"loss": 1.0156,
"step": 11970
},
{
"epoch": 78.81,
"learning_rate": 2.1691761669176165e-05,
"loss": 0.9209,
"step": 11980
},
{
"epoch": 78.88,
"learning_rate": 2.1659496665949667e-05,
"loss": 0.899,
"step": 11990
},
{
"epoch": 78.94,
"learning_rate": 2.162723166272317e-05,
"loss": 0.9523,
"step": 12000
},
{
"epoch": 78.94,
"eval_loss": 0.35024702548980713,
"eval_runtime": 330.0841,
"eval_samples_per_second": 21.54,
"eval_steps_per_second": 1.348,
"eval_wer": 0.375708893835285,
"step": 12000
},
{
"epoch": 79.01,
"learning_rate": 2.1594966659496664e-05,
"loss": 1.0633,
"step": 12010
},
{
"epoch": 79.08,
"learning_rate": 2.1562701656270166e-05,
"loss": 0.9186,
"step": 12020
},
{
"epoch": 79.14,
"learning_rate": 2.1530436653043668e-05,
"loss": 0.9239,
"step": 12030
},
{
"epoch": 79.21,
"learning_rate": 2.1498171649817166e-05,
"loss": 0.9601,
"step": 12040
},
{
"epoch": 79.27,
"learning_rate": 2.1465906646590664e-05,
"loss": 0.9821,
"step": 12050
},
{
"epoch": 79.34,
"learning_rate": 2.1433641643364166e-05,
"loss": 0.9105,
"step": 12060
},
{
"epoch": 79.41,
"learning_rate": 2.1401376640137665e-05,
"loss": 0.935,
"step": 12070
},
{
"epoch": 79.47,
"learning_rate": 2.1369111636911163e-05,
"loss": 0.9417,
"step": 12080
},
{
"epoch": 79.54,
"learning_rate": 2.1336846633684665e-05,
"loss": 1.0025,
"step": 12090
},
{
"epoch": 79.6,
"learning_rate": 2.1304581630458163e-05,
"loss": 0.8884,
"step": 12100
},
{
"epoch": 79.67,
"learning_rate": 2.1272316627231662e-05,
"loss": 0.9733,
"step": 12110
},
{
"epoch": 79.73,
"learning_rate": 2.1240051624005164e-05,
"loss": 0.9558,
"step": 12120
},
{
"epoch": 79.8,
"learning_rate": 2.1207786620778665e-05,
"loss": 0.9564,
"step": 12130
},
{
"epoch": 79.86,
"learning_rate": 2.117552161755216e-05,
"loss": 0.9139,
"step": 12140
},
{
"epoch": 79.93,
"learning_rate": 2.1143256614325662e-05,
"loss": 0.9615,
"step": 12150
},
{
"epoch": 80.0,
"learning_rate": 2.1110991611099164e-05,
"loss": 0.9671,
"step": 12160
},
{
"epoch": 80.07,
"learning_rate": 2.107872660787266e-05,
"loss": 1.0023,
"step": 12170
},
{
"epoch": 80.13,
"learning_rate": 2.104646160464616e-05,
"loss": 0.9213,
"step": 12180
},
{
"epoch": 80.2,
"learning_rate": 2.1014196601419663e-05,
"loss": 0.9354,
"step": 12190
},
{
"epoch": 80.26,
"learning_rate": 2.0981931598193158e-05,
"loss": 0.9967,
"step": 12200
},
{
"epoch": 80.33,
"learning_rate": 2.094966659496666e-05,
"loss": 0.8934,
"step": 12210
},
{
"epoch": 80.39,
"learning_rate": 2.091740159174016e-05,
"loss": 0.9723,
"step": 12220
},
{
"epoch": 80.46,
"learning_rate": 2.0885136588513656e-05,
"loss": 0.9434,
"step": 12230
},
{
"epoch": 80.52,
"learning_rate": 2.0852871585287158e-05,
"loss": 1.0069,
"step": 12240
},
{
"epoch": 80.59,
"learning_rate": 2.082060658206066e-05,
"loss": 0.9033,
"step": 12250
},
{
"epoch": 80.65,
"learning_rate": 2.078834157883416e-05,
"loss": 0.9583,
"step": 12260
},
{
"epoch": 80.72,
"learning_rate": 2.0756076575607657e-05,
"loss": 0.9529,
"step": 12270
},
{
"epoch": 80.79,
"learning_rate": 2.072381157238116e-05,
"loss": 1.0007,
"step": 12280
},
{
"epoch": 80.85,
"learning_rate": 2.069154656915466e-05,
"loss": 0.9083,
"step": 12290
},
{
"epoch": 80.92,
"learning_rate": 2.0659281565928155e-05,
"loss": 0.9298,
"step": 12300
},
{
"epoch": 80.98,
"learning_rate": 2.0627016562701657e-05,
"loss": 0.9816,
"step": 12310
},
{
"epoch": 81.05,
"learning_rate": 2.059475155947516e-05,
"loss": 1.0128,
"step": 12320
},
{
"epoch": 81.12,
"learning_rate": 2.0562486556248654e-05,
"loss": 0.8981,
"step": 12330
},
{
"epoch": 81.18,
"learning_rate": 2.0530221553022156e-05,
"loss": 0.9378,
"step": 12340
},
{
"epoch": 81.25,
"learning_rate": 2.0497956549795658e-05,
"loss": 0.9966,
"step": 12350
},
{
"epoch": 81.31,
"learning_rate": 2.0465691546569153e-05,
"loss": 0.9404,
"step": 12360
},
{
"epoch": 81.38,
"learning_rate": 2.0433426543342655e-05,
"loss": 0.929,
"step": 12370
},
{
"epoch": 81.45,
"learning_rate": 2.0401161540116156e-05,
"loss": 0.9378,
"step": 12380
},
{
"epoch": 81.51,
"learning_rate": 2.036889653688965e-05,
"loss": 1.0067,
"step": 12390
},
{
"epoch": 81.58,
"learning_rate": 2.0336631533663153e-05,
"loss": 0.9035,
"step": 12400
},
{
"epoch": 81.64,
"learning_rate": 2.0304366530436655e-05,
"loss": 0.9394,
"step": 12410
},
{
"epoch": 81.71,
"learning_rate": 2.027210152721015e-05,
"loss": 0.9561,
"step": 12420
},
{
"epoch": 81.77,
"learning_rate": 2.0239836523983652e-05,
"loss": 0.9793,
"step": 12430
},
{
"epoch": 81.84,
"learning_rate": 2.0207571520757154e-05,
"loss": 0.9155,
"step": 12440
},
{
"epoch": 81.9,
"learning_rate": 2.0175306517530656e-05,
"loss": 0.946,
"step": 12450
},
{
"epoch": 81.97,
"learning_rate": 2.014304151430415e-05,
"loss": 0.956,
"step": 12460
},
{
"epoch": 82.04,
"learning_rate": 2.0110776511077652e-05,
"loss": 1.0618,
"step": 12470
},
{
"epoch": 82.1,
"learning_rate": 2.0078511507851154e-05,
"loss": 0.9279,
"step": 12480
},
{
"epoch": 82.17,
"learning_rate": 2.004624650462465e-05,
"loss": 0.936,
"step": 12490
},
{
"epoch": 82.24,
"learning_rate": 2.001398150139815e-05,
"loss": 0.9475,
"step": 12500
},
{
"epoch": 82.24,
"eval_loss": 0.3509426712989807,
"eval_runtime": 320.8861,
"eval_samples_per_second": 22.157,
"eval_steps_per_second": 1.387,
"eval_wer": 0.3642504596922481,
"step": 12500
},
{
"epoch": 82.3,
"learning_rate": 1.9981716498171653e-05,
"loss": 0.9396,
"step": 12510
},
{
"epoch": 82.37,
"learning_rate": 1.9949451494945148e-05,
"loss": 0.8946,
"step": 12520
},
{
"epoch": 82.43,
"learning_rate": 1.991718649171865e-05,
"loss": 0.9433,
"step": 12530
},
{
"epoch": 82.5,
"learning_rate": 1.988492148849215e-05,
"loss": 0.9939,
"step": 12540
},
{
"epoch": 82.56,
"learning_rate": 1.9852656485265647e-05,
"loss": 0.9078,
"step": 12550
},
{
"epoch": 82.63,
"learning_rate": 1.982039148203915e-05,
"loss": 0.9229,
"step": 12560
},
{
"epoch": 82.69,
"learning_rate": 1.978812647881265e-05,
"loss": 0.9342,
"step": 12570
},
{
"epoch": 82.76,
"learning_rate": 1.9755861475586145e-05,
"loss": 0.9655,
"step": 12580
},
{
"epoch": 82.82,
"learning_rate": 1.9723596472359647e-05,
"loss": 0.9196,
"step": 12590
},
{
"epoch": 82.89,
"learning_rate": 1.969133146913315e-05,
"loss": 0.922,
"step": 12600
},
{
"epoch": 82.96,
"learning_rate": 1.9659066465906644e-05,
"loss": 0.9361,
"step": 12610
},
{
"epoch": 83.03,
"learning_rate": 1.9626801462680146e-05,
"loss": 1.0438,
"step": 12620
},
{
"epoch": 83.09,
"learning_rate": 1.9594536459453648e-05,
"loss": 0.9,
"step": 12630
},
{
"epoch": 83.16,
"learning_rate": 1.9562271456227146e-05,
"loss": 0.9131,
"step": 12640
},
{
"epoch": 83.22,
"learning_rate": 1.9530006453000644e-05,
"loss": 0.9195,
"step": 12650
},
{
"epoch": 83.29,
"learning_rate": 1.9497741449774146e-05,
"loss": 0.9638,
"step": 12660
},
{
"epoch": 83.35,
"learning_rate": 1.9465476446547648e-05,
"loss": 0.9233,
"step": 12670
},
{
"epoch": 83.42,
"learning_rate": 1.9433211443321143e-05,
"loss": 0.9477,
"step": 12680
},
{
"epoch": 83.48,
"learning_rate": 1.9400946440094645e-05,
"loss": 0.9753,
"step": 12690
},
{
"epoch": 83.55,
"learning_rate": 1.9368681436868147e-05,
"loss": 0.936,
"step": 12700
},
{
"epoch": 83.62,
"learning_rate": 1.9336416433641642e-05,
"loss": 0.9149,
"step": 12710
},
{
"epoch": 83.68,
"learning_rate": 1.9304151430415144e-05,
"loss": 0.9407,
"step": 12720
},
{
"epoch": 83.75,
"learning_rate": 1.9271886427188645e-05,
"loss": 0.9956,
"step": 12730
},
{
"epoch": 83.81,
"learning_rate": 1.923962142396214e-05,
"loss": 0.9145,
"step": 12740
},
{
"epoch": 83.88,
"learning_rate": 1.9207356420735642e-05,
"loss": 0.932,
"step": 12750
},
{
"epoch": 83.94,
"learning_rate": 1.9175091417509144e-05,
"loss": 0.9436,
"step": 12760
},
{
"epoch": 84.01,
"learning_rate": 1.914282641428264e-05,
"loss": 1.0773,
"step": 12770
},
{
"epoch": 84.08,
"learning_rate": 1.911056141105614e-05,
"loss": 0.8948,
"step": 12780
},
{
"epoch": 84.14,
"learning_rate": 1.9078296407829643e-05,
"loss": 0.9046,
"step": 12790
},
{
"epoch": 84.21,
"learning_rate": 1.904603140460314e-05,
"loss": 0.9352,
"step": 12800
},
{
"epoch": 84.27,
"learning_rate": 1.901376640137664e-05,
"loss": 1.0043,
"step": 12810
},
{
"epoch": 84.34,
"learning_rate": 1.898150139815014e-05,
"loss": 0.9053,
"step": 12820
},
{
"epoch": 84.41,
"learning_rate": 1.894923639492364e-05,
"loss": 0.8973,
"step": 12830
},
{
"epoch": 84.47,
"learning_rate": 1.8916971391697138e-05,
"loss": 0.9208,
"step": 12840
},
{
"epoch": 84.54,
"learning_rate": 1.888470638847064e-05,
"loss": 0.9667,
"step": 12850
},
{
"epoch": 84.6,
"learning_rate": 1.8852441385244142e-05,
"loss": 0.9048,
"step": 12860
},
{
"epoch": 84.67,
"learning_rate": 1.8820176382017637e-05,
"loss": 0.9502,
"step": 12870
},
{
"epoch": 84.73,
"learning_rate": 1.878791137879114e-05,
"loss": 0.9538,
"step": 12880
},
{
"epoch": 84.8,
"learning_rate": 1.875564637556464e-05,
"loss": 0.9497,
"step": 12890
},
{
"epoch": 84.86,
"learning_rate": 1.8723381372338135e-05,
"loss": 0.9217,
"step": 12900
},
{
"epoch": 84.93,
"learning_rate": 1.8691116369111637e-05,
"loss": 0.9125,
"step": 12910
},
{
"epoch": 85.0,
"learning_rate": 1.865885136588514e-05,
"loss": 1.0031,
"step": 12920
},
{
"epoch": 85.07,
"learning_rate": 1.8626586362658634e-05,
"loss": 0.9734,
"step": 12930
},
{
"epoch": 85.13,
"learning_rate": 1.8594321359432136e-05,
"loss": 0.9017,
"step": 12940
},
{
"epoch": 85.2,
"learning_rate": 1.8562056356205638e-05,
"loss": 0.9443,
"step": 12950
},
{
"epoch": 85.26,
"learning_rate": 1.8529791352979136e-05,
"loss": 0.9767,
"step": 12960
},
{
"epoch": 85.33,
"learning_rate": 1.8497526349752635e-05,
"loss": 0.8967,
"step": 12970
},
{
"epoch": 85.39,
"learning_rate": 1.8465261346526136e-05,
"loss": 0.9203,
"step": 12980
},
{
"epoch": 85.46,
"learning_rate": 1.8432996343299635e-05,
"loss": 0.9375,
"step": 12990
},
{
"epoch": 85.52,
"learning_rate": 1.8400731340073133e-05,
"loss": 0.9971,
"step": 13000
},
{
"epoch": 85.52,
"eval_loss": 0.3501547574996948,
"eval_runtime": 319.789,
"eval_samples_per_second": 22.233,
"eval_steps_per_second": 1.392,
"eval_wer": 0.36260524533049454,
"step": 13000
},
{
"epoch": 85.59,
"learning_rate": 1.8368466336846635e-05,
"loss": 0.9016,
"step": 13010
},
{
"epoch": 85.65,
"learning_rate": 1.8336201333620134e-05,
"loss": 0.9285,
"step": 13020
},
{
"epoch": 85.72,
"learning_rate": 1.8303936330393632e-05,
"loss": 0.9353,
"step": 13030
},
{
"epoch": 85.79,
"learning_rate": 1.8271671327167134e-05,
"loss": 0.9592,
"step": 13040
},
{
"epoch": 85.85,
"learning_rate": 1.8239406323940636e-05,
"loss": 0.8962,
"step": 13050
},
{
"epoch": 85.92,
"learning_rate": 1.820714132071413e-05,
"loss": 0.9172,
"step": 13060
},
{
"epoch": 85.98,
"learning_rate": 1.8174876317487632e-05,
"loss": 0.955,
"step": 13070
},
{
"epoch": 86.05,
"learning_rate": 1.8142611314261134e-05,
"loss": 1.0116,
"step": 13080
},
{
"epoch": 86.12,
"learning_rate": 1.811034631103463e-05,
"loss": 0.9055,
"step": 13090
},
{
"epoch": 86.18,
"learning_rate": 1.807808130780813e-05,
"loss": 0.9329,
"step": 13100
},
{
"epoch": 86.25,
"learning_rate": 1.8045816304581633e-05,
"loss": 0.9708,
"step": 13110
},
{
"epoch": 86.31,
"learning_rate": 1.8013551301355128e-05,
"loss": 0.9177,
"step": 13120
},
{
"epoch": 86.38,
"learning_rate": 1.798128629812863e-05,
"loss": 0.9202,
"step": 13130
},
{
"epoch": 86.45,
"learning_rate": 1.794902129490213e-05,
"loss": 0.9273,
"step": 13140
},
{
"epoch": 86.51,
"learning_rate": 1.791675629167563e-05,
"loss": 1.008,
"step": 13150
},
{
"epoch": 86.58,
"learning_rate": 1.788449128844913e-05,
"loss": 0.8852,
"step": 13160
},
{
"epoch": 86.64,
"learning_rate": 1.785222628522263e-05,
"loss": 0.9258,
"step": 13170
},
{
"epoch": 86.71,
"learning_rate": 1.781996128199613e-05,
"loss": 0.9329,
"step": 13180
},
{
"epoch": 86.77,
"learning_rate": 1.7787696278769627e-05,
"loss": 0.9703,
"step": 13190
},
{
"epoch": 86.84,
"learning_rate": 1.775543127554313e-05,
"loss": 0.8914,
"step": 13200
},
{
"epoch": 86.9,
"learning_rate": 1.7723166272316627e-05,
"loss": 0.9237,
"step": 13210
},
{
"epoch": 86.97,
"learning_rate": 1.7690901269090126e-05,
"loss": 0.9279,
"step": 13220
},
{
"epoch": 87.04,
"learning_rate": 1.7658636265863628e-05,
"loss": 1.0655,
"step": 13230
},
{
"epoch": 87.1,
"learning_rate": 1.7626371262637126e-05,
"loss": 0.8964,
"step": 13240
},
{
"epoch": 87.17,
"learning_rate": 1.7594106259410624e-05,
"loss": 0.9203,
"step": 13250
},
{
"epoch": 87.24,
"learning_rate": 1.7561841256184126e-05,
"loss": 0.9486,
"step": 13260
},
{
"epoch": 87.3,
"learning_rate": 1.7529576252957628e-05,
"loss": 0.9635,
"step": 13270
},
{
"epoch": 87.37,
"learning_rate": 1.7497311249731123e-05,
"loss": 0.8983,
"step": 13280
},
{
"epoch": 87.43,
"learning_rate": 1.7465046246504625e-05,
"loss": 0.9039,
"step": 13290
},
{
"epoch": 87.5,
"learning_rate": 1.7432781243278127e-05,
"loss": 0.9694,
"step": 13300
},
{
"epoch": 87.56,
"learning_rate": 1.7400516240051625e-05,
"loss": 0.9031,
"step": 13310
},
{
"epoch": 87.63,
"learning_rate": 1.7368251236825124e-05,
"loss": 0.901,
"step": 13320
},
{
"epoch": 87.69,
"learning_rate": 1.7335986233598625e-05,
"loss": 0.9082,
"step": 13330
},
{
"epoch": 87.76,
"learning_rate": 1.7303721230372124e-05,
"loss": 1.013,
"step": 13340
},
{
"epoch": 87.82,
"learning_rate": 1.7271456227145622e-05,
"loss": 0.8796,
"step": 13350
},
{
"epoch": 87.89,
"learning_rate": 1.7239191223919124e-05,
"loss": 0.9077,
"step": 13360
},
{
"epoch": 87.96,
"learning_rate": 1.7206926220692622e-05,
"loss": 0.9101,
"step": 13370
},
{
"epoch": 88.03,
"learning_rate": 1.717466121746612e-05,
"loss": 1.0345,
"step": 13380
},
{
"epoch": 88.09,
"learning_rate": 1.7142396214239623e-05,
"loss": 0.8971,
"step": 13390
},
{
"epoch": 88.16,
"learning_rate": 1.711013121101312e-05,
"loss": 0.913,
"step": 13400
},
{
"epoch": 88.22,
"learning_rate": 1.707786620778662e-05,
"loss": 0.9057,
"step": 13410
},
{
"epoch": 88.29,
"learning_rate": 1.704560120456012e-05,
"loss": 0.9604,
"step": 13420
},
{
"epoch": 88.35,
"learning_rate": 1.701333620133362e-05,
"loss": 0.8986,
"step": 13430
},
{
"epoch": 88.42,
"learning_rate": 1.6981071198107118e-05,
"loss": 0.9097,
"step": 13440
},
{
"epoch": 88.48,
"learning_rate": 1.694880619488062e-05,
"loss": 0.9543,
"step": 13450
},
{
"epoch": 88.55,
"learning_rate": 1.6916541191654122e-05,
"loss": 0.9289,
"step": 13460
},
{
"epoch": 88.62,
"learning_rate": 1.688427618842762e-05,
"loss": 0.9003,
"step": 13470
},
{
"epoch": 88.68,
"learning_rate": 1.685201118520112e-05,
"loss": 0.9253,
"step": 13480
},
{
"epoch": 88.75,
"learning_rate": 1.681974618197462e-05,
"loss": 0.9481,
"step": 13490
},
{
"epoch": 88.81,
"learning_rate": 1.678748117874812e-05,
"loss": 0.9058,
"step": 13500
},
{
"epoch": 88.81,
"eval_loss": 0.34722036123275757,
"eval_runtime": 317.2905,
"eval_samples_per_second": 22.408,
"eval_steps_per_second": 1.403,
"eval_wer": 0.3604567889286751,
"step": 13500
},
{
"epoch": 88.88,
"learning_rate": 1.6755216175521617e-05,
"loss": 0.9168,
"step": 13510
},
{
"epoch": 88.94,
"learning_rate": 1.672295117229512e-05,
"loss": 0.9548,
"step": 13520
},
{
"epoch": 89.01,
"learning_rate": 1.6690686169068618e-05,
"loss": 1.082,
"step": 13530
},
{
"epoch": 89.08,
"learning_rate": 1.6658421165842116e-05,
"loss": 0.8845,
"step": 13540
},
{
"epoch": 89.14,
"learning_rate": 1.6626156162615618e-05,
"loss": 0.8901,
"step": 13550
},
{
"epoch": 89.21,
"learning_rate": 1.6593891159389116e-05,
"loss": 0.9123,
"step": 13560
},
{
"epoch": 89.27,
"learning_rate": 1.6561626156162615e-05,
"loss": 0.9849,
"step": 13570
},
{
"epoch": 89.34,
"learning_rate": 1.6529361152936116e-05,
"loss": 0.88,
"step": 13580
},
{
"epoch": 89.41,
"learning_rate": 1.6497096149709615e-05,
"loss": 0.9248,
"step": 13590
},
{
"epoch": 89.47,
"learning_rate": 1.6464831146483113e-05,
"loss": 0.9346,
"step": 13600
},
{
"epoch": 89.54,
"learning_rate": 1.6432566143256615e-05,
"loss": 0.9791,
"step": 13610
},
{
"epoch": 89.6,
"learning_rate": 1.6400301140030114e-05,
"loss": 0.8904,
"step": 13620
},
{
"epoch": 89.67,
"learning_rate": 1.6368036136803612e-05,
"loss": 0.8909,
"step": 13630
},
{
"epoch": 89.73,
"learning_rate": 1.6335771133577114e-05,
"loss": 0.9394,
"step": 13640
},
{
"epoch": 89.8,
"learning_rate": 1.6303506130350612e-05,
"loss": 0.9302,
"step": 13650
},
{
"epoch": 89.86,
"learning_rate": 1.6271241127124114e-05,
"loss": 0.9093,
"step": 13660
},
{
"epoch": 89.93,
"learning_rate": 1.6238976123897612e-05,
"loss": 0.9307,
"step": 13670
},
{
"epoch": 90.0,
"learning_rate": 1.6206711120671114e-05,
"loss": 0.9548,
"step": 13680
},
{
"epoch": 90.07,
"learning_rate": 1.6174446117444613e-05,
"loss": 0.9712,
"step": 13690
},
{
"epoch": 90.13,
"learning_rate": 1.614218111421811e-05,
"loss": 0.8957,
"step": 13700
},
{
"epoch": 90.2,
"learning_rate": 1.6109916110991613e-05,
"loss": 0.9104,
"step": 13710
},
{
"epoch": 90.26,
"learning_rate": 1.607765110776511e-05,
"loss": 0.9518,
"step": 13720
},
{
"epoch": 90.33,
"learning_rate": 1.604538610453861e-05,
"loss": 0.8965,
"step": 13730
},
{
"epoch": 90.39,
"learning_rate": 1.601312110131211e-05,
"loss": 0.9143,
"step": 13740
},
{
"epoch": 90.46,
"learning_rate": 1.598085609808561e-05,
"loss": 0.9043,
"step": 13750
},
{
"epoch": 90.52,
"learning_rate": 1.594859109485911e-05,
"loss": 0.9751,
"step": 13760
},
{
"epoch": 90.59,
"learning_rate": 1.591632609163261e-05,
"loss": 0.8719,
"step": 13770
},
{
"epoch": 90.65,
"learning_rate": 1.588406108840611e-05,
"loss": 0.9339,
"step": 13780
},
{
"epoch": 90.72,
"learning_rate": 1.5851796085179607e-05,
"loss": 0.9233,
"step": 13790
},
{
"epoch": 90.79,
"learning_rate": 1.581953108195311e-05,
"loss": 0.9587,
"step": 13800
},
{
"epoch": 90.85,
"learning_rate": 1.5787266078726607e-05,
"loss": 0.9144,
"step": 13810
},
{
"epoch": 90.92,
"learning_rate": 1.575500107550011e-05,
"loss": 0.9202,
"step": 13820
},
{
"epoch": 90.98,
"learning_rate": 1.5722736072273608e-05,
"loss": 0.9147,
"step": 13830
},
{
"epoch": 91.05,
"learning_rate": 1.5690471069047106e-05,
"loss": 1.0151,
"step": 13840
},
{
"epoch": 91.12,
"learning_rate": 1.5658206065820608e-05,
"loss": 0.893,
"step": 13850
},
{
"epoch": 91.18,
"learning_rate": 1.5625941062594106e-05,
"loss": 0.9064,
"step": 13860
},
{
"epoch": 91.25,
"learning_rate": 1.5593676059367608e-05,
"loss": 0.956,
"step": 13870
},
{
"epoch": 91.31,
"learning_rate": 1.5561411056141106e-05,
"loss": 0.8682,
"step": 13880
},
{
"epoch": 91.38,
"learning_rate": 1.5529146052914605e-05,
"loss": 0.8912,
"step": 13890
},
{
"epoch": 91.45,
"learning_rate": 1.5496881049688107e-05,
"loss": 0.9084,
"step": 13900
},
{
"epoch": 91.51,
"learning_rate": 1.5464616046461605e-05,
"loss": 0.9605,
"step": 13910
},
{
"epoch": 91.58,
"learning_rate": 1.5432351043235104e-05,
"loss": 0.8805,
"step": 13920
},
{
"epoch": 91.64,
"learning_rate": 1.5400086040008605e-05,
"loss": 0.9132,
"step": 13930
},
{
"epoch": 91.71,
"learning_rate": 1.5367821036782104e-05,
"loss": 0.9581,
"step": 13940
},
{
"epoch": 91.77,
"learning_rate": 1.5335556033555602e-05,
"loss": 0.9395,
"step": 13950
},
{
"epoch": 91.84,
"learning_rate": 1.5303291030329104e-05,
"loss": 0.8723,
"step": 13960
},
{
"epoch": 91.9,
"learning_rate": 1.5271026027102602e-05,
"loss": 0.9009,
"step": 13970
},
{
"epoch": 91.97,
"learning_rate": 1.5238761023876104e-05,
"loss": 0.9588,
"step": 13980
},
{
"epoch": 92.04,
"learning_rate": 1.5206496020649601e-05,
"loss": 1.0354,
"step": 13990
},
{
"epoch": 92.1,
"learning_rate": 1.5174231017423103e-05,
"loss": 0.8922,
"step": 14000
},
{
"epoch": 92.1,
"eval_loss": 0.3530175983905792,
"eval_runtime": 325.2566,
"eval_samples_per_second": 21.86,
"eval_steps_per_second": 1.368,
"eval_wer": 0.3618116713442369,
"step": 14000
},
{
"epoch": 92.17,
"learning_rate": 1.5141966014196603e-05,
"loss": 0.9222,
"step": 14010
},
{
"epoch": 92.24,
"learning_rate": 1.5109701010970101e-05,
"loss": 0.9484,
"step": 14020
},
{
"epoch": 92.3,
"learning_rate": 1.5077436007743601e-05,
"loss": 0.9421,
"step": 14030
},
{
"epoch": 92.37,
"learning_rate": 1.5045171004517102e-05,
"loss": 0.8909,
"step": 14040
},
{
"epoch": 92.43,
"learning_rate": 1.50129060012906e-05,
"loss": 0.9404,
"step": 14050
},
{
"epoch": 92.5,
"learning_rate": 1.49806409980641e-05,
"loss": 0.9458,
"step": 14060
},
{
"epoch": 92.56,
"learning_rate": 1.4948375994837599e-05,
"loss": 0.9147,
"step": 14070
},
{
"epoch": 92.63,
"learning_rate": 1.49161109916111e-05,
"loss": 0.9221,
"step": 14080
},
{
"epoch": 92.69,
"learning_rate": 1.4883845988384599e-05,
"loss": 0.8916,
"step": 14090
},
{
"epoch": 92.76,
"learning_rate": 1.4851580985158099e-05,
"loss": 0.9474,
"step": 14100
},
{
"epoch": 92.82,
"learning_rate": 1.4819315981931599e-05,
"loss": 0.8877,
"step": 14110
},
{
"epoch": 92.89,
"learning_rate": 1.4787050978705097e-05,
"loss": 0.8793,
"step": 14120
},
{
"epoch": 92.96,
"learning_rate": 1.47547859754786e-05,
"loss": 0.9027,
"step": 14130
},
{
"epoch": 93.03,
"learning_rate": 1.4722520972252098e-05,
"loss": 1.0347,
"step": 14140
},
{
"epoch": 93.09,
"learning_rate": 1.4690255969025596e-05,
"loss": 0.8792,
"step": 14150
},
{
"epoch": 93.16,
"learning_rate": 1.4657990965799098e-05,
"loss": 0.9272,
"step": 14160
},
{
"epoch": 93.22,
"learning_rate": 1.4625725962572596e-05,
"loss": 0.9519,
"step": 14170
},
{
"epoch": 93.29,
"learning_rate": 1.4593460959346095e-05,
"loss": 0.9434,
"step": 14180
},
{
"epoch": 93.35,
"learning_rate": 1.4561195956119597e-05,
"loss": 0.8696,
"step": 14190
},
{
"epoch": 93.42,
"learning_rate": 1.4528930952893095e-05,
"loss": 0.9093,
"step": 14200
},
{
"epoch": 93.48,
"learning_rate": 1.4496665949666597e-05,
"loss": 0.9385,
"step": 14210
},
{
"epoch": 93.55,
"learning_rate": 1.4464400946440095e-05,
"loss": 0.9052,
"step": 14220
},
{
"epoch": 93.62,
"learning_rate": 1.4432135943213594e-05,
"loss": 0.8826,
"step": 14230
},
{
"epoch": 93.68,
"learning_rate": 1.4399870939987095e-05,
"loss": 0.9008,
"step": 14240
},
{
"epoch": 93.75,
"learning_rate": 1.4367605936760594e-05,
"loss": 0.9739,
"step": 14250
},
{
"epoch": 93.81,
"learning_rate": 1.4335340933534092e-05,
"loss": 0.8942,
"step": 14260
},
{
"epoch": 93.88,
"learning_rate": 1.4303075930307594e-05,
"loss": 0.9117,
"step": 14270
},
{
"epoch": 93.94,
"learning_rate": 1.4270810927081093e-05,
"loss": 0.9197,
"step": 14280
},
{
"epoch": 94.01,
"learning_rate": 1.4238545923854593e-05,
"loss": 1.0093,
"step": 14290
},
{
"epoch": 94.08,
"learning_rate": 1.4206280920628093e-05,
"loss": 0.8751,
"step": 14300
},
{
"epoch": 94.14,
"learning_rate": 1.4174015917401591e-05,
"loss": 0.8825,
"step": 14310
},
{
"epoch": 94.21,
"learning_rate": 1.4141750914175093e-05,
"loss": 0.902,
"step": 14320
},
{
"epoch": 94.27,
"learning_rate": 1.4109485910948591e-05,
"loss": 0.9608,
"step": 14330
},
{
"epoch": 94.34,
"learning_rate": 1.407722090772209e-05,
"loss": 0.8833,
"step": 14340
},
{
"epoch": 94.41,
"learning_rate": 1.4044955904495592e-05,
"loss": 0.8899,
"step": 14350
},
{
"epoch": 94.47,
"learning_rate": 1.401269090126909e-05,
"loss": 0.8863,
"step": 14360
},
{
"epoch": 94.54,
"learning_rate": 1.398042589804259e-05,
"loss": 0.9543,
"step": 14370
},
{
"epoch": 94.6,
"learning_rate": 1.394816089481609e-05,
"loss": 0.8773,
"step": 14380
},
{
"epoch": 94.67,
"learning_rate": 1.3915895891589589e-05,
"loss": 0.8904,
"step": 14390
},
{
"epoch": 94.73,
"learning_rate": 1.3883630888363089e-05,
"loss": 0.9539,
"step": 14400
},
{
"epoch": 94.8,
"learning_rate": 1.3851365885136589e-05,
"loss": 0.9223,
"step": 14410
},
{
"epoch": 94.86,
"learning_rate": 1.3819100881910087e-05,
"loss": 0.8494,
"step": 14420
},
{
"epoch": 94.93,
"learning_rate": 1.378683587868359e-05,
"loss": 0.8945,
"step": 14430
},
{
"epoch": 95.0,
"learning_rate": 1.3754570875457088e-05,
"loss": 0.973,
"step": 14440
},
{
"epoch": 95.07,
"learning_rate": 1.3722305872230588e-05,
"loss": 0.9566,
"step": 14450
},
{
"epoch": 95.13,
"learning_rate": 1.3690040869004088e-05,
"loss": 0.9169,
"step": 14460
},
{
"epoch": 95.2,
"learning_rate": 1.3657775865777586e-05,
"loss": 0.8868,
"step": 14470
},
{
"epoch": 95.26,
"learning_rate": 1.3625510862551086e-05,
"loss": 0.9633,
"step": 14480
},
{
"epoch": 95.33,
"learning_rate": 1.3593245859324587e-05,
"loss": 0.8784,
"step": 14490
},
{
"epoch": 95.39,
"learning_rate": 1.3560980856098085e-05,
"loss": 0.9,
"step": 14500
},
{
"epoch": 95.39,
"eval_loss": 0.34995371103286743,
"eval_runtime": 324.3004,
"eval_samples_per_second": 21.924,
"eval_steps_per_second": 1.372,
"eval_wer": 0.35739862576212134,
"step": 14500
},
{
"epoch": 95.46,
"learning_rate": 1.3528715852871587e-05,
"loss": 0.9304,
"step": 14510
},
{
"epoch": 95.52,
"learning_rate": 1.3496450849645085e-05,
"loss": 0.945,
"step": 14520
},
{
"epoch": 95.59,
"learning_rate": 1.3464185846418585e-05,
"loss": 0.8791,
"step": 14530
},
{
"epoch": 95.65,
"learning_rate": 1.3431920843192085e-05,
"loss": 0.8943,
"step": 14540
},
{
"epoch": 95.72,
"learning_rate": 1.3399655839965584e-05,
"loss": 0.9172,
"step": 14550
},
{
"epoch": 95.79,
"learning_rate": 1.3367390836739084e-05,
"loss": 0.9405,
"step": 14560
},
{
"epoch": 95.85,
"learning_rate": 1.3335125833512584e-05,
"loss": 0.8616,
"step": 14570
},
{
"epoch": 95.92,
"learning_rate": 1.3302860830286083e-05,
"loss": 0.9063,
"step": 14580
},
{
"epoch": 95.98,
"learning_rate": 1.3270595827059583e-05,
"loss": 0.9359,
"step": 14590
},
{
"epoch": 96.05,
"learning_rate": 1.3238330823833083e-05,
"loss": 0.9663,
"step": 14600
},
{
"epoch": 96.12,
"learning_rate": 1.3206065820606583e-05,
"loss": 0.872,
"step": 14610
},
{
"epoch": 96.18,
"learning_rate": 1.3173800817380083e-05,
"loss": 0.8937,
"step": 14620
},
{
"epoch": 96.25,
"learning_rate": 1.3141535814153581e-05,
"loss": 0.9451,
"step": 14630
},
{
"epoch": 96.31,
"learning_rate": 1.3109270810927082e-05,
"loss": 0.8743,
"step": 14640
},
{
"epoch": 96.38,
"learning_rate": 1.3077005807700582e-05,
"loss": 0.8862,
"step": 14650
},
{
"epoch": 96.45,
"learning_rate": 1.304474080447408e-05,
"loss": 0.9016,
"step": 14660
},
{
"epoch": 96.51,
"learning_rate": 1.301247580124758e-05,
"loss": 0.9827,
"step": 14670
},
{
"epoch": 96.58,
"learning_rate": 1.298021079802108e-05,
"loss": 0.8827,
"step": 14680
},
{
"epoch": 96.64,
"learning_rate": 1.294794579479458e-05,
"loss": 0.9001,
"step": 14690
},
{
"epoch": 96.71,
"learning_rate": 1.2915680791568079e-05,
"loss": 0.8752,
"step": 14700
},
{
"epoch": 96.77,
"learning_rate": 1.2883415788341579e-05,
"loss": 0.9638,
"step": 14710
},
{
"epoch": 96.84,
"learning_rate": 1.2851150785115079e-05,
"loss": 0.871,
"step": 14720
},
{
"epoch": 96.9,
"learning_rate": 1.281888578188858e-05,
"loss": 0.9118,
"step": 14730
},
{
"epoch": 96.97,
"learning_rate": 1.2786620778662078e-05,
"loss": 0.9135,
"step": 14740
},
{
"epoch": 97.04,
"learning_rate": 1.2754355775435578e-05,
"loss": 0.991,
"step": 14750
},
{
"epoch": 97.1,
"learning_rate": 1.2722090772209078e-05,
"loss": 0.8699,
"step": 14760
},
{
"epoch": 97.17,
"learning_rate": 1.2689825768982576e-05,
"loss": 0.8852,
"step": 14770
},
{
"epoch": 97.24,
"learning_rate": 1.2657560765756076e-05,
"loss": 0.9257,
"step": 14780
},
{
"epoch": 97.3,
"learning_rate": 1.2625295762529577e-05,
"loss": 0.9189,
"step": 14790
},
{
"epoch": 97.37,
"learning_rate": 1.2593030759303077e-05,
"loss": 0.8538,
"step": 14800
},
{
"epoch": 97.43,
"learning_rate": 1.2560765756076575e-05,
"loss": 0.9018,
"step": 14810
},
{
"epoch": 97.5,
"learning_rate": 1.2528500752850075e-05,
"loss": 0.9465,
"step": 14820
},
{
"epoch": 97.56,
"learning_rate": 1.2496235749623575e-05,
"loss": 0.9016,
"step": 14830
},
{
"epoch": 97.63,
"learning_rate": 1.2463970746397075e-05,
"loss": 0.888,
"step": 14840
},
{
"epoch": 97.69,
"learning_rate": 1.2431705743170574e-05,
"loss": 0.9304,
"step": 14850
},
{
"epoch": 97.76,
"learning_rate": 1.2399440739944074e-05,
"loss": 0.9315,
"step": 14860
},
{
"epoch": 97.82,
"learning_rate": 1.2367175736717574e-05,
"loss": 0.8613,
"step": 14870
},
{
"epoch": 97.89,
"learning_rate": 1.2334910733491074e-05,
"loss": 0.9004,
"step": 14880
},
{
"epoch": 97.96,
"learning_rate": 1.2302645730264573e-05,
"loss": 0.9156,
"step": 14890
},
{
"epoch": 98.03,
"learning_rate": 1.2270380727038073e-05,
"loss": 1.0229,
"step": 14900
},
{
"epoch": 98.09,
"learning_rate": 1.2238115723811573e-05,
"loss": 0.8743,
"step": 14910
},
{
"epoch": 98.16,
"learning_rate": 1.2205850720585073e-05,
"loss": 0.8969,
"step": 14920
},
{
"epoch": 98.22,
"learning_rate": 1.2173585717358571e-05,
"loss": 0.9141,
"step": 14930
},
{
"epoch": 98.29,
"learning_rate": 1.2141320714132072e-05,
"loss": 0.9501,
"step": 14940
},
{
"epoch": 98.35,
"learning_rate": 1.2109055710905572e-05,
"loss": 0.8673,
"step": 14950
},
{
"epoch": 98.42,
"learning_rate": 1.2076790707679072e-05,
"loss": 0.881,
"step": 14960
},
{
"epoch": 98.48,
"learning_rate": 1.204452570445257e-05,
"loss": 0.9423,
"step": 14970
},
{
"epoch": 98.55,
"learning_rate": 1.201226070122607e-05,
"loss": 0.9134,
"step": 14980
},
{
"epoch": 98.62,
"learning_rate": 1.197999569799957e-05,
"loss": 0.869,
"step": 14990
},
{
"epoch": 98.68,
"learning_rate": 1.1947730694773069e-05,
"loss": 0.9051,
"step": 15000
},
{
"epoch": 98.68,
"eval_loss": 0.3455681800842285,
"eval_runtime": 323.8015,
"eval_samples_per_second": 21.958,
"eval_steps_per_second": 1.374,
"eval_wer": 0.35352753314623053,
"step": 15000
},
{
"epoch": 98.75,
"learning_rate": 1.1915465691546569e-05,
"loss": 0.968,
"step": 15010
},
{
"epoch": 98.81,
"learning_rate": 1.1883200688320069e-05,
"loss": 0.8848,
"step": 15020
},
{
"epoch": 98.88,
"learning_rate": 1.185093568509357e-05,
"loss": 0.8963,
"step": 15030
},
{
"epoch": 98.94,
"learning_rate": 1.181867068186707e-05,
"loss": 0.9089,
"step": 15040
},
{
"epoch": 99.01,
"learning_rate": 1.1786405678640568e-05,
"loss": 1.0464,
"step": 15050
},
{
"epoch": 99.08,
"learning_rate": 1.1754140675414068e-05,
"loss": 0.8532,
"step": 15060
},
{
"epoch": 99.14,
"learning_rate": 1.1721875672187568e-05,
"loss": 0.8836,
"step": 15070
},
{
"epoch": 99.21,
"learning_rate": 1.1689610668961066e-05,
"loss": 0.9005,
"step": 15080
},
{
"epoch": 99.27,
"learning_rate": 1.1657345665734567e-05,
"loss": 0.9232,
"step": 15090
},
{
"epoch": 99.34,
"learning_rate": 1.1625080662508067e-05,
"loss": 0.8787,
"step": 15100
},
{
"epoch": 99.41,
"learning_rate": 1.1592815659281565e-05,
"loss": 0.871,
"step": 15110
},
{
"epoch": 99.47,
"learning_rate": 1.1560550656055067e-05,
"loss": 0.9109,
"step": 15120
},
{
"epoch": 99.54,
"learning_rate": 1.1528285652828565e-05,
"loss": 0.9561,
"step": 15130
},
{
"epoch": 99.6,
"learning_rate": 1.1496020649602065e-05,
"loss": 0.8613,
"step": 15140
},
{
"epoch": 99.67,
"learning_rate": 1.1463755646375566e-05,
"loss": 0.8852,
"step": 15150
},
{
"epoch": 99.73,
"learning_rate": 1.1431490643149064e-05,
"loss": 0.9084,
"step": 15160
},
{
"epoch": 99.8,
"learning_rate": 1.1399225639922564e-05,
"loss": 0.905,
"step": 15170
},
{
"epoch": 99.86,
"learning_rate": 1.1366960636696064e-05,
"loss": 0.8638,
"step": 15180
},
{
"epoch": 99.93,
"learning_rate": 1.1334695633469563e-05,
"loss": 0.914,
"step": 15190
},
{
"epoch": 100.0,
"learning_rate": 1.1302430630243064e-05,
"loss": 0.9435,
"step": 15200
},
{
"epoch": 100.07,
"learning_rate": 1.1270165627016563e-05,
"loss": 0.9602,
"step": 15210
},
{
"epoch": 100.13,
"learning_rate": 1.1237900623790063e-05,
"loss": 0.8831,
"step": 15220
},
{
"epoch": 100.2,
"learning_rate": 1.1205635620563563e-05,
"loss": 0.8994,
"step": 15230
},
{
"epoch": 100.26,
"learning_rate": 1.1173370617337062e-05,
"loss": 0.9412,
"step": 15240
},
{
"epoch": 100.33,
"learning_rate": 1.1141105614110562e-05,
"loss": 0.842,
"step": 15250
},
{
"epoch": 100.39,
"learning_rate": 1.1108840610884062e-05,
"loss": 0.8825,
"step": 15260
},
{
"epoch": 100.46,
"learning_rate": 1.107657560765756e-05,
"loss": 0.8777,
"step": 15270
},
{
"epoch": 100.52,
"learning_rate": 1.1044310604431062e-05,
"loss": 0.9486,
"step": 15280
},
{
"epoch": 100.59,
"learning_rate": 1.101204560120456e-05,
"loss": 0.8656,
"step": 15290
},
{
"epoch": 100.65,
"learning_rate": 1.0979780597978059e-05,
"loss": 0.9077,
"step": 15300
},
{
"epoch": 100.72,
"learning_rate": 1.094751559475156e-05,
"loss": 0.9023,
"step": 15310
},
{
"epoch": 100.79,
"learning_rate": 1.0915250591525059e-05,
"loss": 0.9121,
"step": 15320
},
{
"epoch": 100.85,
"learning_rate": 1.088298558829856e-05,
"loss": 0.8787,
"step": 15330
},
{
"epoch": 100.92,
"learning_rate": 1.085072058507206e-05,
"loss": 0.9054,
"step": 15340
},
{
"epoch": 100.98,
"learning_rate": 1.0818455581845558e-05,
"loss": 0.9459,
"step": 15350
},
{
"epoch": 101.05,
"learning_rate": 1.0786190578619058e-05,
"loss": 0.9726,
"step": 15360
},
{
"epoch": 101.12,
"learning_rate": 1.0753925575392558e-05,
"loss": 0.8765,
"step": 15370
},
{
"epoch": 101.18,
"learning_rate": 1.0721660572166056e-05,
"loss": 0.8734,
"step": 15380
},
{
"epoch": 101.25,
"learning_rate": 1.0689395568939558e-05,
"loss": 0.9516,
"step": 15390
},
{
"epoch": 101.31,
"learning_rate": 1.0657130565713057e-05,
"loss": 0.8683,
"step": 15400
},
{
"epoch": 101.38,
"learning_rate": 1.0624865562486555e-05,
"loss": 0.8891,
"step": 15410
},
{
"epoch": 101.45,
"learning_rate": 1.0592600559260057e-05,
"loss": 0.8698,
"step": 15420
},
{
"epoch": 101.51,
"learning_rate": 1.0560335556033555e-05,
"loss": 0.9669,
"step": 15430
},
{
"epoch": 101.58,
"learning_rate": 1.0528070552807055e-05,
"loss": 0.8661,
"step": 15440
},
{
"epoch": 101.64,
"learning_rate": 1.0495805549580556e-05,
"loss": 0.8912,
"step": 15450
},
{
"epoch": 101.71,
"learning_rate": 1.0463540546354054e-05,
"loss": 0.8818,
"step": 15460
},
{
"epoch": 101.77,
"learning_rate": 1.0431275543127556e-05,
"loss": 0.9431,
"step": 15470
},
{
"epoch": 101.84,
"learning_rate": 1.0399010539901054e-05,
"loss": 0.8664,
"step": 15480
},
{
"epoch": 101.9,
"learning_rate": 1.0366745536674553e-05,
"loss": 0.9112,
"step": 15490
},
{
"epoch": 101.97,
"learning_rate": 1.0334480533448055e-05,
"loss": 0.9304,
"step": 15500
},
{
"epoch": 101.97,
"eval_loss": 0.34380748867988586,
"eval_runtime": 321.8146,
"eval_samples_per_second": 22.093,
"eval_steps_per_second": 1.383,
"eval_wer": 0.357766379560631,
"step": 15500
},
{
"epoch": 102.04,
"learning_rate": 1.0302215530221553e-05,
"loss": 1.0402,
"step": 15510
},
{
"epoch": 102.1,
"learning_rate": 1.0269950526995053e-05,
"loss": 0.8567,
"step": 15520
},
{
"epoch": 102.17,
"learning_rate": 1.0237685523768553e-05,
"loss": 0.9101,
"step": 15530
},
{
"epoch": 102.24,
"learning_rate": 1.0205420520542052e-05,
"loss": 0.9497,
"step": 15540
},
{
"epoch": 102.3,
"learning_rate": 1.0173155517315553e-05,
"loss": 0.8929,
"step": 15550
},
{
"epoch": 102.37,
"learning_rate": 1.0140890514089052e-05,
"loss": 0.8703,
"step": 15560
},
{
"epoch": 102.43,
"learning_rate": 1.010862551086255e-05,
"loss": 0.8839,
"step": 15570
},
{
"epoch": 102.5,
"learning_rate": 1.0076360507636052e-05,
"loss": 0.9388,
"step": 15580
},
{
"epoch": 102.56,
"learning_rate": 1.004409550440955e-05,
"loss": 0.8551,
"step": 15590
},
{
"epoch": 102.63,
"learning_rate": 1.0011830501183049e-05,
"loss": 0.8776,
"step": 15600
},
{
"epoch": 102.69,
"learning_rate": 9.97956549795655e-06,
"loss": 0.8986,
"step": 15610
},
{
"epoch": 102.76,
"learning_rate": 9.947300494730049e-06,
"loss": 0.9499,
"step": 15620
},
{
"epoch": 102.82,
"learning_rate": 9.915035491503551e-06,
"loss": 0.875,
"step": 15630
},
{
"epoch": 102.89,
"learning_rate": 9.88277048827705e-06,
"loss": 0.8834,
"step": 15640
},
{
"epoch": 102.96,
"learning_rate": 9.853731985373199e-06,
"loss": 0.9063,
"step": 15650
},
{
"epoch": 103.03,
"learning_rate": 9.821466982146697e-06,
"loss": 1.0078,
"step": 15660
},
{
"epoch": 103.09,
"learning_rate": 9.7892019789202e-06,
"loss": 0.8404,
"step": 15670
},
{
"epoch": 103.16,
"learning_rate": 9.756936975693698e-06,
"loss": 0.877,
"step": 15680
},
{
"epoch": 103.22,
"learning_rate": 9.724671972467196e-06,
"loss": 0.8756,
"step": 15690
},
{
"epoch": 103.29,
"learning_rate": 9.692406969240698e-06,
"loss": 0.9206,
"step": 15700
},
{
"epoch": 103.35,
"learning_rate": 9.660141966014196e-06,
"loss": 0.8646,
"step": 15710
},
{
"epoch": 103.42,
"learning_rate": 9.627876962787696e-06,
"loss": 0.876,
"step": 15720
},
{
"epoch": 103.48,
"learning_rate": 9.595611959561197e-06,
"loss": 0.9049,
"step": 15730
},
{
"epoch": 103.55,
"learning_rate": 9.563346956334695e-06,
"loss": 0.8997,
"step": 15740
},
{
"epoch": 103.62,
"learning_rate": 9.531081953108197e-06,
"loss": 0.8749,
"step": 15750
},
{
"epoch": 103.68,
"learning_rate": 9.498816949881695e-06,
"loss": 0.8853,
"step": 15760
},
{
"epoch": 103.75,
"learning_rate": 9.466551946655194e-06,
"loss": 0.9405,
"step": 15770
},
{
"epoch": 103.81,
"learning_rate": 9.434286943428695e-06,
"loss": 0.8564,
"step": 15780
},
{
"epoch": 103.88,
"learning_rate": 9.402021940202194e-06,
"loss": 0.8431,
"step": 15790
},
{
"epoch": 103.94,
"learning_rate": 9.369756936975694e-06,
"loss": 0.8918,
"step": 15800
},
{
"epoch": 104.01,
"learning_rate": 9.337491933749194e-06,
"loss": 1.0248,
"step": 15810
},
{
"epoch": 104.08,
"learning_rate": 9.305226930522693e-06,
"loss": 0.8441,
"step": 15820
},
{
"epoch": 104.14,
"learning_rate": 9.272961927296194e-06,
"loss": 0.8754,
"step": 15830
},
{
"epoch": 104.21,
"learning_rate": 9.240696924069693e-06,
"loss": 0.8758,
"step": 15840
},
{
"epoch": 104.27,
"learning_rate": 9.208431920843191e-06,
"loss": 0.9222,
"step": 15850
},
{
"epoch": 104.34,
"learning_rate": 9.176166917616693e-06,
"loss": 0.8714,
"step": 15860
},
{
"epoch": 104.41,
"learning_rate": 9.143901914390191e-06,
"loss": 0.8834,
"step": 15870
},
{
"epoch": 104.47,
"learning_rate": 9.111636911163692e-06,
"loss": 0.8587,
"step": 15880
},
{
"epoch": 104.54,
"learning_rate": 9.079371907937192e-06,
"loss": 0.9453,
"step": 15890
},
{
"epoch": 104.6,
"learning_rate": 9.04710690471069e-06,
"loss": 0.8731,
"step": 15900
},
{
"epoch": 104.67,
"learning_rate": 9.01484190148419e-06,
"loss": 0.8939,
"step": 15910
},
{
"epoch": 104.73,
"learning_rate": 8.98257689825769e-06,
"loss": 0.8906,
"step": 15920
},
{
"epoch": 104.8,
"learning_rate": 8.950311895031189e-06,
"loss": 0.8732,
"step": 15930
},
{
"epoch": 104.86,
"learning_rate": 8.91804689180469e-06,
"loss": 0.8841,
"step": 15940
},
{
"epoch": 104.93,
"learning_rate": 8.885781888578189e-06,
"loss": 0.8742,
"step": 15950
},
{
"epoch": 105.0,
"learning_rate": 8.853516885351689e-06,
"loss": 0.9412,
"step": 15960
},
{
"epoch": 105.07,
"learning_rate": 8.82125188212519e-06,
"loss": 0.9196,
"step": 15970
},
{
"epoch": 105.13,
"learning_rate": 8.788986878898688e-06,
"loss": 0.8791,
"step": 15980
},
{
"epoch": 105.2,
"learning_rate": 8.756721875672188e-06,
"loss": 0.8724,
"step": 15990
},
{
"epoch": 105.26,
"learning_rate": 8.724456872445688e-06,
"loss": 0.9433,
"step": 16000
},
{
"epoch": 105.26,
"eval_loss": 0.33956313133239746,
"eval_runtime": 325.5194,
"eval_samples_per_second": 21.842,
"eval_steps_per_second": 1.367,
"eval_wer": 0.35300493564308527,
"step": 16000
},
{
"epoch": 105.33,
"learning_rate": 8.692191869219186e-06,
"loss": 0.8547,
"step": 16010
},
{
"epoch": 105.39,
"learning_rate": 8.659926865992686e-06,
"loss": 0.8773,
"step": 16020
},
{
"epoch": 105.46,
"learning_rate": 8.627661862766187e-06,
"loss": 0.8786,
"step": 16030
},
{
"epoch": 105.52,
"learning_rate": 8.595396859539685e-06,
"loss": 0.9219,
"step": 16040
},
{
"epoch": 105.59,
"learning_rate": 8.563131856313187e-06,
"loss": 0.8632,
"step": 16050
},
{
"epoch": 105.65,
"learning_rate": 8.530866853086685e-06,
"loss": 0.8829,
"step": 16060
},
{
"epoch": 105.72,
"learning_rate": 8.498601849860185e-06,
"loss": 0.8814,
"step": 16070
},
{
"epoch": 105.79,
"learning_rate": 8.466336846633685e-06,
"loss": 0.9033,
"step": 16080
},
{
"epoch": 105.85,
"learning_rate": 8.434071843407184e-06,
"loss": 0.8675,
"step": 16090
},
{
"epoch": 105.92,
"learning_rate": 8.401806840180684e-06,
"loss": 0.8758,
"step": 16100
},
{
"epoch": 105.98,
"learning_rate": 8.369541836954184e-06,
"loss": 0.8747,
"step": 16110
},
{
"epoch": 106.05,
"learning_rate": 8.337276833727683e-06,
"loss": 0.9735,
"step": 16120
},
{
"epoch": 106.12,
"learning_rate": 8.305011830501184e-06,
"loss": 0.8857,
"step": 16130
},
{
"epoch": 106.18,
"learning_rate": 8.272746827274683e-06,
"loss": 0.87,
"step": 16140
},
{
"epoch": 106.25,
"learning_rate": 8.240481824048183e-06,
"loss": 0.9338,
"step": 16150
},
{
"epoch": 106.31,
"learning_rate": 8.208216820821683e-06,
"loss": 0.8607,
"step": 16160
},
{
"epoch": 106.38,
"learning_rate": 8.175951817595181e-06,
"loss": 0.8774,
"step": 16170
},
{
"epoch": 106.45,
"learning_rate": 8.143686814368682e-06,
"loss": 0.8379,
"step": 16180
},
{
"epoch": 106.51,
"learning_rate": 8.111421811142182e-06,
"loss": 0.9258,
"step": 16190
},
{
"epoch": 106.58,
"learning_rate": 8.07915680791568e-06,
"loss": 0.8407,
"step": 16200
},
{
"epoch": 106.64,
"learning_rate": 8.04689180468918e-06,
"loss": 0.8785,
"step": 16210
},
{
"epoch": 106.71,
"learning_rate": 8.01462680146268e-06,
"loss": 0.9169,
"step": 16220
},
{
"epoch": 106.77,
"learning_rate": 7.98236179823618e-06,
"loss": 0.9558,
"step": 16230
},
{
"epoch": 106.84,
"learning_rate": 7.95009679500968e-06,
"loss": 0.8568,
"step": 16240
},
{
"epoch": 106.9,
"learning_rate": 7.917831791783179e-06,
"loss": 0.8971,
"step": 16250
},
{
"epoch": 106.97,
"learning_rate": 7.885566788556679e-06,
"loss": 0.8865,
"step": 16260
},
{
"epoch": 107.04,
"learning_rate": 7.85330178533018e-06,
"loss": 1.012,
"step": 16270
},
{
"epoch": 107.1,
"learning_rate": 7.821036782103678e-06,
"loss": 0.871,
"step": 16280
},
{
"epoch": 107.17,
"learning_rate": 7.788771778877178e-06,
"loss": 0.8686,
"step": 16290
},
{
"epoch": 107.24,
"learning_rate": 7.756506775650678e-06,
"loss": 0.9085,
"step": 16300
},
{
"epoch": 107.3,
"learning_rate": 7.724241772424178e-06,
"loss": 0.905,
"step": 16310
},
{
"epoch": 107.37,
"learning_rate": 7.691976769197676e-06,
"loss": 0.8584,
"step": 16320
},
{
"epoch": 107.43,
"learning_rate": 7.659711765971177e-06,
"loss": 0.9066,
"step": 16330
},
{
"epoch": 107.5,
"learning_rate": 7.627446762744677e-06,
"loss": 0.9365,
"step": 16340
},
{
"epoch": 107.56,
"learning_rate": 7.595181759518176e-06,
"loss": 0.8756,
"step": 16350
},
{
"epoch": 107.63,
"learning_rate": 7.562916756291675e-06,
"loss": 0.8542,
"step": 16360
},
{
"epoch": 107.69,
"learning_rate": 7.530651753065176e-06,
"loss": 0.8647,
"step": 16370
},
{
"epoch": 107.76,
"learning_rate": 7.498386749838675e-06,
"loss": 0.9443,
"step": 16380
},
{
"epoch": 107.82,
"learning_rate": 7.466121746612175e-06,
"loss": 0.8396,
"step": 16390
},
{
"epoch": 107.89,
"learning_rate": 7.433856743385675e-06,
"loss": 0.8714,
"step": 16400
},
{
"epoch": 107.96,
"learning_rate": 7.401591740159175e-06,
"loss": 0.8858,
"step": 16410
},
{
"epoch": 108.03,
"learning_rate": 7.369326736932673e-06,
"loss": 0.9822,
"step": 16420
},
{
"epoch": 108.09,
"learning_rate": 7.3370617337061735e-06,
"loss": 0.8577,
"step": 16430
},
{
"epoch": 108.16,
"learning_rate": 7.304796730479674e-06,
"loss": 0.8978,
"step": 16440
},
{
"epoch": 108.22,
"learning_rate": 7.272531727253173e-06,
"loss": 0.8909,
"step": 16450
},
{
"epoch": 108.29,
"learning_rate": 7.240266724026672e-06,
"loss": 0.9385,
"step": 16460
},
{
"epoch": 108.35,
"learning_rate": 7.208001720800172e-06,
"loss": 0.8541,
"step": 16470
},
{
"epoch": 108.42,
"learning_rate": 7.175736717573672e-06,
"loss": 0.8644,
"step": 16480
},
{
"epoch": 108.48,
"learning_rate": 7.143471714347172e-06,
"loss": 0.911,
"step": 16490
},
{
"epoch": 108.55,
"learning_rate": 7.111206711120671e-06,
"loss": 0.8988,
"step": 16500
},
{
"epoch": 108.55,
"eval_loss": 0.3435661494731903,
"eval_runtime": 318.354,
"eval_samples_per_second": 22.334,
"eval_steps_per_second": 1.398,
"eval_wer": 0.3539146424078196,
"step": 16500
},
{
"epoch": 108.62,
"learning_rate": 7.078941707894171e-06,
"loss": 0.8563,
"step": 16510
},
{
"epoch": 108.68,
"learning_rate": 7.046676704667671e-06,
"loss": 0.8605,
"step": 16520
},
{
"epoch": 108.75,
"learning_rate": 7.017638201763821e-06,
"loss": 0.9282,
"step": 16530
},
{
"epoch": 108.81,
"learning_rate": 6.98537319853732e-06,
"loss": 0.8875,
"step": 16540
},
{
"epoch": 108.88,
"learning_rate": 6.953108195310819e-06,
"loss": 0.8576,
"step": 16550
},
{
"epoch": 108.94,
"learning_rate": 6.9208431920843194e-06,
"loss": 0.879,
"step": 16560
},
{
"epoch": 109.01,
"learning_rate": 6.8885781888578196e-06,
"loss": 0.9845,
"step": 16570
},
{
"epoch": 109.08,
"learning_rate": 6.856313185631319e-06,
"loss": 0.8595,
"step": 16580
},
{
"epoch": 109.14,
"learning_rate": 6.824048182404818e-06,
"loss": 0.8792,
"step": 16590
},
{
"epoch": 109.21,
"learning_rate": 6.791783179178318e-06,
"loss": 0.89,
"step": 16600
},
{
"epoch": 109.27,
"learning_rate": 6.759518175951818e-06,
"loss": 0.9244,
"step": 16610
},
{
"epoch": 109.34,
"learning_rate": 6.727253172725318e-06,
"loss": 0.8525,
"step": 16620
},
{
"epoch": 109.41,
"learning_rate": 6.694988169498817e-06,
"loss": 0.8619,
"step": 16630
},
{
"epoch": 109.47,
"learning_rate": 6.662723166272317e-06,
"loss": 0.9235,
"step": 16640
},
{
"epoch": 109.54,
"learning_rate": 6.630458163045816e-06,
"loss": 0.9352,
"step": 16650
},
{
"epoch": 109.6,
"learning_rate": 6.598193159819316e-06,
"loss": 0.8496,
"step": 16660
},
{
"epoch": 109.67,
"learning_rate": 6.565928156592816e-06,
"loss": 0.8729,
"step": 16670
},
{
"epoch": 109.73,
"learning_rate": 6.533663153366316e-06,
"loss": 0.9166,
"step": 16680
},
{
"epoch": 109.8,
"learning_rate": 6.501398150139815e-06,
"loss": 0.8909,
"step": 16690
},
{
"epoch": 109.86,
"learning_rate": 6.469133146913315e-06,
"loss": 0.882,
"step": 16700
},
{
"epoch": 109.93,
"learning_rate": 6.4368681436868144e-06,
"loss": 0.8945,
"step": 16710
},
{
"epoch": 110.0,
"learning_rate": 6.4046031404603146e-06,
"loss": 0.9299,
"step": 16720
},
{
"epoch": 110.07,
"learning_rate": 6.372338137233814e-06,
"loss": 0.9232,
"step": 16730
},
{
"epoch": 110.13,
"learning_rate": 6.340073134007314e-06,
"loss": 0.8516,
"step": 16740
},
{
"epoch": 110.2,
"learning_rate": 6.307808130780813e-06,
"loss": 0.8654,
"step": 16750
},
{
"epoch": 110.26,
"learning_rate": 6.275543127554313e-06,
"loss": 0.9617,
"step": 16760
},
{
"epoch": 110.33,
"learning_rate": 6.243278124327813e-06,
"loss": 0.8597,
"step": 16770
},
{
"epoch": 110.39,
"learning_rate": 6.211013121101312e-06,
"loss": 0.858,
"step": 16780
},
{
"epoch": 110.46,
"learning_rate": 6.178748117874812e-06,
"loss": 0.8777,
"step": 16790
},
{
"epoch": 110.52,
"learning_rate": 6.146483114648311e-06,
"loss": 0.9278,
"step": 16800
},
{
"epoch": 110.59,
"learning_rate": 6.114218111421811e-06,
"loss": 0.8487,
"step": 16810
},
{
"epoch": 110.65,
"learning_rate": 6.081953108195311e-06,
"loss": 0.8721,
"step": 16820
},
{
"epoch": 110.72,
"learning_rate": 6.049688104968811e-06,
"loss": 0.9058,
"step": 16830
},
{
"epoch": 110.79,
"learning_rate": 6.01742310174231e-06,
"loss": 0.9162,
"step": 16840
},
{
"epoch": 110.85,
"learning_rate": 5.98515809851581e-06,
"loss": 0.861,
"step": 16850
},
{
"epoch": 110.92,
"learning_rate": 5.9528930952893094e-06,
"loss": 0.8752,
"step": 16860
},
{
"epoch": 110.98,
"learning_rate": 5.9206280920628096e-06,
"loss": 0.8966,
"step": 16870
},
{
"epoch": 111.05,
"learning_rate": 5.888363088836309e-06,
"loss": 0.9639,
"step": 16880
},
{
"epoch": 111.12,
"learning_rate": 5.856098085609809e-06,
"loss": 0.8261,
"step": 16890
},
{
"epoch": 111.18,
"learning_rate": 5.823833082383308e-06,
"loss": 0.873,
"step": 16900
},
{
"epoch": 111.25,
"learning_rate": 5.7915680791568075e-06,
"loss": 0.9507,
"step": 16910
},
{
"epoch": 111.31,
"learning_rate": 5.759303075930308e-06,
"loss": 0.8536,
"step": 16920
},
{
"epoch": 111.38,
"learning_rate": 5.727038072703808e-06,
"loss": 0.8887,
"step": 16930
},
{
"epoch": 111.45,
"learning_rate": 5.694773069477307e-06,
"loss": 0.8569,
"step": 16940
},
{
"epoch": 111.51,
"learning_rate": 5.662508066250806e-06,
"loss": 0.9231,
"step": 16950
},
{
"epoch": 111.58,
"learning_rate": 5.630243063024306e-06,
"loss": 0.8588,
"step": 16960
},
{
"epoch": 111.64,
"learning_rate": 5.5979780597978065e-06,
"loss": 0.8674,
"step": 16970
},
{
"epoch": 111.71,
"learning_rate": 5.565713056571306e-06,
"loss": 0.8802,
"step": 16980
},
{
"epoch": 111.77,
"learning_rate": 5.533448053344805e-06,
"loss": 0.966,
"step": 16990
},
{
"epoch": 111.84,
"learning_rate": 5.501183050118305e-06,
"loss": 0.8789,
"step": 17000
},
{
"epoch": 111.84,
"eval_loss": 0.3425801992416382,
"eval_runtime": 327.0047,
"eval_samples_per_second": 21.743,
"eval_steps_per_second": 1.361,
"eval_wer": 0.3515532759121262,
"step": 17000
},
{
"epoch": 111.9,
"learning_rate": 5.468918046891805e-06,
"loss": 0.8766,
"step": 17010
},
{
"epoch": 111.97,
"learning_rate": 5.4366530436653046e-06,
"loss": 0.8643,
"step": 17020
},
{
"epoch": 112.04,
"learning_rate": 5.404388040438804e-06,
"loss": 0.9841,
"step": 17030
},
{
"epoch": 112.1,
"learning_rate": 5.372123037212304e-06,
"loss": 0.8478,
"step": 17040
},
{
"epoch": 112.17,
"learning_rate": 5.339858033985804e-06,
"loss": 0.8621,
"step": 17050
},
{
"epoch": 112.24,
"learning_rate": 5.3075930307593025e-06,
"loss": 0.8895,
"step": 17060
},
{
"epoch": 112.3,
"learning_rate": 5.275328027532803e-06,
"loss": 0.8931,
"step": 17070
},
{
"epoch": 112.37,
"learning_rate": 5.243063024306303e-06,
"loss": 0.8679,
"step": 17080
},
{
"epoch": 112.43,
"learning_rate": 5.210798021079803e-06,
"loss": 0.8957,
"step": 17090
},
{
"epoch": 112.5,
"learning_rate": 5.178533017853301e-06,
"loss": 0.8836,
"step": 17100
},
{
"epoch": 112.56,
"learning_rate": 5.146268014626801e-06,
"loss": 0.8735,
"step": 17110
},
{
"epoch": 112.63,
"learning_rate": 5.1140030114003015e-06,
"loss": 0.8746,
"step": 17120
},
{
"epoch": 112.69,
"learning_rate": 5.081738008173802e-06,
"loss": 0.8755,
"step": 17130
},
{
"epoch": 112.76,
"learning_rate": 5.0494730049473e-06,
"loss": 0.935,
"step": 17140
},
{
"epoch": 112.82,
"learning_rate": 5.0172080017208e-06,
"loss": 0.8438,
"step": 17150
},
{
"epoch": 112.89,
"learning_rate": 4.9849429984943e-06,
"loss": 0.8687,
"step": 17160
},
{
"epoch": 112.96,
"learning_rate": 4.9526779952678e-06,
"loss": 0.8934,
"step": 17170
},
{
"epoch": 113.03,
"learning_rate": 4.920412992041299e-06,
"loss": 0.9906,
"step": 17180
},
{
"epoch": 113.09,
"learning_rate": 4.888147988814799e-06,
"loss": 0.842,
"step": 17190
},
{
"epoch": 113.16,
"learning_rate": 4.855882985588299e-06,
"loss": 0.8746,
"step": 17200
},
{
"epoch": 113.22,
"learning_rate": 4.823617982361798e-06,
"loss": 0.8782,
"step": 17210
},
{
"epoch": 113.29,
"learning_rate": 4.791352979135298e-06,
"loss": 0.913,
"step": 17220
},
{
"epoch": 113.35,
"learning_rate": 4.759087975908798e-06,
"loss": 0.826,
"step": 17230
},
{
"epoch": 113.42,
"learning_rate": 4.726822972682298e-06,
"loss": 0.8792,
"step": 17240
},
{
"epoch": 113.48,
"learning_rate": 4.694557969455797e-06,
"loss": 0.8961,
"step": 17250
},
{
"epoch": 113.55,
"learning_rate": 4.662292966229296e-06,
"loss": 0.8789,
"step": 17260
},
{
"epoch": 113.62,
"learning_rate": 4.6300279630027965e-06,
"loss": 0.8453,
"step": 17270
},
{
"epoch": 113.68,
"learning_rate": 4.597762959776297e-06,
"loss": 0.8834,
"step": 17280
},
{
"epoch": 113.75,
"learning_rate": 4.565497956549796e-06,
"loss": 0.9247,
"step": 17290
},
{
"epoch": 113.81,
"learning_rate": 4.533232953323295e-06,
"loss": 0.8541,
"step": 17300
},
{
"epoch": 113.88,
"learning_rate": 4.500967950096795e-06,
"loss": 0.854,
"step": 17310
},
{
"epoch": 113.94,
"learning_rate": 4.468702946870295e-06,
"loss": 0.9033,
"step": 17320
},
{
"epoch": 114.01,
"learning_rate": 4.436437943643794e-06,
"loss": 1.0242,
"step": 17330
},
{
"epoch": 114.08,
"learning_rate": 4.404172940417294e-06,
"loss": 0.8485,
"step": 17340
},
{
"epoch": 114.14,
"learning_rate": 4.371907937190794e-06,
"loss": 0.8935,
"step": 17350
},
{
"epoch": 114.21,
"learning_rate": 4.339642933964293e-06,
"loss": 0.871,
"step": 17360
},
{
"epoch": 114.27,
"learning_rate": 4.307377930737793e-06,
"loss": 0.9397,
"step": 17370
},
{
"epoch": 114.34,
"learning_rate": 4.275112927511293e-06,
"loss": 0.8355,
"step": 17380
},
{
"epoch": 114.41,
"learning_rate": 4.242847924284793e-06,
"loss": 0.8424,
"step": 17390
},
{
"epoch": 114.47,
"learning_rate": 4.210582921058292e-06,
"loss": 0.8736,
"step": 17400
},
{
"epoch": 114.54,
"learning_rate": 4.178317917831791e-06,
"loss": 0.9186,
"step": 17410
},
{
"epoch": 114.6,
"learning_rate": 4.1460529146052915e-06,
"loss": 0.8414,
"step": 17420
},
{
"epoch": 114.67,
"learning_rate": 4.113787911378792e-06,
"loss": 0.8786,
"step": 17430
},
{
"epoch": 114.73,
"learning_rate": 4.081522908152291e-06,
"loss": 0.8891,
"step": 17440
},
{
"epoch": 114.8,
"learning_rate": 4.04925790492579e-06,
"loss": 0.9005,
"step": 17450
},
{
"epoch": 114.86,
"learning_rate": 4.01699290169929e-06,
"loss": 0.8603,
"step": 17460
},
{
"epoch": 114.93,
"learning_rate": 3.98472789847279e-06,
"loss": 0.869,
"step": 17470
},
{
"epoch": 115.0,
"learning_rate": 3.95246289524629e-06,
"loss": 0.9212,
"step": 17480
},
{
"epoch": 115.07,
"learning_rate": 3.920197892019789e-06,
"loss": 0.9215,
"step": 17490
},
{
"epoch": 115.13,
"learning_rate": 3.887932888793289e-06,
"loss": 0.8667,
"step": 17500
},
{
"epoch": 115.13,
"eval_loss": 0.3438130021095276,
"eval_runtime": 324.2107,
"eval_samples_per_second": 21.93,
"eval_steps_per_second": 1.373,
"eval_wer": 0.35060485822123294,
"step": 17500
},
{
"epoch": 115.2,
"learning_rate": 3.855667885566788e-06,
"loss": 0.8505,
"step": 17510
},
{
"epoch": 115.26,
"learning_rate": 3.8234028823402885e-06,
"loss": 0.935,
"step": 17520
},
{
"epoch": 115.33,
"learning_rate": 3.7911378791137877e-06,
"loss": 0.8515,
"step": 17530
},
{
"epoch": 115.39,
"learning_rate": 3.7588728758872874e-06,
"loss": 0.8767,
"step": 17540
},
{
"epoch": 115.46,
"learning_rate": 3.7266078726607876e-06,
"loss": 0.891,
"step": 17550
},
{
"epoch": 115.52,
"learning_rate": 3.694342869434287e-06,
"loss": 0.9347,
"step": 17560
},
{
"epoch": 115.59,
"learning_rate": 3.662077866207787e-06,
"loss": 0.8439,
"step": 17570
},
{
"epoch": 115.65,
"learning_rate": 3.6298128629812862e-06,
"loss": 0.8656,
"step": 17580
},
{
"epoch": 115.72,
"learning_rate": 3.5975478597547863e-06,
"loss": 0.8868,
"step": 17590
},
{
"epoch": 115.79,
"learning_rate": 3.5652828565282856e-06,
"loss": 0.919,
"step": 17600
},
{
"epoch": 115.85,
"learning_rate": 3.5330178533017857e-06,
"loss": 0.8582,
"step": 17610
},
{
"epoch": 115.92,
"learning_rate": 3.500752850075285e-06,
"loss": 0.8699,
"step": 17620
},
{
"epoch": 115.98,
"learning_rate": 3.4684878468487847e-06,
"loss": 0.8604,
"step": 17630
},
{
"epoch": 116.05,
"learning_rate": 3.4362228436222844e-06,
"loss": 0.9501,
"step": 17640
},
{
"epoch": 116.12,
"learning_rate": 3.403957840395784e-06,
"loss": 0.8585,
"step": 17650
},
{
"epoch": 116.18,
"learning_rate": 3.3716928371692838e-06,
"loss": 0.8697,
"step": 17660
},
{
"epoch": 116.25,
"learning_rate": 3.3394278339427835e-06,
"loss": 0.942,
"step": 17670
},
{
"epoch": 116.31,
"learning_rate": 3.307162830716283e-06,
"loss": 0.879,
"step": 17680
},
{
"epoch": 116.38,
"learning_rate": 3.274897827489783e-06,
"loss": 0.8652,
"step": 17690
},
{
"epoch": 116.45,
"learning_rate": 3.2426328242632826e-06,
"loss": 0.871,
"step": 17700
},
{
"epoch": 116.51,
"learning_rate": 3.2103678210367823e-06,
"loss": 0.9304,
"step": 17710
},
{
"epoch": 116.58,
"learning_rate": 3.178102817810282e-06,
"loss": 0.8397,
"step": 17720
},
{
"epoch": 116.64,
"learning_rate": 3.1458378145837816e-06,
"loss": 0.8661,
"step": 17730
},
{
"epoch": 116.71,
"learning_rate": 3.1135728113572813e-06,
"loss": 0.8686,
"step": 17740
},
{
"epoch": 116.77,
"learning_rate": 3.081307808130781e-06,
"loss": 0.9082,
"step": 17750
},
{
"epoch": 116.84,
"learning_rate": 3.0490428049042807e-06,
"loss": 0.8541,
"step": 17760
},
{
"epoch": 116.9,
"learning_rate": 3.0167778016777804e-06,
"loss": 0.8637,
"step": 17770
},
{
"epoch": 116.97,
"learning_rate": 2.9845127984512797e-06,
"loss": 0.8832,
"step": 17780
},
{
"epoch": 117.04,
"learning_rate": 2.95224779522478e-06,
"loss": 0.9752,
"step": 17790
},
{
"epoch": 117.1,
"learning_rate": 2.919982791998279e-06,
"loss": 0.8353,
"step": 17800
},
{
"epoch": 117.17,
"learning_rate": 2.887717788771779e-06,
"loss": 0.8827,
"step": 17810
},
{
"epoch": 117.24,
"learning_rate": 2.8554527855452785e-06,
"loss": 0.8747,
"step": 17820
},
{
"epoch": 117.3,
"learning_rate": 2.8231877823187786e-06,
"loss": 0.8857,
"step": 17830
},
{
"epoch": 117.37,
"learning_rate": 2.790922779092278e-06,
"loss": 0.8432,
"step": 17840
},
{
"epoch": 117.43,
"learning_rate": 2.758657775865778e-06,
"loss": 0.8877,
"step": 17850
},
{
"epoch": 117.5,
"learning_rate": 2.7263927726392773e-06,
"loss": 0.9045,
"step": 17860
},
{
"epoch": 117.56,
"learning_rate": 2.694127769412777e-06,
"loss": 0.8463,
"step": 17870
},
{
"epoch": 117.63,
"learning_rate": 2.6618627661862766e-06,
"loss": 0.8323,
"step": 17880
},
{
"epoch": 117.69,
"learning_rate": 2.6295977629597763e-06,
"loss": 0.8763,
"step": 17890
},
{
"epoch": 117.76,
"learning_rate": 2.597332759733276e-06,
"loss": 0.9353,
"step": 17900
},
{
"epoch": 117.82,
"learning_rate": 2.5650677565067757e-06,
"loss": 0.8474,
"step": 17910
},
{
"epoch": 117.89,
"learning_rate": 2.5328027532802754e-06,
"loss": 0.8529,
"step": 17920
},
{
"epoch": 117.96,
"learning_rate": 2.5005377500537747e-06,
"loss": 0.8589,
"step": 17930
},
{
"epoch": 118.03,
"learning_rate": 2.468272746827275e-06,
"loss": 1.0224,
"step": 17940
},
{
"epoch": 118.09,
"learning_rate": 2.436007743600774e-06,
"loss": 0.8404,
"step": 17950
},
{
"epoch": 118.16,
"learning_rate": 2.403742740374274e-06,
"loss": 0.8568,
"step": 17960
},
{
"epoch": 118.22,
"learning_rate": 2.3714777371477735e-06,
"loss": 0.8704,
"step": 17970
},
{
"epoch": 118.29,
"learning_rate": 2.3392127339212736e-06,
"loss": 0.9164,
"step": 17980
},
{
"epoch": 118.35,
"learning_rate": 2.306947730694773e-06,
"loss": 0.8434,
"step": 17990
},
{
"epoch": 118.42,
"learning_rate": 2.274682727468273e-06,
"loss": 0.8895,
"step": 18000
},
{
"epoch": 118.42,
"eval_loss": 0.34335601329803467,
"eval_runtime": 325.3466,
"eval_samples_per_second": 21.854,
"eval_steps_per_second": 1.368,
"eval_wer": 0.3502564598858028,
"step": 18000
},
{
"epoch": 118.48,
"learning_rate": 2.2424177242417723e-06,
"loss": 0.9049,
"step": 18010
},
{
"epoch": 118.55,
"learning_rate": 2.2101527210152724e-06,
"loss": 0.8719,
"step": 18020
},
{
"epoch": 118.62,
"learning_rate": 2.1778877177887716e-06,
"loss": 0.86,
"step": 18030
},
{
"epoch": 118.68,
"learning_rate": 2.1456227145622718e-06,
"loss": 0.8437,
"step": 18040
},
{
"epoch": 118.75,
"learning_rate": 2.113357711335771e-06,
"loss": 0.9047,
"step": 18050
},
{
"epoch": 118.81,
"learning_rate": 2.0810927081092707e-06,
"loss": 0.8641,
"step": 18060
},
{
"epoch": 118.88,
"learning_rate": 2.0488277048827704e-06,
"loss": 0.834,
"step": 18070
},
{
"epoch": 118.94,
"learning_rate": 2.01656270165627e-06,
"loss": 0.9071,
"step": 18080
},
{
"epoch": 119.01,
"learning_rate": 1.98429769842977e-06,
"loss": 0.9641,
"step": 18090
},
{
"epoch": 119.08,
"learning_rate": 1.9520326952032695e-06,
"loss": 0.8182,
"step": 18100
},
{
"epoch": 119.14,
"learning_rate": 1.919767691976769e-06,
"loss": 0.8624,
"step": 18110
},
{
"epoch": 119.21,
"learning_rate": 1.8875026887502691e-06,
"loss": 0.8535,
"step": 18120
},
{
"epoch": 119.27,
"learning_rate": 1.8552376855237686e-06,
"loss": 0.931,
"step": 18130
},
{
"epoch": 119.34,
"learning_rate": 1.8229726822972683e-06,
"loss": 0.8267,
"step": 18140
},
{
"epoch": 119.41,
"learning_rate": 1.7907076790707678e-06,
"loss": 0.8664,
"step": 18150
},
{
"epoch": 119.47,
"learning_rate": 1.7584426758442675e-06,
"loss": 0.8943,
"step": 18160
},
{
"epoch": 119.54,
"learning_rate": 1.7261776726177672e-06,
"loss": 0.9318,
"step": 18170
},
{
"epoch": 119.6,
"learning_rate": 1.6939126693912669e-06,
"loss": 0.8273,
"step": 18180
},
{
"epoch": 119.67,
"learning_rate": 1.6616476661647666e-06,
"loss": 0.8521,
"step": 18190
},
{
"epoch": 119.73,
"learning_rate": 1.6293826629382662e-06,
"loss": 0.8815,
"step": 18200
},
{
"epoch": 119.8,
"learning_rate": 1.597117659711766e-06,
"loss": 0.8892,
"step": 18210
},
{
"epoch": 119.86,
"learning_rate": 1.5648526564852656e-06,
"loss": 0.8461,
"step": 18220
},
{
"epoch": 119.93,
"learning_rate": 1.5325876532587653e-06,
"loss": 0.8724,
"step": 18230
},
{
"epoch": 120.0,
"learning_rate": 1.500322650032265e-06,
"loss": 0.9253,
"step": 18240
},
{
"epoch": 120.07,
"learning_rate": 1.4680576468057647e-06,
"loss": 0.9371,
"step": 18250
},
{
"epoch": 120.13,
"learning_rate": 1.4357926435792644e-06,
"loss": 0.8727,
"step": 18260
},
{
"epoch": 120.2,
"learning_rate": 1.4035276403527641e-06,
"loss": 0.8629,
"step": 18270
},
{
"epoch": 120.26,
"learning_rate": 1.3712626371262636e-06,
"loss": 0.9365,
"step": 18280
},
{
"epoch": 120.33,
"learning_rate": 1.3389976338997633e-06,
"loss": 0.8307,
"step": 18290
},
{
"epoch": 120.39,
"learning_rate": 1.306732630673263e-06,
"loss": 0.8293,
"step": 18300
},
{
"epoch": 120.46,
"learning_rate": 1.2744676274467627e-06,
"loss": 0.8891,
"step": 18310
},
{
"epoch": 120.52,
"learning_rate": 1.2422026242202624e-06,
"loss": 0.9369,
"step": 18320
},
{
"epoch": 120.59,
"learning_rate": 1.209937620993762e-06,
"loss": 0.8345,
"step": 18330
},
{
"epoch": 120.65,
"learning_rate": 1.1776726177672618e-06,
"loss": 0.863,
"step": 18340
},
{
"epoch": 120.72,
"learning_rate": 1.1454076145407615e-06,
"loss": 0.869,
"step": 18350
},
{
"epoch": 120.79,
"learning_rate": 1.1131426113142612e-06,
"loss": 0.9124,
"step": 18360
},
{
"epoch": 120.85,
"learning_rate": 1.0808776080877609e-06,
"loss": 0.8465,
"step": 18370
},
{
"epoch": 120.92,
"learning_rate": 1.0486126048612605e-06,
"loss": 0.9023,
"step": 18380
},
{
"epoch": 120.98,
"learning_rate": 1.0163476016347602e-06,
"loss": 0.8672,
"step": 18390
},
{
"epoch": 121.05,
"learning_rate": 9.8408259840826e-07,
"loss": 0.9339,
"step": 18400
},
{
"epoch": 121.12,
"learning_rate": 9.518175951817595e-07,
"loss": 0.8539,
"step": 18410
},
{
"epoch": 121.18,
"learning_rate": 9.195525919552592e-07,
"loss": 0.8677,
"step": 18420
},
{
"epoch": 121.25,
"learning_rate": 8.872875887287589e-07,
"loss": 0.9385,
"step": 18430
},
{
"epoch": 121.31,
"learning_rate": 8.550225855022586e-07,
"loss": 0.8482,
"step": 18440
},
{
"epoch": 121.38,
"learning_rate": 8.227575822757583e-07,
"loss": 0.8607,
"step": 18450
},
{
"epoch": 121.45,
"learning_rate": 7.904925790492579e-07,
"loss": 0.8591,
"step": 18460
},
{
"epoch": 121.51,
"learning_rate": 7.582275758227576e-07,
"loss": 0.9215,
"step": 18470
},
{
"epoch": 121.58,
"learning_rate": 7.259625725962573e-07,
"loss": 0.8359,
"step": 18480
},
{
"epoch": 121.64,
"learning_rate": 6.93697569369757e-07,
"loss": 0.8706,
"step": 18490
},
{
"epoch": 121.71,
"learning_rate": 6.614325661432567e-07,
"loss": 0.8888,
"step": 18500
},
{
"epoch": 121.71,
"eval_loss": 0.34249696135520935,
"eval_runtime": 323.0236,
"eval_samples_per_second": 22.011,
"eval_steps_per_second": 1.378,
"eval_wer": 0.3494048195103068,
"step": 18500
},
{
"epoch": 121.77,
"learning_rate": 6.291675629167564e-07,
"loss": 0.9699,
"step": 18510
},
{
"epoch": 121.84,
"learning_rate": 5.969025596902561e-07,
"loss": 0.8446,
"step": 18520
},
{
"epoch": 121.9,
"learning_rate": 5.646375564637557e-07,
"loss": 0.8564,
"step": 18530
},
{
"epoch": 121.97,
"learning_rate": 5.323725532372553e-07,
"loss": 0.8883,
"step": 18540
},
{
"epoch": 122.04,
"learning_rate": 5.00107550010755e-07,
"loss": 0.9933,
"step": 18550
},
{
"epoch": 122.1,
"learning_rate": 4.678425467842547e-07,
"loss": 0.8476,
"step": 18560
},
{
"epoch": 122.17,
"learning_rate": 4.355775435577544e-07,
"loss": 0.8747,
"step": 18570
},
{
"epoch": 122.24,
"learning_rate": 4.03312540331254e-07,
"loss": 0.8982,
"step": 18580
},
{
"epoch": 122.3,
"learning_rate": 3.710475371047537e-07,
"loss": 0.8518,
"step": 18590
},
{
"epoch": 122.37,
"learning_rate": 3.387825338782534e-07,
"loss": 0.8722,
"step": 18600
},
{
"epoch": 122.43,
"learning_rate": 3.065175306517531e-07,
"loss": 0.8711,
"step": 18610
},
{
"epoch": 122.5,
"learning_rate": 2.7425252742525275e-07,
"loss": 0.9164,
"step": 18620
},
{
"epoch": 122.56,
"learning_rate": 2.4198752419875245e-07,
"loss": 0.8547,
"step": 18630
},
{
"epoch": 122.63,
"learning_rate": 2.0972252097225211e-07,
"loss": 0.858,
"step": 18640
},
{
"epoch": 122.69,
"learning_rate": 1.7745751774575178e-07,
"loss": 0.8493,
"step": 18650
},
{
"epoch": 122.76,
"learning_rate": 1.4519251451925145e-07,
"loss": 0.9404,
"step": 18660
},
{
"epoch": 122.82,
"learning_rate": 1.1292751129275113e-07,
"loss": 0.8165,
"step": 18670
},
{
"epoch": 122.89,
"learning_rate": 8.066250806625082e-08,
"loss": 0.8654,
"step": 18680
},
{
"epoch": 122.96,
"learning_rate": 4.8397504839750484e-08,
"loss": 0.8826,
"step": 18690
},
{
"epoch": 123.0,
"step": 18696,
"total_flos": 2.5832366503240237e+20,
"train_loss": 0.7828263307311466,
"train_runtime": 82672.8006,
"train_samples_per_second": 29.058,
"train_steps_per_second": 0.226
}
],
"max_steps": 18696,
"num_train_epochs": 123,
"total_flos": 2.5832366503240237e+20,
"trial_name": null,
"trial_params": null
}