{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00909090909090909,
"grad_norm": 0.6364642977714539,
"learning_rate": 0.000996969696969697,
"loss": 3.3369,
"step": 1
},
{
"epoch": 0.01818181818181818,
"grad_norm": 0.7382655143737793,
"learning_rate": 0.000993939393939394,
"loss": 3.6142,
"step": 2
},
{
"epoch": 0.02727272727272727,
"grad_norm": 0.5715238451957703,
"learning_rate": 0.000990909090909091,
"loss": 3.5781,
"step": 3
},
{
"epoch": 0.03636363636363636,
"grad_norm": 0.6886011958122253,
"learning_rate": 0.000987878787878788,
"loss": 3.5102,
"step": 4
},
{
"epoch": 0.045454545454545456,
"grad_norm": 0.5926012992858887,
"learning_rate": 0.000984848484848485,
"loss": 3.7665,
"step": 5
},
{
"epoch": 0.05454545454545454,
"grad_norm": 0.5465888977050781,
"learning_rate": 0.0009818181818181818,
"loss": 3.6976,
"step": 6
},
{
"epoch": 0.06363636363636363,
"grad_norm": 0.5086413621902466,
"learning_rate": 0.0009787878787878787,
"loss": 2.9476,
"step": 7
},
{
"epoch": 0.07272727272727272,
"grad_norm": 0.7309675216674805,
"learning_rate": 0.0009757575757575757,
"loss": 4.1926,
"step": 8
},
{
"epoch": 0.08181818181818182,
"grad_norm": 0.6094343662261963,
"learning_rate": 0.0009727272727272728,
"loss": 3.5917,
"step": 9
},
{
"epoch": 0.09090909090909091,
"grad_norm": 0.6449739336967468,
"learning_rate": 0.0009696969696969698,
"loss": 3.3021,
"step": 10
},
{
"epoch": 0.1,
"grad_norm": 0.6469141244888306,
"learning_rate": 0.0009666666666666667,
"loss": 3.5931,
"step": 11
},
{
"epoch": 0.10909090909090909,
"grad_norm": 0.6477038860321045,
"learning_rate": 0.0009636363636363637,
"loss": 3.8599,
"step": 12
},
{
"epoch": 0.11818181818181818,
"grad_norm": 0.5713329911231995,
"learning_rate": 0.0009606060606060606,
"loss": 3.3654,
"step": 13
},
{
"epoch": 0.12727272727272726,
"grad_norm": 0.6742083430290222,
"learning_rate": 0.0009575757575757576,
"loss": 3.756,
"step": 14
},
{
"epoch": 0.13636363636363635,
"grad_norm": 0.6667935848236084,
"learning_rate": 0.0009545454545454546,
"loss": 3.8559,
"step": 15
},
{
"epoch": 0.14545454545454545,
"grad_norm": null,
"learning_rate": 0.0009545454545454546,
"loss": 3.4346,
"step": 16
},
{
"epoch": 0.15454545454545454,
"grad_norm": 0.5967490077018738,
"learning_rate": 0.0009515151515151516,
"loss": 3.4284,
"step": 17
},
{
"epoch": 0.16363636363636364,
"grad_norm": 0.5478681325912476,
"learning_rate": 0.0009484848484848486,
"loss": 3.4869,
"step": 18
},
{
"epoch": 0.17272727272727273,
"grad_norm": 0.6075071692466736,
"learning_rate": 0.0009454545454545454,
"loss": 3.8386,
"step": 19
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.5510619282722473,
"learning_rate": 0.0009424242424242424,
"loss": 3.4209,
"step": 20
},
{
"epoch": 0.19090909090909092,
"grad_norm": 0.5931372046470642,
"learning_rate": 0.0009393939393939394,
"loss": 3.2638,
"step": 21
},
{
"epoch": 0.2,
"grad_norm": 0.582734227180481,
"learning_rate": 0.0009363636363636364,
"loss": 3.521,
"step": 22
},
{
"epoch": 0.20909090909090908,
"grad_norm": 0.7339524626731873,
"learning_rate": 0.0009333333333333333,
"loss": 3.7846,
"step": 23
},
{
"epoch": 0.21818181818181817,
"grad_norm": 0.5666806697845459,
"learning_rate": 0.0009303030303030303,
"loss": 2.9285,
"step": 24
},
{
"epoch": 0.22727272727272727,
"grad_norm": 0.5900283455848694,
"learning_rate": 0.0009272727272727273,
"loss": 3.3751,
"step": 25
},
{
"epoch": 0.23636363636363636,
"grad_norm": 0.469912588596344,
"learning_rate": 0.0009242424242424242,
"loss": 2.9928,
"step": 26
},
{
"epoch": 0.24545454545454545,
"grad_norm": 0.6613773107528687,
"learning_rate": 0.0009212121212121213,
"loss": 3.915,
"step": 27
},
{
"epoch": 0.2545454545454545,
"grad_norm": 0.5447609424591064,
"learning_rate": 0.0009181818181818182,
"loss": 3.8844,
"step": 28
},
{
"epoch": 0.2636363636363636,
"grad_norm": 0.5744115114212036,
"learning_rate": 0.0009151515151515152,
"loss": 3.7885,
"step": 29
},
{
"epoch": 0.2727272727272727,
"grad_norm": 0.5475027561187744,
"learning_rate": 0.0009121212121212121,
"loss": 3.2517,
"step": 30
},
{
"epoch": 0.2818181818181818,
"grad_norm": 0.5121821165084839,
"learning_rate": 0.0009090909090909091,
"loss": 3.2081,
"step": 31
},
{
"epoch": 0.2909090909090909,
"grad_norm": 0.5392746925354004,
"learning_rate": 0.000906060606060606,
"loss": 3.4964,
"step": 32
},
{
"epoch": 0.3,
"grad_norm": 0.5134533643722534,
"learning_rate": 0.0009030303030303031,
"loss": 3.2608,
"step": 33
},
{
"epoch": 0.3090909090909091,
"grad_norm": 0.9084953665733337,
"learning_rate": 0.0009000000000000001,
"loss": 3.3655,
"step": 34
},
{
"epoch": 0.3181818181818182,
"grad_norm": 0.5819680690765381,
"learning_rate": 0.000896969696969697,
"loss": 3.444,
"step": 35
},
{
"epoch": 0.32727272727272727,
"grad_norm": 0.6547687649726868,
"learning_rate": 0.000893939393939394,
"loss": 3.1361,
"step": 36
},
{
"epoch": 0.33636363636363636,
"grad_norm": 0.5099802017211914,
"learning_rate": 0.0008909090909090909,
"loss": 3.432,
"step": 37
},
{
"epoch": 0.34545454545454546,
"grad_norm": 0.5565956830978394,
"learning_rate": 0.000887878787878788,
"loss": 3.5979,
"step": 38
},
{
"epoch": 0.35454545454545455,
"grad_norm": 0.4903116226196289,
"learning_rate": 0.0008848484848484849,
"loss": 3.0049,
"step": 39
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.5243513584136963,
"learning_rate": 0.0008818181818181819,
"loss": 3.0374,
"step": 40
},
{
"epoch": 0.37272727272727274,
"grad_norm": 0.5933147072792053,
"learning_rate": 0.0008787878787878789,
"loss": 3.7916,
"step": 41
},
{
"epoch": 0.38181818181818183,
"grad_norm": 0.42482495307922363,
"learning_rate": 0.0008757575757575757,
"loss": 3.5664,
"step": 42
},
{
"epoch": 0.39090909090909093,
"grad_norm": 0.5176947116851807,
"learning_rate": 0.0008727272727272727,
"loss": 3.3353,
"step": 43
},
{
"epoch": 0.4,
"grad_norm": 0.648857831954956,
"learning_rate": 0.0008696969696969697,
"loss": 2.9128,
"step": 44
},
{
"epoch": 0.4090909090909091,
"grad_norm": 0.434540718793869,
"learning_rate": 0.0008666666666666667,
"loss": 2.9023,
"step": 45
},
{
"epoch": 0.41818181818181815,
"grad_norm": 0.4114321768283844,
"learning_rate": 0.0008636363636363636,
"loss": 2.8344,
"step": 46
},
{
"epoch": 0.42727272727272725,
"grad_norm": 0.5299983024597168,
"learning_rate": 0.0008606060606060606,
"loss": 3.6191,
"step": 47
},
{
"epoch": 0.43636363636363634,
"grad_norm": 0.5353228449821472,
"learning_rate": 0.0008575757575757575,
"loss": 3.1648,
"step": 48
},
{
"epoch": 0.44545454545454544,
"grad_norm": 0.42767348885536194,
"learning_rate": 0.0008545454545454545,
"loss": 2.7365,
"step": 49
},
{
"epoch": 0.45454545454545453,
"grad_norm": 0.457047700881958,
"learning_rate": 0.0008515151515151516,
"loss": 3.3525,
"step": 50
},
{
"epoch": 0.4636363636363636,
"grad_norm": 0.5668472647666931,
"learning_rate": 0.0008484848484848485,
"loss": 3.6456,
"step": 51
},
{
"epoch": 0.4727272727272727,
"grad_norm": 0.4414864480495453,
"learning_rate": 0.0008454545454545455,
"loss": 3.2231,
"step": 52
},
{
"epoch": 0.4818181818181818,
"grad_norm": 0.5518112778663635,
"learning_rate": 0.0008424242424242424,
"loss": 3.2678,
"step": 53
},
{
"epoch": 0.4909090909090909,
"grad_norm": 0.48707881569862366,
"learning_rate": 0.0008393939393939394,
"loss": 3.3249,
"step": 54
},
{
"epoch": 0.5,
"grad_norm": 0.4373042583465576,
"learning_rate": 0.0008363636363636363,
"loss": 2.9376,
"step": 55
},
{
"epoch": 0.509090909090909,
"grad_norm": 0.45095935463905334,
"learning_rate": 0.0008333333333333334,
"loss": 2.7096,
"step": 56
},
{
"epoch": 0.5181818181818182,
"grad_norm": 0.5320043563842773,
"learning_rate": 0.0008303030303030304,
"loss": 3.3028,
"step": 57
},
{
"epoch": 0.5272727272727272,
"grad_norm": 0.39713728427886963,
"learning_rate": 0.0008272727272727273,
"loss": 2.8784,
"step": 58
},
{
"epoch": 0.5363636363636364,
"grad_norm": 0.6237610578536987,
"learning_rate": 0.0008242424242424243,
"loss": 3.4138,
"step": 59
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.40011513233184814,
"learning_rate": 0.0008212121212121212,
"loss": 2.856,
"step": 60
},
{
"epoch": 0.5545454545454546,
"grad_norm": 0.48356831073760986,
"learning_rate": 0.0008181818181818183,
"loss": 3.3965,
"step": 61
},
{
"epoch": 0.5636363636363636,
"grad_norm": 0.47631216049194336,
"learning_rate": 0.0008151515151515152,
"loss": 3.0289,
"step": 62
},
{
"epoch": 0.5727272727272728,
"grad_norm": 0.44138747453689575,
"learning_rate": 0.0008121212121212122,
"loss": 3.02,
"step": 63
},
{
"epoch": 0.5818181818181818,
"grad_norm": 0.3897497355937958,
"learning_rate": 0.0008090909090909092,
"loss": 2.567,
"step": 64
},
{
"epoch": 0.5909090909090909,
"grad_norm": 0.48581022024154663,
"learning_rate": 0.000806060606060606,
"loss": 2.8268,
"step": 65
},
{
"epoch": 0.6,
"grad_norm": 0.6048558950424194,
"learning_rate": 0.000803030303030303,
"loss": 3.1296,
"step": 66
},
{
"epoch": 0.6090909090909091,
"grad_norm": 0.4718954861164093,
"learning_rate": 0.0008,
"loss": 3.8056,
"step": 67
},
{
"epoch": 0.6181818181818182,
"grad_norm": 0.48950737714767456,
"learning_rate": 0.000796969696969697,
"loss": 3.2057,
"step": 68
},
{
"epoch": 0.6272727272727273,
"grad_norm": 0.4679166376590729,
"learning_rate": 0.0007939393939393939,
"loss": 3.1246,
"step": 69
},
{
"epoch": 0.6363636363636364,
"grad_norm": 0.42045873403549194,
"learning_rate": 0.0007909090909090909,
"loss": 2.535,
"step": 70
},
{
"epoch": 0.6454545454545455,
"grad_norm": 0.4331657886505127,
"learning_rate": 0.0007878787878787878,
"loss": 2.8914,
"step": 71
},
{
"epoch": 0.6545454545454545,
"grad_norm": 0.4637533724308014,
"learning_rate": 0.0007848484848484848,
"loss": 3.1443,
"step": 72
},
{
"epoch": 0.6636363636363637,
"grad_norm": 0.4755864143371582,
"learning_rate": 0.0007818181818181819,
"loss": 2.9274,
"step": 73
},
{
"epoch": 0.6727272727272727,
"grad_norm": 0.46972426772117615,
"learning_rate": 0.0007787878787878788,
"loss": 3.1598,
"step": 74
},
{
"epoch": 0.6818181818181818,
"grad_norm": 0.4510386884212494,
"learning_rate": 0.0007757575757575758,
"loss": 3.0088,
"step": 75
},
{
"epoch": 0.6909090909090909,
"grad_norm": 0.4521528482437134,
"learning_rate": 0.0007727272727272727,
"loss": 2.9321,
"step": 76
},
{
"epoch": 0.7,
"grad_norm": 0.4548567235469818,
"learning_rate": 0.0007696969696969697,
"loss": 2.8669,
"step": 77
},
{
"epoch": 0.7090909090909091,
"grad_norm": 0.4685644507408142,
"learning_rate": 0.0007666666666666667,
"loss": 2.9544,
"step": 78
},
{
"epoch": 0.7181818181818181,
"grad_norm": 0.38922494649887085,
"learning_rate": 0.0007636363636363637,
"loss": 2.8192,
"step": 79
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.4609260857105255,
"learning_rate": 0.0007606060606060607,
"loss": 2.7984,
"step": 80
},
{
"epoch": 0.7363636363636363,
"grad_norm": 0.46624866127967834,
"learning_rate": 0.0007575757575757576,
"loss": 2.9923,
"step": 81
},
{
"epoch": 0.7454545454545455,
"grad_norm": 0.3941594362258911,
"learning_rate": 0.0007545454545454546,
"loss": 2.8727,
"step": 82
},
{
"epoch": 0.7545454545454545,
"grad_norm": 0.5344395041465759,
"learning_rate": 0.0007515151515151515,
"loss": 3.365,
"step": 83
},
{
"epoch": 0.7636363636363637,
"grad_norm": 0.4326035678386688,
"learning_rate": 0.0007484848484848486,
"loss": 2.7616,
"step": 84
},
{
"epoch": 0.7727272727272727,
"grad_norm": 0.4111727178096771,
"learning_rate": 0.0007454545454545455,
"loss": 2.5819,
"step": 85
},
{
"epoch": 0.7818181818181819,
"grad_norm": 0.46036821603775024,
"learning_rate": 0.0007424242424242425,
"loss": 3.4161,
"step": 86
},
{
"epoch": 0.7909090909090909,
"grad_norm": 0.4240114688873291,
"learning_rate": 0.0007393939393939393,
"loss": 2.8336,
"step": 87
},
{
"epoch": 0.8,
"grad_norm": 0.4403522312641144,
"learning_rate": 0.0007363636363636363,
"loss": 3.5287,
"step": 88
},
{
"epoch": 0.8090909090909091,
"grad_norm": 0.48128649592399597,
"learning_rate": 0.0007333333333333333,
"loss": 3.1024,
"step": 89
},
{
"epoch": 0.8181818181818182,
"grad_norm": 0.5348935127258301,
"learning_rate": 0.0007303030303030303,
"loss": 3.2116,
"step": 90
},
{
"epoch": 0.8272727272727273,
"grad_norm": 0.38897505402565,
"learning_rate": 0.0007272727272727273,
"loss": 2.9749,
"step": 91
},
{
"epoch": 0.8363636363636363,
"grad_norm": 0.34820327162742615,
"learning_rate": 0.0007242424242424242,
"loss": 2.6982,
"step": 92
},
{
"epoch": 0.8454545454545455,
"grad_norm": 0.46884340047836304,
"learning_rate": 0.0007212121212121212,
"loss": 3.1035,
"step": 93
},
{
"epoch": 0.8545454545454545,
"grad_norm": 0.46467122435569763,
"learning_rate": 0.0007181818181818181,
"loss": 2.8572,
"step": 94
},
{
"epoch": 0.8636363636363636,
"grad_norm": 0.39106878638267517,
"learning_rate": 0.0007151515151515152,
"loss": 2.5853,
"step": 95
},
{
"epoch": 0.8727272727272727,
"grad_norm": 0.4630919396877289,
"learning_rate": 0.0007121212121212122,
"loss": 2.764,
"step": 96
},
{
"epoch": 0.8818181818181818,
"grad_norm": 0.4636339545249939,
"learning_rate": 0.0007090909090909091,
"loss": 2.6493,
"step": 97
},
{
"epoch": 0.8909090909090909,
"grad_norm": 0.5077859163284302,
"learning_rate": 0.0007060606060606061,
"loss": 2.9443,
"step": 98
},
{
"epoch": 0.9,
"grad_norm": 0.45244234800338745,
"learning_rate": 0.000703030303030303,
"loss": 2.7878,
"step": 99
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.40455231070518494,
"learning_rate": 0.0007,
"loss": 2.8116,
"step": 100
},
{
"epoch": 0.9181818181818182,
"grad_norm": 0.4224472939968109,
"learning_rate": 0.000696969696969697,
"loss": 2.7833,
"step": 101
},
{
"epoch": 0.9272727272727272,
"grad_norm": 0.5336169600486755,
"learning_rate": 0.000693939393939394,
"loss": 2.9864,
"step": 102
},
{
"epoch": 0.9363636363636364,
"grad_norm": 0.4461047947406769,
"learning_rate": 0.0006909090909090909,
"loss": 2.686,
"step": 103
},
{
"epoch": 0.9454545454545454,
"grad_norm": 0.3904286324977875,
"learning_rate": 0.0006878787878787879,
"loss": 2.7575,
"step": 104
},
{
"epoch": 0.9545454545454546,
"grad_norm": 0.37062394618988037,
"learning_rate": 0.0006848484848484849,
"loss": 2.7282,
"step": 105
},
{
"epoch": 0.9636363636363636,
"grad_norm": 0.4348374009132385,
"learning_rate": 0.0006818181818181818,
"loss": 2.8095,
"step": 106
},
{
"epoch": 0.9727272727272728,
"grad_norm": 0.4431620240211487,
"learning_rate": 0.0006787878787878789,
"loss": 2.6665,
"step": 107
},
{
"epoch": 0.9818181818181818,
"grad_norm": 0.5908558964729309,
"learning_rate": 0.0006757575757575758,
"loss": 2.6022,
"step": 108
},
{
"epoch": 0.990909090909091,
"grad_norm": 0.39648768305778503,
"learning_rate": 0.0006727272727272728,
"loss": 3.274,
"step": 109
},
{
"epoch": 1.0,
"grad_norm": 0.4254423975944519,
"learning_rate": 0.0006696969696969696,
"loss": 3.074,
"step": 110
},
{
"epoch": 1.0,
"eval_f1": 0.8554,
"eval_gen_len": 49.7455,
"eval_loss": 2.718046188354492,
"eval_precision": 0.8499,
"eval_recall": 0.8613,
"eval_rouge1": 0.2578,
"eval_rouge2": 0.07,
"eval_rougeL": 0.2025,
"eval_rougeLsum": 0.2348,
"eval_runtime": 13.747,
"eval_samples_per_second": 8.002,
"eval_steps_per_second": 1.018,
"step": 110
},
{
"epoch": 1.009090909090909,
"grad_norm": 0.4320407509803772,
"learning_rate": 0.0006666666666666666,
"loss": 3.0578,
"step": 111
},
{
"epoch": 1.018181818181818,
"grad_norm": 0.37176021933555603,
"learning_rate": 0.0006636363636363638,
"loss": 2.5426,
"step": 112
},
{
"epoch": 1.0272727272727273,
"grad_norm": 0.4169993996620178,
"learning_rate": 0.0006606060606060606,
"loss": 2.8094,
"step": 113
},
{
"epoch": 1.0363636363636364,
"grad_norm": 0.4662134647369385,
"learning_rate": 0.0006575757575757576,
"loss": 3.5191,
"step": 114
},
{
"epoch": 1.0454545454545454,
"grad_norm": 0.40443146228790283,
"learning_rate": 0.0006545454545454545,
"loss": 2.5965,
"step": 115
},
{
"epoch": 1.0545454545454545,
"grad_norm": 0.4482393264770508,
"learning_rate": 0.0006515151515151515,
"loss": 3.1005,
"step": 116
},
{
"epoch": 1.0636363636363637,
"grad_norm": 0.41223442554473877,
"learning_rate": 0.0006484848484848484,
"loss": 2.5401,
"step": 117
},
{
"epoch": 1.0727272727272728,
"grad_norm": 0.39096009731292725,
"learning_rate": 0.0006454545454545455,
"loss": 2.7198,
"step": 118
},
{
"epoch": 1.0818181818181818,
"grad_norm": 0.3902396857738495,
"learning_rate": 0.0006424242424242425,
"loss": 2.9706,
"step": 119
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.4334186017513275,
"learning_rate": 0.0006393939393939394,
"loss": 3.2486,
"step": 120
},
{
"epoch": 1.1,
"grad_norm": 0.5535681843757629,
"learning_rate": 0.0006363636363636364,
"loss": 2.8518,
"step": 121
},
{
"epoch": 1.1090909090909091,
"grad_norm": 0.4210227131843567,
"learning_rate": 0.0006333333333333333,
"loss": 2.7779,
"step": 122
},
{
"epoch": 1.1181818181818182,
"grad_norm": 0.49003827571868896,
"learning_rate": 0.0006303030303030303,
"loss": 3.1741,
"step": 123
},
{
"epoch": 1.1272727272727272,
"grad_norm": 0.43262967467308044,
"learning_rate": 0.0006272727272727273,
"loss": 2.8839,
"step": 124
},
{
"epoch": 1.1363636363636362,
"grad_norm": 0.43903571367263794,
"learning_rate": 0.0006242424242424243,
"loss": 2.7924,
"step": 125
},
{
"epoch": 1.1454545454545455,
"grad_norm": 0.4224606454372406,
"learning_rate": 0.0006212121212121212,
"loss": 2.72,
"step": 126
},
{
"epoch": 1.1545454545454545,
"grad_norm": 0.36309605836868286,
"learning_rate": 0.0006181818181818182,
"loss": 2.6742,
"step": 127
},
{
"epoch": 1.1636363636363636,
"grad_norm": null,
"learning_rate": 0.0006181818181818182,
"loss": 2.6101,
"step": 128
},
{
"epoch": 1.1727272727272728,
"grad_norm": 0.47046589851379395,
"learning_rate": 0.0006151515151515152,
"loss": 3.1834,
"step": 129
},
{
"epoch": 1.1818181818181819,
"grad_norm": 0.5007497072219849,
"learning_rate": 0.0006121212121212121,
"loss": 3.1918,
"step": 130
},
{
"epoch": 1.190909090909091,
"grad_norm": 0.3614126145839691,
"learning_rate": 0.0006090909090909092,
"loss": 2.6803,
"step": 131
},
{
"epoch": 1.2,
"grad_norm": 0.3837718367576599,
"learning_rate": 0.0006060606060606061,
"loss": 2.7227,
"step": 132
},
{
"epoch": 1.209090909090909,
"grad_norm": 0.4169047474861145,
"learning_rate": 0.0006030303030303031,
"loss": 2.5232,
"step": 133
},
{
"epoch": 1.2181818181818183,
"grad_norm": 0.5007426738739014,
"learning_rate": 0.0006,
"loss": 2.9245,
"step": 134
},
{
"epoch": 1.2272727272727273,
"grad_norm": 0.4294348657131195,
"learning_rate": 0.0005969696969696969,
"loss": 3.0697,
"step": 135
},
{
"epoch": 1.2363636363636363,
"grad_norm": 0.45499905943870544,
"learning_rate": 0.000593939393939394,
"loss": 2.895,
"step": 136
},
{
"epoch": 1.2454545454545454,
"grad_norm": 0.3476593494415283,
"learning_rate": 0.0005909090909090909,
"loss": 2.2629,
"step": 137
},
{
"epoch": 1.2545454545454544,
"grad_norm": 0.3480299711227417,
"learning_rate": 0.0005878787878787879,
"loss": 2.4505,
"step": 138
},
{
"epoch": 1.2636363636363637,
"grad_norm": 0.3720663785934448,
"learning_rate": 0.0005848484848484848,
"loss": 2.5818,
"step": 139
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.4281027019023895,
"learning_rate": 0.0005818181818181818,
"loss": 3.1608,
"step": 140
},
{
"epoch": 1.2818181818181817,
"grad_norm": 0.3955448567867279,
"learning_rate": 0.0005787878787878787,
"loss": 2.8524,
"step": 141
},
{
"epoch": 1.290909090909091,
"grad_norm": 0.41574737429618835,
"learning_rate": 0.0005757575757575758,
"loss": 2.5939,
"step": 142
},
{
"epoch": 1.3,
"grad_norm": 0.5213348269462585,
"learning_rate": 0.0005727272727272727,
"loss": 3.0735,
"step": 143
},
{
"epoch": 1.309090909090909,
"grad_norm": 0.3448740839958191,
"learning_rate": 0.0005696969696969697,
"loss": 2.3949,
"step": 144
},
{
"epoch": 1.3181818181818181,
"grad_norm": 0.5018311142921448,
"learning_rate": 0.0005666666666666667,
"loss": 3.0924,
"step": 145
},
{
"epoch": 1.3272727272727272,
"grad_norm": 0.3792860805988312,
"learning_rate": 0.0005636363636363636,
"loss": 2.8024,
"step": 146
},
{
"epoch": 1.3363636363636364,
"grad_norm": 0.4702470898628235,
"learning_rate": 0.0005606060606060606,
"loss": 2.7595,
"step": 147
},
{
"epoch": 1.3454545454545455,
"grad_norm": 0.49066582322120667,
"learning_rate": 0.0005575757575757576,
"loss": 3.0074,
"step": 148
},
{
"epoch": 1.3545454545454545,
"grad_norm": 0.4291347563266754,
"learning_rate": 0.0005545454545454546,
"loss": 2.6343,
"step": 149
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.3881821036338806,
"learning_rate": 0.0005515151515151515,
"loss": 2.6528,
"step": 150
},
{
"epoch": 1.3727272727272728,
"grad_norm": 0.45461714267730713,
"learning_rate": 0.0005484848484848485,
"loss": 2.9632,
"step": 151
},
{
"epoch": 1.3818181818181818,
"grad_norm": 0.7852878570556641,
"learning_rate": 0.0005454545454545455,
"loss": 2.8063,
"step": 152
},
{
"epoch": 1.3909090909090909,
"grad_norm": 0.40388843417167664,
"learning_rate": 0.0005424242424242425,
"loss": 2.816,
"step": 153
},
{
"epoch": 1.4,
"grad_norm": 0.4106318950653076,
"learning_rate": 0.0005393939393939395,
"loss": 2.7375,
"step": 154
},
{
"epoch": 1.4090909090909092,
"grad_norm": 0.4104039669036865,
"learning_rate": 0.0005363636363636364,
"loss": 2.8403,
"step": 155
},
{
"epoch": 1.4181818181818182,
"grad_norm": 0.43326428532600403,
"learning_rate": 0.0005333333333333334,
"loss": 2.8803,
"step": 156
},
{
"epoch": 1.4272727272727272,
"grad_norm": 0.40821921825408936,
"learning_rate": 0.0005303030303030302,
"loss": 2.5137,
"step": 157
},
{
"epoch": 1.4363636363636363,
"grad_norm": 0.35917264223098755,
"learning_rate": 0.0005272727272727272,
"loss": 2.8159,
"step": 158
},
{
"epoch": 1.4454545454545453,
"grad_norm": 0.4057943820953369,
"learning_rate": 0.0005242424242424244,
"loss": 2.8094,
"step": 159
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.4162052869796753,
"learning_rate": 0.0005212121212121212,
"loss": 3.0779,
"step": 160
},
{
"epoch": 1.4636363636363636,
"grad_norm": 0.35214442014694214,
"learning_rate": 0.0005181818181818182,
"loss": 2.575,
"step": 161
},
{
"epoch": 1.4727272727272727,
"grad_norm": 0.3810121417045593,
"learning_rate": 0.0005151515151515151,
"loss": 2.2556,
"step": 162
},
{
"epoch": 1.481818181818182,
"grad_norm": 0.42652326822280884,
"learning_rate": 0.0005121212121212121,
"loss": 3.0068,
"step": 163
},
{
"epoch": 1.490909090909091,
"grad_norm": 0.44811055064201355,
"learning_rate": 0.000509090909090909,
"loss": 2.6942,
"step": 164
},
{
"epoch": 1.5,
"grad_norm": 0.3270896375179291,
"learning_rate": 0.0005060606060606061,
"loss": 2.2775,
"step": 165
},
{
"epoch": 1.509090909090909,
"grad_norm": 0.38227197527885437,
"learning_rate": 0.000503030303030303,
"loss": 2.7609,
"step": 166
},
{
"epoch": 1.518181818181818,
"grad_norm": 0.422146737575531,
"learning_rate": 0.0005,
"loss": 2.8683,
"step": 167
},
{
"epoch": 1.5272727272727273,
"grad_norm": 0.3827129602432251,
"learning_rate": 0.000496969696969697,
"loss": 2.4833,
"step": 168
},
{
"epoch": 1.5363636363636364,
"grad_norm": 0.3594245910644531,
"learning_rate": 0.000493939393939394,
"loss": 2.7452,
"step": 169
},
{
"epoch": 1.5454545454545454,
"grad_norm": 0.39295586943626404,
"learning_rate": 0.0004909090909090909,
"loss": 2.4076,
"step": 170
},
{
"epoch": 1.5545454545454547,
"grad_norm": 0.4528680741786957,
"learning_rate": 0.00048787878787878784,
"loss": 2.8534,
"step": 171
},
{
"epoch": 1.5636363636363635,
"grad_norm": 0.43613335490226746,
"learning_rate": 0.0004848484848484849,
"loss": 2.6855,
"step": 172
},
{
"epoch": 1.5727272727272728,
"grad_norm": 0.41567420959472656,
"learning_rate": 0.00048181818181818184,
"loss": 2.5252,
"step": 173
},
{
"epoch": 1.5818181818181818,
"grad_norm": 0.46097588539123535,
"learning_rate": 0.0004787878787878788,
"loss": 2.7966,
"step": 174
},
{
"epoch": 1.5909090909090908,
"grad_norm": 0.39751607179641724,
"learning_rate": 0.0004757575757575758,
"loss": 2.6312,
"step": 175
},
{
"epoch": 1.6,
"grad_norm": 0.4568496346473694,
"learning_rate": 0.0004727272727272727,
"loss": 3.035,
"step": 176
},
{
"epoch": 1.6090909090909091,
"grad_norm": 0.4183782935142517,
"learning_rate": 0.0004696969696969697,
"loss": 2.6313,
"step": 177
},
{
"epoch": 1.6181818181818182,
"grad_norm": 0.37121620774269104,
"learning_rate": 0.00046666666666666666,
"loss": 2.7121,
"step": 178
},
{
"epoch": 1.6272727272727274,
"grad_norm": 0.3829936981201172,
"learning_rate": 0.00046363636363636366,
"loss": 2.5797,
"step": 179
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.40406131744384766,
"learning_rate": 0.00046060606060606066,
"loss": 2.6845,
"step": 180
},
{
"epoch": 1.6454545454545455,
"grad_norm": 0.3852001130580902,
"learning_rate": 0.0004575757575757576,
"loss": 2.5071,
"step": 181
},
{
"epoch": 1.6545454545454545,
"grad_norm": 0.39973515272140503,
"learning_rate": 0.00045454545454545455,
"loss": 2.3022,
"step": 182
},
{
"epoch": 1.6636363636363636,
"grad_norm": 0.36672043800354004,
"learning_rate": 0.00045151515151515154,
"loss": 2.4523,
"step": 183
},
{
"epoch": 1.6727272727272728,
"grad_norm": 0.43695715069770813,
"learning_rate": 0.0004484848484848485,
"loss": 3.2144,
"step": 184
},
{
"epoch": 1.6818181818181817,
"grad_norm": 0.4261430501937866,
"learning_rate": 0.00044545454545454543,
"loss": 2.889,
"step": 185
},
{
"epoch": 1.690909090909091,
"grad_norm": 0.3702210783958435,
"learning_rate": 0.00044242424242424243,
"loss": 2.6261,
"step": 186
},
{
"epoch": 1.7,
"grad_norm": 0.3131745159626007,
"learning_rate": 0.0004393939393939394,
"loss": 2.4216,
"step": 187
},
{
"epoch": 1.709090909090909,
"grad_norm": 0.36688050627708435,
"learning_rate": 0.00043636363636363637,
"loss": 2.7838,
"step": 188
},
{
"epoch": 1.7181818181818183,
"grad_norm": 0.3396598994731903,
"learning_rate": 0.00043333333333333337,
"loss": 2.239,
"step": 189
},
{
"epoch": 1.7272727272727273,
"grad_norm": 0.41309380531311035,
"learning_rate": 0.0004303030303030303,
"loss": 2.6512,
"step": 190
},
{
"epoch": 1.7363636363636363,
"grad_norm": 0.4132671058177948,
"learning_rate": 0.00042727272727272726,
"loss": 2.5455,
"step": 191
},
{
"epoch": 1.7454545454545456,
"grad_norm": 0.4893184304237366,
"learning_rate": 0.00042424242424242425,
"loss": 2.7746,
"step": 192
},
{
"epoch": 1.7545454545454544,
"grad_norm": 0.3930586874485016,
"learning_rate": 0.0004212121212121212,
"loss": 2.5218,
"step": 193
},
{
"epoch": 1.7636363636363637,
"grad_norm": 0.3655427396297455,
"learning_rate": 0.00041818181818181814,
"loss": 2.7951,
"step": 194
},
{
"epoch": 1.7727272727272727,
"grad_norm": 0.35268452763557434,
"learning_rate": 0.0004151515151515152,
"loss": 2.6024,
"step": 195
},
{
"epoch": 1.7818181818181817,
"grad_norm": 0.4578306972980499,
"learning_rate": 0.00041212121212121214,
"loss": 3.0399,
"step": 196
},
{
"epoch": 1.790909090909091,
"grad_norm": 0.37848854064941406,
"learning_rate": 0.00040909090909090913,
"loss": 2.1378,
"step": 197
},
{
"epoch": 1.8,
"grad_norm": 0.3940020203590393,
"learning_rate": 0.0004060606060606061,
"loss": 2.7782,
"step": 198
},
{
"epoch": 1.809090909090909,
"grad_norm": 0.4250258505344391,
"learning_rate": 0.000403030303030303,
"loss": 2.9341,
"step": 199
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.4134068191051483,
"learning_rate": 0.0004,
"loss": 2.9678,
"step": 200
},
{
"epoch": 1.8272727272727272,
"grad_norm": 0.3724648058414459,
"learning_rate": 0.00039696969696969696,
"loss": 2.7075,
"step": 201
},
{
"epoch": 1.8363636363636364,
"grad_norm": 0.38021424412727356,
"learning_rate": 0.0003939393939393939,
"loss": 2.5617,
"step": 202
},
{
"epoch": 1.8454545454545455,
"grad_norm": 0.41387441754341125,
"learning_rate": 0.00039090909090909096,
"loss": 2.5376,
"step": 203
},
{
"epoch": 1.8545454545454545,
"grad_norm": 0.3312947452068329,
"learning_rate": 0.0003878787878787879,
"loss": 2.3927,
"step": 204
},
{
"epoch": 1.8636363636363638,
"grad_norm": 0.40720024704933167,
"learning_rate": 0.00038484848484848485,
"loss": 2.5062,
"step": 205
},
{
"epoch": 1.8727272727272726,
"grad_norm": 0.43445464968681335,
"learning_rate": 0.00038181818181818184,
"loss": 2.6462,
"step": 206
},
{
"epoch": 1.8818181818181818,
"grad_norm": 0.3456729054450989,
"learning_rate": 0.0003787878787878788,
"loss": 2.4646,
"step": 207
},
{
"epoch": 1.8909090909090909,
"grad_norm": 0.5572815537452698,
"learning_rate": 0.00037575757575757573,
"loss": 2.6547,
"step": 208
},
{
"epoch": 1.9,
"grad_norm": 0.5938643217086792,
"learning_rate": 0.00037272727272727273,
"loss": 2.6929,
"step": 209
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.4092172384262085,
"learning_rate": 0.00036969696969696967,
"loss": 2.6125,
"step": 210
},
{
"epoch": 1.9181818181818182,
"grad_norm": 0.4827238917350769,
"learning_rate": 0.00036666666666666667,
"loss": 2.5752,
"step": 211
},
{
"epoch": 1.9272727272727272,
"grad_norm": 0.3931882679462433,
"learning_rate": 0.00036363636363636367,
"loss": 2.7916,
"step": 212
},
{
"epoch": 1.9363636363636365,
"grad_norm": 0.41997700929641724,
"learning_rate": 0.0003606060606060606,
"loss": 2.5752,
"step": 213
},
{
"epoch": 1.9454545454545453,
"grad_norm": 0.3693118095397949,
"learning_rate": 0.0003575757575757576,
"loss": 2.5806,
"step": 214
},
{
"epoch": 1.9545454545454546,
"grad_norm": 0.35362234711647034,
"learning_rate": 0.00035454545454545455,
"loss": 2.7562,
"step": 215
},
{
"epoch": 1.9636363636363636,
"grad_norm": 0.5130824446678162,
"learning_rate": 0.0003515151515151515,
"loss": 3.0111,
"step": 216
},
{
"epoch": 1.9727272727272727,
"grad_norm": 0.42124655842781067,
"learning_rate": 0.0003484848484848485,
"loss": 2.5606,
"step": 217
},
{
"epoch": 1.981818181818182,
"grad_norm": 0.34274134039878845,
"learning_rate": 0.00034545454545454544,
"loss": 2.3912,
"step": 218
},
{
"epoch": 1.990909090909091,
"grad_norm": 0.4317152500152588,
"learning_rate": 0.00034242424242424244,
"loss": 3.1581,
"step": 219
},
{
"epoch": 2.0,
"grad_norm": 0.43109312653541565,
"learning_rate": 0.00033939393939393943,
"loss": 2.8218,
"step": 220
},
{
"epoch": 2.0,
"eval_f1": 0.8603,
"eval_gen_len": 49.7,
"eval_loss": 2.524232864379883,
"eval_precision": 0.8578,
"eval_recall": 0.863,
"eval_rouge1": 0.2895,
"eval_rouge2": 0.0902,
"eval_rougeL": 0.2315,
"eval_rougeLsum": 0.2639,
"eval_runtime": 13.981,
"eval_samples_per_second": 7.868,
"eval_steps_per_second": 1.001,
"step": 220
},
{
"epoch": 2.0090909090909093,
"grad_norm": 0.37619197368621826,
"learning_rate": 0.0003363636363636364,
"loss": 2.1978,
"step": 221
},
{
"epoch": 2.018181818181818,
"grad_norm": 0.3892785608768463,
"learning_rate": 0.0003333333333333333,
"loss": 2.5908,
"step": 222
},
{
"epoch": 2.0272727272727273,
"grad_norm": 0.3815114498138428,
"learning_rate": 0.0003303030303030303,
"loss": 2.5642,
"step": 223
},
{
"epoch": 2.036363636363636,
"grad_norm": 0.5688372850418091,
"learning_rate": 0.00032727272727272726,
"loss": 2.6337,
"step": 224
},
{
"epoch": 2.0454545454545454,
"grad_norm": 0.45682141184806824,
"learning_rate": 0.0003242424242424242,
"loss": 2.932,
"step": 225
},
{
"epoch": 2.0545454545454547,
"grad_norm": 0.4763953387737274,
"learning_rate": 0.00032121212121212126,
"loss": 3.451,
"step": 226
},
{
"epoch": 2.0636363636363635,
"grad_norm": 0.44714823365211487,
"learning_rate": 0.0003181818181818182,
"loss": 2.753,
"step": 227
},
{
"epoch": 2.0727272727272728,
"grad_norm": 0.3732368052005768,
"learning_rate": 0.00031515151515151515,
"loss": 2.6315,
"step": 228
},
{
"epoch": 2.081818181818182,
"grad_norm": 0.32465749979019165,
"learning_rate": 0.00031212121212121214,
"loss": 2.2293,
"step": 229
},
{
"epoch": 2.090909090909091,
"grad_norm": 0.522760272026062,
"learning_rate": 0.0003090909090909091,
"loss": 2.7078,
"step": 230
},
{
"epoch": 2.1,
"grad_norm": 0.39734870195388794,
"learning_rate": 0.00030606060606060603,
"loss": 2.4472,
"step": 231
},
{
"epoch": 2.109090909090909,
"grad_norm": 0.3766539692878723,
"learning_rate": 0.00030303030303030303,
"loss": 2.2977,
"step": 232
},
{
"epoch": 2.118181818181818,
"grad_norm": 0.40086719393730164,
"learning_rate": 0.0003,
"loss": 2.4791,
"step": 233
},
{
"epoch": 2.1272727272727274,
"grad_norm": 0.4024496078491211,
"learning_rate": 0.000296969696969697,
"loss": 2.6618,
"step": 234
},
{
"epoch": 2.1363636363636362,
"grad_norm": 0.3908478915691376,
"learning_rate": 0.00029393939393939397,
"loss": 2.4619,
"step": 235
},
{
"epoch": 2.1454545454545455,
"grad_norm": 0.529670000076294,
"learning_rate": 0.0002909090909090909,
"loss": 2.7967,
"step": 236
},
{
"epoch": 2.1545454545454543,
"grad_norm": 0.5121607780456543,
"learning_rate": 0.0002878787878787879,
"loss": 2.5262,
"step": 237
},
{
"epoch": 2.1636363636363636,
"grad_norm": 0.41577398777008057,
"learning_rate": 0.00028484848484848485,
"loss": 2.6012,
"step": 238
},
{
"epoch": 2.172727272727273,
"grad_norm": 0.4425376355648041,
"learning_rate": 0.0002818181818181818,
"loss": 2.8202,
"step": 239
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.4031323194503784,
"learning_rate": 0.0002787878787878788,
"loss": 3.1458,
"step": 240
},
{
"epoch": 2.190909090909091,
"grad_norm": 0.40450453758239746,
"learning_rate": 0.00027575757575757574,
"loss": 2.4299,
"step": 241
},
{
"epoch": 2.2,
"grad_norm": 0.3808647692203522,
"learning_rate": 0.00027272727272727274,
"loss": 2.1072,
"step": 242
},
{
"epoch": 2.209090909090909,
"grad_norm": 0.43926000595092773,
"learning_rate": 0.00026969696969696974,
"loss": 3.0111,
"step": 243
},
{
"epoch": 2.2181818181818183,
"grad_norm": 0.4190639853477478,
"learning_rate": 0.0002666666666666667,
"loss": 2.8447,
"step": 244
},
{
"epoch": 2.227272727272727,
"grad_norm": 0.4418098032474518,
"learning_rate": 0.0002636363636363636,
"loss": 2.9145,
"step": 245
},
{
"epoch": 2.2363636363636363,
"grad_norm": 0.4507788419723511,
"learning_rate": 0.0002606060606060606,
"loss": 2.9411,
"step": 246
},
{
"epoch": 2.2454545454545456,
"grad_norm": 0.48967909812927246,
"learning_rate": 0.00025757575757575756,
"loss": 2.6977,
"step": 247
},
{
"epoch": 2.2545454545454544,
"grad_norm": 0.4220658242702484,
"learning_rate": 0.0002545454545454545,
"loss": 2.7123,
"step": 248
},
{
"epoch": 2.2636363636363637,
"grad_norm": 0.431964248418808,
"learning_rate": 0.0002515151515151515,
"loss": 2.9291,
"step": 249
},
{
"epoch": 2.2727272727272725,
"grad_norm": 0.38637152314186096,
"learning_rate": 0.0002484848484848485,
"loss": 3.0334,
"step": 250
},
{
"epoch": 2.2818181818181817,
"grad_norm": 0.3597087562084198,
"learning_rate": 0.00024545454545454545,
"loss": 2.2375,
"step": 251
},
{
"epoch": 2.290909090909091,
"grad_norm": 0.3618970215320587,
"learning_rate": 0.00024242424242424245,
"loss": 2.1943,
"step": 252
},
{
"epoch": 2.3,
"grad_norm": 0.3960267901420593,
"learning_rate": 0.0002393939393939394,
"loss": 2.6104,
"step": 253
},
{
"epoch": 2.309090909090909,
"grad_norm": 0.5842825174331665,
"learning_rate": 0.00023636363636363636,
"loss": 2.8518,
"step": 254
},
{
"epoch": 2.3181818181818183,
"grad_norm": 0.4031458795070648,
"learning_rate": 0.00023333333333333333,
"loss": 2.4321,
"step": 255
},
{
"epoch": 2.327272727272727,
"grad_norm": 0.4214467704296112,
"learning_rate": 0.00023030303030303033,
"loss": 2.5849,
"step": 256
},
{
"epoch": 2.3363636363636364,
"grad_norm": 0.4047069549560547,
"learning_rate": 0.00022727272727272727,
"loss": 2.9788,
"step": 257
},
{
"epoch": 2.3454545454545457,
"grad_norm": 0.4479776620864868,
"learning_rate": 0.00022424242424242424,
"loss": 2.4104,
"step": 258
},
{
"epoch": 2.3545454545454545,
"grad_norm": 0.44733545184135437,
"learning_rate": 0.00022121212121212121,
"loss": 2.7293,
"step": 259
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.42044103145599365,
"learning_rate": 0.00021818181818181818,
"loss": 2.6847,
"step": 260
},
{
"epoch": 2.3727272727272726,
"grad_norm": 0.4420049488544464,
"learning_rate": 0.00021515151515151516,
"loss": 2.5097,
"step": 261
},
{
"epoch": 2.381818181818182,
"grad_norm": 0.44038712978363037,
"learning_rate": 0.00021212121212121213,
"loss": 2.5838,
"step": 262
},
{
"epoch": 2.390909090909091,
"grad_norm": 0.39566606283187866,
"learning_rate": 0.00020909090909090907,
"loss": 2.09,
"step": 263
},
{
"epoch": 2.4,
"grad_norm": 0.3763667941093445,
"learning_rate": 0.00020606060606060607,
"loss": 2.8325,
"step": 264
},
{
"epoch": 2.409090909090909,
"grad_norm": 0.3535614311695099,
"learning_rate": 0.00020303030303030304,
"loss": 2.4777,
"step": 265
},
{
"epoch": 2.418181818181818,
"grad_norm": 0.30210864543914795,
"learning_rate": 0.0002,
"loss": 2.3018,
"step": 266
},
{
"epoch": 2.4272727272727272,
"grad_norm": 0.39669087529182434,
"learning_rate": 0.00019696969696969695,
"loss": 2.8965,
"step": 267
},
{
"epoch": 2.4363636363636365,
"grad_norm": 0.40731772780418396,
"learning_rate": 0.00019393939393939395,
"loss": 2.6297,
"step": 268
},
{
"epoch": 2.4454545454545453,
"grad_norm": 0.3963909447193146,
"learning_rate": 0.00019090909090909092,
"loss": 2.6312,
"step": 269
},
{
"epoch": 2.4545454545454546,
"grad_norm": 0.3853819668292999,
"learning_rate": 0.00018787878787878787,
"loss": 2.4434,
"step": 270
},
{
"epoch": 2.463636363636364,
"grad_norm": 0.37811392545700073,
"learning_rate": 0.00018484848484848484,
"loss": 2.5427,
"step": 271
},
{
"epoch": 2.4727272727272727,
"grad_norm": 0.3932028114795685,
"learning_rate": 0.00018181818181818183,
"loss": 2.3086,
"step": 272
},
{
"epoch": 2.481818181818182,
"grad_norm": 0.4498761296272278,
"learning_rate": 0.0001787878787878788,
"loss": 2.5002,
"step": 273
},
{
"epoch": 2.4909090909090907,
"grad_norm": 0.4003858268260956,
"learning_rate": 0.00017575757575757575,
"loss": 2.7886,
"step": 274
},
{
"epoch": 2.5,
"grad_norm": 0.4310906231403351,
"learning_rate": 0.00017272727272727272,
"loss": 2.5798,
"step": 275
},
{
"epoch": 2.509090909090909,
"grad_norm": 0.45177921652793884,
"learning_rate": 0.00016969696969696972,
"loss": 2.5467,
"step": 276
},
{
"epoch": 2.518181818181818,
"grad_norm": 0.5024899840354919,
"learning_rate": 0.00016666666666666666,
"loss": 2.4274,
"step": 277
},
{
"epoch": 2.5272727272727273,
"grad_norm": 0.4538518786430359,
"learning_rate": 0.00016363636363636363,
"loss": 2.7246,
"step": 278
},
{
"epoch": 2.536363636363636,
"grad_norm": 0.41360950469970703,
"learning_rate": 0.00016060606060606063,
"loss": 2.8499,
"step": 279
},
{
"epoch": 2.5454545454545454,
"grad_norm": 0.3810077905654907,
"learning_rate": 0.00015757575757575757,
"loss": 2.4792,
"step": 280
},
{
"epoch": 2.5545454545454547,
"grad_norm": 0.3792803883552551,
"learning_rate": 0.00015454545454545454,
"loss": 2.537,
"step": 281
},
{
"epoch": 2.5636363636363635,
"grad_norm": 0.4680609703063965,
"learning_rate": 0.00015151515151515152,
"loss": 2.4762,
"step": 282
},
{
"epoch": 2.5727272727272728,
"grad_norm": 0.4536025822162628,
"learning_rate": 0.0001484848484848485,
"loss": 2.501,
"step": 283
},
{
"epoch": 2.581818181818182,
"grad_norm": 0.38101115822792053,
"learning_rate": 0.00014545454545454546,
"loss": 2.5216,
"step": 284
},
{
"epoch": 2.590909090909091,
"grad_norm": 0.34691163897514343,
"learning_rate": 0.00014242424242424243,
"loss": 2.4876,
"step": 285
},
{
"epoch": 2.6,
"grad_norm": 0.3825785517692566,
"learning_rate": 0.0001393939393939394,
"loss": 2.2582,
"step": 286
},
{
"epoch": 2.6090909090909093,
"grad_norm": 0.32838982343673706,
"learning_rate": 0.00013636363636363637,
"loss": 2.2437,
"step": 287
},
{
"epoch": 2.618181818181818,
"grad_norm": 0.44533124566078186,
"learning_rate": 0.00013333333333333334,
"loss": 2.7636,
"step": 288
},
{
"epoch": 2.6272727272727274,
"grad_norm": 0.41694092750549316,
"learning_rate": 0.0001303030303030303,
"loss": 2.3834,
"step": 289
},
{
"epoch": 2.6363636363636362,
"grad_norm": 0.36219823360443115,
"learning_rate": 0.00012727272727272725,
"loss": 2.68,
"step": 290
},
{
"epoch": 2.6454545454545455,
"grad_norm": 0.43871843814849854,
"learning_rate": 0.00012424242424242425,
"loss": 2.8321,
"step": 291
},
{
"epoch": 2.6545454545454543,
"grad_norm": 0.5161608457565308,
"learning_rate": 0.00012121212121212122,
"loss": 3.2886,
"step": 292
},
{
"epoch": 2.6636363636363636,
"grad_norm": 0.4421100616455078,
"learning_rate": 0.00011818181818181818,
"loss": 2.6982,
"step": 293
},
{
"epoch": 2.672727272727273,
"grad_norm": 0.38912761211395264,
"learning_rate": 0.00011515151515151516,
"loss": 2.4868,
"step": 294
},
{
"epoch": 2.6818181818181817,
"grad_norm": 0.4366167485713959,
"learning_rate": 0.00011212121212121212,
"loss": 3.0351,
"step": 295
},
{
"epoch": 2.690909090909091,
"grad_norm": 0.4008959233760834,
"learning_rate": 0.00010909090909090909,
"loss": 2.756,
"step": 296
},
{
"epoch": 2.7,
"grad_norm": 0.4010573625564575,
"learning_rate": 0.00010606060606060606,
"loss": 2.7766,
"step": 297
},
{
"epoch": 2.709090909090909,
"grad_norm": 0.38755205273628235,
"learning_rate": 0.00010303030303030303,
"loss": 2.4979,
"step": 298
},
{
"epoch": 2.7181818181818183,
"grad_norm": 0.404317706823349,
"learning_rate": 0.0001,
"loss": 2.4744,
"step": 299
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.8506999611854553,
"learning_rate": 9.696969696969698e-05,
"loss": 3.4183,
"step": 300
},
{
"epoch": 2.7363636363636363,
"grad_norm": 0.3759117126464844,
"learning_rate": 9.393939393939393e-05,
"loss": 2.4054,
"step": 301
},
{
"epoch": 2.7454545454545456,
"grad_norm": 0.4374713599681854,
"learning_rate": 9.090909090909092e-05,
"loss": 2.5911,
"step": 302
},
{
"epoch": 2.7545454545454544,
"grad_norm": 0.43626895546913147,
"learning_rate": 8.787878787878787e-05,
"loss": 2.6702,
"step": 303
},
{
"epoch": 2.7636363636363637,
"grad_norm": 0.4850078523159027,
"learning_rate": 8.484848484848486e-05,
"loss": 2.4731,
"step": 304
},
{
"epoch": 2.7727272727272725,
"grad_norm": 0.3225160837173462,
"learning_rate": 8.181818181818182e-05,
"loss": 2.2104,
"step": 305
},
{
"epoch": 2.7818181818181817,
"grad_norm": 0.3751175105571747,
"learning_rate": 7.878787878787879e-05,
"loss": 2.4663,
"step": 306
},
{
"epoch": 2.790909090909091,
"grad_norm": 0.46076083183288574,
"learning_rate": 7.575757575757576e-05,
"loss": 2.999,
"step": 307
},
{
"epoch": 2.8,
"grad_norm": 0.41437849402427673,
"learning_rate": 7.272727272727273e-05,
"loss": 2.543,
"step": 308
},
{
"epoch": 2.809090909090909,
"grad_norm": 0.3945867419242859,
"learning_rate": 6.96969696969697e-05,
"loss": 2.835,
"step": 309
},
{
"epoch": 2.8181818181818183,
"grad_norm": 0.36785265803337097,
"learning_rate": 6.666666666666667e-05,
"loss": 2.4023,
"step": 310
},
{
"epoch": 2.827272727272727,
"grad_norm": 0.4000905752182007,
"learning_rate": 6.363636363636363e-05,
"loss": 2.8601,
"step": 311
},
{
"epoch": 2.8363636363636364,
"grad_norm": 0.39836353063583374,
"learning_rate": 6.060606060606061e-05,
"loss": 2.5777,
"step": 312
},
{
"epoch": 2.8454545454545457,
"grad_norm": 0.4224497973918915,
"learning_rate": 5.757575757575758e-05,
"loss": 2.6325,
"step": 313
},
{
"epoch": 2.8545454545454545,
"grad_norm": 0.38463807106018066,
"learning_rate": 5.4545454545454546e-05,
"loss": 2.587,
"step": 314
},
{
"epoch": 2.8636363636363638,
"grad_norm": 0.3975173532962799,
"learning_rate": 5.151515151515152e-05,
"loss": 2.351,
"step": 315
},
{
"epoch": 2.8727272727272726,
"grad_norm": 0.6949688792228699,
"learning_rate": 4.848484848484849e-05,
"loss": 2.678,
"step": 316
},
{
"epoch": 2.881818181818182,
"grad_norm": 0.3743134140968323,
"learning_rate": 4.545454545454546e-05,
"loss": 2.572,
"step": 317
},
{
"epoch": 2.8909090909090907,
"grad_norm": 0.39012962579727173,
"learning_rate": 4.242424242424243e-05,
"loss": 3.0743,
"step": 318
},
{
"epoch": 2.9,
"grad_norm": 0.4210345447063446,
"learning_rate": 3.939393939393939e-05,
"loss": 2.9515,
"step": 319
},
{
"epoch": 2.909090909090909,
"grad_norm": 1.0956965684890747,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.2825,
"step": 320
},
{
"epoch": 2.918181818181818,
"grad_norm": 0.5798986554145813,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.6334,
"step": 321
},
{
"epoch": 2.9272727272727272,
"grad_norm": 0.7255111336708069,
"learning_rate": 3.0303030303030306e-05,
"loss": 2.3757,
"step": 322
},
{
"epoch": 2.9363636363636365,
"grad_norm": 0.45164722204208374,
"learning_rate": 2.7272727272727273e-05,
"loss": 2.4321,
"step": 323
},
{
"epoch": 2.9454545454545453,
"grad_norm": 0.3834429979324341,
"learning_rate": 2.4242424242424244e-05,
"loss": 2.7031,
"step": 324
},
{
"epoch": 2.9545454545454546,
"grad_norm": 0.3603014349937439,
"learning_rate": 2.1212121212121215e-05,
"loss": 2.565,
"step": 325
},
{
"epoch": 2.963636363636364,
"grad_norm": 0.43686574697494507,
"learning_rate": 1.8181818181818182e-05,
"loss": 3.008,
"step": 326
},
{
"epoch": 2.9727272727272727,
"grad_norm": 0.37516218423843384,
"learning_rate": 1.5151515151515153e-05,
"loss": 2.3153,
"step": 327
},
{
"epoch": 2.981818181818182,
"grad_norm": 0.3693811893463135,
"learning_rate": 1.2121212121212122e-05,
"loss": 2.3701,
"step": 328
},
{
"epoch": 2.990909090909091,
"grad_norm": 0.5156209468841553,
"learning_rate": 9.090909090909091e-06,
"loss": 2.1715,
"step": 329
},
{
"epoch": 3.0,
"grad_norm": 0.4056047201156616,
"learning_rate": 6.060606060606061e-06,
"loss": 2.5886,
"step": 330
},
{
"epoch": 3.0,
"eval_f1": 0.8624,
"eval_gen_len": 49.7364,
"eval_loss": 2.47719144821167,
"eval_precision": 0.8604,
"eval_recall": 0.8646,
"eval_rouge1": 0.3032,
"eval_rouge2": 0.1016,
"eval_rougeL": 0.2431,
"eval_rougeLsum": 0.2761,
"eval_runtime": 14.1697,
"eval_samples_per_second": 7.763,
"eval_steps_per_second": 0.988,
"step": 330
},
{
"epoch": 3.0,
"step": 330,
"total_flos": 357853398958080.0,
"train_loss": 2.845433976433494,
"train_runtime": 116.68,
"train_samples_per_second": 22.6,
"train_steps_per_second": 2.828
}
],
"logging_steps": 1,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 357853398958080.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}