{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.942528735632184,
"eval_steps": 22,
"global_step": 261,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 19.59183260405101,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.3985,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 1.5676738023757935,
"eval_runtime": 3.9227,
"eval_samples_per_second": 4.844,
"eval_steps_per_second": 1.275,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 25.857640141956836,
"learning_rate": 4.000000000000001e-06,
"loss": 1.6073,
"step": 2
},
{
"epoch": 0.03,
"grad_norm": 19.53747244629775,
"learning_rate": 6e-06,
"loss": 1.5784,
"step": 3
},
{
"epoch": 0.05,
"grad_norm": 32.1611390841105,
"learning_rate": 8.000000000000001e-06,
"loss": 1.4924,
"step": 4
},
{
"epoch": 0.06,
"grad_norm": 25.68491589990429,
"learning_rate": 1e-05,
"loss": 1.5215,
"step": 5
},
{
"epoch": 0.07,
"grad_norm": 21.800722806654328,
"learning_rate": 1.2e-05,
"loss": 1.5508,
"step": 6
},
{
"epoch": 0.08,
"grad_norm": 17.19812526688047,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.205,
"step": 7
},
{
"epoch": 0.09,
"grad_norm": 33.24510948526866,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.6376,
"step": 8
},
{
"epoch": 0.1,
"grad_norm": 14.986129451187,
"learning_rate": 1.8e-05,
"loss": 1.454,
"step": 9
},
{
"epoch": 0.11,
"grad_norm": 17.517194747344856,
"learning_rate": 2e-05,
"loss": 1.6559,
"step": 10
},
{
"epoch": 0.13,
"grad_norm": 18.22606604955052,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.5816,
"step": 11
},
{
"epoch": 0.14,
"grad_norm": 26.577328122239592,
"learning_rate": 2.4e-05,
"loss": 1.7884,
"step": 12
},
{
"epoch": 0.15,
"grad_norm": 17.563463503652706,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.4405,
"step": 13
},
{
"epoch": 0.16,
"grad_norm": 19.98739837372538,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.7512,
"step": 14
},
{
"epoch": 0.17,
"grad_norm": 16.25364292099236,
"learning_rate": 3e-05,
"loss": 1.7037,
"step": 15
},
{
"epoch": 0.18,
"grad_norm": 22.328347783615648,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5476,
"step": 16
},
{
"epoch": 0.2,
"grad_norm": 20.71265405702363,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.8108,
"step": 17
},
{
"epoch": 0.21,
"grad_norm": 22.425069186679085,
"learning_rate": 3.6e-05,
"loss": 1.6751,
"step": 18
},
{
"epoch": 0.22,
"grad_norm": 18.646553829771403,
"learning_rate": 3.8e-05,
"loss": 1.7696,
"step": 19
},
{
"epoch": 0.23,
"grad_norm": 16.74801305116847,
"learning_rate": 4e-05,
"loss": 1.7699,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 30.99364381661585,
"learning_rate": 4.2e-05,
"loss": 2.1827,
"step": 21
},
{
"epoch": 0.25,
"grad_norm": 19.761245037191404,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.4776,
"step": 22
},
{
"epoch": 0.25,
"eval_loss": 1.856759786605835,
"eval_runtime": 1.4309,
"eval_samples_per_second": 13.279,
"eval_steps_per_second": 3.494,
"step": 22
},
{
"epoch": 0.26,
"grad_norm": 27.821861354666428,
"learning_rate": 4.600000000000001e-05,
"loss": 1.8677,
"step": 23
},
{
"epoch": 0.28,
"grad_norm": 22.80354998875929,
"learning_rate": 4.8e-05,
"loss": 1.7424,
"step": 24
},
{
"epoch": 0.29,
"grad_norm": 19.920321630615994,
"learning_rate": 5e-05,
"loss": 1.769,
"step": 25
},
{
"epoch": 0.3,
"grad_norm": 23.82621534355921,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.8615,
"step": 26
},
{
"epoch": 0.31,
"grad_norm": 27.63826215875828,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.7164,
"step": 27
},
{
"epoch": 0.32,
"grad_norm": 30.810269474286084,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.8922,
"step": 28
},
{
"epoch": 0.33,
"grad_norm": 23.279443617002265,
"learning_rate": 5.8e-05,
"loss": 1.8507,
"step": 29
},
{
"epoch": 0.34,
"grad_norm": 18.091756285883704,
"learning_rate": 6e-05,
"loss": 1.6537,
"step": 30
},
{
"epoch": 0.36,
"grad_norm": 23.292568819411724,
"learning_rate": 6.2e-05,
"loss": 1.9125,
"step": 31
},
{
"epoch": 0.37,
"grad_norm": 52.21913805211888,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8456,
"step": 32
},
{
"epoch": 0.38,
"grad_norm": 30.771918199992527,
"learning_rate": 6.6e-05,
"loss": 1.9398,
"step": 33
},
{
"epoch": 0.39,
"grad_norm": 40.19880102087157,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3763,
"step": 34
},
{
"epoch": 0.4,
"grad_norm": 28.33779955799431,
"learning_rate": 7e-05,
"loss": 2.0815,
"step": 35
},
{
"epoch": 0.41,
"grad_norm": 54.379783823327905,
"learning_rate": 7.2e-05,
"loss": 2.1032,
"step": 36
},
{
"epoch": 0.43,
"grad_norm": 662.6041427579516,
"learning_rate": 7.4e-05,
"loss": 3.5917,
"step": 37
},
{
"epoch": 0.44,
"grad_norm": 4011.501090614335,
"learning_rate": 7.6e-05,
"loss": 7.3933,
"step": 38
},
{
"epoch": 0.45,
"grad_norm": 753.5816146908044,
"learning_rate": 7.800000000000001e-05,
"loss": 15.8371,
"step": 39
},
{
"epoch": 0.46,
"grad_norm": 291.86609997137384,
"learning_rate": 8e-05,
"loss": 9.0249,
"step": 40
},
{
"epoch": 0.47,
"grad_norm": 922.0326051718562,
"learning_rate": 8.2e-05,
"loss": 9.8922,
"step": 41
},
{
"epoch": 0.48,
"grad_norm": 115.68135873083658,
"learning_rate": 8.4e-05,
"loss": 8.2719,
"step": 42
},
{
"epoch": 0.49,
"grad_norm": 693.1702757980898,
"learning_rate": 8.6e-05,
"loss": 24.1699,
"step": 43
},
{
"epoch": 0.51,
"grad_norm": 219.78828591396768,
"learning_rate": 8.800000000000001e-05,
"loss": 10.1246,
"step": 44
},
{
"epoch": 0.51,
"eval_loss": 8.758976936340332,
"eval_runtime": 1.3819,
"eval_samples_per_second": 13.749,
"eval_steps_per_second": 3.618,
"step": 44
},
{
"epoch": 0.52,
"grad_norm": 101.32256273283853,
"learning_rate": 9e-05,
"loss": 9.1756,
"step": 45
},
{
"epoch": 0.53,
"grad_norm": 11.958039992364316,
"learning_rate": 9.200000000000001e-05,
"loss": 7.7273,
"step": 46
},
{
"epoch": 0.54,
"grad_norm": 3232.8188937829473,
"learning_rate": 9.4e-05,
"loss": 8.7486,
"step": 47
},
{
"epoch": 0.55,
"grad_norm": 34.97640980952823,
"learning_rate": 9.6e-05,
"loss": 8.0394,
"step": 48
},
{
"epoch": 0.56,
"grad_norm": 28.785233378323184,
"learning_rate": 9.8e-05,
"loss": 7.6241,
"step": 49
},
{
"epoch": 0.57,
"grad_norm": 734.1733446536996,
"learning_rate": 0.0001,
"loss": 7.7813,
"step": 50
},
{
"epoch": 0.59,
"grad_norm": 59.66290191764372,
"learning_rate": 0.00010200000000000001,
"loss": 7.6734,
"step": 51
},
{
"epoch": 0.6,
"grad_norm": 18.23606863385719,
"learning_rate": 0.00010400000000000001,
"loss": 7.8783,
"step": 52
},
{
"epoch": 0.61,
"grad_norm": 15.152500341579357,
"learning_rate": 0.00010600000000000002,
"loss": 7.6016,
"step": 53
},
{
"epoch": 0.62,
"grad_norm": 25.1062172290986,
"learning_rate": 0.00010800000000000001,
"loss": 7.7206,
"step": 54
},
{
"epoch": 0.63,
"grad_norm": 30.75760910048317,
"learning_rate": 0.00011000000000000002,
"loss": 7.7526,
"step": 55
},
{
"epoch": 0.64,
"grad_norm": 18.354297835205724,
"learning_rate": 0.00011200000000000001,
"loss": 7.5551,
"step": 56
},
{
"epoch": 0.66,
"grad_norm": 32.48061320811231,
"learning_rate": 0.00011399999999999999,
"loss": 7.6922,
"step": 57
},
{
"epoch": 0.67,
"grad_norm": 26.624723764338736,
"learning_rate": 0.000116,
"loss": 7.5791,
"step": 58
},
{
"epoch": 0.68,
"grad_norm": 13.92226298912218,
"learning_rate": 0.000118,
"loss": 7.3692,
"step": 59
},
{
"epoch": 0.69,
"grad_norm": 15.428105634893914,
"learning_rate": 0.00012,
"loss": 7.564,
"step": 60
},
{
"epoch": 0.7,
"grad_norm": 23.02235056392551,
"learning_rate": 0.000122,
"loss": 7.3948,
"step": 61
},
{
"epoch": 0.71,
"grad_norm": 16.470374126805776,
"learning_rate": 0.000124,
"loss": 7.4646,
"step": 62
},
{
"epoch": 0.72,
"grad_norm": 19.574553557429,
"learning_rate": 0.000126,
"loss": 7.3902,
"step": 63
},
{
"epoch": 0.74,
"grad_norm": 25.549182808604662,
"learning_rate": 0.00012800000000000002,
"loss": 7.5461,
"step": 64
},
{
"epoch": 0.75,
"grad_norm": 618.1147749407063,
"learning_rate": 0.00013000000000000002,
"loss": 8.157,
"step": 65
},
{
"epoch": 0.76,
"grad_norm": 66.0610660913446,
"learning_rate": 0.000132,
"loss": 8.1284,
"step": 66
},
{
"epoch": 0.76,
"eval_loss": 8.004860877990723,
"eval_runtime": 1.3748,
"eval_samples_per_second": 13.82,
"eval_steps_per_second": 3.637,
"step": 66
},
{
"epoch": 0.77,
"grad_norm": 41.635960690924975,
"learning_rate": 0.000134,
"loss": 8.0602,
"step": 67
},
{
"epoch": 0.78,
"grad_norm": 16.679869546723175,
"learning_rate": 0.00013600000000000003,
"loss": 7.8268,
"step": 68
},
{
"epoch": 0.79,
"grad_norm": 33.70142265056291,
"learning_rate": 0.000138,
"loss": 8.1206,
"step": 69
},
{
"epoch": 0.8,
"grad_norm": 43.29315748881759,
"learning_rate": 0.00014,
"loss": 7.7882,
"step": 70
},
{
"epoch": 0.82,
"grad_norm": 28.3799834727588,
"learning_rate": 0.000142,
"loss": 7.5454,
"step": 71
},
{
"epoch": 0.83,
"grad_norm": 30.911781055251613,
"learning_rate": 0.000144,
"loss": 7.7616,
"step": 72
},
{
"epoch": 0.84,
"grad_norm": 56.741836296758365,
"learning_rate": 0.000146,
"loss": 8.1145,
"step": 73
},
{
"epoch": 0.85,
"grad_norm": 13.967249574234195,
"learning_rate": 0.000148,
"loss": 7.6758,
"step": 74
},
{
"epoch": 0.86,
"grad_norm": 42.28071588262047,
"learning_rate": 0.00015000000000000001,
"loss": 7.5507,
"step": 75
},
{
"epoch": 0.87,
"grad_norm": 27.40577229308579,
"learning_rate": 0.000152,
"loss": 7.6852,
"step": 76
},
{
"epoch": 0.89,
"grad_norm": 21.422615778584905,
"learning_rate": 0.000154,
"loss": 7.6071,
"step": 77
},
{
"epoch": 0.9,
"grad_norm": 28.22569647336005,
"learning_rate": 0.00015600000000000002,
"loss": 7.5556,
"step": 78
},
{
"epoch": 0.91,
"grad_norm": 20.075502929468385,
"learning_rate": 0.00015800000000000002,
"loss": 7.529,
"step": 79
},
{
"epoch": 0.92,
"grad_norm": 14.896626626967638,
"learning_rate": 0.00016,
"loss": 7.5352,
"step": 80
},
{
"epoch": 0.93,
"grad_norm": 16.98565517398293,
"learning_rate": 0.000162,
"loss": 7.5734,
"step": 81
},
{
"epoch": 0.94,
"grad_norm": 17.76803489316823,
"learning_rate": 0.000164,
"loss": 7.591,
"step": 82
},
{
"epoch": 0.95,
"grad_norm": 15.601674972992182,
"learning_rate": 0.000166,
"loss": 7.4251,
"step": 83
},
{
"epoch": 0.97,
"grad_norm": 21.36580777081032,
"learning_rate": 0.000168,
"loss": 7.728,
"step": 84
},
{
"epoch": 0.98,
"grad_norm": 19.73380900316605,
"learning_rate": 0.00017,
"loss": 7.3548,
"step": 85
},
{
"epoch": 0.99,
"grad_norm": 16.40313028374367,
"learning_rate": 0.000172,
"loss": 7.3652,
"step": 86
},
{
"epoch": 1.0,
"grad_norm": 11.846265862818466,
"learning_rate": 0.000174,
"loss": 7.4363,
"step": 87
},
{
"epoch": 1.01,
"grad_norm": 11.82176337757903,
"learning_rate": 0.00017600000000000002,
"loss": 7.3967,
"step": 88
},
{
"epoch": 1.01,
"eval_loss": 7.461406707763672,
"eval_runtime": 1.3867,
"eval_samples_per_second": 13.702,
"eval_steps_per_second": 3.606,
"step": 88
},
{
"epoch": 1.02,
"grad_norm": 14.25806078550941,
"learning_rate": 0.00017800000000000002,
"loss": 7.3814,
"step": 89
},
{
"epoch": 1.03,
"grad_norm": 10.077089336320658,
"learning_rate": 0.00018,
"loss": 7.2599,
"step": 90
},
{
"epoch": 1.01,
"grad_norm": 15.159363102480643,
"learning_rate": 0.000182,
"loss": 7.5719,
"step": 91
},
{
"epoch": 1.02,
"grad_norm": 14.678683877513562,
"learning_rate": 0.00018400000000000003,
"loss": 7.4794,
"step": 92
},
{
"epoch": 1.03,
"grad_norm": 16.396549401486602,
"learning_rate": 0.00018600000000000002,
"loss": 7.6211,
"step": 93
},
{
"epoch": 1.05,
"grad_norm": 15.078668930441678,
"learning_rate": 0.000188,
"loss": 7.3625,
"step": 94
},
{
"epoch": 1.06,
"grad_norm": 13.546080322892589,
"learning_rate": 0.00019,
"loss": 7.3016,
"step": 95
},
{
"epoch": 1.07,
"grad_norm": 18.231949750348118,
"learning_rate": 0.000192,
"loss": 7.386,
"step": 96
},
{
"epoch": 1.08,
"grad_norm": 12.444086372431334,
"learning_rate": 0.000194,
"loss": 7.5534,
"step": 97
},
{
"epoch": 1.09,
"grad_norm": 9.994459143045797,
"learning_rate": 0.000196,
"loss": 7.4254,
"step": 98
},
{
"epoch": 1.1,
"grad_norm": 14.889247706503676,
"learning_rate": 0.00019800000000000002,
"loss": 7.3938,
"step": 99
},
{
"epoch": 1.11,
"grad_norm": 19.194847464605004,
"learning_rate": 0.0002,
"loss": 7.3875,
"step": 100
},
{
"epoch": 1.13,
"grad_norm": 19.234057674068023,
"learning_rate": 0.00019998096274980728,
"loss": 7.348,
"step": 101
},
{
"epoch": 1.14,
"grad_norm": 10.248320279074052,
"learning_rate": 0.000199923858247567,
"loss": 7.2365,
"step": 102
},
{
"epoch": 1.15,
"grad_norm": 15.579201434411983,
"learning_rate": 0.00019982870823553308,
"loss": 7.2351,
"step": 103
},
{
"epoch": 1.16,
"grad_norm": 10.951732160571428,
"learning_rate": 0.00019969554894159723,
"loss": 7.4413,
"step": 104
},
{
"epoch": 1.17,
"grad_norm": 12.034336556190219,
"learning_rate": 0.00019952443106549533,
"loss": 7.4256,
"step": 105
},
{
"epoch": 1.18,
"grad_norm": 11.555864069154051,
"learning_rate": 0.00019931541975950378,
"loss": 7.0947,
"step": 106
},
{
"epoch": 1.2,
"grad_norm": 8.079125584036722,
"learning_rate": 0.00019906859460363307,
"loss": 7.3727,
"step": 107
},
{
"epoch": 1.21,
"grad_norm": 10.901301706963714,
"learning_rate": 0.00019878404957532814,
"loss": 7.2419,
"step": 108
},
{
"epoch": 1.22,
"grad_norm": 9.771076993744128,
"learning_rate": 0.0001984618930136869,
"loss": 7.2896,
"step": 109
},
{
"epoch": 1.23,
"grad_norm": 11.395240298688034,
"learning_rate": 0.00019810224757821064,
"loss": 7.2567,
"step": 110
},
{
"epoch": 1.23,
"eval_loss": 7.299312591552734,
"eval_runtime": 1.381,
"eval_samples_per_second": 13.758,
"eval_steps_per_second": 3.621,
"step": 110
},
{
"epoch": 1.24,
"grad_norm": 11.839939762683946,
"learning_rate": 0.00019770525020210204,
"loss": 7.3145,
"step": 111
},
{
"epoch": 1.25,
"grad_norm": 13.454044820108352,
"learning_rate": 0.0001972710520401287,
"loss": 7.3279,
"step": 112
},
{
"epoch": 1.26,
"grad_norm": 10.349510041217494,
"learning_rate": 0.0001967998184110713,
"loss": 7.0995,
"step": 113
},
{
"epoch": 1.28,
"grad_norm": 11.299695940906268,
"learning_rate": 0.00019629172873477995,
"loss": 7.1545,
"step": 114
},
{
"epoch": 1.29,
"grad_norm": 10.300523126150598,
"learning_rate": 0.00019574697646386027,
"loss": 7.2619,
"step": 115
},
{
"epoch": 1.3,
"grad_norm": 12.280692750358627,
"learning_rate": 0.0001951657690100178,
"loss": 7.4703,
"step": 116
},
{
"epoch": 1.31,
"grad_norm": 11.308344462386193,
"learning_rate": 0.0001945483276650868,
"loss": 7.2927,
"step": 117
},
{
"epoch": 1.32,
"grad_norm": 9.46182612207882,
"learning_rate": 0.0001938948875167745,
"loss": 7.1097,
"step": 118
},
{
"epoch": 1.33,
"grad_norm": 8.495214573764272,
"learning_rate": 0.00019320569735915271,
"loss": 7.2199,
"step": 119
},
{
"epoch": 1.34,
"grad_norm": 10.888318204738358,
"learning_rate": 0.00019248101959793066,
"loss": 7.4214,
"step": 120
},
{
"epoch": 1.36,
"grad_norm": 15.655909856272627,
"learning_rate": 0.00019172113015054532,
"loss": 7.4141,
"step": 121
},
{
"epoch": 1.37,
"grad_norm": 14.348570302527095,
"learning_rate": 0.00019092631834110723,
"loss": 7.2034,
"step": 122
},
{
"epoch": 1.38,
"grad_norm": 11.216620878113865,
"learning_rate": 0.0001900968867902419,
"loss": 7.1925,
"step": 123
},
{
"epoch": 1.39,
"grad_norm": 15.214529842718143,
"learning_rate": 0.00018923315129986835,
"loss": 7.204,
"step": 124
},
{
"epoch": 1.4,
"grad_norm": 24.933551829531577,
"learning_rate": 0.00018833544073295917,
"loss": 7.5231,
"step": 125
},
{
"epoch": 1.41,
"grad_norm": 12.291980045440198,
"learning_rate": 0.00018740409688832764,
"loss": 7.2323,
"step": 126
},
{
"epoch": 1.43,
"grad_norm": 19.503396545387695,
"learning_rate": 0.00018643947437048944,
"loss": 7.3214,
"step": 127
},
{
"epoch": 1.44,
"grad_norm": 13.982404823681808,
"learning_rate": 0.00018544194045464886,
"loss": 7.4008,
"step": 128
},
{
"epoch": 1.45,
"grad_norm": 13.775851829485163,
"learning_rate": 0.00018441187494686053,
"loss": 7.2854,
"step": 129
},
{
"epoch": 1.46,
"grad_norm": 16.44073199273777,
"learning_rate": 0.0001833496700394202,
"loss": 7.4218,
"step": 130
},
{
"epoch": 1.47,
"grad_norm": 21.66328419423237,
"learning_rate": 0.00018225573016153945,
"loss": 7.3589,
"step": 131
},
{
"epoch": 1.48,
"grad_norm": 14.027278482513156,
"learning_rate": 0.00018113047182536127,
"loss": 7.3329,
"step": 132
},
{
"epoch": 1.48,
"eval_loss": 7.374873161315918,
"eval_runtime": 1.3815,
"eval_samples_per_second": 13.753,
"eval_steps_per_second": 3.619,
"step": 132
},
{
"epoch": 1.49,
"grad_norm": 10.89412121909016,
"learning_rate": 0.00017997432346737524,
"loss": 7.3277,
"step": 133
},
{
"epoch": 1.51,
"grad_norm": 18.182727305484867,
"learning_rate": 0.00017878772528529232,
"loss": 7.3505,
"step": 134
},
{
"epoch": 1.52,
"grad_norm": 18.738647279231998,
"learning_rate": 0.000177571129070442,
"loss": 7.4395,
"step": 135
},
{
"epoch": 1.53,
"grad_norm": 8.544634593385059,
"learning_rate": 0.00017632499803575474,
"loss": 7.3132,
"step": 136
},
{
"epoch": 1.54,
"grad_norm": 23.787945119488832,
"learning_rate": 0.00017504980663939613,
"loss": 7.3635,
"step": 137
},
{
"epoch": 1.55,
"grad_norm": 15.988837162794528,
"learning_rate": 0.00017374604040411935,
"loss": 7.298,
"step": 138
},
{
"epoch": 1.56,
"grad_norm": 8.594141503866256,
"learning_rate": 0.00017241419573240462,
"loss": 7.2263,
"step": 139
},
{
"epoch": 1.57,
"grad_norm": 10.834111660116264,
"learning_rate": 0.00017105477971745666,
"loss": 7.2234,
"step": 140
},
{
"epoch": 1.59,
"grad_norm": 20.898663112099943,
"learning_rate": 0.00016966830995013133,
"loss": 7.4312,
"step": 141
},
{
"epoch": 1.6,
"grad_norm": 9.882602517532073,
"learning_rate": 0.00016825531432186543,
"loss": 7.3389,
"step": 142
},
{
"epoch": 1.61,
"grad_norm": 7.435249821686469,
"learning_rate": 0.00016681633082368498,
"loss": 7.2015,
"step": 143
},
{
"epoch": 1.62,
"grad_norm": 13.215068331486576,
"learning_rate": 0.0001653519073413675,
"loss": 7.1425,
"step": 144
},
{
"epoch": 1.63,
"grad_norm": 11.754841557793458,
"learning_rate": 0.00016386260144683745,
"loss": 7.3101,
"step": 145
},
{
"epoch": 1.64,
"grad_norm": 7.674786980372276,
"learning_rate": 0.00016234898018587337,
"loss": 7.1703,
"step": 146
},
{
"epoch": 1.66,
"grad_norm": 8.30389279917059,
"learning_rate": 0.00016081161986220807,
"loss": 7.2516,
"step": 147
},
{
"epoch": 1.67,
"grad_norm": 14.325137493545546,
"learning_rate": 0.00015925110581810394,
"loss": 7.3326,
"step": 148
},
{
"epoch": 1.68,
"grad_norm": 9.943124774719692,
"learning_rate": 0.00015766803221148673,
"loss": 7.5501,
"step": 149
},
{
"epoch": 1.69,
"grad_norm": 11.710815673357324,
"learning_rate": 0.00015606300178972287,
"loss": 7.3344,
"step": 150
},
{
"epoch": 1.7,
"grad_norm": 8.287278539768234,
"learning_rate": 0.00015443662566012645,
"loss": 7.2426,
"step": 151
},
{
"epoch": 1.71,
"grad_norm": 8.045767995001754,
"learning_rate": 0.00015278952305728324,
"loss": 7.2319,
"step": 152
},
{
"epoch": 1.72,
"grad_norm": 16.213767636990735,
"learning_rate": 0.00015112232110728015,
"loss": 7.2226,
"step": 153
},
{
"epoch": 1.74,
"grad_norm": 10.115986520778595,
"learning_rate": 0.00014943565458893,
"loss": 7.0671,
"step": 154
},
{
"epoch": 1.74,
"eval_loss": 7.336472511291504,
"eval_runtime": 1.3763,
"eval_samples_per_second": 13.806,
"eval_steps_per_second": 3.633,
"step": 154
},
{
"epoch": 1.75,
"grad_norm": 7.370162670560762,
"learning_rate": 0.00014773016569208283,
"loss": 7.2456,
"step": 155
},
{
"epoch": 1.76,
"grad_norm": 14.854609514494987,
"learning_rate": 0.00014600650377311522,
"loss": 7.0447,
"step": 156
},
{
"epoch": 1.77,
"grad_norm": 9.984605765623492,
"learning_rate": 0.0001442653251076912,
"loss": 7.1948,
"step": 157
},
{
"epoch": 1.78,
"grad_norm": 11.053561117527067,
"learning_rate": 0.00014250729264088843,
"loss": 7.1485,
"step": 158
},
{
"epoch": 1.79,
"grad_norm": 11.604464862126479,
"learning_rate": 0.00014073307573478526,
"loss": 7.4198,
"step": 159
},
{
"epoch": 1.8,
"grad_norm": 8.62267592810566,
"learning_rate": 0.00013894334991360448,
"loss": 7.3045,
"step": 160
},
{
"epoch": 1.82,
"grad_norm": 10.961255591651343,
"learning_rate": 0.00013713879660651068,
"loss": 6.9983,
"step": 161
},
{
"epoch": 1.83,
"grad_norm": 11.12104950103157,
"learning_rate": 0.0001353201028881598,
"loss": 7.1046,
"step": 162
},
{
"epoch": 1.84,
"grad_norm": 8.807345567053673,
"learning_rate": 0.00013348796121709862,
"loss": 7.3378,
"step": 163
},
{
"epoch": 1.85,
"grad_norm": 10.666053356976384,
"learning_rate": 0.00013164306917211476,
"loss": 7.0008,
"step": 164
},
{
"epoch": 1.86,
"grad_norm": 7.835224761952429,
"learning_rate": 0.000129786129186637,
"loss": 6.9881,
"step": 165
},
{
"epoch": 1.87,
"grad_norm": 19.4062633667929,
"learning_rate": 0.00012791784828128724,
"loss": 7.2579,
"step": 166
},
{
"epoch": 1.89,
"grad_norm": 10.113263144537674,
"learning_rate": 0.00012603893779468604,
"loss": 7.3091,
"step": 167
},
{
"epoch": 1.9,
"grad_norm": 10.870503515462726,
"learning_rate": 0.0001241501131126138,
"loss": 7.4527,
"step": 168
},
{
"epoch": 1.91,
"grad_norm": 8.533887294828766,
"learning_rate": 0.00012225209339563145,
"loss": 7.2855,
"step": 169
},
{
"epoch": 1.92,
"grad_norm": 13.486746604884923,
"learning_rate": 0.0001203456013052634,
"loss": 7.2705,
"step": 170
},
{
"epoch": 1.93,
"grad_norm": 10.69251762526038,
"learning_rate": 0.00011843136272884794,
"loss": 7.0932,
"step": 171
},
{
"epoch": 1.94,
"grad_norm": 9.124923914761991,
"learning_rate": 0.00011651010650315923,
"loss": 7.3754,
"step": 172
},
{
"epoch": 1.95,
"grad_norm": 13.481473855966252,
"learning_rate": 0.00011458256413690633,
"loss": 7.3104,
"step": 173
},
{
"epoch": 1.97,
"grad_norm": 13.187202052506544,
"learning_rate": 0.00011264946953221496,
"loss": 7.3614,
"step": 174
},
{
"epoch": 1.98,
"grad_norm": 13.527193725705766,
"learning_rate": 0.00011071155870519777,
"loss": 7.2734,
"step": 175
},
{
"epoch": 1.99,
"grad_norm": 8.023369511183457,
"learning_rate": 0.00010876956950572006,
"loss": 7.4786,
"step": 176
},
{
"epoch": 1.99,
"eval_loss": 7.319369316101074,
"eval_runtime": 1.3976,
"eval_samples_per_second": 13.595,
"eval_steps_per_second": 3.578,
"step": 176
},
{
"epoch": 2.0,
"grad_norm": 11.41077205951204,
"learning_rate": 0.0001068242413364671,
"loss": 7.2116,
"step": 177
},
{
"epoch": 2.01,
"grad_norm": 9.551865991569315,
"learning_rate": 0.00010487631487142017,
"loss": 7.2411,
"step": 178
},
{
"epoch": 2.02,
"grad_norm": 11.576046488111642,
"learning_rate": 0.00010292653177384876,
"loss": 7.4401,
"step": 179
},
{
"epoch": 2.01,
"grad_norm": 9.356673167513755,
"learning_rate": 0.00010097563441392581,
"loss": 7.3184,
"step": 180
},
{
"epoch": 2.02,
"grad_norm": 12.119641565241189,
"learning_rate": 9.90243655860742e-05,
"loss": 7.2285,
"step": 181
},
{
"epoch": 2.03,
"grad_norm": 11.06643382673938,
"learning_rate": 9.707346822615128e-05,
"loss": 7.1729,
"step": 182
},
{
"epoch": 2.05,
"grad_norm": 9.198991504624212,
"learning_rate": 9.512368512857984e-05,
"loss": 7.2619,
"step": 183
},
{
"epoch": 2.06,
"grad_norm": 14.240944645441582,
"learning_rate": 9.317575866353292e-05,
"loss": 7.5778,
"step": 184
},
{
"epoch": 2.07,
"grad_norm": 8.473455381717486,
"learning_rate": 9.123043049427995e-05,
"loss": 7.2432,
"step": 185
},
{
"epoch": 2.08,
"grad_norm": 12.027303786674432,
"learning_rate": 8.928844129480227e-05,
"loss": 7.4036,
"step": 186
},
{
"epoch": 2.09,
"grad_norm": 10.45148339945213,
"learning_rate": 8.735053046778506e-05,
"loss": 7.2052,
"step": 187
},
{
"epoch": 2.1,
"grad_norm": 12.816375890508084,
"learning_rate": 8.541743586309365e-05,
"loss": 7.1882,
"step": 188
},
{
"epoch": 2.11,
"grad_norm": 12.872264386256504,
"learning_rate": 8.348989349684076e-05,
"loss": 7.1129,
"step": 189
},
{
"epoch": 2.13,
"grad_norm": 8.712185045895534,
"learning_rate": 8.156863727115211e-05,
"loss": 7.3354,
"step": 190
},
{
"epoch": 2.14,
"grad_norm": 7.169962044034168,
"learning_rate": 7.965439869473664e-05,
"loss": 7.1493,
"step": 191
},
{
"epoch": 2.15,
"grad_norm": 9.20643667535514,
"learning_rate": 7.774790660436858e-05,
"loss": 7.1867,
"step": 192
},
{
"epoch": 2.16,
"grad_norm": 9.71231661001151,
"learning_rate": 7.584988688738622e-05,
"loss": 7.1941,
"step": 193
},
{
"epoch": 2.17,
"grad_norm": 9.869652181292283,
"learning_rate": 7.396106220531398e-05,
"loss": 7.2606,
"step": 194
},
{
"epoch": 2.18,
"grad_norm": 7.820315914553451,
"learning_rate": 7.208215171871277e-05,
"loss": 7.1575,
"step": 195
},
{
"epoch": 2.2,
"grad_norm": 8.668852408202328,
"learning_rate": 7.021387081336301e-05,
"loss": 7.3339,
"step": 196
},
{
"epoch": 2.21,
"grad_norm": 6.97843034949997,
"learning_rate": 6.835693082788525e-05,
"loss": 7.307,
"step": 197
},
{
"epoch": 2.22,
"grad_norm": 16.571225038227503,
"learning_rate": 6.651203878290139e-05,
"loss": 7.3548,
"step": 198
},
{
"epoch": 2.22,
"eval_loss": 7.309223175048828,
"eval_runtime": 1.3716,
"eval_samples_per_second": 13.852,
"eval_steps_per_second": 3.645,
"step": 198
},
{
"epoch": 2.23,
"grad_norm": 14.645439276497052,
"learning_rate": 6.46798971118402e-05,
"loss": 7.1198,
"step": 199
},
{
"epoch": 2.24,
"grad_norm": 11.180148474401758,
"learning_rate": 6.286120339348935e-05,
"loss": 7.383,
"step": 200
},
{
"epoch": 2.25,
"grad_norm": 8.297433608048966,
"learning_rate": 6.105665008639557e-05,
"loss": 7.0708,
"step": 201
},
{
"epoch": 2.26,
"grad_norm": 7.35350265603106,
"learning_rate": 5.926692426521474e-05,
"loss": 7.2686,
"step": 202
},
{
"epoch": 2.28,
"grad_norm": 10.931447125375616,
"learning_rate": 5.749270735911158e-05,
"loss": 7.0715,
"step": 203
},
{
"epoch": 2.29,
"grad_norm": 12.580996736421003,
"learning_rate": 5.573467489230879e-05,
"loss": 7.3576,
"step": 204
},
{
"epoch": 2.3,
"grad_norm": 12.648533406639174,
"learning_rate": 5.399349622688479e-05,
"loss": 7.3546,
"step": 205
},
{
"epoch": 2.31,
"grad_norm": 8.448979459452485,
"learning_rate": 5.226983430791722e-05,
"loss": 7.0968,
"step": 206
},
{
"epoch": 2.32,
"grad_norm": 14.75106031841705,
"learning_rate": 5.0564345411070025e-05,
"loss": 7.2726,
"step": 207
},
{
"epoch": 2.33,
"grad_norm": 7.623885276483663,
"learning_rate": 4.8877678892719866e-05,
"loss": 7.2646,
"step": 208
},
{
"epoch": 2.34,
"grad_norm": 11.112819218207525,
"learning_rate": 4.721047694271676e-05,
"loss": 7.1452,
"step": 209
},
{
"epoch": 2.36,
"grad_norm": 15.00591466641024,
"learning_rate": 4.556337433987359e-05,
"loss": 6.9634,
"step": 210
},
{
"epoch": 2.37,
"grad_norm": 9.000881151933429,
"learning_rate": 4.393699821027716e-05,
"loss": 7.2109,
"step": 211
},
{
"epoch": 2.38,
"grad_norm": 8.677018023760695,
"learning_rate": 4.2331967788513295e-05,
"loss": 7.1976,
"step": 212
},
{
"epoch": 2.39,
"grad_norm": 7.021254016561991,
"learning_rate": 4.074889418189608e-05,
"loss": 7.2308,
"step": 213
},
{
"epoch": 2.4,
"grad_norm": 14.097624320239541,
"learning_rate": 3.9188380137791936e-05,
"loss": 7.2678,
"step": 214
},
{
"epoch": 2.41,
"grad_norm": 7.660009091922155,
"learning_rate": 3.7651019814126654e-05,
"loss": 7.2609,
"step": 215
},
{
"epoch": 2.43,
"grad_norm": 6.879429170747529,
"learning_rate": 3.613739855316257e-05,
"loss": 7.1545,
"step": 216
},
{
"epoch": 2.44,
"grad_norm": 9.342549224552343,
"learning_rate": 3.46480926586325e-05,
"loss": 7.122,
"step": 217
},
{
"epoch": 2.45,
"grad_norm": 9.584840694689616,
"learning_rate": 3.3183669176315045e-05,
"loss": 7.2049,
"step": 218
},
{
"epoch": 2.46,
"grad_norm": 14.37215919620074,
"learning_rate": 3.174468567813461e-05,
"loss": 7.4374,
"step": 219
},
{
"epoch": 2.47,
"grad_norm": 6.257502820833114,
"learning_rate": 3.033169004986873e-05,
"loss": 7.1782,
"step": 220
},
{
"epoch": 2.47,
"eval_loss": 7.296360492706299,
"eval_runtime": 1.3751,
"eval_samples_per_second": 13.818,
"eval_steps_per_second": 3.636,
"step": 220
},
{
"epoch": 2.48,
"grad_norm": 7.202404193022492,
"learning_rate": 2.894522028254334e-05,
"loss": 7.1248,
"step": 221
},
{
"epoch": 2.49,
"grad_norm": 11.03004064225623,
"learning_rate": 2.7585804267595384e-05,
"loss": 7.4726,
"step": 222
},
{
"epoch": 2.51,
"grad_norm": 10.208828744090669,
"learning_rate": 2.6253959595880673e-05,
"loss": 7.1886,
"step": 223
},
{
"epoch": 2.52,
"grad_norm": 7.194649499092706,
"learning_rate": 2.495019336060387e-05,
"loss": 7.2576,
"step": 224
},
{
"epoch": 2.53,
"grad_norm": 15.063508439238962,
"learning_rate": 2.367500196424529e-05,
"loss": 6.924,
"step": 225
},
{
"epoch": 2.54,
"grad_norm": 9.528135297299904,
"learning_rate": 2.242887092955801e-05,
"loss": 7.1941,
"step": 226
},
{
"epoch": 2.55,
"grad_norm": 7.719290154588222,
"learning_rate": 2.121227471470768e-05,
"loss": 7.3391,
"step": 227
},
{
"epoch": 2.56,
"grad_norm": 7.714834107984481,
"learning_rate": 2.002567653262479e-05,
"loss": 7.1969,
"step": 228
},
{
"epoch": 2.57,
"grad_norm": 8.228923346547797,
"learning_rate": 1.8869528174638752e-05,
"loss": 7.1543,
"step": 229
},
{
"epoch": 2.59,
"grad_norm": 6.001756411009184,
"learning_rate": 1.774426983846058e-05,
"loss": 7.2465,
"step": 230
},
{
"epoch": 2.6,
"grad_norm": 10.434576481704424,
"learning_rate": 1.6650329960579792e-05,
"loss": 7.2623,
"step": 231
},
{
"epoch": 2.61,
"grad_norm": 8.095148267701724,
"learning_rate": 1.5588125053139468e-05,
"loss": 7.1761,
"step": 232
},
{
"epoch": 2.62,
"grad_norm": 9.145578669056132,
"learning_rate": 1.4558059545351143e-05,
"loss": 7.1295,
"step": 233
},
{
"epoch": 2.63,
"grad_norm": 7.413949304325555,
"learning_rate": 1.3560525629510568e-05,
"loss": 7.1572,
"step": 234
},
{
"epoch": 2.64,
"grad_norm": 8.238266895583749,
"learning_rate": 1.259590311167238e-05,
"loss": 7.2317,
"step": 235
},
{
"epoch": 2.66,
"grad_norm": 7.150286325977236,
"learning_rate": 1.166455926704082e-05,
"loss": 7.3381,
"step": 236
},
{
"epoch": 2.67,
"grad_norm": 10.918607427652104,
"learning_rate": 1.0766848700131648e-05,
"loss": 7.3925,
"step": 237
},
{
"epoch": 2.68,
"grad_norm": 6.6969446531973285,
"learning_rate": 9.903113209758096e-06,
"loss": 7.2799,
"step": 238
},
{
"epoch": 2.69,
"grad_norm": 13.477825461653845,
"learning_rate": 9.073681658892775e-06,
"loss": 6.901,
"step": 239
},
{
"epoch": 2.7,
"grad_norm": 6.783585528560174,
"learning_rate": 8.278869849454718e-06,
"loss": 7.2819,
"step": 240
},
{
"epoch": 2.71,
"grad_norm": 9.548057270672551,
"learning_rate": 7.5189804020693536e-06,
"loss": 7.1192,
"step": 241
},
{
"epoch": 2.72,
"grad_norm": 7.742180355619181,
"learning_rate": 6.794302640847294e-06,
"loss": 7.2729,
"step": 242
},
{
"epoch": 2.72,
"eval_loss": 7.292217254638672,
"eval_runtime": 1.3839,
"eval_samples_per_second": 13.729,
"eval_steps_per_second": 3.613,
"step": 242
},
{
"epoch": 2.74,
"grad_norm": 10.845656873156827,
"learning_rate": 6.1051124832254944e-06,
"loss": 6.9693,
"step": 243
},
{
"epoch": 2.75,
"grad_norm": 10.323147542445835,
"learning_rate": 5.451672334913216e-06,
"loss": 7.3785,
"step": 244
},
{
"epoch": 2.76,
"grad_norm": 10.41796325752105,
"learning_rate": 4.834230989982213e-06,
"loss": 7.3632,
"step": 245
},
{
"epoch": 2.77,
"grad_norm": 6.590462249149956,
"learning_rate": 4.253023536139733e-06,
"loss": 7.279,
"step": 246
},
{
"epoch": 2.78,
"grad_norm": 9.637387713657647,
"learning_rate": 3.7082712652200867e-06,
"loss": 7.2857,
"step": 247
},
{
"epoch": 2.79,
"grad_norm": 5.983998578137723,
"learning_rate": 3.2001815889286856e-06,
"loss": 7.2269,
"step": 248
},
{
"epoch": 2.8,
"grad_norm": 6.750577266168699,
"learning_rate": 2.728947959871353e-06,
"loss": 7.2672,
"step": 249
},
{
"epoch": 2.82,
"grad_norm": 8.828265865856226,
"learning_rate": 2.294749797897955e-06,
"loss": 7.1355,
"step": 250
},
{
"epoch": 2.83,
"grad_norm": 8.092540989425222,
"learning_rate": 1.8977524217893783e-06,
"loss": 7.079,
"step": 251
},
{
"epoch": 2.84,
"grad_norm": 14.298932570519032,
"learning_rate": 1.5381069863131037e-06,
"loss": 7.1462,
"step": 252
},
{
"epoch": 2.85,
"grad_norm": 6.9722905081773465,
"learning_rate": 1.2159504246718522e-06,
"loss": 7.1766,
"step": 253
},
{
"epoch": 2.86,
"grad_norm": 8.429744971276516,
"learning_rate": 9.314053963669245e-07,
"loss": 7.3844,
"step": 254
},
{
"epoch": 2.87,
"grad_norm": 7.0927677230675386,
"learning_rate": 6.845802404962243e-07,
"loss": 7.3318,
"step": 255
},
{
"epoch": 2.89,
"grad_norm": 6.6033524293092265,
"learning_rate": 4.7556893450466653e-07,
"loss": 7.1159,
"step": 256
},
{
"epoch": 2.9,
"grad_norm": 9.59870072155561,
"learning_rate": 3.044510584027771e-07,
"loss": 7.3733,
"step": 257
},
{
"epoch": 2.91,
"grad_norm": 7.255692976305313,
"learning_rate": 1.7129176446692984e-07,
"loss": 7.2345,
"step": 258
},
{
"epoch": 2.92,
"grad_norm": 7.239401601436061,
"learning_rate": 7.614175243301213e-08,
"loss": 7.2672,
"step": 259
},
{
"epoch": 2.93,
"grad_norm": 6.342843427851908,
"learning_rate": 1.9037250192732726e-08,
"loss": 7.2741,
"step": 260
},
{
"epoch": 2.94,
"grad_norm": 8.76817744282957,
"learning_rate": 0.0,
"loss": 7.2543,
"step": 261
}
],
"logging_steps": 1,
"max_steps": 261,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 87,
"total_flos": 1.8244065215014502e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}