{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 0,
"global_step": 560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0017857142857142857,
"grad_norm": 0.451171875,
"learning_rate": 9.982142857142858e-06,
"loss": 1.8264,
"step": 1
},
{
"epoch": 0.0035714285714285713,
"grad_norm": 0.44921875,
"learning_rate": 9.964285714285714e-06,
"loss": 1.7787,
"step": 2
},
{
"epoch": 0.005357142857142857,
"grad_norm": 0.44140625,
"learning_rate": 9.946428571428572e-06,
"loss": 1.83,
"step": 3
},
{
"epoch": 0.007142857142857143,
"grad_norm": 0.427734375,
"learning_rate": 9.92857142857143e-06,
"loss": 1.8276,
"step": 4
},
{
"epoch": 0.008928571428571428,
"grad_norm": 0.40234375,
"learning_rate": 9.910714285714288e-06,
"loss": 1.8274,
"step": 5
},
{
"epoch": 0.010714285714285714,
"grad_norm": 0.376953125,
"learning_rate": 9.892857142857143e-06,
"loss": 1.854,
"step": 6
},
{
"epoch": 0.0125,
"grad_norm": 0.353515625,
"learning_rate": 9.875000000000001e-06,
"loss": 1.7426,
"step": 7
},
{
"epoch": 0.014285714285714285,
"grad_norm": 0.345703125,
"learning_rate": 9.857142857142859e-06,
"loss": 1.8109,
"step": 8
},
{
"epoch": 0.01607142857142857,
"grad_norm": 0.330078125,
"learning_rate": 9.839285714285715e-06,
"loss": 1.8108,
"step": 9
},
{
"epoch": 0.017857142857142856,
"grad_norm": 0.310546875,
"learning_rate": 9.821428571428573e-06,
"loss": 1.8006,
"step": 10
},
{
"epoch": 0.019642857142857142,
"grad_norm": 0.31640625,
"learning_rate": 9.803571428571428e-06,
"loss": 1.7489,
"step": 11
},
{
"epoch": 0.02142857142857143,
"grad_norm": 0.28515625,
"learning_rate": 9.785714285714286e-06,
"loss": 1.6758,
"step": 12
},
{
"epoch": 0.023214285714285715,
"grad_norm": 0.306640625,
"learning_rate": 9.767857142857144e-06,
"loss": 1.6705,
"step": 13
},
{
"epoch": 0.025,
"grad_norm": 0.283203125,
"learning_rate": 9.75e-06,
"loss": 1.6707,
"step": 14
},
{
"epoch": 0.026785714285714284,
"grad_norm": 0.29296875,
"learning_rate": 9.732142857142858e-06,
"loss": 1.7135,
"step": 15
},
{
"epoch": 0.02857142857142857,
"grad_norm": 0.27734375,
"learning_rate": 9.714285714285715e-06,
"loss": 1.653,
"step": 16
},
{
"epoch": 0.030357142857142857,
"grad_norm": 0.29296875,
"learning_rate": 9.696428571428573e-06,
"loss": 1.6137,
"step": 17
},
{
"epoch": 0.03214285714285714,
"grad_norm": 0.287109375,
"learning_rate": 9.678571428571429e-06,
"loss": 1.6772,
"step": 18
},
{
"epoch": 0.033928571428571426,
"grad_norm": 0.373046875,
"learning_rate": 9.660714285714287e-06,
"loss": 1.5118,
"step": 19
},
{
"epoch": 0.03571428571428571,
"grad_norm": 0.265625,
"learning_rate": 9.642857142857144e-06,
"loss": 1.5881,
"step": 20
},
{
"epoch": 0.0375,
"grad_norm": 0.27734375,
"learning_rate": 9.625e-06,
"loss": 1.6045,
"step": 21
},
{
"epoch": 0.039285714285714285,
"grad_norm": 0.2431640625,
"learning_rate": 9.607142857142858e-06,
"loss": 1.5647,
"step": 22
},
{
"epoch": 0.04107142857142857,
"grad_norm": 0.279296875,
"learning_rate": 9.589285714285716e-06,
"loss": 1.5269,
"step": 23
},
{
"epoch": 0.04285714285714286,
"grad_norm": 0.2314453125,
"learning_rate": 9.571428571428573e-06,
"loss": 1.5098,
"step": 24
},
{
"epoch": 0.044642857142857144,
"grad_norm": 0.23828125,
"learning_rate": 9.55357142857143e-06,
"loss": 1.5816,
"step": 25
},
{
"epoch": 0.04642857142857143,
"grad_norm": 0.220703125,
"learning_rate": 9.535714285714287e-06,
"loss": 1.5997,
"step": 26
},
{
"epoch": 0.048214285714285716,
"grad_norm": 0.6875,
"learning_rate": 9.517857142857143e-06,
"loss": 1.5325,
"step": 27
},
{
"epoch": 0.05,
"grad_norm": 0.337890625,
"learning_rate": 9.5e-06,
"loss": 1.5751,
"step": 28
},
{
"epoch": 0.05178571428571429,
"grad_norm": 0.2109375,
"learning_rate": 9.482142857142858e-06,
"loss": 1.4823,
"step": 29
},
{
"epoch": 0.05357142857142857,
"grad_norm": 0.201171875,
"learning_rate": 9.464285714285714e-06,
"loss": 1.4963,
"step": 30
},
{
"epoch": 0.055357142857142855,
"grad_norm": 0.1845703125,
"learning_rate": 9.446428571428572e-06,
"loss": 1.3804,
"step": 31
},
{
"epoch": 0.05714285714285714,
"grad_norm": 0.1962890625,
"learning_rate": 9.42857142857143e-06,
"loss": 1.4485,
"step": 32
},
{
"epoch": 0.05892857142857143,
"grad_norm": 0.1904296875,
"learning_rate": 9.410714285714286e-06,
"loss": 1.4925,
"step": 33
},
{
"epoch": 0.060714285714285714,
"grad_norm": 0.201171875,
"learning_rate": 9.392857142857143e-06,
"loss": 1.5104,
"step": 34
},
{
"epoch": 0.0625,
"grad_norm": 0.1962890625,
"learning_rate": 9.375000000000001e-06,
"loss": 1.4536,
"step": 35
},
{
"epoch": 0.06428571428571428,
"grad_norm": 0.189453125,
"learning_rate": 9.357142857142859e-06,
"loss": 1.4875,
"step": 36
},
{
"epoch": 0.06607142857142857,
"grad_norm": 0.193359375,
"learning_rate": 9.339285714285715e-06,
"loss": 1.5001,
"step": 37
},
{
"epoch": 0.06785714285714285,
"grad_norm": 0.18359375,
"learning_rate": 9.321428571428572e-06,
"loss": 1.4814,
"step": 38
},
{
"epoch": 0.06964285714285715,
"grad_norm": 0.177734375,
"learning_rate": 9.30357142857143e-06,
"loss": 1.4407,
"step": 39
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.177734375,
"learning_rate": 9.285714285714288e-06,
"loss": 1.4605,
"step": 40
},
{
"epoch": 0.07321428571428572,
"grad_norm": 0.205078125,
"learning_rate": 9.267857142857144e-06,
"loss": 1.4516,
"step": 41
},
{
"epoch": 0.075,
"grad_norm": 0.1669921875,
"learning_rate": 9.250000000000001e-06,
"loss": 1.3603,
"step": 42
},
{
"epoch": 0.07678571428571429,
"grad_norm": 0.2392578125,
"learning_rate": 9.232142857142859e-06,
"loss": 1.5122,
"step": 43
},
{
"epoch": 0.07857142857142857,
"grad_norm": 0.18359375,
"learning_rate": 9.214285714285715e-06,
"loss": 1.499,
"step": 44
},
{
"epoch": 0.08035714285714286,
"grad_norm": 0.1787109375,
"learning_rate": 9.196428571428571e-06,
"loss": 1.4101,
"step": 45
},
{
"epoch": 0.08214285714285714,
"grad_norm": 0.22265625,
"learning_rate": 9.178571428571429e-06,
"loss": 1.4654,
"step": 46
},
{
"epoch": 0.08392857142857142,
"grad_norm": 0.1904296875,
"learning_rate": 9.160714285714286e-06,
"loss": 1.3772,
"step": 47
},
{
"epoch": 0.08571428571428572,
"grad_norm": 0.1689453125,
"learning_rate": 9.142857142857144e-06,
"loss": 1.3117,
"step": 48
},
{
"epoch": 0.0875,
"grad_norm": 0.177734375,
"learning_rate": 9.125e-06,
"loss": 1.4509,
"step": 49
},
{
"epoch": 0.08928571428571429,
"grad_norm": 0.1875,
"learning_rate": 9.107142857142858e-06,
"loss": 1.4234,
"step": 50
},
{
"epoch": 0.09107142857142857,
"grad_norm": 0.16015625,
"learning_rate": 9.089285714285715e-06,
"loss": 1.3606,
"step": 51
},
{
"epoch": 0.09285714285714286,
"grad_norm": 0.1845703125,
"learning_rate": 9.071428571428573e-06,
"loss": 1.3514,
"step": 52
},
{
"epoch": 0.09464285714285714,
"grad_norm": 0.1875,
"learning_rate": 9.053571428571429e-06,
"loss": 1.4011,
"step": 53
},
{
"epoch": 0.09642857142857143,
"grad_norm": 0.16796875,
"learning_rate": 9.035714285714287e-06,
"loss": 1.3582,
"step": 54
},
{
"epoch": 0.09821428571428571,
"grad_norm": 0.1728515625,
"learning_rate": 9.017857142857144e-06,
"loss": 1.3714,
"step": 55
},
{
"epoch": 0.1,
"grad_norm": 0.1923828125,
"learning_rate": 9e-06,
"loss": 1.34,
"step": 56
},
{
"epoch": 0.10178571428571428,
"grad_norm": 0.1484375,
"learning_rate": 8.982142857142858e-06,
"loss": 1.3457,
"step": 57
},
{
"epoch": 0.10357142857142858,
"grad_norm": 0.177734375,
"learning_rate": 8.964285714285716e-06,
"loss": 1.3943,
"step": 58
},
{
"epoch": 0.10535714285714286,
"grad_norm": 0.1787109375,
"learning_rate": 8.946428571428573e-06,
"loss": 1.4559,
"step": 59
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.1552734375,
"learning_rate": 8.92857142857143e-06,
"loss": 1.3399,
"step": 60
},
{
"epoch": 0.10892857142857143,
"grad_norm": 0.1572265625,
"learning_rate": 8.910714285714287e-06,
"loss": 1.3514,
"step": 61
},
{
"epoch": 0.11071428571428571,
"grad_norm": 0.154296875,
"learning_rate": 8.892857142857143e-06,
"loss": 1.3186,
"step": 62
},
{
"epoch": 0.1125,
"grad_norm": 0.2119140625,
"learning_rate": 8.875e-06,
"loss": 1.3524,
"step": 63
},
{
"epoch": 0.11428571428571428,
"grad_norm": 0.1767578125,
"learning_rate": 8.857142857142858e-06,
"loss": 1.3673,
"step": 64
},
{
"epoch": 0.11607142857142858,
"grad_norm": 0.2177734375,
"learning_rate": 8.839285714285714e-06,
"loss": 1.3364,
"step": 65
},
{
"epoch": 0.11785714285714285,
"grad_norm": 0.16015625,
"learning_rate": 8.821428571428572e-06,
"loss": 1.3614,
"step": 66
},
{
"epoch": 0.11964285714285715,
"grad_norm": 0.1591796875,
"learning_rate": 8.80357142857143e-06,
"loss": 1.32,
"step": 67
},
{
"epoch": 0.12142857142857143,
"grad_norm": 0.1650390625,
"learning_rate": 8.785714285714286e-06,
"loss": 1.3709,
"step": 68
},
{
"epoch": 0.12321428571428572,
"grad_norm": 0.16015625,
"learning_rate": 8.767857142857143e-06,
"loss": 1.3182,
"step": 69
},
{
"epoch": 0.125,
"grad_norm": 0.30078125,
"learning_rate": 8.750000000000001e-06,
"loss": 1.3977,
"step": 70
},
{
"epoch": 0.12678571428571428,
"grad_norm": 0.302734375,
"learning_rate": 8.732142857142859e-06,
"loss": 1.3094,
"step": 71
},
{
"epoch": 0.12857142857142856,
"grad_norm": 0.1513671875,
"learning_rate": 8.714285714285715e-06,
"loss": 1.3111,
"step": 72
},
{
"epoch": 0.13035714285714287,
"grad_norm": 0.154296875,
"learning_rate": 8.696428571428572e-06,
"loss": 1.3303,
"step": 73
},
{
"epoch": 0.13214285714285715,
"grad_norm": 0.146484375,
"learning_rate": 8.67857142857143e-06,
"loss": 1.2938,
"step": 74
},
{
"epoch": 0.13392857142857142,
"grad_norm": 0.1728515625,
"learning_rate": 8.660714285714286e-06,
"loss": 1.2852,
"step": 75
},
{
"epoch": 0.1357142857142857,
"grad_norm": 0.1708984375,
"learning_rate": 8.642857142857144e-06,
"loss": 1.2962,
"step": 76
},
{
"epoch": 0.1375,
"grad_norm": 0.150390625,
"learning_rate": 8.625000000000001e-06,
"loss": 1.2855,
"step": 77
},
{
"epoch": 0.1392857142857143,
"grad_norm": 0.1591796875,
"learning_rate": 8.607142857142859e-06,
"loss": 1.3886,
"step": 78
},
{
"epoch": 0.14107142857142857,
"grad_norm": 0.158203125,
"learning_rate": 8.589285714285715e-06,
"loss": 1.3353,
"step": 79
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.2158203125,
"learning_rate": 8.571428571428571e-06,
"loss": 1.3206,
"step": 80
},
{
"epoch": 0.14464285714285716,
"grad_norm": 0.1533203125,
"learning_rate": 8.553571428571429e-06,
"loss": 1.2875,
"step": 81
},
{
"epoch": 0.14642857142857144,
"grad_norm": 0.1826171875,
"learning_rate": 8.535714285714286e-06,
"loss": 1.263,
"step": 82
},
{
"epoch": 0.14821428571428572,
"grad_norm": 0.16796875,
"learning_rate": 8.517857142857144e-06,
"loss": 1.3335,
"step": 83
},
{
"epoch": 0.15,
"grad_norm": 0.1591796875,
"learning_rate": 8.5e-06,
"loss": 1.3109,
"step": 84
},
{
"epoch": 0.15178571428571427,
"grad_norm": 0.2109375,
"learning_rate": 8.482142857142858e-06,
"loss": 1.2587,
"step": 85
},
{
"epoch": 0.15357142857142858,
"grad_norm": 0.1513671875,
"learning_rate": 8.464285714285715e-06,
"loss": 1.2751,
"step": 86
},
{
"epoch": 0.15535714285714286,
"grad_norm": 0.1650390625,
"learning_rate": 8.446428571428571e-06,
"loss": 1.3281,
"step": 87
},
{
"epoch": 0.15714285714285714,
"grad_norm": 0.63671875,
"learning_rate": 8.428571428571429e-06,
"loss": 1.294,
"step": 88
},
{
"epoch": 0.15892857142857142,
"grad_norm": 0.1533203125,
"learning_rate": 8.410714285714287e-06,
"loss": 1.2808,
"step": 89
},
{
"epoch": 0.16071428571428573,
"grad_norm": 0.1611328125,
"learning_rate": 8.392857142857144e-06,
"loss": 1.2168,
"step": 90
},
{
"epoch": 0.1625,
"grad_norm": 0.173828125,
"learning_rate": 8.375e-06,
"loss": 1.2869,
"step": 91
},
{
"epoch": 0.16428571428571428,
"grad_norm": 0.162109375,
"learning_rate": 8.357142857142858e-06,
"loss": 1.2177,
"step": 92
},
{
"epoch": 0.16607142857142856,
"grad_norm": 0.1689453125,
"learning_rate": 8.339285714285716e-06,
"loss": 1.336,
"step": 93
},
{
"epoch": 0.16785714285714284,
"grad_norm": 0.462890625,
"learning_rate": 8.321428571428573e-06,
"loss": 1.2556,
"step": 94
},
{
"epoch": 0.16964285714285715,
"grad_norm": 0.2099609375,
"learning_rate": 8.30357142857143e-06,
"loss": 1.2662,
"step": 95
},
{
"epoch": 0.17142857142857143,
"grad_norm": 0.1787109375,
"learning_rate": 8.285714285714287e-06,
"loss": 1.2892,
"step": 96
},
{
"epoch": 0.1732142857142857,
"grad_norm": 0.1787109375,
"learning_rate": 8.267857142857143e-06,
"loss": 1.3663,
"step": 97
},
{
"epoch": 0.175,
"grad_norm": 0.158203125,
"learning_rate": 8.25e-06,
"loss": 1.2514,
"step": 98
},
{
"epoch": 0.1767857142857143,
"grad_norm": 0.27734375,
"learning_rate": 8.232142857142857e-06,
"loss": 1.2026,
"step": 99
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.1533203125,
"learning_rate": 8.214285714285714e-06,
"loss": 1.2481,
"step": 100
},
{
"epoch": 0.18035714285714285,
"grad_norm": 0.1767578125,
"learning_rate": 8.196428571428572e-06,
"loss": 1.3102,
"step": 101
},
{
"epoch": 0.18214285714285713,
"grad_norm": 0.16796875,
"learning_rate": 8.17857142857143e-06,
"loss": 1.3078,
"step": 102
},
{
"epoch": 0.18392857142857144,
"grad_norm": 0.1669921875,
"learning_rate": 8.160714285714286e-06,
"loss": 1.2453,
"step": 103
},
{
"epoch": 0.18571428571428572,
"grad_norm": 0.205078125,
"learning_rate": 8.142857142857143e-06,
"loss": 1.2964,
"step": 104
},
{
"epoch": 0.1875,
"grad_norm": 0.2138671875,
"learning_rate": 8.125000000000001e-06,
"loss": 1.2259,
"step": 105
},
{
"epoch": 0.18928571428571428,
"grad_norm": 0.158203125,
"learning_rate": 8.107142857142859e-06,
"loss": 1.2752,
"step": 106
},
{
"epoch": 0.19107142857142856,
"grad_norm": 0.17578125,
"learning_rate": 8.089285714285715e-06,
"loss": 1.2201,
"step": 107
},
{
"epoch": 0.19285714285714287,
"grad_norm": 0.2021484375,
"learning_rate": 8.071428571428572e-06,
"loss": 1.2729,
"step": 108
},
{
"epoch": 0.19464285714285715,
"grad_norm": 0.181640625,
"learning_rate": 8.05357142857143e-06,
"loss": 1.3076,
"step": 109
},
{
"epoch": 0.19642857142857142,
"grad_norm": 0.1591796875,
"learning_rate": 8.035714285714286e-06,
"loss": 1.2438,
"step": 110
},
{
"epoch": 0.1982142857142857,
"grad_norm": 0.177734375,
"learning_rate": 8.017857142857144e-06,
"loss": 1.2728,
"step": 111
},
{
"epoch": 0.2,
"grad_norm": 0.15625,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2819,
"step": 112
},
{
"epoch": 0.2017857142857143,
"grad_norm": 0.1708984375,
"learning_rate": 7.982142857142859e-06,
"loss": 1.2777,
"step": 113
},
{
"epoch": 0.20357142857142857,
"grad_norm": 0.173828125,
"learning_rate": 7.964285714285715e-06,
"loss": 1.3234,
"step": 114
},
{
"epoch": 0.20535714285714285,
"grad_norm": 0.1630859375,
"learning_rate": 7.946428571428571e-06,
"loss": 1.2494,
"step": 115
},
{
"epoch": 0.20714285714285716,
"grad_norm": 0.1689453125,
"learning_rate": 7.928571428571429e-06,
"loss": 1.2596,
"step": 116
},
{
"epoch": 0.20892857142857144,
"grad_norm": 0.1611328125,
"learning_rate": 7.910714285714286e-06,
"loss": 1.2633,
"step": 117
},
{
"epoch": 0.21071428571428572,
"grad_norm": 0.1669921875,
"learning_rate": 7.892857142857144e-06,
"loss": 1.2387,
"step": 118
},
{
"epoch": 0.2125,
"grad_norm": 0.158203125,
"learning_rate": 7.875e-06,
"loss": 1.2819,
"step": 119
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.162109375,
"learning_rate": 7.857142857142858e-06,
"loss": 1.2503,
"step": 120
},
{
"epoch": 0.21607142857142858,
"grad_norm": 0.1748046875,
"learning_rate": 7.839285714285715e-06,
"loss": 1.2501,
"step": 121
},
{
"epoch": 0.21785714285714286,
"grad_norm": 0.1884765625,
"learning_rate": 7.821428571428571e-06,
"loss": 1.266,
"step": 122
},
{
"epoch": 0.21964285714285714,
"grad_norm": 0.1865234375,
"learning_rate": 7.803571428571429e-06,
"loss": 1.2812,
"step": 123
},
{
"epoch": 0.22142857142857142,
"grad_norm": 0.171875,
"learning_rate": 7.785714285714287e-06,
"loss": 1.259,
"step": 124
},
{
"epoch": 0.22321428571428573,
"grad_norm": 0.1767578125,
"learning_rate": 7.767857142857144e-06,
"loss": 1.2712,
"step": 125
},
{
"epoch": 0.225,
"grad_norm": 0.16796875,
"learning_rate": 7.75e-06,
"loss": 1.2627,
"step": 126
},
{
"epoch": 0.22678571428571428,
"grad_norm": 0.17578125,
"learning_rate": 7.732142857142858e-06,
"loss": 1.2744,
"step": 127
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.169921875,
"learning_rate": 7.714285714285716e-06,
"loss": 1.2053,
"step": 128
},
{
"epoch": 0.23035714285714284,
"grad_norm": 0.1650390625,
"learning_rate": 7.696428571428572e-06,
"loss": 1.2231,
"step": 129
},
{
"epoch": 0.23214285714285715,
"grad_norm": 0.19921875,
"learning_rate": 7.67857142857143e-06,
"loss": 1.2107,
"step": 130
},
{
"epoch": 0.23392857142857143,
"grad_norm": 0.1845703125,
"learning_rate": 7.660714285714287e-06,
"loss": 1.1928,
"step": 131
},
{
"epoch": 0.2357142857142857,
"grad_norm": 0.1826171875,
"learning_rate": 7.642857142857143e-06,
"loss": 1.2871,
"step": 132
},
{
"epoch": 0.2375,
"grad_norm": 0.1650390625,
"learning_rate": 7.625e-06,
"loss": 1.241,
"step": 133
},
{
"epoch": 0.2392857142857143,
"grad_norm": 0.1611328125,
"learning_rate": 7.6071428571428575e-06,
"loss": 1.2231,
"step": 134
},
{
"epoch": 0.24107142857142858,
"grad_norm": 0.47265625,
"learning_rate": 7.589285714285714e-06,
"loss": 1.3065,
"step": 135
},
{
"epoch": 0.24285714285714285,
"grad_norm": 0.162109375,
"learning_rate": 7.571428571428572e-06,
"loss": 1.1785,
"step": 136
},
{
"epoch": 0.24464285714285713,
"grad_norm": 0.1748046875,
"learning_rate": 7.553571428571429e-06,
"loss": 1.175,
"step": 137
},
{
"epoch": 0.24642857142857144,
"grad_norm": 0.18359375,
"learning_rate": 7.5357142857142865e-06,
"loss": 1.2725,
"step": 138
},
{
"epoch": 0.24821428571428572,
"grad_norm": 0.166015625,
"learning_rate": 7.517857142857143e-06,
"loss": 1.2524,
"step": 139
},
{
"epoch": 0.25,
"grad_norm": 0.3671875,
"learning_rate": 7.500000000000001e-06,
"loss": 1.2102,
"step": 140
},
{
"epoch": 0.2517857142857143,
"grad_norm": 0.1767578125,
"learning_rate": 7.482142857142858e-06,
"loss": 1.2474,
"step": 141
},
{
"epoch": 0.25357142857142856,
"grad_norm": 0.1708984375,
"learning_rate": 7.464285714285715e-06,
"loss": 1.2537,
"step": 142
},
{
"epoch": 0.25535714285714284,
"grad_norm": 0.2119140625,
"learning_rate": 7.446428571428572e-06,
"loss": 1.2221,
"step": 143
},
{
"epoch": 0.2571428571428571,
"grad_norm": 0.166015625,
"learning_rate": 7.428571428571429e-06,
"loss": 1.1796,
"step": 144
},
{
"epoch": 0.25892857142857145,
"grad_norm": 0.1728515625,
"learning_rate": 7.410714285714287e-06,
"loss": 1.2549,
"step": 145
},
{
"epoch": 0.26071428571428573,
"grad_norm": 0.1689453125,
"learning_rate": 7.392857142857144e-06,
"loss": 1.2014,
"step": 146
},
{
"epoch": 0.2625,
"grad_norm": 0.1708984375,
"learning_rate": 7.375000000000001e-06,
"loss": 1.2295,
"step": 147
},
{
"epoch": 0.2642857142857143,
"grad_norm": 0.1640625,
"learning_rate": 7.357142857142858e-06,
"loss": 1.2125,
"step": 148
},
{
"epoch": 0.26607142857142857,
"grad_norm": 0.1669921875,
"learning_rate": 7.339285714285714e-06,
"loss": 1.1909,
"step": 149
},
{
"epoch": 0.26785714285714285,
"grad_norm": 0.1650390625,
"learning_rate": 7.321428571428572e-06,
"loss": 1.1728,
"step": 150
},
{
"epoch": 0.26964285714285713,
"grad_norm": 0.16015625,
"learning_rate": 7.303571428571429e-06,
"loss": 1.2091,
"step": 151
},
{
"epoch": 0.2714285714285714,
"grad_norm": 0.2060546875,
"learning_rate": 7.285714285714286e-06,
"loss": 1.247,
"step": 152
},
{
"epoch": 0.2732142857142857,
"grad_norm": 0.2451171875,
"learning_rate": 7.267857142857143e-06,
"loss": 1.1899,
"step": 153
},
{
"epoch": 0.275,
"grad_norm": 0.1728515625,
"learning_rate": 7.25e-06,
"loss": 1.2335,
"step": 154
},
{
"epoch": 0.2767857142857143,
"grad_norm": 0.173828125,
"learning_rate": 7.232142857142858e-06,
"loss": 1.2507,
"step": 155
},
{
"epoch": 0.2785714285714286,
"grad_norm": 0.1875,
"learning_rate": 7.2142857142857145e-06,
"loss": 1.2654,
"step": 156
},
{
"epoch": 0.28035714285714286,
"grad_norm": 0.8671875,
"learning_rate": 7.196428571428572e-06,
"loss": 1.2424,
"step": 157
},
{
"epoch": 0.28214285714285714,
"grad_norm": 0.197265625,
"learning_rate": 7.178571428571429e-06,
"loss": 1.2825,
"step": 158
},
{
"epoch": 0.2839285714285714,
"grad_norm": 0.2080078125,
"learning_rate": 7.160714285714287e-06,
"loss": 1.2786,
"step": 159
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.1865234375,
"learning_rate": 7.1428571428571436e-06,
"loss": 1.167,
"step": 160
},
{
"epoch": 0.2875,
"grad_norm": 0.171875,
"learning_rate": 7.125e-06,
"loss": 1.1822,
"step": 161
},
{
"epoch": 0.2892857142857143,
"grad_norm": 0.1708984375,
"learning_rate": 7.107142857142858e-06,
"loss": 1.252,
"step": 162
},
{
"epoch": 0.2910714285714286,
"grad_norm": 0.169921875,
"learning_rate": 7.089285714285715e-06,
"loss": 1.187,
"step": 163
},
{
"epoch": 0.29285714285714287,
"grad_norm": 0.17578125,
"learning_rate": 7.0714285714285726e-06,
"loss": 1.2069,
"step": 164
},
{
"epoch": 0.29464285714285715,
"grad_norm": 0.169921875,
"learning_rate": 7.053571428571429e-06,
"loss": 1.2166,
"step": 165
},
{
"epoch": 0.29642857142857143,
"grad_norm": 0.2177734375,
"learning_rate": 7.035714285714287e-06,
"loss": 1.2966,
"step": 166
},
{
"epoch": 0.2982142857142857,
"grad_norm": 0.1845703125,
"learning_rate": 7.017857142857143e-06,
"loss": 1.2777,
"step": 167
},
{
"epoch": 0.3,
"grad_norm": 0.1669921875,
"learning_rate": 7e-06,
"loss": 1.1945,
"step": 168
},
{
"epoch": 0.30178571428571427,
"grad_norm": 0.17578125,
"learning_rate": 6.9821428571428576e-06,
"loss": 1.2145,
"step": 169
},
{
"epoch": 0.30357142857142855,
"grad_norm": 0.171875,
"learning_rate": 6.964285714285714e-06,
"loss": 1.2073,
"step": 170
},
{
"epoch": 0.3053571428571429,
"grad_norm": 0.1689453125,
"learning_rate": 6.946428571428572e-06,
"loss": 1.1993,
"step": 171
},
{
"epoch": 0.30714285714285716,
"grad_norm": 0.173828125,
"learning_rate": 6.928571428571429e-06,
"loss": 1.2071,
"step": 172
},
{
"epoch": 0.30892857142857144,
"grad_norm": 0.1650390625,
"learning_rate": 6.910714285714286e-06,
"loss": 1.2003,
"step": 173
},
{
"epoch": 0.3107142857142857,
"grad_norm": 0.177734375,
"learning_rate": 6.892857142857143e-06,
"loss": 1.2725,
"step": 174
},
{
"epoch": 0.3125,
"grad_norm": 0.18359375,
"learning_rate": 6.875e-06,
"loss": 1.1951,
"step": 175
},
{
"epoch": 0.3142857142857143,
"grad_norm": 0.2236328125,
"learning_rate": 6.857142857142858e-06,
"loss": 1.1859,
"step": 176
},
{
"epoch": 0.31607142857142856,
"grad_norm": 0.208984375,
"learning_rate": 6.839285714285715e-06,
"loss": 1.2176,
"step": 177
},
{
"epoch": 0.31785714285714284,
"grad_norm": 0.177734375,
"learning_rate": 6.8214285714285724e-06,
"loss": 1.209,
"step": 178
},
{
"epoch": 0.3196428571428571,
"grad_norm": 0.1708984375,
"learning_rate": 6.803571428571429e-06,
"loss": 1.1817,
"step": 179
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.203125,
"learning_rate": 6.785714285714287e-06,
"loss": 1.2103,
"step": 180
},
{
"epoch": 0.32321428571428573,
"grad_norm": 0.234375,
"learning_rate": 6.767857142857144e-06,
"loss": 1.1874,
"step": 181
},
{
"epoch": 0.325,
"grad_norm": 0.193359375,
"learning_rate": 6.750000000000001e-06,
"loss": 1.1934,
"step": 182
},
{
"epoch": 0.3267857142857143,
"grad_norm": 0.1962890625,
"learning_rate": 6.732142857142858e-06,
"loss": 1.1743,
"step": 183
},
{
"epoch": 0.32857142857142857,
"grad_norm": 0.259765625,
"learning_rate": 6.714285714285714e-06,
"loss": 1.2188,
"step": 184
},
{
"epoch": 0.33035714285714285,
"grad_norm": 0.173828125,
"learning_rate": 6.696428571428571e-06,
"loss": 1.175,
"step": 185
},
{
"epoch": 0.33214285714285713,
"grad_norm": 0.275390625,
"learning_rate": 6.678571428571429e-06,
"loss": 1.2215,
"step": 186
},
{
"epoch": 0.3339285714285714,
"grad_norm": 0.1748046875,
"learning_rate": 6.660714285714286e-06,
"loss": 1.1251,
"step": 187
},
{
"epoch": 0.3357142857142857,
"grad_norm": 0.181640625,
"learning_rate": 6.642857142857143e-06,
"loss": 1.1588,
"step": 188
},
{
"epoch": 0.3375,
"grad_norm": 0.169921875,
"learning_rate": 6.625e-06,
"loss": 1.1694,
"step": 189
},
{
"epoch": 0.3392857142857143,
"grad_norm": 0.1806640625,
"learning_rate": 6.607142857142858e-06,
"loss": 1.266,
"step": 190
},
{
"epoch": 0.3410714285714286,
"grad_norm": 0.1767578125,
"learning_rate": 6.589285714285715e-06,
"loss": 1.2323,
"step": 191
},
{
"epoch": 0.34285714285714286,
"grad_norm": 0.19921875,
"learning_rate": 6.571428571428572e-06,
"loss": 1.1801,
"step": 192
},
{
"epoch": 0.34464285714285714,
"grad_norm": 0.224609375,
"learning_rate": 6.553571428571429e-06,
"loss": 1.181,
"step": 193
},
{
"epoch": 0.3464285714285714,
"grad_norm": 0.1748046875,
"learning_rate": 6.535714285714286e-06,
"loss": 1.212,
"step": 194
},
{
"epoch": 0.3482142857142857,
"grad_norm": 0.18359375,
"learning_rate": 6.517857142857144e-06,
"loss": 1.193,
"step": 195
},
{
"epoch": 0.35,
"grad_norm": 0.1826171875,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.1976,
"step": 196
},
{
"epoch": 0.3517857142857143,
"grad_norm": 0.236328125,
"learning_rate": 6.482142857142858e-06,
"loss": 1.1992,
"step": 197
},
{
"epoch": 0.3535714285714286,
"grad_norm": 0.171875,
"learning_rate": 6.464285714285715e-06,
"loss": 1.2138,
"step": 198
},
{
"epoch": 0.35535714285714287,
"grad_norm": 0.1943359375,
"learning_rate": 6.446428571428573e-06,
"loss": 1.1763,
"step": 199
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.177734375,
"learning_rate": 6.4285714285714295e-06,
"loss": 1.1462,
"step": 200
},
{
"epoch": 0.35892857142857143,
"grad_norm": 0.18359375,
"learning_rate": 6.410714285714287e-06,
"loss": 1.1992,
"step": 201
},
{
"epoch": 0.3607142857142857,
"grad_norm": 0.1845703125,
"learning_rate": 6.392857142857143e-06,
"loss": 1.1701,
"step": 202
},
{
"epoch": 0.3625,
"grad_norm": 0.1826171875,
"learning_rate": 6.375e-06,
"loss": 1.1651,
"step": 203
},
{
"epoch": 0.36428571428571427,
"grad_norm": 0.2216796875,
"learning_rate": 6.357142857142858e-06,
"loss": 1.1889,
"step": 204
},
{
"epoch": 0.36607142857142855,
"grad_norm": 0.1826171875,
"learning_rate": 6.3392857142857145e-06,
"loss": 1.2059,
"step": 205
},
{
"epoch": 0.3678571428571429,
"grad_norm": 0.1796875,
"learning_rate": 6.321428571428571e-06,
"loss": 1.1756,
"step": 206
},
{
"epoch": 0.36964285714285716,
"grad_norm": 0.302734375,
"learning_rate": 6.303571428571429e-06,
"loss": 1.2142,
"step": 207
},
{
"epoch": 0.37142857142857144,
"grad_norm": 0.263671875,
"learning_rate": 6.285714285714286e-06,
"loss": 1.2188,
"step": 208
},
{
"epoch": 0.3732142857142857,
"grad_norm": 0.2431640625,
"learning_rate": 6.2678571428571435e-06,
"loss": 1.2105,
"step": 209
},
{
"epoch": 0.375,
"grad_norm": 0.271484375,
"learning_rate": 6.25e-06,
"loss": 1.1784,
"step": 210
},
{
"epoch": 0.3767857142857143,
"grad_norm": 0.255859375,
"learning_rate": 6.232142857142858e-06,
"loss": 1.248,
"step": 211
},
{
"epoch": 0.37857142857142856,
"grad_norm": 0.18359375,
"learning_rate": 6.214285714285715e-06,
"loss": 1.1754,
"step": 212
},
{
"epoch": 0.38035714285714284,
"grad_norm": 0.1787109375,
"learning_rate": 6.1964285714285725e-06,
"loss": 1.1834,
"step": 213
},
{
"epoch": 0.3821428571428571,
"grad_norm": 0.1875,
"learning_rate": 6.178571428571429e-06,
"loss": 1.2056,
"step": 214
},
{
"epoch": 0.38392857142857145,
"grad_norm": 0.51171875,
"learning_rate": 6.160714285714286e-06,
"loss": 1.1652,
"step": 215
},
{
"epoch": 0.38571428571428573,
"grad_norm": 0.1796875,
"learning_rate": 6.142857142857144e-06,
"loss": 1.1651,
"step": 216
},
{
"epoch": 0.3875,
"grad_norm": 0.1826171875,
"learning_rate": 6.125000000000001e-06,
"loss": 1.2336,
"step": 217
},
{
"epoch": 0.3892857142857143,
"grad_norm": 0.22265625,
"learning_rate": 6.107142857142858e-06,
"loss": 1.1685,
"step": 218
},
{
"epoch": 0.39107142857142857,
"grad_norm": 0.2158203125,
"learning_rate": 6.089285714285714e-06,
"loss": 1.1554,
"step": 219
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.193359375,
"learning_rate": 6.071428571428571e-06,
"loss": 1.1878,
"step": 220
},
{
"epoch": 0.39464285714285713,
"grad_norm": 0.18359375,
"learning_rate": 6.053571428571429e-06,
"loss": 1.1796,
"step": 221
},
{
"epoch": 0.3964285714285714,
"grad_norm": 0.2373046875,
"learning_rate": 6.035714285714286e-06,
"loss": 1.2006,
"step": 222
},
{
"epoch": 0.3982142857142857,
"grad_norm": 0.1923828125,
"learning_rate": 6.017857142857143e-06,
"loss": 1.1679,
"step": 223
},
{
"epoch": 0.4,
"grad_norm": 0.21875,
"learning_rate": 6e-06,
"loss": 1.1688,
"step": 224
},
{
"epoch": 0.4017857142857143,
"grad_norm": 0.1748046875,
"learning_rate": 5.982142857142858e-06,
"loss": 1.1752,
"step": 225
},
{
"epoch": 0.4035714285714286,
"grad_norm": 0.17578125,
"learning_rate": 5.964285714285715e-06,
"loss": 1.1462,
"step": 226
},
{
"epoch": 0.40535714285714286,
"grad_norm": 0.2197265625,
"learning_rate": 5.9464285714285715e-06,
"loss": 1.1739,
"step": 227
},
{
"epoch": 0.40714285714285714,
"grad_norm": 0.2041015625,
"learning_rate": 5.928571428571429e-06,
"loss": 1.1946,
"step": 228
},
{
"epoch": 0.4089285714285714,
"grad_norm": 0.185546875,
"learning_rate": 5.910714285714286e-06,
"loss": 1.189,
"step": 229
},
{
"epoch": 0.4107142857142857,
"grad_norm": 0.1728515625,
"learning_rate": 5.892857142857144e-06,
"loss": 1.1436,
"step": 230
},
{
"epoch": 0.4125,
"grad_norm": 0.326171875,
"learning_rate": 5.8750000000000005e-06,
"loss": 1.29,
"step": 231
},
{
"epoch": 0.4142857142857143,
"grad_norm": 0.1953125,
"learning_rate": 5.857142857142858e-06,
"loss": 1.1559,
"step": 232
},
{
"epoch": 0.4160714285714286,
"grad_norm": 0.18359375,
"learning_rate": 5.839285714285715e-06,
"loss": 1.1212,
"step": 233
},
{
"epoch": 0.41785714285714287,
"grad_norm": 0.1953125,
"learning_rate": 5.821428571428573e-06,
"loss": 1.1776,
"step": 234
},
{
"epoch": 0.41964285714285715,
"grad_norm": 0.1923828125,
"learning_rate": 5.8035714285714295e-06,
"loss": 1.2123,
"step": 235
},
{
"epoch": 0.42142857142857143,
"grad_norm": 0.2177734375,
"learning_rate": 5.785714285714286e-06,
"loss": 1.2795,
"step": 236
},
{
"epoch": 0.4232142857142857,
"grad_norm": 0.78125,
"learning_rate": 5.767857142857143e-06,
"loss": 1.1719,
"step": 237
},
{
"epoch": 0.425,
"grad_norm": 0.1826171875,
"learning_rate": 5.75e-06,
"loss": 1.1923,
"step": 238
},
{
"epoch": 0.42678571428571427,
"grad_norm": 0.2353515625,
"learning_rate": 5.732142857142857e-06,
"loss": 1.2586,
"step": 239
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.1806640625,
"learning_rate": 5.7142857142857145e-06,
"loss": 1.1634,
"step": 240
},
{
"epoch": 0.4303571428571429,
"grad_norm": 0.2021484375,
"learning_rate": 5.696428571428571e-06,
"loss": 1.1752,
"step": 241
},
{
"epoch": 0.43214285714285716,
"grad_norm": 0.3046875,
"learning_rate": 5.678571428571429e-06,
"loss": 1.1835,
"step": 242
},
{
"epoch": 0.43392857142857144,
"grad_norm": 0.1884765625,
"learning_rate": 5.660714285714286e-06,
"loss": 1.1811,
"step": 243
},
{
"epoch": 0.4357142857142857,
"grad_norm": 0.291015625,
"learning_rate": 5.6428571428571435e-06,
"loss": 1.3322,
"step": 244
},
{
"epoch": 0.4375,
"grad_norm": 2.234375,
"learning_rate": 5.625e-06,
"loss": 1.1047,
"step": 245
},
{
"epoch": 0.4392857142857143,
"grad_norm": 0.1875,
"learning_rate": 5.607142857142858e-06,
"loss": 1.1832,
"step": 246
},
{
"epoch": 0.44107142857142856,
"grad_norm": 0.1826171875,
"learning_rate": 5.589285714285715e-06,
"loss": 1.2046,
"step": 247
},
{
"epoch": 0.44285714285714284,
"grad_norm": 0.1923828125,
"learning_rate": 5.571428571428572e-06,
"loss": 1.0937,
"step": 248
},
{
"epoch": 0.4446428571428571,
"grad_norm": 0.2138671875,
"learning_rate": 5.553571428571429e-06,
"loss": 1.208,
"step": 249
},
{
"epoch": 0.44642857142857145,
"grad_norm": 0.197265625,
"learning_rate": 5.535714285714286e-06,
"loss": 1.1323,
"step": 250
},
{
"epoch": 0.44821428571428573,
"grad_norm": 0.197265625,
"learning_rate": 5.517857142857144e-06,
"loss": 1.1391,
"step": 251
},
{
"epoch": 0.45,
"grad_norm": 0.19140625,
"learning_rate": 5.500000000000001e-06,
"loss": 1.16,
"step": 252
},
{
"epoch": 0.4517857142857143,
"grad_norm": 0.1748046875,
"learning_rate": 5.482142857142858e-06,
"loss": 1.1171,
"step": 253
},
{
"epoch": 0.45357142857142857,
"grad_norm": 0.189453125,
"learning_rate": 5.464285714285714e-06,
"loss": 1.2083,
"step": 254
},
{
"epoch": 0.45535714285714285,
"grad_norm": 0.21484375,
"learning_rate": 5.446428571428571e-06,
"loss": 1.1761,
"step": 255
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.197265625,
"learning_rate": 5.428571428571429e-06,
"loss": 1.1479,
"step": 256
},
{
"epoch": 0.4589285714285714,
"grad_norm": 0.1953125,
"learning_rate": 5.410714285714286e-06,
"loss": 1.1903,
"step": 257
},
{
"epoch": 0.4607142857142857,
"grad_norm": 0.2138671875,
"learning_rate": 5.392857142857143e-06,
"loss": 1.1795,
"step": 258
},
{
"epoch": 0.4625,
"grad_norm": 0.306640625,
"learning_rate": 5.375e-06,
"loss": 1.233,
"step": 259
},
{
"epoch": 0.4642857142857143,
"grad_norm": 0.1845703125,
"learning_rate": 5.357142857142857e-06,
"loss": 1.1406,
"step": 260
},
{
"epoch": 0.4660714285714286,
"grad_norm": 0.18359375,
"learning_rate": 5.339285714285715e-06,
"loss": 1.176,
"step": 261
},
{
"epoch": 0.46785714285714286,
"grad_norm": 0.189453125,
"learning_rate": 5.3214285714285715e-06,
"loss": 1.1739,
"step": 262
},
{
"epoch": 0.46964285714285714,
"grad_norm": 0.2041015625,
"learning_rate": 5.303571428571429e-06,
"loss": 1.1291,
"step": 263
},
{
"epoch": 0.4714285714285714,
"grad_norm": 0.193359375,
"learning_rate": 5.285714285714286e-06,
"loss": 1.215,
"step": 264
},
{
"epoch": 0.4732142857142857,
"grad_norm": 0.2119140625,
"learning_rate": 5.267857142857144e-06,
"loss": 1.1484,
"step": 265
},
{
"epoch": 0.475,
"grad_norm": 0.205078125,
"learning_rate": 5.2500000000000006e-06,
"loss": 1.1621,
"step": 266
},
{
"epoch": 0.4767857142857143,
"grad_norm": 0.181640625,
"learning_rate": 5.232142857142858e-06,
"loss": 1.1341,
"step": 267
},
{
"epoch": 0.4785714285714286,
"grad_norm": 0.1953125,
"learning_rate": 5.214285714285715e-06,
"loss": 1.1793,
"step": 268
},
{
"epoch": 0.48035714285714287,
"grad_norm": 0.189453125,
"learning_rate": 5.196428571428572e-06,
"loss": 1.1767,
"step": 269
},
{
"epoch": 0.48214285714285715,
"grad_norm": 0.1875,
"learning_rate": 5.1785714285714296e-06,
"loss": 1.1695,
"step": 270
},
{
"epoch": 0.48392857142857143,
"grad_norm": 0.197265625,
"learning_rate": 5.160714285714286e-06,
"loss": 1.21,
"step": 271
},
{
"epoch": 0.4857142857142857,
"grad_norm": 0.1787109375,
"learning_rate": 5.142857142857142e-06,
"loss": 1.1121,
"step": 272
},
{
"epoch": 0.4875,
"grad_norm": 0.2060546875,
"learning_rate": 5.125e-06,
"loss": 1.1956,
"step": 273
},
{
"epoch": 0.48928571428571427,
"grad_norm": 0.2333984375,
"learning_rate": 5.107142857142857e-06,
"loss": 1.1052,
"step": 274
},
{
"epoch": 0.49107142857142855,
"grad_norm": 0.185546875,
"learning_rate": 5.0892857142857146e-06,
"loss": 1.1829,
"step": 275
},
{
"epoch": 0.4928571428571429,
"grad_norm": 0.263671875,
"learning_rate": 5.071428571428571e-06,
"loss": 1.1666,
"step": 276
},
{
"epoch": 0.49464285714285716,
"grad_norm": 0.2314453125,
"learning_rate": 5.053571428571429e-06,
"loss": 1.1083,
"step": 277
},
{
"epoch": 0.49642857142857144,
"grad_norm": 0.20703125,
"learning_rate": 5.035714285714286e-06,
"loss": 1.166,
"step": 278
},
{
"epoch": 0.4982142857142857,
"grad_norm": 0.1865234375,
"learning_rate": 5.017857142857144e-06,
"loss": 1.168,
"step": 279
},
{
"epoch": 0.5,
"grad_norm": 0.1923828125,
"learning_rate": 5e-06,
"loss": 1.1707,
"step": 280
},
{
"epoch": 0.5017857142857143,
"grad_norm": 0.181640625,
"learning_rate": 4.982142857142857e-06,
"loss": 1.1735,
"step": 281
},
{
"epoch": 0.5035714285714286,
"grad_norm": 0.19140625,
"learning_rate": 4.964285714285715e-06,
"loss": 1.1395,
"step": 282
},
{
"epoch": 0.5053571428571428,
"grad_norm": 0.1953125,
"learning_rate": 4.946428571428572e-06,
"loss": 1.1439,
"step": 283
},
{
"epoch": 0.5071428571428571,
"grad_norm": 0.244140625,
"learning_rate": 4.928571428571429e-06,
"loss": 1.1547,
"step": 284
},
{
"epoch": 0.5089285714285714,
"grad_norm": 0.1953125,
"learning_rate": 4.910714285714286e-06,
"loss": 1.2232,
"step": 285
},
{
"epoch": 0.5107142857142857,
"grad_norm": 0.19921875,
"learning_rate": 4.892857142857143e-06,
"loss": 1.1607,
"step": 286
},
{
"epoch": 0.5125,
"grad_norm": 0.1806640625,
"learning_rate": 4.875e-06,
"loss": 1.1799,
"step": 287
},
{
"epoch": 0.5142857142857142,
"grad_norm": 0.18359375,
"learning_rate": 4.857142857142858e-06,
"loss": 1.1534,
"step": 288
},
{
"epoch": 0.5160714285714286,
"grad_norm": 0.2060546875,
"learning_rate": 4.839285714285714e-06,
"loss": 1.1432,
"step": 289
},
{
"epoch": 0.5178571428571429,
"grad_norm": 0.1943359375,
"learning_rate": 4.821428571428572e-06,
"loss": 1.1573,
"step": 290
},
{
"epoch": 0.5196428571428572,
"grad_norm": 0.1923828125,
"learning_rate": 4.803571428571429e-06,
"loss": 1.1567,
"step": 291
},
{
"epoch": 0.5214285714285715,
"grad_norm": 0.1884765625,
"learning_rate": 4.785714285714287e-06,
"loss": 1.163,
"step": 292
},
{
"epoch": 0.5232142857142857,
"grad_norm": 0.1845703125,
"learning_rate": 4.7678571428571434e-06,
"loss": 1.1372,
"step": 293
},
{
"epoch": 0.525,
"grad_norm": 0.18359375,
"learning_rate": 4.75e-06,
"loss": 1.1123,
"step": 294
},
{
"epoch": 0.5267857142857143,
"grad_norm": 0.2109375,
"learning_rate": 4.732142857142857e-06,
"loss": 1.1416,
"step": 295
},
{
"epoch": 0.5285714285714286,
"grad_norm": 0.193359375,
"learning_rate": 4.714285714285715e-06,
"loss": 1.1302,
"step": 296
},
{
"epoch": 0.5303571428571429,
"grad_norm": 0.1826171875,
"learning_rate": 4.696428571428572e-06,
"loss": 1.1657,
"step": 297
},
{
"epoch": 0.5321428571428571,
"grad_norm": 0.1796875,
"learning_rate": 4.678571428571429e-06,
"loss": 1.1288,
"step": 298
},
{
"epoch": 0.5339285714285714,
"grad_norm": 0.185546875,
"learning_rate": 4.660714285714286e-06,
"loss": 1.1896,
"step": 299
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.1923828125,
"learning_rate": 4.642857142857144e-06,
"loss": 1.1692,
"step": 300
},
{
"epoch": 0.5375,
"grad_norm": 0.1787109375,
"learning_rate": 4.625000000000001e-06,
"loss": 1.1562,
"step": 301
},
{
"epoch": 0.5392857142857143,
"grad_norm": 0.21484375,
"learning_rate": 4.6071428571428574e-06,
"loss": 1.1978,
"step": 302
},
{
"epoch": 0.5410714285714285,
"grad_norm": 0.2001953125,
"learning_rate": 4.589285714285714e-06,
"loss": 1.1697,
"step": 303
},
{
"epoch": 0.5428571428571428,
"grad_norm": 0.2177734375,
"learning_rate": 4.571428571428572e-06,
"loss": 1.2107,
"step": 304
},
{
"epoch": 0.5446428571428571,
"grad_norm": 0.19921875,
"learning_rate": 4.553571428571429e-06,
"loss": 1.1483,
"step": 305
},
{
"epoch": 0.5464285714285714,
"grad_norm": 0.1904296875,
"learning_rate": 4.5357142857142865e-06,
"loss": 1.1341,
"step": 306
},
{
"epoch": 0.5482142857142858,
"grad_norm": 0.2001953125,
"learning_rate": 4.517857142857143e-06,
"loss": 1.1576,
"step": 307
},
{
"epoch": 0.55,
"grad_norm": 0.212890625,
"learning_rate": 4.5e-06,
"loss": 1.2077,
"step": 308
},
{
"epoch": 0.5517857142857143,
"grad_norm": 0.1875,
"learning_rate": 4.482142857142858e-06,
"loss": 1.1227,
"step": 309
},
{
"epoch": 0.5535714285714286,
"grad_norm": 0.31640625,
"learning_rate": 4.464285714285715e-06,
"loss": 1.1901,
"step": 310
},
{
"epoch": 0.5553571428571429,
"grad_norm": 0.1904296875,
"learning_rate": 4.4464285714285715e-06,
"loss": 1.1644,
"step": 311
},
{
"epoch": 0.5571428571428572,
"grad_norm": 0.18359375,
"learning_rate": 4.428571428571429e-06,
"loss": 1.2139,
"step": 312
},
{
"epoch": 0.5589285714285714,
"grad_norm": 0.18359375,
"learning_rate": 4.410714285714286e-06,
"loss": 1.1444,
"step": 313
},
{
"epoch": 0.5607142857142857,
"grad_norm": 0.19140625,
"learning_rate": 4.392857142857143e-06,
"loss": 1.1729,
"step": 314
},
{
"epoch": 0.5625,
"grad_norm": 0.21875,
"learning_rate": 4.3750000000000005e-06,
"loss": 1.1465,
"step": 315
},
{
"epoch": 0.5642857142857143,
"grad_norm": 0.1865234375,
"learning_rate": 4.357142857142857e-06,
"loss": 1.1259,
"step": 316
},
{
"epoch": 0.5660714285714286,
"grad_norm": 0.212890625,
"learning_rate": 4.339285714285715e-06,
"loss": 1.1134,
"step": 317
},
{
"epoch": 0.5678571428571428,
"grad_norm": 0.181640625,
"learning_rate": 4.321428571428572e-06,
"loss": 1.145,
"step": 318
},
{
"epoch": 0.5696428571428571,
"grad_norm": 0.2119140625,
"learning_rate": 4.3035714285714295e-06,
"loss": 1.1057,
"step": 319
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.189453125,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.1909,
"step": 320
},
{
"epoch": 0.5732142857142857,
"grad_norm": 0.1953125,
"learning_rate": 4.267857142857143e-06,
"loss": 1.1526,
"step": 321
},
{
"epoch": 0.575,
"grad_norm": 0.2119140625,
"learning_rate": 4.25e-06,
"loss": 1.1345,
"step": 322
},
{
"epoch": 0.5767857142857142,
"grad_norm": 0.197265625,
"learning_rate": 4.232142857142858e-06,
"loss": 1.1276,
"step": 323
},
{
"epoch": 0.5785714285714286,
"grad_norm": 0.208984375,
"learning_rate": 4.2142857142857145e-06,
"loss": 1.1772,
"step": 324
},
{
"epoch": 0.5803571428571429,
"grad_norm": 0.1953125,
"learning_rate": 4.196428571428572e-06,
"loss": 1.1043,
"step": 325
},
{
"epoch": 0.5821428571428572,
"grad_norm": 0.43359375,
"learning_rate": 4.178571428571429e-06,
"loss": 1.1695,
"step": 326
},
{
"epoch": 0.5839285714285715,
"grad_norm": 0.201171875,
"learning_rate": 4.160714285714287e-06,
"loss": 1.0878,
"step": 327
},
{
"epoch": 0.5857142857142857,
"grad_norm": 0.212890625,
"learning_rate": 4.1428571428571435e-06,
"loss": 1.175,
"step": 328
},
{
"epoch": 0.5875,
"grad_norm": 0.1826171875,
"learning_rate": 4.125e-06,
"loss": 1.1478,
"step": 329
},
{
"epoch": 0.5892857142857143,
"grad_norm": 0.1923828125,
"learning_rate": 4.107142857142857e-06,
"loss": 1.166,
"step": 330
},
{
"epoch": 0.5910714285714286,
"grad_norm": 0.1962890625,
"learning_rate": 4.089285714285715e-06,
"loss": 1.1393,
"step": 331
},
{
"epoch": 0.5928571428571429,
"grad_norm": 0.2021484375,
"learning_rate": 4.071428571428572e-06,
"loss": 1.1259,
"step": 332
},
{
"epoch": 0.5946428571428571,
"grad_norm": 0.1865234375,
"learning_rate": 4.053571428571429e-06,
"loss": 1.1298,
"step": 333
},
{
"epoch": 0.5964285714285714,
"grad_norm": 0.1962890625,
"learning_rate": 4.035714285714286e-06,
"loss": 1.1384,
"step": 334
},
{
"epoch": 0.5982142857142857,
"grad_norm": 0.2001953125,
"learning_rate": 4.017857142857143e-06,
"loss": 1.1776,
"step": 335
},
{
"epoch": 0.6,
"grad_norm": 0.193359375,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1937,
"step": 336
},
{
"epoch": 0.6017857142857143,
"grad_norm": 0.25,
"learning_rate": 3.9821428571428575e-06,
"loss": 1.1618,
"step": 337
},
{
"epoch": 0.6035714285714285,
"grad_norm": 0.189453125,
"learning_rate": 3.964285714285714e-06,
"loss": 1.1387,
"step": 338
},
{
"epoch": 0.6053571428571428,
"grad_norm": 0.1943359375,
"learning_rate": 3.946428571428572e-06,
"loss": 1.1479,
"step": 339
},
{
"epoch": 0.6071428571428571,
"grad_norm": 0.1845703125,
"learning_rate": 3.928571428571429e-06,
"loss": 1.1185,
"step": 340
},
{
"epoch": 0.6089285714285714,
"grad_norm": 0.208984375,
"learning_rate": 3.910714285714286e-06,
"loss": 1.1447,
"step": 341
},
{
"epoch": 0.6107142857142858,
"grad_norm": 0.2021484375,
"learning_rate": 3.892857142857143e-06,
"loss": 1.1693,
"step": 342
},
{
"epoch": 0.6125,
"grad_norm": 0.18359375,
"learning_rate": 3.875e-06,
"loss": 1.1268,
"step": 343
},
{
"epoch": 0.6142857142857143,
"grad_norm": 0.197265625,
"learning_rate": 3.857142857142858e-06,
"loss": 1.1435,
"step": 344
},
{
"epoch": 0.6160714285714286,
"grad_norm": 0.2099609375,
"learning_rate": 3.839285714285715e-06,
"loss": 1.102,
"step": 345
},
{
"epoch": 0.6178571428571429,
"grad_norm": 0.2294921875,
"learning_rate": 3.8214285714285715e-06,
"loss": 1.0893,
"step": 346
},
{
"epoch": 0.6196428571428572,
"grad_norm": 0.185546875,
"learning_rate": 3.8035714285714288e-06,
"loss": 1.1115,
"step": 347
},
{
"epoch": 0.6214285714285714,
"grad_norm": 0.216796875,
"learning_rate": 3.785714285714286e-06,
"loss": 1.1497,
"step": 348
},
{
"epoch": 0.6232142857142857,
"grad_norm": 0.185546875,
"learning_rate": 3.7678571428571433e-06,
"loss": 1.1288,
"step": 349
},
{
"epoch": 0.625,
"grad_norm": 0.1865234375,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.1183,
"step": 350
},
{
"epoch": 0.6267857142857143,
"grad_norm": 0.189453125,
"learning_rate": 3.7321428571428573e-06,
"loss": 1.1357,
"step": 351
},
{
"epoch": 0.6285714285714286,
"grad_norm": 0.189453125,
"learning_rate": 3.7142857142857146e-06,
"loss": 1.1683,
"step": 352
},
{
"epoch": 0.6303571428571428,
"grad_norm": 0.1962890625,
"learning_rate": 3.696428571428572e-06,
"loss": 1.1582,
"step": 353
},
{
"epoch": 0.6321428571428571,
"grad_norm": 0.2041015625,
"learning_rate": 3.678571428571429e-06,
"loss": 1.1508,
"step": 354
},
{
"epoch": 0.6339285714285714,
"grad_norm": 0.1884765625,
"learning_rate": 3.660714285714286e-06,
"loss": 1.1375,
"step": 355
},
{
"epoch": 0.6357142857142857,
"grad_norm": 0.2041015625,
"learning_rate": 3.642857142857143e-06,
"loss": 1.1921,
"step": 356
},
{
"epoch": 0.6375,
"grad_norm": 0.181640625,
"learning_rate": 3.625e-06,
"loss": 1.0964,
"step": 357
},
{
"epoch": 0.6392857142857142,
"grad_norm": 0.1904296875,
"learning_rate": 3.6071428571428573e-06,
"loss": 1.1554,
"step": 358
},
{
"epoch": 0.6410714285714286,
"grad_norm": 0.185546875,
"learning_rate": 3.5892857142857145e-06,
"loss": 1.1234,
"step": 359
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.1884765625,
"learning_rate": 3.5714285714285718e-06,
"loss": 1.1793,
"step": 360
},
{
"epoch": 0.6446428571428572,
"grad_norm": 0.2265625,
"learning_rate": 3.553571428571429e-06,
"loss": 1.1054,
"step": 361
},
{
"epoch": 0.6464285714285715,
"grad_norm": 0.189453125,
"learning_rate": 3.5357142857142863e-06,
"loss": 1.1465,
"step": 362
},
{
"epoch": 0.6482142857142857,
"grad_norm": 0.1884765625,
"learning_rate": 3.5178571428571435e-06,
"loss": 1.1645,
"step": 363
},
{
"epoch": 0.65,
"grad_norm": 0.1884765625,
"learning_rate": 3.5e-06,
"loss": 1.1173,
"step": 364
},
{
"epoch": 0.6517857142857143,
"grad_norm": 0.328125,
"learning_rate": 3.482142857142857e-06,
"loss": 1.1352,
"step": 365
},
{
"epoch": 0.6535714285714286,
"grad_norm": 0.228515625,
"learning_rate": 3.4642857142857145e-06,
"loss": 1.1772,
"step": 366
},
{
"epoch": 0.6553571428571429,
"grad_norm": 0.2158203125,
"learning_rate": 3.4464285714285717e-06,
"loss": 1.1848,
"step": 367
},
{
"epoch": 0.6571428571428571,
"grad_norm": 0.216796875,
"learning_rate": 3.428571428571429e-06,
"loss": 1.1608,
"step": 368
},
{
"epoch": 0.6589285714285714,
"grad_norm": 0.2119140625,
"learning_rate": 3.4107142857142862e-06,
"loss": 1.1825,
"step": 369
},
{
"epoch": 0.6607142857142857,
"grad_norm": 0.197265625,
"learning_rate": 3.3928571428571435e-06,
"loss": 1.1475,
"step": 370
},
{
"epoch": 0.6625,
"grad_norm": 0.236328125,
"learning_rate": 3.3750000000000003e-06,
"loss": 1.1106,
"step": 371
},
{
"epoch": 0.6642857142857143,
"grad_norm": 0.185546875,
"learning_rate": 3.357142857142857e-06,
"loss": 1.1679,
"step": 372
},
{
"epoch": 0.6660714285714285,
"grad_norm": 0.189453125,
"learning_rate": 3.3392857142857144e-06,
"loss": 1.1394,
"step": 373
},
{
"epoch": 0.6678571428571428,
"grad_norm": 0.2001953125,
"learning_rate": 3.3214285714285716e-06,
"loss": 1.1166,
"step": 374
},
{
"epoch": 0.6696428571428571,
"grad_norm": 0.197265625,
"learning_rate": 3.303571428571429e-06,
"loss": 1.2486,
"step": 375
},
{
"epoch": 0.6714285714285714,
"grad_norm": 0.1904296875,
"learning_rate": 3.285714285714286e-06,
"loss": 1.0829,
"step": 376
},
{
"epoch": 0.6732142857142858,
"grad_norm": 0.1884765625,
"learning_rate": 3.267857142857143e-06,
"loss": 1.1629,
"step": 377
},
{
"epoch": 0.675,
"grad_norm": 0.1943359375,
"learning_rate": 3.2500000000000002e-06,
"loss": 1.1803,
"step": 378
},
{
"epoch": 0.6767857142857143,
"grad_norm": 0.2138671875,
"learning_rate": 3.2321428571428575e-06,
"loss": 1.127,
"step": 379
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.234375,
"learning_rate": 3.2142857142857147e-06,
"loss": 1.1411,
"step": 380
},
{
"epoch": 0.6803571428571429,
"grad_norm": 0.19140625,
"learning_rate": 3.1964285714285716e-06,
"loss": 1.1343,
"step": 381
},
{
"epoch": 0.6821428571428572,
"grad_norm": 0.2041015625,
"learning_rate": 3.178571428571429e-06,
"loss": 1.1033,
"step": 382
},
{
"epoch": 0.6839285714285714,
"grad_norm": 0.20703125,
"learning_rate": 3.1607142857142856e-06,
"loss": 1.1471,
"step": 383
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.23046875,
"learning_rate": 3.142857142857143e-06,
"loss": 1.1144,
"step": 384
},
{
"epoch": 0.6875,
"grad_norm": 0.2236328125,
"learning_rate": 3.125e-06,
"loss": 1.1688,
"step": 385
},
{
"epoch": 0.6892857142857143,
"grad_norm": 0.55859375,
"learning_rate": 3.1071428571428574e-06,
"loss": 1.2375,
"step": 386
},
{
"epoch": 0.6910714285714286,
"grad_norm": 0.2041015625,
"learning_rate": 3.0892857142857147e-06,
"loss": 1.1086,
"step": 387
},
{
"epoch": 0.6928571428571428,
"grad_norm": 0.2021484375,
"learning_rate": 3.071428571428572e-06,
"loss": 1.1237,
"step": 388
},
{
"epoch": 0.6946428571428571,
"grad_norm": 0.205078125,
"learning_rate": 3.053571428571429e-06,
"loss": 1.2193,
"step": 389
},
{
"epoch": 0.6964285714285714,
"grad_norm": 0.19921875,
"learning_rate": 3.0357142857142856e-06,
"loss": 1.1369,
"step": 390
},
{
"epoch": 0.6982142857142857,
"grad_norm": 0.1962890625,
"learning_rate": 3.017857142857143e-06,
"loss": 1.1489,
"step": 391
},
{
"epoch": 0.7,
"grad_norm": 0.2080078125,
"learning_rate": 3e-06,
"loss": 1.1448,
"step": 392
},
{
"epoch": 0.7017857142857142,
"grad_norm": 0.2021484375,
"learning_rate": 2.9821428571428573e-06,
"loss": 1.1144,
"step": 393
},
{
"epoch": 0.7035714285714286,
"grad_norm": 0.19140625,
"learning_rate": 2.9642857142857146e-06,
"loss": 1.1157,
"step": 394
},
{
"epoch": 0.7053571428571429,
"grad_norm": 0.1875,
"learning_rate": 2.946428571428572e-06,
"loss": 1.1229,
"step": 395
},
{
"epoch": 0.7071428571428572,
"grad_norm": 0.2080078125,
"learning_rate": 2.928571428571429e-06,
"loss": 1.1004,
"step": 396
},
{
"epoch": 0.7089285714285715,
"grad_norm": 0.2119140625,
"learning_rate": 2.9107142857142863e-06,
"loss": 1.1635,
"step": 397
},
{
"epoch": 0.7107142857142857,
"grad_norm": 0.189453125,
"learning_rate": 2.892857142857143e-06,
"loss": 1.1401,
"step": 398
},
{
"epoch": 0.7125,
"grad_norm": 0.1865234375,
"learning_rate": 2.875e-06,
"loss": 1.1199,
"step": 399
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.19921875,
"learning_rate": 2.8571428571428573e-06,
"loss": 1.1308,
"step": 400
},
{
"epoch": 0.7160714285714286,
"grad_norm": 0.19921875,
"learning_rate": 2.8392857142857145e-06,
"loss": 1.134,
"step": 401
},
{
"epoch": 0.7178571428571429,
"grad_norm": 0.1923828125,
"learning_rate": 2.8214285714285718e-06,
"loss": 1.1755,
"step": 402
},
{
"epoch": 0.7196428571428571,
"grad_norm": 0.1865234375,
"learning_rate": 2.803571428571429e-06,
"loss": 1.1561,
"step": 403
},
{
"epoch": 0.7214285714285714,
"grad_norm": 0.203125,
"learning_rate": 2.785714285714286e-06,
"loss": 1.0835,
"step": 404
},
{
"epoch": 0.7232142857142857,
"grad_norm": 0.1953125,
"learning_rate": 2.767857142857143e-06,
"loss": 1.1308,
"step": 405
},
{
"epoch": 0.725,
"grad_norm": 0.2255859375,
"learning_rate": 2.7500000000000004e-06,
"loss": 1.1434,
"step": 406
},
{
"epoch": 0.7267857142857143,
"grad_norm": 0.2734375,
"learning_rate": 2.732142857142857e-06,
"loss": 1.1328,
"step": 407
},
{
"epoch": 0.7285714285714285,
"grad_norm": 0.1943359375,
"learning_rate": 2.7142857142857144e-06,
"loss": 1.1019,
"step": 408
},
{
"epoch": 0.7303571428571428,
"grad_norm": 0.2001953125,
"learning_rate": 2.6964285714285717e-06,
"loss": 1.2163,
"step": 409
},
{
"epoch": 0.7321428571428571,
"grad_norm": 0.1982421875,
"learning_rate": 2.6785714285714285e-06,
"loss": 1.1774,
"step": 410
},
{
"epoch": 0.7339285714285714,
"grad_norm": 0.21484375,
"learning_rate": 2.6607142857142858e-06,
"loss": 1.1145,
"step": 411
},
{
"epoch": 0.7357142857142858,
"grad_norm": 0.1923828125,
"learning_rate": 2.642857142857143e-06,
"loss": 1.1181,
"step": 412
},
{
"epoch": 0.7375,
"grad_norm": 0.2255859375,
"learning_rate": 2.6250000000000003e-06,
"loss": 1.1201,
"step": 413
},
{
"epoch": 0.7392857142857143,
"grad_norm": 0.1884765625,
"learning_rate": 2.6071428571428575e-06,
"loss": 1.1604,
"step": 414
},
{
"epoch": 0.7410714285714286,
"grad_norm": 0.193359375,
"learning_rate": 2.5892857142857148e-06,
"loss": 1.1095,
"step": 415
},
{
"epoch": 0.7428571428571429,
"grad_norm": 0.197265625,
"learning_rate": 2.571428571428571e-06,
"loss": 1.1058,
"step": 416
},
{
"epoch": 0.7446428571428572,
"grad_norm": 0.24609375,
"learning_rate": 2.5535714285714284e-06,
"loss": 1.1625,
"step": 417
},
{
"epoch": 0.7464285714285714,
"grad_norm": 0.1982421875,
"learning_rate": 2.5357142857142857e-06,
"loss": 1.1778,
"step": 418
},
{
"epoch": 0.7482142857142857,
"grad_norm": 0.2255859375,
"learning_rate": 2.517857142857143e-06,
"loss": 1.136,
"step": 419
},
{
"epoch": 0.75,
"grad_norm": 0.1923828125,
"learning_rate": 2.5e-06,
"loss": 1.1642,
"step": 420
},
{
"epoch": 0.7517857142857143,
"grad_norm": 0.1982421875,
"learning_rate": 2.4821428571428575e-06,
"loss": 1.1833,
"step": 421
},
{
"epoch": 0.7535714285714286,
"grad_norm": 0.1875,
"learning_rate": 2.4642857142857147e-06,
"loss": 1.1342,
"step": 422
},
{
"epoch": 0.7553571428571428,
"grad_norm": 0.255859375,
"learning_rate": 2.4464285714285715e-06,
"loss": 1.2057,
"step": 423
},
{
"epoch": 0.7571428571428571,
"grad_norm": 0.1982421875,
"learning_rate": 2.428571428571429e-06,
"loss": 1.141,
"step": 424
},
{
"epoch": 0.7589285714285714,
"grad_norm": 0.279296875,
"learning_rate": 2.410714285714286e-06,
"loss": 1.1426,
"step": 425
},
{
"epoch": 0.7607142857142857,
"grad_norm": 0.1962890625,
"learning_rate": 2.3928571428571433e-06,
"loss": 1.1725,
"step": 426
},
{
"epoch": 0.7625,
"grad_norm": 0.31640625,
"learning_rate": 2.375e-06,
"loss": 1.1323,
"step": 427
},
{
"epoch": 0.7642857142857142,
"grad_norm": 0.197265625,
"learning_rate": 2.3571428571428574e-06,
"loss": 1.1337,
"step": 428
},
{
"epoch": 0.7660714285714286,
"grad_norm": 0.279296875,
"learning_rate": 2.3392857142857146e-06,
"loss": 1.1141,
"step": 429
},
{
"epoch": 0.7678571428571429,
"grad_norm": 0.1962890625,
"learning_rate": 2.321428571428572e-06,
"loss": 1.1064,
"step": 430
},
{
"epoch": 0.7696428571428572,
"grad_norm": 0.21484375,
"learning_rate": 2.3035714285714287e-06,
"loss": 1.174,
"step": 431
},
{
"epoch": 0.7714285714285715,
"grad_norm": 0.251953125,
"learning_rate": 2.285714285714286e-06,
"loss": 1.1263,
"step": 432
},
{
"epoch": 0.7732142857142857,
"grad_norm": 0.2109375,
"learning_rate": 2.2678571428571432e-06,
"loss": 1.132,
"step": 433
},
{
"epoch": 0.775,
"grad_norm": 0.2470703125,
"learning_rate": 2.25e-06,
"loss": 1.1163,
"step": 434
},
{
"epoch": 0.7767857142857143,
"grad_norm": 0.189453125,
"learning_rate": 2.2321428571428573e-06,
"loss": 1.1375,
"step": 435
},
{
"epoch": 0.7785714285714286,
"grad_norm": 0.19921875,
"learning_rate": 2.2142857142857146e-06,
"loss": 1.1587,
"step": 436
},
{
"epoch": 0.7803571428571429,
"grad_norm": 0.1923828125,
"learning_rate": 2.1964285714285714e-06,
"loss": 1.1021,
"step": 437
},
{
"epoch": 0.7821428571428571,
"grad_norm": 0.193359375,
"learning_rate": 2.1785714285714286e-06,
"loss": 1.0877,
"step": 438
},
{
"epoch": 0.7839285714285714,
"grad_norm": 0.28515625,
"learning_rate": 2.160714285714286e-06,
"loss": 1.1146,
"step": 439
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.193359375,
"learning_rate": 2.1428571428571427e-06,
"loss": 1.1518,
"step": 440
},
{
"epoch": 0.7875,
"grad_norm": 0.21875,
"learning_rate": 2.125e-06,
"loss": 1.1839,
"step": 441
},
{
"epoch": 0.7892857142857143,
"grad_norm": 0.193359375,
"learning_rate": 2.1071428571428572e-06,
"loss": 1.1355,
"step": 442
},
{
"epoch": 0.7910714285714285,
"grad_norm": 0.1943359375,
"learning_rate": 2.0892857142857145e-06,
"loss": 1.0875,
"step": 443
},
{
"epoch": 0.7928571428571428,
"grad_norm": 0.197265625,
"learning_rate": 2.0714285714285717e-06,
"loss": 1.172,
"step": 444
},
{
"epoch": 0.7946428571428571,
"grad_norm": 0.19921875,
"learning_rate": 2.0535714285714286e-06,
"loss": 1.1502,
"step": 445
},
{
"epoch": 0.7964285714285714,
"grad_norm": 0.2392578125,
"learning_rate": 2.035714285714286e-06,
"loss": 1.1237,
"step": 446
},
{
"epoch": 0.7982142857142858,
"grad_norm": 0.2021484375,
"learning_rate": 2.017857142857143e-06,
"loss": 1.111,
"step": 447
},
{
"epoch": 0.8,
"grad_norm": 0.1904296875,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.1212,
"step": 448
},
{
"epoch": 0.8017857142857143,
"grad_norm": 0.2060546875,
"learning_rate": 1.982142857142857e-06,
"loss": 1.1525,
"step": 449
},
{
"epoch": 0.8035714285714286,
"grad_norm": 0.1865234375,
"learning_rate": 1.9642857142857144e-06,
"loss": 1.0929,
"step": 450
},
{
"epoch": 0.8053571428571429,
"grad_norm": 0.197265625,
"learning_rate": 1.9464285714285717e-06,
"loss": 1.1021,
"step": 451
},
{
"epoch": 0.8071428571428572,
"grad_norm": 0.185546875,
"learning_rate": 1.928571428571429e-06,
"loss": 1.0899,
"step": 452
},
{
"epoch": 0.8089285714285714,
"grad_norm": 0.19921875,
"learning_rate": 1.9107142857142858e-06,
"loss": 1.153,
"step": 453
},
{
"epoch": 0.8107142857142857,
"grad_norm": 0.263671875,
"learning_rate": 1.892857142857143e-06,
"loss": 1.157,
"step": 454
},
{
"epoch": 0.8125,
"grad_norm": 0.1865234375,
"learning_rate": 1.8750000000000003e-06,
"loss": 1.1144,
"step": 455
},
{
"epoch": 0.8142857142857143,
"grad_norm": 0.1923828125,
"learning_rate": 1.8571428571428573e-06,
"loss": 1.155,
"step": 456
},
{
"epoch": 0.8160714285714286,
"grad_norm": 0.193359375,
"learning_rate": 1.8392857142857146e-06,
"loss": 1.1415,
"step": 457
},
{
"epoch": 0.8178571428571428,
"grad_norm": 0.1953125,
"learning_rate": 1.8214285714285716e-06,
"loss": 1.1344,
"step": 458
},
{
"epoch": 0.8196428571428571,
"grad_norm": 0.220703125,
"learning_rate": 1.8035714285714286e-06,
"loss": 1.0889,
"step": 459
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.1982421875,
"learning_rate": 1.7857142857142859e-06,
"loss": 1.1418,
"step": 460
},
{
"epoch": 0.8232142857142857,
"grad_norm": 0.189453125,
"learning_rate": 1.7678571428571431e-06,
"loss": 1.1216,
"step": 461
},
{
"epoch": 0.825,
"grad_norm": 0.201171875,
"learning_rate": 1.75e-06,
"loss": 1.1632,
"step": 462
},
{
"epoch": 0.8267857142857142,
"grad_norm": 0.2255859375,
"learning_rate": 1.7321428571428572e-06,
"loss": 1.106,
"step": 463
},
{
"epoch": 0.8285714285714286,
"grad_norm": 0.22265625,
"learning_rate": 1.7142857142857145e-06,
"loss": 1.152,
"step": 464
},
{
"epoch": 0.8303571428571429,
"grad_norm": 0.1875,
"learning_rate": 1.6964285714285717e-06,
"loss": 1.1252,
"step": 465
},
{
"epoch": 0.8321428571428572,
"grad_norm": 0.1904296875,
"learning_rate": 1.6785714285714286e-06,
"loss": 1.0623,
"step": 466
},
{
"epoch": 0.8339285714285715,
"grad_norm": 0.1923828125,
"learning_rate": 1.6607142857142858e-06,
"loss": 1.1411,
"step": 467
},
{
"epoch": 0.8357142857142857,
"grad_norm": 0.20703125,
"learning_rate": 1.642857142857143e-06,
"loss": 1.186,
"step": 468
},
{
"epoch": 0.8375,
"grad_norm": 0.1982421875,
"learning_rate": 1.6250000000000001e-06,
"loss": 1.1114,
"step": 469
},
{
"epoch": 0.8392857142857143,
"grad_norm": 0.197265625,
"learning_rate": 1.6071428571428574e-06,
"loss": 1.1707,
"step": 470
},
{
"epoch": 0.8410714285714286,
"grad_norm": 0.1982421875,
"learning_rate": 1.5892857142857144e-06,
"loss": 1.1489,
"step": 471
},
{
"epoch": 0.8428571428571429,
"grad_norm": 0.2314453125,
"learning_rate": 1.5714285714285714e-06,
"loss": 1.1259,
"step": 472
},
{
"epoch": 0.8446428571428571,
"grad_norm": 0.1904296875,
"learning_rate": 1.5535714285714287e-06,
"loss": 1.1018,
"step": 473
},
{
"epoch": 0.8464285714285714,
"grad_norm": 0.193359375,
"learning_rate": 1.535714285714286e-06,
"loss": 1.0899,
"step": 474
},
{
"epoch": 0.8482142857142857,
"grad_norm": 0.271484375,
"learning_rate": 1.5178571428571428e-06,
"loss": 1.1349,
"step": 475
},
{
"epoch": 0.85,
"grad_norm": 0.203125,
"learning_rate": 1.5e-06,
"loss": 1.1787,
"step": 476
},
{
"epoch": 0.8517857142857143,
"grad_norm": 0.216796875,
"learning_rate": 1.4821428571428573e-06,
"loss": 1.1398,
"step": 477
},
{
"epoch": 0.8535714285714285,
"grad_norm": 0.205078125,
"learning_rate": 1.4642857142857145e-06,
"loss": 1.1429,
"step": 478
},
{
"epoch": 0.8553571428571428,
"grad_norm": 0.2412109375,
"learning_rate": 1.4464285714285716e-06,
"loss": 1.1161,
"step": 479
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.193359375,
"learning_rate": 1.4285714285714286e-06,
"loss": 1.1527,
"step": 480
},
{
"epoch": 0.8589285714285714,
"grad_norm": 0.1923828125,
"learning_rate": 1.4107142857142859e-06,
"loss": 1.1439,
"step": 481
},
{
"epoch": 0.8607142857142858,
"grad_norm": 0.1884765625,
"learning_rate": 1.392857142857143e-06,
"loss": 1.1271,
"step": 482
},
{
"epoch": 0.8625,
"grad_norm": 0.337890625,
"learning_rate": 1.3750000000000002e-06,
"loss": 1.1871,
"step": 483
},
{
"epoch": 0.8642857142857143,
"grad_norm": 0.265625,
"learning_rate": 1.3571428571428572e-06,
"loss": 1.0913,
"step": 484
},
{
"epoch": 0.8660714285714286,
"grad_norm": 0.2470703125,
"learning_rate": 1.3392857142857143e-06,
"loss": 1.1074,
"step": 485
},
{
"epoch": 0.8678571428571429,
"grad_norm": 0.185546875,
"learning_rate": 1.3214285714285715e-06,
"loss": 1.1388,
"step": 486
},
{
"epoch": 0.8696428571428572,
"grad_norm": 0.2470703125,
"learning_rate": 1.3035714285714288e-06,
"loss": 1.0727,
"step": 487
},
{
"epoch": 0.8714285714285714,
"grad_norm": 0.19140625,
"learning_rate": 1.2857142857142856e-06,
"loss": 1.1419,
"step": 488
},
{
"epoch": 0.8732142857142857,
"grad_norm": 0.1865234375,
"learning_rate": 1.2678571428571428e-06,
"loss": 1.0769,
"step": 489
},
{
"epoch": 0.875,
"grad_norm": 0.2001953125,
"learning_rate": 1.25e-06,
"loss": 1.1293,
"step": 490
},
{
"epoch": 0.8767857142857143,
"grad_norm": 0.216796875,
"learning_rate": 1.2321428571428574e-06,
"loss": 1.1096,
"step": 491
},
{
"epoch": 0.8785714285714286,
"grad_norm": 0.294921875,
"learning_rate": 1.2142857142857144e-06,
"loss": 1.1627,
"step": 492
},
{
"epoch": 0.8803571428571428,
"grad_norm": 0.1904296875,
"learning_rate": 1.1964285714285717e-06,
"loss": 1.1395,
"step": 493
},
{
"epoch": 0.8821428571428571,
"grad_norm": 0.2060546875,
"learning_rate": 1.1785714285714287e-06,
"loss": 1.1657,
"step": 494
},
{
"epoch": 0.8839285714285714,
"grad_norm": 0.1923828125,
"learning_rate": 1.160714285714286e-06,
"loss": 1.131,
"step": 495
},
{
"epoch": 0.8857142857142857,
"grad_norm": 0.2021484375,
"learning_rate": 1.142857142857143e-06,
"loss": 1.1686,
"step": 496
},
{
"epoch": 0.8875,
"grad_norm": 0.1923828125,
"learning_rate": 1.125e-06,
"loss": 1.1463,
"step": 497
},
{
"epoch": 0.8892857142857142,
"grad_norm": 0.203125,
"learning_rate": 1.1071428571428573e-06,
"loss": 1.2182,
"step": 498
},
{
"epoch": 0.8910714285714286,
"grad_norm": 0.1962890625,
"learning_rate": 1.0892857142857143e-06,
"loss": 1.1011,
"step": 499
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.2021484375,
"learning_rate": 1.0714285714285714e-06,
"loss": 1.1859,
"step": 500
},
{
"epoch": 0.8946428571428572,
"grad_norm": 0.2353515625,
"learning_rate": 1.0535714285714286e-06,
"loss": 1.1523,
"step": 501
},
{
"epoch": 0.8964285714285715,
"grad_norm": 0.2021484375,
"learning_rate": 1.0357142857142859e-06,
"loss": 1.1122,
"step": 502
},
{
"epoch": 0.8982142857142857,
"grad_norm": 0.19921875,
"learning_rate": 1.017857142857143e-06,
"loss": 1.1763,
"step": 503
},
{
"epoch": 0.9,
"grad_norm": 0.28125,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.0727,
"step": 504
},
{
"epoch": 0.9017857142857143,
"grad_norm": 0.1923828125,
"learning_rate": 9.821428571428572e-07,
"loss": 1.1076,
"step": 505
},
{
"epoch": 0.9035714285714286,
"grad_norm": 0.197265625,
"learning_rate": 9.642857142857145e-07,
"loss": 1.1587,
"step": 506
},
{
"epoch": 0.9053571428571429,
"grad_norm": 0.2890625,
"learning_rate": 9.464285714285715e-07,
"loss": 1.1549,
"step": 507
},
{
"epoch": 0.9071428571428571,
"grad_norm": 0.19140625,
"learning_rate": 9.285714285714287e-07,
"loss": 1.1607,
"step": 508
},
{
"epoch": 0.9089285714285714,
"grad_norm": 0.19140625,
"learning_rate": 9.107142857142858e-07,
"loss": 1.1395,
"step": 509
},
{
"epoch": 0.9107142857142857,
"grad_norm": 0.1904296875,
"learning_rate": 8.928571428571429e-07,
"loss": 1.1434,
"step": 510
},
{
"epoch": 0.9125,
"grad_norm": 0.208984375,
"learning_rate": 8.75e-07,
"loss": 1.0969,
"step": 511
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.1953125,
"learning_rate": 8.571428571428572e-07,
"loss": 1.1581,
"step": 512
},
{
"epoch": 0.9160714285714285,
"grad_norm": 0.2001953125,
"learning_rate": 8.392857142857143e-07,
"loss": 1.1507,
"step": 513
},
{
"epoch": 0.9178571428571428,
"grad_norm": 0.216796875,
"learning_rate": 8.214285714285715e-07,
"loss": 1.1276,
"step": 514
},
{
"epoch": 0.9196428571428571,
"grad_norm": 0.2373046875,
"learning_rate": 8.035714285714287e-07,
"loss": 1.1668,
"step": 515
},
{
"epoch": 0.9214285714285714,
"grad_norm": 0.1962890625,
"learning_rate": 7.857142857142857e-07,
"loss": 1.1198,
"step": 516
},
{
"epoch": 0.9232142857142858,
"grad_norm": 0.2138671875,
"learning_rate": 7.67857142857143e-07,
"loss": 1.2005,
"step": 517
},
{
"epoch": 0.925,
"grad_norm": 0.294921875,
"learning_rate": 7.5e-07,
"loss": 1.1021,
"step": 518
},
{
"epoch": 0.9267857142857143,
"grad_norm": 0.1904296875,
"learning_rate": 7.321428571428573e-07,
"loss": 1.1009,
"step": 519
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.1962890625,
"learning_rate": 7.142857142857143e-07,
"loss": 1.1358,
"step": 520
},
{
"epoch": 0.9303571428571429,
"grad_norm": 0.193359375,
"learning_rate": 6.964285714285715e-07,
"loss": 1.1524,
"step": 521
},
{
"epoch": 0.9321428571428572,
"grad_norm": 0.2060546875,
"learning_rate": 6.785714285714286e-07,
"loss": 1.0907,
"step": 522
},
{
"epoch": 0.9339285714285714,
"grad_norm": 0.1875,
"learning_rate": 6.607142857142858e-07,
"loss": 1.1222,
"step": 523
},
{
"epoch": 0.9357142857142857,
"grad_norm": 0.1962890625,
"learning_rate": 6.428571428571428e-07,
"loss": 1.1787,
"step": 524
},
{
"epoch": 0.9375,
"grad_norm": 0.19140625,
"learning_rate": 6.25e-07,
"loss": 1.1474,
"step": 525
},
{
"epoch": 0.9392857142857143,
"grad_norm": 0.1953125,
"learning_rate": 6.071428571428572e-07,
"loss": 1.1283,
"step": 526
},
{
"epoch": 0.9410714285714286,
"grad_norm": 0.1875,
"learning_rate": 5.892857142857143e-07,
"loss": 1.1177,
"step": 527
},
{
"epoch": 0.9428571428571428,
"grad_norm": 0.2373046875,
"learning_rate": 5.714285714285715e-07,
"loss": 1.119,
"step": 528
},
{
"epoch": 0.9446428571428571,
"grad_norm": 0.2001953125,
"learning_rate": 5.535714285714286e-07,
"loss": 1.1682,
"step": 529
},
{
"epoch": 0.9464285714285714,
"grad_norm": 0.1962890625,
"learning_rate": 5.357142857142857e-07,
"loss": 1.1034,
"step": 530
},
{
"epoch": 0.9482142857142857,
"grad_norm": 0.193359375,
"learning_rate": 5.178571428571429e-07,
"loss": 1.1415,
"step": 531
},
{
"epoch": 0.95,
"grad_norm": 0.2255859375,
"learning_rate": 5.000000000000001e-07,
"loss": 1.151,
"step": 532
},
{
"epoch": 0.9517857142857142,
"grad_norm": 0.220703125,
"learning_rate": 4.821428571428572e-07,
"loss": 1.133,
"step": 533
},
{
"epoch": 0.9535714285714286,
"grad_norm": 0.1904296875,
"learning_rate": 4.642857142857143e-07,
"loss": 1.1081,
"step": 534
},
{
"epoch": 0.9553571428571429,
"grad_norm": 0.22265625,
"learning_rate": 4.4642857142857147e-07,
"loss": 1.0945,
"step": 535
},
{
"epoch": 0.9571428571428572,
"grad_norm": 0.1884765625,
"learning_rate": 4.285714285714286e-07,
"loss": 1.1079,
"step": 536
},
{
"epoch": 0.9589285714285715,
"grad_norm": 0.18359375,
"learning_rate": 4.1071428571428577e-07,
"loss": 1.1026,
"step": 537
},
{
"epoch": 0.9607142857142857,
"grad_norm": 0.1923828125,
"learning_rate": 3.9285714285714286e-07,
"loss": 1.1647,
"step": 538
},
{
"epoch": 0.9625,
"grad_norm": 0.23046875,
"learning_rate": 3.75e-07,
"loss": 1.179,
"step": 539
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.2333984375,
"learning_rate": 3.5714285714285716e-07,
"loss": 1.1877,
"step": 540
},
{
"epoch": 0.9660714285714286,
"grad_norm": 0.1982421875,
"learning_rate": 3.392857142857143e-07,
"loss": 1.102,
"step": 541
},
{
"epoch": 0.9678571428571429,
"grad_norm": 0.1884765625,
"learning_rate": 3.214285714285714e-07,
"loss": 1.1424,
"step": 542
},
{
"epoch": 0.9696428571428571,
"grad_norm": 0.1865234375,
"learning_rate": 3.035714285714286e-07,
"loss": 1.1172,
"step": 543
},
{
"epoch": 0.9714285714285714,
"grad_norm": 0.1982421875,
"learning_rate": 2.8571428571428575e-07,
"loss": 1.1402,
"step": 544
},
{
"epoch": 0.9732142857142857,
"grad_norm": 0.2119140625,
"learning_rate": 2.6785714285714284e-07,
"loss": 1.1771,
"step": 545
},
{
"epoch": 0.975,
"grad_norm": 0.220703125,
"learning_rate": 2.5000000000000004e-07,
"loss": 1.1276,
"step": 546
},
{
"epoch": 0.9767857142857143,
"grad_norm": 0.1875,
"learning_rate": 2.3214285714285716e-07,
"loss": 1.143,
"step": 547
},
{
"epoch": 0.9785714285714285,
"grad_norm": 0.197265625,
"learning_rate": 2.142857142857143e-07,
"loss": 1.1141,
"step": 548
},
{
"epoch": 0.9803571428571428,
"grad_norm": 0.193359375,
"learning_rate": 1.9642857142857143e-07,
"loss": 1.1463,
"step": 549
},
{
"epoch": 0.9821428571428571,
"grad_norm": 0.1943359375,
"learning_rate": 1.7857142857142858e-07,
"loss": 1.1643,
"step": 550
},
{
"epoch": 0.9839285714285714,
"grad_norm": 0.1904296875,
"learning_rate": 1.607142857142857e-07,
"loss": 1.1075,
"step": 551
},
{
"epoch": 0.9857142857142858,
"grad_norm": 0.20703125,
"learning_rate": 1.4285714285714287e-07,
"loss": 1.1964,
"step": 552
},
{
"epoch": 0.9875,
"grad_norm": 0.1826171875,
"learning_rate": 1.2500000000000002e-07,
"loss": 1.0845,
"step": 553
},
{
"epoch": 0.9892857142857143,
"grad_norm": 0.1953125,
"learning_rate": 1.0714285714285716e-07,
"loss": 1.1831,
"step": 554
},
{
"epoch": 0.9910714285714286,
"grad_norm": 0.201171875,
"learning_rate": 8.928571428571429e-08,
"loss": 1.1154,
"step": 555
},
{
"epoch": 0.9928571428571429,
"grad_norm": 0.1953125,
"learning_rate": 7.142857142857144e-08,
"loss": 1.1277,
"step": 556
},
{
"epoch": 0.9946428571428572,
"grad_norm": 0.1943359375,
"learning_rate": 5.357142857142858e-08,
"loss": 1.0752,
"step": 557
},
{
"epoch": 0.9964285714285714,
"grad_norm": 0.2099609375,
"learning_rate": 3.571428571428572e-08,
"loss": 1.18,
"step": 558
},
{
"epoch": 0.9982142857142857,
"grad_norm": 0.2138671875,
"learning_rate": 1.785714285714286e-08,
"loss": 1.1936,
"step": 559
},
{
"epoch": 1.0,
"grad_norm": 0.189453125,
"learning_rate": 0.0,
"loss": 1.1262,
"step": 560
}
],
"logging_steps": 1.0,
"max_steps": 560,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.796277576746926e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}