{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 1260,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
{ |
|
"epoch": 0.007936507936507936, |
|
"grad_norm": 300.0, |
|
"learning_rate": 1.5873015873015873e-06, |
|
"loss": 44.0514, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03968253968253968, |
|
"grad_norm": 294.0, |
|
"learning_rate": 7.936507936507936e-06, |
|
"loss": 44.4732, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.07936507936507936, |
|
"grad_norm": 172.0, |
|
"learning_rate": 1.5873015873015872e-05, |
|
"loss": 39.0478, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.11904761904761904, |
|
"grad_norm": 61.5, |
|
"learning_rate": 2.380952380952381e-05, |
|
"loss": 27.5033, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.15873015873015872, |
|
"grad_norm": 19.625, |
|
"learning_rate": 3.1746031746031745e-05, |
|
"loss": 19.8771, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.1984126984126984, |
|
"grad_norm": 12.6875, |
|
"learning_rate": 3.968253968253968e-05, |
|
"loss": 18.3177, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.23809523809523808, |
|
"grad_norm": 5.875, |
|
"learning_rate": 4.761904761904762e-05, |
|
"loss": 16.5284, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.2777777777777778, |
|
"grad_norm": 3.015625, |
|
"learning_rate": 5.555555555555556e-05, |
|
"loss": 15.6508, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.31746031746031744, |
|
"grad_norm": 3.828125, |
|
"learning_rate": 6.349206349206349e-05, |
|
"loss": 14.7333, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.35714285714285715, |
|
"grad_norm": 7.34375, |
|
"learning_rate": 7.142857142857143e-05, |
|
"loss": 14.189, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.3968253968253968, |
|
"grad_norm": 19.0, |
|
"learning_rate": 7.936507936507937e-05, |
|
"loss": 11.8511, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.4365079365079365, |
|
"grad_norm": 16.625, |
|
"learning_rate": 8.730158730158731e-05, |
|
"loss": 5.6454, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.47619047619047616, |
|
"grad_norm": 2.203125, |
|
"learning_rate": 9.523809523809524e-05, |
|
"loss": 1.7925, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.5158730158730159, |
|
"grad_norm": 2.265625, |
|
"learning_rate": 0.00010317460317460319, |
|
"loss": 1.4147, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.5555555555555556, |
|
"grad_norm": 0.74609375, |
|
"learning_rate": 0.00011111111111111112, |
|
"loss": 1.2306, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.5952380952380952, |
|
"grad_norm": 2.046875, |
|
"learning_rate": 0.00011904761904761905, |
|
"loss": 1.1237, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.6349206349206349, |
|
"grad_norm": 2.0, |
|
"learning_rate": 0.00012698412698412698, |
|
"loss": 1.0467, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.6746031746031746, |
|
"grad_norm": 7.90625, |
|
"learning_rate": 0.00013492063492063494, |
|
"loss": 0.9706, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.7142857142857143, |
|
"grad_norm": 1.5078125, |
|
"learning_rate": 0.00014285714285714287, |
|
"loss": 0.944, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.753968253968254, |
|
"grad_norm": 2.765625, |
|
"learning_rate": 0.0001507936507936508, |
|
"loss": 0.9101, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.7936507936507936, |
|
"grad_norm": 2.140625, |
|
"learning_rate": 0.00015873015873015873, |
|
"loss": 0.8892, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.8333333333333334, |
|
"grad_norm": 1.640625, |
|
"learning_rate": 0.0001666666666666667, |
|
"loss": 0.8475, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.873015873015873, |
|
"grad_norm": 27.125, |
|
"learning_rate": 0.00017460317460317462, |
|
"loss": 0.8386, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.9126984126984127, |
|
"grad_norm": 1.8828125, |
|
"learning_rate": 0.00018253968253968255, |
|
"loss": 0.8187, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"grad_norm": 1.6015625, |
|
"learning_rate": 0.00019047619047619048, |
|
"loss": 0.7724, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.9920634920634921, |
|
"grad_norm": 1.6640625, |
|
"learning_rate": 0.00019841269841269844, |
|
"loss": 0.7499, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 1.2579786777496338, |
|
"eval_runtime": 1.0146, |
|
"eval_samples_per_second": 1.971, |
|
"eval_steps_per_second": 0.986, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 1.0317460317460316, |
|
"grad_norm": 0.453125, |
|
"learning_rate": 0.00019999386012995552, |
|
"loss": 0.7176, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.0714285714285714, |
|
"grad_norm": 0.5078125, |
|
"learning_rate": 0.00019996891820008164, |
|
"loss": 0.7074, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 1.1111111111111112, |
|
"grad_norm": 0.609375, |
|
"learning_rate": 0.00019992479525042303, |
|
"loss": 0.6984, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.1507936507936507, |
|
"grad_norm": 7.375, |
|
"learning_rate": 0.0001998614997468427, |
|
"loss": 0.6891, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 1.1904761904761905, |
|
"grad_norm": 0.90625, |
|
"learning_rate": 0.0001997790438338385, |
|
"loss": 0.6864, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.2301587301587302, |
|
"grad_norm": 0.306640625, |
|
"learning_rate": 0.00019967744333221278, |
|
"loss": 0.6766, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.2698412698412698, |
|
"grad_norm": 8.1875, |
|
"learning_rate": 0.00019955671773603696, |
|
"loss": 0.6776, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.3095238095238095, |
|
"grad_norm": 2.203125, |
|
"learning_rate": 0.0001994168902089112, |
|
"loss": 0.6565, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.3492063492063493, |
|
"grad_norm": 0.80859375, |
|
"learning_rate": 0.00019925798757952, |
|
"loss": 0.6495, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.3888888888888888, |
|
"grad_norm": 1.90625, |
|
"learning_rate": 0.00019908004033648453, |
|
"loss": 0.6625, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.4285714285714286, |
|
"grad_norm": 1.0234375, |
|
"learning_rate": 0.00019888308262251285, |
|
"loss": 0.6444, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.4682539682539684, |
|
"grad_norm": 0.9609375, |
|
"learning_rate": 0.00019866715222784895, |
|
"loss": 0.635, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.507936507936508, |
|
"grad_norm": 1.953125, |
|
"learning_rate": 0.0001984322905830219, |
|
"loss": 0.6417, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.5476190476190477, |
|
"grad_norm": 1.0859375, |
|
"learning_rate": 0.0001981785427508966, |
|
"loss": 0.6381, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.5873015873015874, |
|
"grad_norm": 0.69140625, |
|
"learning_rate": 0.00019790595741802757, |
|
"loss": 0.6256, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.626984126984127, |
|
"grad_norm": 0.30078125, |
|
"learning_rate": 0.00019761458688531756, |
|
"loss": 0.6247, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.6666666666666665, |
|
"grad_norm": 0.921875, |
|
"learning_rate": 0.00019730448705798239, |
|
"loss": 0.6244, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.7063492063492065, |
|
"grad_norm": 0.6328125, |
|
"learning_rate": 0.0001969757174348246, |
|
"loss": 0.6094, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.746031746031746, |
|
"grad_norm": 0.76953125, |
|
"learning_rate": 0.0001966283410968174, |
|
"loss": 0.6156, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.7857142857142856, |
|
"grad_norm": 1.1484375, |
|
"learning_rate": 0.0001962624246950012, |
|
"loss": 0.6037, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.8253968253968254, |
|
"grad_norm": 0.984375, |
|
"learning_rate": 0.0001958780384376955, |
|
"loss": 0.6068, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.8650793650793651, |
|
"grad_norm": 0.640625, |
|
"learning_rate": 0.00019547525607702774, |
|
"loss": 0.5994, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.9047619047619047, |
|
"grad_norm": 1.8203125, |
|
"learning_rate": 0.0001950541548947829, |
|
"loss": 0.6115, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.9444444444444444, |
|
"grad_norm": 0.30078125, |
|
"learning_rate": 0.00019461481568757506, |
|
"loss": 0.598, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.9841269841269842, |
|
"grad_norm": 0.396484375, |
|
"learning_rate": 0.00019415732275134513, |
|
"loss": 0.6058, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": 1.1687145233154297, |
|
"eval_runtime": 1.0157, |
|
"eval_samples_per_second": 1.969, |
|
"eval_steps_per_second": 0.985, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 2.0238095238095237, |
|
"grad_norm": 0.458984375, |
|
"learning_rate": 0.0001936817638651871, |
|
"loss": 0.5677, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 2.0634920634920633, |
|
"grad_norm": 0.5078125, |
|
"learning_rate": 0.0001931882302745057, |
|
"loss": 0.5648, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.1031746031746033, |
|
"grad_norm": 0.66796875, |
|
"learning_rate": 0.00019267681667350928, |
|
"loss": 0.5502, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 2.142857142857143, |
|
"grad_norm": 0.48828125, |
|
"learning_rate": 0.00019214762118704076, |
|
"loss": 0.5573, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.1825396825396823, |
|
"grad_norm": 0.26953125, |
|
"learning_rate": 0.00019160074535175058, |
|
"loss": 0.5622, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 2.2222222222222223, |
|
"grad_norm": 0.291015625, |
|
"learning_rate": 0.0001910362940966147, |
|
"loss": 0.5586, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.261904761904762, |
|
"grad_norm": 0.3984375, |
|
"learning_rate": 0.00019045437572280194, |
|
"loss": 0.5545, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 2.3015873015873014, |
|
"grad_norm": 0.34765625, |
|
"learning_rate": 0.0001898551018828944, |
|
"loss": 0.5489, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.3412698412698414, |
|
"grad_norm": 0.275390625, |
|
"learning_rate": 0.0001892385875594645, |
|
"loss": 0.5577, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 2.380952380952381, |
|
"grad_norm": 0.2578125, |
|
"learning_rate": 0.00018860495104301345, |
|
"loss": 0.5462, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.4206349206349205, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 0.0001879543139092747, |
|
"loss": 0.557, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.4603174603174605, |
|
"grad_norm": 0.44921875, |
|
"learning_rate": 0.00018728680099588748, |
|
"loss": 0.5531, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"grad_norm": 0.27734375, |
|
"learning_rate": 0.00018660254037844388, |
|
"loss": 0.5597, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 2.5396825396825395, |
|
"grad_norm": 0.71484375, |
|
"learning_rate": 0.00018590166334591531, |
|
"loss": 0.5578, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 2.5793650793650795, |
|
"grad_norm": 0.375, |
|
"learning_rate": 0.000185184304375462, |
|
"loss": 0.5546, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 2.619047619047619, |
|
"grad_norm": 0.2109375, |
|
"learning_rate": 0.0001844506011066308, |
|
"loss": 0.5532, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.6587301587301586, |
|
"grad_norm": 0.349609375, |
|
"learning_rate": 0.00018370069431494646, |
|
"loss": 0.5509, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 2.6984126984126986, |
|
"grad_norm": 0.27734375, |
|
"learning_rate": 0.00018293472788490095, |
|
"loss": 0.5479, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.738095238095238, |
|
"grad_norm": 0.279296875, |
|
"learning_rate": 0.00018215284878234642, |
|
"loss": 0.5505, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.7777777777777777, |
|
"grad_norm": 0.28125, |
|
"learning_rate": 0.00018135520702629675, |
|
"loss": 0.5466, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.817460317460317, |
|
"grad_norm": 0.373046875, |
|
"learning_rate": 0.0001805419556601437, |
|
"loss": 0.5548, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 2.857142857142857, |
|
"grad_norm": 0.25, |
|
"learning_rate": 0.00017971325072229226, |
|
"loss": 0.5521, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.8968253968253967, |
|
"grad_norm": 0.318359375, |
|
"learning_rate": 0.0001788692512162216, |
|
"loss": 0.5413, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 2.9365079365079367, |
|
"grad_norm": 0.390625, |
|
"learning_rate": 0.00017801011907997725, |
|
"loss": 0.5546, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.9761904761904763, |
|
"grad_norm": 0.26171875, |
|
"learning_rate": 0.0001771360191551, |
|
"loss": 0.5571, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_loss": 1.1491789817810059, |
|
"eval_runtime": 1.0142, |
|
"eval_samples_per_second": 1.972, |
|
"eval_steps_per_second": 0.986, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 3.015873015873016, |
|
"grad_norm": 0.30859375, |
|
"learning_rate": 0.00017624711915499764, |
|
"loss": 0.5262, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.0555555555555554, |
|
"grad_norm": 0.2734375, |
|
"learning_rate": 0.00017534358963276607, |
|
"loss": 0.5035, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 3.0952380952380953, |
|
"grad_norm": 0.203125, |
|
"learning_rate": 0.00017442560394846516, |
|
"loss": 0.5017, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.134920634920635, |
|
"grad_norm": 0.21484375, |
|
"learning_rate": 0.00017349333823585617, |
|
"loss": 0.5052, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 3.1746031746031744, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 0.00017254697136860703, |
|
"loss": 0.5056, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 3.2142857142857144, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.00017158668492597186, |
|
"loss": 0.5199, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 3.253968253968254, |
|
"grad_norm": 0.2216796875, |
|
"learning_rate": 0.00017061266315795146, |
|
"loss": 0.5038, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 3.2936507936507935, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.0001696250929499412, |
|
"loss": 0.501, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 3.3333333333333335, |
|
"grad_norm": 0.79296875, |
|
"learning_rate": 0.0001686241637868734, |
|
"loss": 0.5044, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 3.373015873015873, |
|
"grad_norm": 0.4296875, |
|
"learning_rate": 0.0001676100677168608, |
|
"loss": 0.4998, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 3.4126984126984126, |
|
"grad_norm": 0.265625, |
|
"learning_rate": 0.00016658299931434858, |
|
"loss": 0.5172, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 3.4523809523809526, |
|
"grad_norm": 0.212890625, |
|
"learning_rate": 0.000165543155642781, |
|
"loss": 0.5082, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 3.492063492063492, |
|
"grad_norm": 0.22265625, |
|
"learning_rate": 0.00016449073621679127, |
|
"loss": 0.5016, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 3.5317460317460316, |
|
"grad_norm": 0.1865234375, |
|
"learning_rate": 0.0001634259429639203, |
|
"loss": 0.5109, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 3.571428571428571, |
|
"grad_norm": 0.197265625, |
|
"learning_rate": 0.00016234898018587337, |
|
"loss": 0.5178, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 3.611111111111111, |
|
"grad_norm": 0.267578125, |
|
"learning_rate": 0.0001612600545193203, |
|
"loss": 0.5163, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 3.6507936507936507, |
|
"grad_norm": 0.29296875, |
|
"learning_rate": 0.00016015937489624848, |
|
"loss": 0.5078, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 3.6904761904761907, |
|
"grad_norm": 0.25, |
|
"learning_rate": 0.00015904715250387498, |
|
"loss": 0.508, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 3.7301587301587302, |
|
"grad_norm": 0.2890625, |
|
"learning_rate": 0.00015792360074412613, |
|
"loss": 0.5055, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 3.7698412698412698, |
|
"grad_norm": 0.19140625, |
|
"learning_rate": 0.00015678893519269197, |
|
"loss": 0.5083, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 3.8095238095238093, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 0.00015564337355766412, |
|
"loss": 0.5121, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 3.8492063492063493, |
|
"grad_norm": 0.2109375, |
|
"learning_rate": 0.00015448713563776374, |
|
"loss": 0.4984, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 3.888888888888889, |
|
"grad_norm": 0.412109375, |
|
"learning_rate": 0.00015332044328016914, |
|
"loss": 0.5044, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 3.928571428571429, |
|
"grad_norm": 0.44140625, |
|
"learning_rate": 0.0001521435203379498, |
|
"loss": 0.5127, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 3.9682539682539684, |
|
"grad_norm": 0.54296875, |
|
"learning_rate": 0.0001509565926271159, |
|
"loss": 0.5118, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 1.155090570449829, |
|
"eval_runtime": 1.0165, |
|
"eval_samples_per_second": 1.968, |
|
"eval_steps_per_second": 0.984, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 4.007936507936508, |
|
"grad_norm": 0.97265625, |
|
"learning_rate": 0.00014975988788329064, |
|
"loss": 0.4977, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 4.0476190476190474, |
|
"grad_norm": 0.515625, |
|
"learning_rate": 0.00014855363571801523, |
|
"loss": 0.4642, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 4.087301587301587, |
|
"grad_norm": 0.251953125, |
|
"learning_rate": 0.00014733806757469286, |
|
"loss": 0.457, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 4.1269841269841265, |
|
"grad_norm": 0.51953125, |
|
"learning_rate": 0.000146113416684182, |
|
"loss": 0.4674, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 4.166666666666667, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 0.00014487991802004623, |
|
"loss": 0.4608, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 4.2063492063492065, |
|
"grad_norm": 0.45703125, |
|
"learning_rate": 0.00014363780825347005, |
|
"loss": 0.4601, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 4.246031746031746, |
|
"grad_norm": 0.408203125, |
|
"learning_rate": 0.00014238732570784866, |
|
"loss": 0.4656, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 4.285714285714286, |
|
"grad_norm": 0.263671875, |
|
"learning_rate": 0.00014112871031306119, |
|
"loss": 0.4661, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 4.325396825396825, |
|
"grad_norm": 0.42578125, |
|
"learning_rate": 0.00013986220355943494, |
|
"loss": 0.4652, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 4.365079365079365, |
|
"grad_norm": 0.5, |
|
"learning_rate": 0.00013858804845141116, |
|
"loss": 0.4667, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 4.404761904761905, |
|
"grad_norm": 0.205078125, |
|
"learning_rate": 0.0001373064894609194, |
|
"loss": 0.469, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 4.444444444444445, |
|
"grad_norm": 0.59375, |
|
"learning_rate": 0.00013601777248047105, |
|
"loss": 0.4654, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 4.484126984126984, |
|
"grad_norm": 0.298828125, |
|
"learning_rate": 0.00013472214477597977, |
|
"loss": 0.4662, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 4.523809523809524, |
|
"grad_norm": 0.28515625, |
|
"learning_rate": 0.00013341985493931877, |
|
"loss": 0.4669, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 4.563492063492063, |
|
"grad_norm": 0.3125, |
|
"learning_rate": 0.00013211115284062335, |
|
"loss": 0.465, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 4.603174603174603, |
|
"grad_norm": 0.30859375, |
|
"learning_rate": 0.00013079628958034855, |
|
"loss": 0.4696, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 4.642857142857143, |
|
"grad_norm": 0.38671875, |
|
"learning_rate": 0.00012947551744109043, |
|
"loss": 0.4837, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 4.682539682539683, |
|
"grad_norm": 0.75390625, |
|
"learning_rate": 0.00012814908983918073, |
|
"loss": 0.4752, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 4.722222222222222, |
|
"grad_norm": 0.59375, |
|
"learning_rate": 0.00012681726127606376, |
|
"loss": 0.4678, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 4.761904761904762, |
|
"grad_norm": 0.306640625, |
|
"learning_rate": 0.0001254802872894655, |
|
"loss": 0.4753, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 4.801587301587301, |
|
"grad_norm": 0.333984375, |
|
"learning_rate": 0.00012413842440436333, |
|
"loss": 0.473, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 4.841269841269841, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 0.000122791930083767, |
|
"loss": 0.4646, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 4.880952380952381, |
|
"grad_norm": 0.224609375, |
|
"learning_rate": 0.00012144106267931876, |
|
"loss": 0.4715, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 4.920634920634921, |
|
"grad_norm": 0.2734375, |
|
"learning_rate": 0.00012008608138172393, |
|
"loss": 0.4704, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 4.9603174603174605, |
|
"grad_norm": 0.31640625, |
|
"learning_rate": 0.00011872724617101969, |
|
"loss": 0.4657, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"grad_norm": 0.224609375, |
|
"learning_rate": 0.00011736481776669306, |
|
"loss": 0.4711, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 1.1766771078109741, |
|
"eval_runtime": 1.015, |
|
"eval_samples_per_second": 1.971, |
|
"eval_steps_per_second": 0.985, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.0396825396825395, |
|
"grad_norm": 0.2890625, |
|
"learning_rate": 0.0001159990575776563, |
|
"loss": 0.4228, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 5.079365079365079, |
|
"grad_norm": 0.3125, |
|
"learning_rate": 0.00011463022765209088, |
|
"loss": 0.4163, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 5.119047619047619, |
|
"grad_norm": 0.353515625, |
|
"learning_rate": 0.00011325859062716795, |
|
"loss": 0.4213, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 5.158730158730159, |
|
"grad_norm": 0.365234375, |
|
"learning_rate": 0.00011188440967865641, |
|
"loss": 0.4319, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 5.198412698412699, |
|
"grad_norm": 0.3203125, |
|
"learning_rate": 0.00011050794847042731, |
|
"loss": 0.4258, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 5.238095238095238, |
|
"grad_norm": 0.26953125, |
|
"learning_rate": 0.00010912947110386484, |
|
"loss": 0.4237, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 5.277777777777778, |
|
"grad_norm": 0.5078125, |
|
"learning_rate": 0.0001077492420671931, |
|
"loss": 0.4238, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 5.317460317460317, |
|
"grad_norm": 0.22265625, |
|
"learning_rate": 0.00010636752618472887, |
|
"loss": 0.4225, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 5.357142857142857, |
|
"grad_norm": 0.2451171875, |
|
"learning_rate": 0.00010498458856606972, |
|
"loss": 0.4214, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 5.396825396825397, |
|
"grad_norm": 0.283203125, |
|
"learning_rate": 0.00010360069455522765, |
|
"loss": 0.425, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 5.436507936507937, |
|
"grad_norm": 0.43359375, |
|
"learning_rate": 0.00010221610967971735, |
|
"loss": 0.4281, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 5.476190476190476, |
|
"grad_norm": 0.275390625, |
|
"learning_rate": 0.00010083109959960973, |
|
"loss": 0.427, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 5.515873015873016, |
|
"grad_norm": 0.2099609375, |
|
"learning_rate": 9.944593005655947e-05, |
|
"loss": 0.4299, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 5.555555555555555, |
|
"grad_norm": 0.216796875, |
|
"learning_rate": 9.806086682281758e-05, |
|
"loss": 0.4413, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 5.595238095238095, |
|
"grad_norm": 0.2158203125, |
|
"learning_rate": 9.667617565023735e-05, |
|
"loss": 0.4352, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 5.634920634920634, |
|
"grad_norm": 0.2578125, |
|
"learning_rate": 9.529212221928483e-05, |
|
"loss": 0.4337, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 5.674603174603175, |
|
"grad_norm": 0.2294921875, |
|
"learning_rate": 9.390897208806266e-05, |
|
"loss": 0.4242, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 5.714285714285714, |
|
"grad_norm": 0.365234375, |
|
"learning_rate": 9.252699064135758e-05, |
|
"loss": 0.4286, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 5.753968253968254, |
|
"grad_norm": 0.5078125, |
|
"learning_rate": 9.114644303972096e-05, |
|
"loss": 0.4349, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 5.7936507936507935, |
|
"grad_norm": 0.318359375, |
|
"learning_rate": 8.976759416859256e-05, |
|
"loss": 0.4311, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 5.833333333333333, |
|
"grad_norm": 0.2470703125, |
|
"learning_rate": 8.839070858747697e-05, |
|
"loss": 0.4257, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 5.8730158730158735, |
|
"grad_norm": 0.236328125, |
|
"learning_rate": 8.701605047918276e-05, |
|
"loss": 0.4332, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 5.912698412698413, |
|
"grad_norm": 0.2255859375, |
|
"learning_rate": 8.564388359913356e-05, |
|
"loss": 0.4309, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 5.9523809523809526, |
|
"grad_norm": 0.2138671875, |
|
"learning_rate": 8.427447122476148e-05, |
|
"loss": 0.4311, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 5.992063492063492, |
|
"grad_norm": 0.244140625, |
|
"learning_rate": 8.290807610499206e-05, |
|
"loss": 0.4287, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_loss": 1.1948192119598389, |
|
"eval_runtime": 1.0151, |
|
"eval_samples_per_second": 1.97, |
|
"eval_steps_per_second": 0.985, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 6.031746031746032, |
|
"grad_norm": 0.265625, |
|
"learning_rate": 8.154496040983073e-05, |
|
"loss": 0.3917, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 6.071428571428571, |
|
"grad_norm": 0.322265625, |
|
"learning_rate": 8.018538568006027e-05, |
|
"loss": 0.3851, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 6.111111111111111, |
|
"grad_norm": 0.267578125, |
|
"learning_rate": 7.882961277705895e-05, |
|
"loss": 0.3846, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 6.150793650793651, |
|
"grad_norm": 0.392578125, |
|
"learning_rate": 7.747790183274922e-05, |
|
"loss": 0.3897, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 6.190476190476191, |
|
"grad_norm": 0.25, |
|
"learning_rate": 7.613051219968623e-05, |
|
"loss": 0.3894, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 6.23015873015873, |
|
"grad_norm": 0.30859375, |
|
"learning_rate": 7.478770240129579e-05, |
|
"loss": 0.386, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 6.26984126984127, |
|
"grad_norm": 0.29296875, |
|
"learning_rate": 7.344973008227161e-05, |
|
"loss": 0.383, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 6.309523809523809, |
|
"grad_norm": 0.265625, |
|
"learning_rate": 7.211685195914097e-05, |
|
"loss": 0.3867, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 6.349206349206349, |
|
"grad_norm": 0.263671875, |
|
"learning_rate": 7.078932377100877e-05, |
|
"loss": 0.393, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 6.388888888888889, |
|
"grad_norm": 0.345703125, |
|
"learning_rate": 6.94674002304887e-05, |
|
"loss": 0.3856, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 6.428571428571429, |
|
"grad_norm": 0.2255859375, |
|
"learning_rate": 6.815133497483157e-05, |
|
"loss": 0.397, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 6.468253968253968, |
|
"grad_norm": 0.302734375, |
|
"learning_rate": 6.684138051726012e-05, |
|
"loss": 0.3879, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 6.507936507936508, |
|
"grad_norm": 0.248046875, |
|
"learning_rate": 6.553778819851926e-05, |
|
"loss": 0.3852, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 6.5476190476190474, |
|
"grad_norm": 0.24609375, |
|
"learning_rate": 6.424080813865138e-05, |
|
"loss": 0.3956, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 6.587301587301587, |
|
"grad_norm": 0.4296875, |
|
"learning_rate": 6.295068918900586e-05, |
|
"loss": 0.394, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 6.6269841269841265, |
|
"grad_norm": 0.2392578125, |
|
"learning_rate": 6.16676788844919e-05, |
|
"loss": 0.3904, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 6.666666666666667, |
|
"grad_norm": 0.33984375, |
|
"learning_rate": 6.039202339608432e-05, |
|
"loss": 0.395, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 6.7063492063492065, |
|
"grad_norm": 0.326171875, |
|
"learning_rate": 5.912396748359046e-05, |
|
"loss": 0.3892, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 6.746031746031746, |
|
"grad_norm": 0.337890625, |
|
"learning_rate": 5.786375444868828e-05, |
|
"loss": 0.3945, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 6.785714285714286, |
|
"grad_norm": 0.443359375, |
|
"learning_rate": 5.6611626088244194e-05, |
|
"loss": 0.3948, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 6.825396825396825, |
|
"grad_norm": 0.314453125, |
|
"learning_rate": 5.5367822647919424e-05, |
|
"loss": 0.3953, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 6.865079365079366, |
|
"grad_norm": 0.283203125, |
|
"learning_rate": 5.4132582776074126e-05, |
|
"loss": 0.3983, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 6.904761904761905, |
|
"grad_norm": 0.236328125, |
|
"learning_rate": 5.290614347797802e-05, |
|
"loss": 0.3862, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 6.944444444444445, |
|
"grad_norm": 0.2431640625, |
|
"learning_rate": 5.168874007033615e-05, |
|
"loss": 0.39, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 6.984126984126984, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 5.048060613613888e-05, |
|
"loss": 0.3943, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_loss": 1.2383077144622803, |
|
"eval_runtime": 1.0141, |
|
"eval_samples_per_second": 1.972, |
|
"eval_steps_per_second": 0.986, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 7.023809523809524, |
|
"grad_norm": 0.2216796875, |
|
"learning_rate": 4.92819734798441e-05, |
|
"loss": 0.3718, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 7.063492063492063, |
|
"grad_norm": 0.41015625, |
|
"learning_rate": 4.809307208290114e-05, |
|
"loss": 0.3505, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 7.103174603174603, |
|
"grad_norm": 0.236328125, |
|
"learning_rate": 4.691413005962415e-05, |
|
"loss": 0.3559, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 7.142857142857143, |
|
"grad_norm": 0.248046875, |
|
"learning_rate": 4.574537361342407e-05, |
|
"loss": 0.3581, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 7.182539682539683, |
|
"grad_norm": 0.224609375, |
|
"learning_rate": 4.458702699340667e-05, |
|
"loss": 0.3601, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 7.222222222222222, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 4.343931245134616e-05, |
|
"loss": 0.3587, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 7.261904761904762, |
|
"grad_norm": 0.240234375, |
|
"learning_rate": 4.23024501990417e-05, |
|
"loss": 0.3589, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 7.301587301587301, |
|
"grad_norm": 0.26171875, |
|
"learning_rate": 4.117665836606549e-05, |
|
"loss": 0.3595, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 7.341269841269841, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 4.00621529579101e-05, |
|
"loss": 0.359, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 7.380952380952381, |
|
"grad_norm": 0.24609375, |
|
"learning_rate": 3.89591478145437e-05, |
|
"loss": 0.3637, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 7.420634920634921, |
|
"grad_norm": 0.26953125, |
|
"learning_rate": 3.786785456938049e-05, |
|
"loss": 0.3667, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 7.4603174603174605, |
|
"grad_norm": 0.26171875, |
|
"learning_rate": 3.6788482608674826e-05, |
|
"loss": 0.3566, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"grad_norm": 0.248046875, |
|
"learning_rate": 3.5721239031346066e-05, |
|
"loss": 0.3538, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 7.5396825396825395, |
|
"grad_norm": 0.2392578125, |
|
"learning_rate": 3.4666328609242725e-05, |
|
"loss": 0.3564, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 7.579365079365079, |
|
"grad_norm": 0.2421875, |
|
"learning_rate": 3.362395374785283e-05, |
|
"loss": 0.3588, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 7.619047619047619, |
|
"grad_norm": 0.25390625, |
|
"learning_rate": 3.259431444746846e-05, |
|
"loss": 0.3617, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 7.658730158730159, |
|
"grad_norm": 0.2470703125, |
|
"learning_rate": 3.157760826481174e-05, |
|
"loss": 0.3616, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 7.698412698412699, |
|
"grad_norm": 0.25, |
|
"learning_rate": 3.057403027512963e-05, |
|
"loss": 0.3531, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 7.738095238095238, |
|
"grad_norm": 0.287109375, |
|
"learning_rate": 2.9583773034764826e-05, |
|
"loss": 0.3547, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 7.777777777777778, |
|
"grad_norm": 0.2421875, |
|
"learning_rate": 2.8607026544210114e-05, |
|
"loss": 0.3609, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 7.817460317460317, |
|
"grad_norm": 0.2392578125, |
|
"learning_rate": 2.764397821165292e-05, |
|
"loss": 0.3588, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 7.857142857142857, |
|
"grad_norm": 0.271484375, |
|
"learning_rate": 2.669481281701739e-05, |
|
"loss": 0.3648, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 7.896825396825397, |
|
"grad_norm": 0.251953125, |
|
"learning_rate": 2.5759712476510622e-05, |
|
"loss": 0.3635, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 7.936507936507937, |
|
"grad_norm": 0.2412109375, |
|
"learning_rate": 2.4838856607680183e-05, |
|
"loss": 0.3568, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 7.976190476190476, |
|
"grad_norm": 0.25390625, |
|
"learning_rate": 2.3932421894989167e-05, |
|
"loss": 0.3612, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 1.2904332876205444, |
|
"eval_runtime": 1.0153, |
|
"eval_samples_per_second": 1.97, |
|
"eval_steps_per_second": 0.985, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 8.015873015873016, |
|
"grad_norm": 0.26171875, |
|
"learning_rate": 2.304058225591581e-05, |
|
"loss": 0.354, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 8.055555555555555, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 2.2163508807583998e-05, |
|
"loss": 0.3464, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 8.095238095238095, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 2.1301369833931117e-05, |
|
"loss": 0.3411, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 8.134920634920634, |
|
"grad_norm": 0.259765625, |
|
"learning_rate": 2.045433075341927e-05, |
|
"loss": 0.3369, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 8.174603174603174, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 1.962255408729662e-05, |
|
"loss": 0.3369, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 8.214285714285714, |
|
"grad_norm": 0.2353515625, |
|
"learning_rate": 1.880619942841435e-05, |
|
"loss": 0.3391, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 8.253968253968253, |
|
"grad_norm": 0.2470703125, |
|
"learning_rate": 1.8005423410605772e-05, |
|
"loss": 0.3395, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 8.293650793650794, |
|
"grad_norm": 0.244140625, |
|
"learning_rate": 1.7220379678632814e-05, |
|
"loss": 0.3409, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 8.333333333333334, |
|
"grad_norm": 0.2412109375, |
|
"learning_rate": 1.6451218858706374e-05, |
|
"loss": 0.3483, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 8.373015873015873, |
|
"grad_norm": 0.2421875, |
|
"learning_rate": 1.5698088529585597e-05, |
|
"loss": 0.3459, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 8.412698412698413, |
|
"grad_norm": 0.2412109375, |
|
"learning_rate": 1.49611331942621e-05, |
|
"loss": 0.336, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 8.452380952380953, |
|
"grad_norm": 0.2578125, |
|
"learning_rate": 1.4240494252234049e-05, |
|
"loss": 0.3349, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 8.492063492063492, |
|
"grad_norm": 0.2412109375, |
|
"learning_rate": 1.3536309972375948e-05, |
|
"loss": 0.3463, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 8.531746031746032, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 1.2848715466408967e-05, |
|
"loss": 0.3372, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 8.571428571428571, |
|
"grad_norm": 0.2314453125, |
|
"learning_rate": 1.2177842662977135e-05, |
|
"loss": 0.3346, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 8.61111111111111, |
|
"grad_norm": 0.236328125, |
|
"learning_rate": 1.1523820282334219e-05, |
|
"loss": 0.3449, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 8.65079365079365, |
|
"grad_norm": 0.234375, |
|
"learning_rate": 1.088677381164609e-05, |
|
"loss": 0.3368, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 8.69047619047619, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 1.0266825480913611e-05, |
|
"loss": 0.3379, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 8.73015873015873, |
|
"grad_norm": 0.2421875, |
|
"learning_rate": 9.664094239520372e-06, |
|
"loss": 0.348, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 8.76984126984127, |
|
"grad_norm": 0.244140625, |
|
"learning_rate": 9.07869573340987e-06, |
|
"loss": 0.343, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 8.80952380952381, |
|
"grad_norm": 0.236328125, |
|
"learning_rate": 8.510742282896544e-06, |
|
"loss": 0.3396, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 8.84920634920635, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 7.960342861114921e-06, |
|
"loss": 0.3391, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 8.88888888888889, |
|
"grad_norm": 0.2333984375, |
|
"learning_rate": 7.427603073110967e-06, |
|
"loss": 0.3405, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 8.928571428571429, |
|
"grad_norm": 0.2314453125, |
|
"learning_rate": 6.9126251355795864e-06, |
|
"loss": 0.3378, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 8.968253968253968, |
|
"grad_norm": 0.263671875, |
|
"learning_rate": 6.415507857252389e-06, |
|
"loss": 0.3457, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_loss": 1.3253473043441772, |
|
"eval_runtime": 1.0138, |
|
"eval_samples_per_second": 1.973, |
|
"eval_steps_per_second": 0.986, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 9.007936507936508, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 5.936346619939271e-06, |
|
"loss": 0.3402, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 9.047619047619047, |
|
"grad_norm": 0.248046875, |
|
"learning_rate": 5.475233360227516e-06, |
|
"loss": 0.3362, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 9.087301587301587, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 5.03225655184194e-06, |
|
"loss": 0.3346, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 9.126984126984127, |
|
"grad_norm": 0.228515625, |
|
"learning_rate": 4.607501188669394e-06, |
|
"loss": 0.3358, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 9.166666666666666, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 4.20104876845111e-06, |
|
"loss": 0.333, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 9.206349206349206, |
|
"grad_norm": 0.2373046875, |
|
"learning_rate": 3.8129772771456797e-06, |
|
"loss": 0.3393, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 9.246031746031745, |
|
"grad_norm": 0.232421875, |
|
"learning_rate": 3.4433611739658645e-06, |
|
"loss": 0.3409, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 9.285714285714286, |
|
"grad_norm": 0.2373046875, |
|
"learning_rate": 3.092271377092215e-06, |
|
"loss": 0.34, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 9.325396825396826, |
|
"grad_norm": 0.2353515625, |
|
"learning_rate": 2.759775250065899e-06, |
|
"loss": 0.3362, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 9.365079365079366, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 2.4459365888638062e-06, |
|
"loss": 0.333, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 9.404761904761905, |
|
"grad_norm": 0.2314453125, |
|
"learning_rate": 2.150815609657875e-06, |
|
"loss": 0.3315, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 9.444444444444445, |
|
"grad_norm": 0.23046875, |
|
"learning_rate": 1.874468937261531e-06, |
|
"loss": 0.3371, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 9.484126984126984, |
|
"grad_norm": 0.24609375, |
|
"learning_rate": 1.6169495942650714e-06, |
|
"loss": 0.339, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 9.523809523809524, |
|
"grad_norm": 0.23046875, |
|
"learning_rate": 1.378306990862177e-06, |
|
"loss": 0.3413, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 9.563492063492063, |
|
"grad_norm": 0.2333984375, |
|
"learning_rate": 1.158586915369675e-06, |
|
"loss": 0.3313, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 9.603174603174603, |
|
"grad_norm": 0.2333984375, |
|
"learning_rate": 9.578315254420767e-07, |
|
"loss": 0.339, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 9.642857142857142, |
|
"grad_norm": 0.2392578125, |
|
"learning_rate": 7.760793399827937e-07, |
|
"loss": 0.3284, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 9.682539682539682, |
|
"grad_norm": 0.23828125, |
|
"learning_rate": 6.13365231753571e-07, |
|
"loss": 0.3359, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 9.722222222222221, |
|
"grad_norm": 0.228515625, |
|
"learning_rate": 4.6972042068341714e-07, |
|
"loss": 0.3394, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 9.761904761904763, |
|
"grad_norm": 0.2373046875, |
|
"learning_rate": 3.451724678784518e-07, |
|
"loss": 0.337, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 9.801587301587302, |
|
"grad_norm": 0.2421875, |
|
"learning_rate": 2.397452703337577e-07, |
|
"loss": 0.3344, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 9.841269841269842, |
|
"grad_norm": 0.234375, |
|
"learning_rate": 1.5345905634827074e-07, |
|
"loss": 0.3307, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 9.880952380952381, |
|
"grad_norm": 0.2373046875, |
|
"learning_rate": 8.633038164358454e-08, |
|
"loss": 0.3361, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 9.920634920634921, |
|
"grad_norm": 0.2392578125, |
|
"learning_rate": 3.8372126187413704e-08, |
|
"loss": 0.34, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 9.96031746031746, |
|
"grad_norm": 0.2314453125, |
|
"learning_rate": 9.593491722270642e-09, |
|
"loss": 0.3385, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"grad_norm": 0.2470703125, |
|
"learning_rate": 0.0, |
|
"loss": 0.3328, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_loss": 1.332617163658142, |
|
"eval_runtime": 1.0141, |
|
"eval_samples_per_second": 1.972, |
|
"eval_steps_per_second": 0.986, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"step": 1260, |
|
"total_flos": 3.8899222565240177e+18, |
|
"train_loss": 1.363766341266178, |
|
"train_runtime": 8098.2535, |
|
"train_samples_per_second": 4.97, |
|
"train_steps_per_second": 0.156 |
|
} |
|
  ],
  "logging_steps": 5,
  "max_steps": 1260,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "total_flos": 3.8899222565240177e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}