{
  "best_metric": 99.20864964343225,
  "best_model_checkpoint": "./output/small/cv9-special-batch8-lr3-small/checkpoint-3000",
  "epoch": 4.84027105517909,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.02, "learning_rate": 4.4e-05, "loss": 0.8653, "step": 25 },
    { "epoch": 0.05, "learning_rate": 9.400000000000001e-05, "loss": 0.6242, "step": 50 },
    { "epoch": 0.07, "learning_rate": 0.000144, "loss": 0.8546, "step": 75 },
    { "epoch": 0.1, "learning_rate": 0.000194, "loss": 1.1895, "step": 100 },
    { "epoch": 0.12, "learning_rate": 0.000244, "loss": 1.4699, "step": 125 },
    { "epoch": 0.15, "learning_rate": 0.000294, "loss": 1.6882, "step": 150 },
    { "epoch": 0.17, "learning_rate": 0.00034399999999999996, "loss": 2.0917, "step": 175 },
    { "epoch": 0.19, "learning_rate": 0.00039400000000000004, "loss": 2.4089, "step": 200 },
    { "epoch": 0.22, "learning_rate": 0.000444, "loss": 2.4894, "step": 225 },
    { "epoch": 0.24, "learning_rate": 0.000494, "loss": 2.7854, "step": 250 },
    { "epoch": 0.27, "learning_rate": 0.0005440000000000001, "loss": 3.2241, "step": 275 },
    { "epoch": 0.29, "learning_rate": 0.000594, "loss": 3.3251, "step": 300 },
    { "epoch": 0.31, "learning_rate": 0.000644, "loss": 3.3869, "step": 325 },
    { "epoch": 0.34, "learning_rate": 0.000694, "loss": 3.5124, "step": 350 },
    { "epoch": 0.36, "learning_rate": 0.000744, "loss": 4.0303, "step": 375 },
    { "epoch": 0.39, "learning_rate": 0.0007940000000000001, "loss": 4.1377, "step": 400 },
    { "epoch": 0.41, "learning_rate": 0.000844, "loss": 4.4847, "step": 425 },
    { "epoch": 0.44, "learning_rate": 0.000894, "loss": 4.5197, "step": 450 },
    { "epoch": 0.46, "learning_rate": 0.000942, "loss": 4.5887, "step": 475 },
    { "epoch": 0.48, "learning_rate": 0.00099, "loss": 4.7118, "step": 500 },
    { "epoch": 0.51, "learning_rate": 0.0009955555555555555, "loss": 4.6729, "step": 525 },
    { "epoch": 0.53, "learning_rate": 0.00099, "loss": 4.549, "step": 550 },
    { "epoch": 0.56, "learning_rate": 0.0009844444444444445, "loss": 4.5721, "step": 575 },
    { "epoch": 0.58, "learning_rate": 0.000978888888888889, "loss": 4.667, "step": 600 },
    { "epoch": 0.61, "learning_rate": 0.0009733333333333334, "loss": 4.616, "step": 625 },
    { "epoch": 0.63, "learning_rate": 0.0009677777777777778, "loss": 4.3229, "step": 650 },
    { "epoch": 0.65, "learning_rate": 0.0009622222222222222, "loss": 4.4065, "step": 675 },
    { "epoch": 0.68, "learning_rate": 0.0009566666666666666, "loss": 4.3209, "step": 700 },
    { "epoch": 0.7, "learning_rate": 0.0009511111111111111, "loss": 4.3818, "step": 725 },
    { "epoch": 0.73, "learning_rate": 0.0009455555555555556, "loss": 4.1451, "step": 750 },
    { "epoch": 0.75, "learning_rate": 0.00094, "loss": 4.2453, "step": 775 },
    { "epoch": 0.77, "learning_rate": 0.0009344444444444444, "loss": 4.1155, "step": 800 },
    { "epoch": 0.8, "learning_rate": 0.0009288888888888889, "loss": 4.2202, "step": 825 },
    { "epoch": 0.82, "learning_rate": 0.0009233333333333334, "loss": 4.2521, "step": 850 },
    { "epoch": 0.85, "learning_rate": 0.0009177777777777778, "loss": 4.0625, "step": 875 },
    { "epoch": 0.87, "learning_rate": 0.0009122222222222223, "loss": 4.1473, "step": 900 },
    { "epoch": 0.9, "learning_rate": 0.0009066666666666666, "loss": 4.2598, "step": 925 },
    { "epoch": 0.92, "learning_rate": 0.0009011111111111111, "loss": 4.188, "step": 950 },
    { "epoch": 0.94, "learning_rate": 0.0008955555555555556, "loss": 4.1178, "step": 975 },
    { "epoch": 0.97, "learning_rate": 0.0008900000000000001, "loss": 3.9757, "step": 1000 },
    { "epoch": 0.97, "eval_loss": 3.3364593982696533, "eval_runtime": 677.063, "eval_samples_per_second": 5.35, "eval_steps_per_second": 1.338, "eval_wer": 126.40901771336554, "step": 1000 },
    { "epoch": 0.99, "learning_rate": 0.0008844444444444445, "loss": 4.157, "step": 1025 },
    { "epoch": 1.02, "learning_rate": 0.000878888888888889, "loss": 3.8666, "step": 1050 },
    { "epoch": 1.04, "learning_rate": 0.0008733333333333333, "loss": 3.7358, "step": 1075 },
    { "epoch": 1.06, "learning_rate": 0.0008677777777777778, "loss": 3.7493, "step": 1100 },
    { "epoch": 1.09, "learning_rate": 0.0008622222222222222, "loss": 3.7249, "step": 1125 },
    { "epoch": 1.11, "learning_rate": 0.0008566666666666667, "loss": 3.6983, "step": 1150 },
    { "epoch": 1.14, "learning_rate": 0.0008511111111111112, "loss": 3.7131, "step": 1175 },
    { "epoch": 1.16, "learning_rate": 0.0008455555555555556, "loss": 3.7108, "step": 1200 },
    { "epoch": 1.19, "learning_rate": 0.00084, "loss": 3.6822, "step": 1225 },
    { "epoch": 1.21, "learning_rate": 0.0008344444444444444, "loss": 3.6606, "step": 1250 },
    { "epoch": 1.23, "learning_rate": 0.0008288888888888889, "loss": 3.7068, "step": 1275 },
    { "epoch": 1.26, "learning_rate": 0.0008233333333333334, "loss": 3.7024, "step": 1300 },
    { "epoch": 1.28, "learning_rate": 0.0008177777777777778, "loss": 3.7004, "step": 1325 },
    { "epoch": 1.31, "learning_rate": 0.0008122222222222222, "loss": 3.6022, "step": 1350 },
    { "epoch": 1.33, "learning_rate": 0.0008066666666666667, "loss": 3.6732, "step": 1375 },
    { "epoch": 1.36, "learning_rate": 0.0008011111111111112, "loss": 3.6493, "step": 1400 },
    { "epoch": 1.38, "learning_rate": 0.0007955555555555555, "loss": 3.6812, "step": 1425 },
    { "epoch": 1.4, "learning_rate": 0.00079, "loss": 3.6498, "step": 1450 },
    { "epoch": 1.43, "learning_rate": 0.0007844444444444445, "loss": 3.6443, "step": 1475 },
    { "epoch": 1.45, "learning_rate": 0.0007788888888888889, "loss": 3.6482, "step": 1500 },
    { "epoch": 1.48, "learning_rate": 0.0007733333333333333, "loss": 3.5286, "step": 1525 },
    { "epoch": 1.5, "learning_rate": 0.0007677777777777778, "loss": 3.6997, "step": 1550 },
    { "epoch": 1.52, "learning_rate": 0.0007622222222222223, "loss": 3.6399, "step": 1575 },
    { "epoch": 1.55, "learning_rate": 0.0007566666666666668, "loss": 3.6307, "step": 1600 },
    { "epoch": 1.57, "learning_rate": 0.000751111111111111, "loss": 3.657, "step": 1625 },
    { "epoch": 1.6, "learning_rate": 0.0007455555555555555, "loss": 3.624, "step": 1650 },
    { "epoch": 1.62, "learning_rate": 0.00074, "loss": 3.5541, "step": 1675 },
    { "epoch": 1.65, "learning_rate": 0.0007344444444444445, "loss": 3.5803, "step": 1700 },
    { "epoch": 1.67, "learning_rate": 0.0007288888888888889, "loss": 3.5941, "step": 1725 },
    { "epoch": 1.69, "learning_rate": 0.0007233333333333334, "loss": 3.5228, "step": 1750 },
    { "epoch": 1.72, "learning_rate": 0.0007177777777777778, "loss": 3.5326, "step": 1775 },
    { "epoch": 1.74, "learning_rate": 0.0007122222222222222, "loss": 3.5902, "step": 1800 },
    { "epoch": 1.77, "learning_rate": 0.0007066666666666666, "loss": 3.6143, "step": 1825 },
    { "epoch": 1.79, "learning_rate": 0.0007011111111111111, "loss": 3.4867, "step": 1850 },
    { "epoch": 1.82, "learning_rate": 0.0006955555555555556, "loss": 3.6418, "step": 1875 },
    { "epoch": 1.84, "learning_rate": 0.00069, "loss": 3.5212, "step": 1900 },
    { "epoch": 1.86, "learning_rate": 0.0006844444444444444, "loss": 3.4048, "step": 1925 },
    { "epoch": 1.89, "learning_rate": 0.0006788888888888889, "loss": 3.5468, "step": 1950 },
    { "epoch": 1.91, "learning_rate": 0.0006733333333333334, "loss": 3.4827, "step": 1975 },
    { "epoch": 1.94, "learning_rate": 0.0006677777777777778, "loss": 3.4153, "step": 2000 },
    { "epoch": 1.94, "eval_loss": 2.970080614089966, "eval_runtime": 599.8536, "eval_samples_per_second": 6.038, "eval_steps_per_second": 1.51, "eval_wer": 105.58546123763514, "step": 2000 },
    { "epoch": 1.96, "learning_rate": 0.0006622222222222222, "loss": 3.4134, "step": 2025 },
    { "epoch": 1.98, "learning_rate": 0.0006566666666666666, "loss": 3.5384, "step": 2050 },
    { "epoch": 2.01, "learning_rate": 0.0006511111111111111, "loss": 3.2915, "step": 2075 },
    { "epoch": 2.03, "learning_rate": 0.0006455555555555556, "loss": 3.0732, "step": 2100 },
    { "epoch": 2.06, "learning_rate": 0.00064, "loss": 3.1543, "step": 2125 },
    { "epoch": 2.08, "learning_rate": 0.0006344444444444445, "loss": 3.0643, "step": 2150 },
    { "epoch": 2.11, "learning_rate": 0.000628888888888889, "loss": 3.0654, "step": 2175 },
    { "epoch": 2.13, "learning_rate": 0.0006233333333333333, "loss": 3.1225, "step": 2200 },
    { "epoch": 2.15, "learning_rate": 0.0006177777777777777, "loss": 3.1434, "step": 2225 },
    { "epoch": 2.18, "learning_rate": 0.0006122222222222222, "loss": 3.1411, "step": 2250 },
    { "epoch": 2.2, "learning_rate": 0.0006066666666666667, "loss": 3.2095, "step": 2275 },
    { "epoch": 2.23, "learning_rate": 0.0006011111111111112, "loss": 3.2001, "step": 2300 },
    { "epoch": 2.25, "learning_rate": 0.0005955555555555556, "loss": 3.0931, "step": 2325 },
    { "epoch": 2.27, "learning_rate": 0.00059, "loss": 3.0743, "step": 2350 },
    { "epoch": 2.3, "learning_rate": 0.0005844444444444444, "loss": 3.0635, "step": 2375 },
    { "epoch": 2.32, "learning_rate": 0.0005788888888888889, "loss": 3.1503, "step": 2400 },
    { "epoch": 2.35, "learning_rate": 0.0005733333333333334, "loss": 3.2175, "step": 2425 },
    { "epoch": 2.37, "learning_rate": 0.0005677777777777778, "loss": 3.1774, "step": 2450 },
    { "epoch": 2.4, "learning_rate": 0.0005622222222222222, "loss": 3.1572, "step": 2475 },
    { "epoch": 2.42, "learning_rate": 0.0005566666666666667, "loss": 3.1266, "step": 2500 },
    { "epoch": 2.44, "learning_rate": 0.0005511111111111112, "loss": 3.1867, "step": 2525 },
    { "epoch": 2.47, "learning_rate": 0.0005455555555555555, "loss": 3.1558, "step": 2550 },
    { "epoch": 2.49, "learning_rate": 0.00054, "loss": 3.0291, "step": 2575 },
    { "epoch": 2.52, "learning_rate": 0.0005344444444444445, "loss": 3.1676, "step": 2600 },
    { "epoch": 2.54, "learning_rate": 0.0005288888888888889, "loss": 3.0545, "step": 2625 },
    { "epoch": 2.57, "learning_rate": 0.0005233333333333333, "loss": 3.1117, "step": 2650 },
    { "epoch": 2.59, "learning_rate": 0.0005177777777777778, "loss": 3.117, "step": 2675 },
    { "epoch": 2.61, "learning_rate": 0.0005122222222222223, "loss": 3.0775, "step": 2700 },
    { "epoch": 2.64, "learning_rate": 0.0005066666666666668, "loss": 3.1249, "step": 2725 },
    { "epoch": 2.66, "learning_rate": 0.000501111111111111, "loss": 3.0891, "step": 2750 },
    { "epoch": 2.69, "learning_rate": 0.0004955555555555556, "loss": 3.1325, "step": 2775 },
    { "epoch": 2.71, "learning_rate": 0.00049, "loss": 3.0401, "step": 2800 },
    { "epoch": 2.73, "learning_rate": 0.00048444444444444446, "loss": 3.0508, "step": 2825 },
    { "epoch": 2.76, "learning_rate": 0.0004788888888888889, "loss": 3.1069, "step": 2850 },
    { "epoch": 2.78, "learning_rate": 0.00047333333333333336, "loss": 3.0406, "step": 2875 },
    { "epoch": 2.81, "learning_rate": 0.0004677777777777778, "loss": 3.0954, "step": 2900 },
    { "epoch": 2.83, "learning_rate": 0.0004622222222222222, "loss": 3.098, "step": 2925 },
    { "epoch": 2.86, "learning_rate": 0.0004566666666666667, "loss": 3.1103, "step": 2950 },
    { "epoch": 2.88, "learning_rate": 0.0004511111111111111, "loss": 3.0801, "step": 2975 },
    { "epoch": 2.9, "learning_rate": 0.00044555555555555554, "loss": 2.9747, "step": 3000 },
    { "epoch": 2.9, "eval_loss": 2.802910089492798, "eval_runtime": 542.4216, "eval_samples_per_second": 6.677, "eval_steps_per_second": 1.67, "eval_wer": 99.20864964343225, "step": 3000 },
    { "epoch": 2.93, "learning_rate": 0.00044, "loss": 3.193, "step": 3025 },
    { "epoch": 2.95, "learning_rate": 0.0004344444444444445, "loss": 3.113, "step": 3050 },
    { "epoch": 2.98, "learning_rate": 0.00042888888888888886, "loss": 3.1098, "step": 3075 },
    { "epoch": 3.0, "learning_rate": 0.00042333333333333334, "loss": 2.9956, "step": 3100 },
    { "epoch": 3.03, "learning_rate": 0.0004177777777777778, "loss": 2.5881, "step": 3125 },
    { "epoch": 3.05, "learning_rate": 0.00041222222222222224, "loss": 2.5287, "step": 3150 },
    { "epoch": 3.07, "learning_rate": 0.00040666666666666667, "loss": 2.5626, "step": 3175 },
    { "epoch": 3.1, "learning_rate": 0.0004011111111111111, "loss": 2.6698, "step": 3200 },
    { "epoch": 3.12, "learning_rate": 0.00039555555555555557, "loss": 2.6592, "step": 3225 },
    { "epoch": 3.15, "learning_rate": 0.00039000000000000005, "loss": 2.6444, "step": 3250 },
    { "epoch": 3.17, "learning_rate": 0.0003844444444444444, "loss": 2.637, "step": 3275 },
    { "epoch": 3.19, "learning_rate": 0.0003788888888888889, "loss": 2.6242, "step": 3300 },
    { "epoch": 3.22, "learning_rate": 0.0003733333333333334, "loss": 2.6529, "step": 3325 },
    { "epoch": 3.24, "learning_rate": 0.00036777777777777774, "loss": 2.5876, "step": 3350 },
    { "epoch": 3.27, "learning_rate": 0.0003622222222222222, "loss": 2.7021, "step": 3375 },
    { "epoch": 3.29, "learning_rate": 0.0003566666666666667, "loss": 2.6121, "step": 3400 },
    { "epoch": 3.32, "learning_rate": 0.0003511111111111111, "loss": 2.6204, "step": 3425 },
    { "epoch": 3.34, "learning_rate": 0.00034555555555555555, "loss": 2.7417, "step": 3450 },
    { "epoch": 3.36, "learning_rate": 0.00034, "loss": 2.6468, "step": 3475 },
    { "epoch": 3.39, "learning_rate": 0.00033444444444444445, "loss": 2.6018, "step": 3500 },
    { "epoch": 3.41, "learning_rate": 0.0003288888888888889, "loss": 2.6171, "step": 3525 },
    { "epoch": 3.44, "learning_rate": 0.0003233333333333333, "loss": 2.7169, "step": 3550 },
    { "epoch": 3.46, "learning_rate": 0.0003177777777777778, "loss": 2.5877, "step": 3575 },
    { "epoch": 3.48, "learning_rate": 0.00031222222222222225, "loss": 2.6279, "step": 3600 },
    { "epoch": 3.51, "learning_rate": 0.0003066666666666667, "loss": 2.7309, "step": 3625 },
    { "epoch": 3.53, "learning_rate": 0.0003011111111111111, "loss": 2.6565, "step": 3650 },
    { "epoch": 3.56, "learning_rate": 0.0002955555555555556, "loss": 2.6611, "step": 3675 },
    { "epoch": 3.58, "learning_rate": 0.00029, "loss": 2.5951, "step": 3700 },
    { "epoch": 3.61, "learning_rate": 0.0002844444444444444, "loss": 2.6352, "step": 3725 },
    { "epoch": 3.63, "learning_rate": 0.0002788888888888889, "loss": 2.6061, "step": 3750 },
    { "epoch": 3.65, "learning_rate": 0.00027333333333333333, "loss": 2.5893, "step": 3775 },
    { "epoch": 3.68, "learning_rate": 0.0002677777777777778, "loss": 2.6666, "step": 3800 },
    { "epoch": 3.7, "learning_rate": 0.00026222222222222223, "loss": 2.6148, "step": 3825 },
    { "epoch": 3.73, "learning_rate": 0.00025666666666666665, "loss": 2.7157, "step": 3850 },
    { "epoch": 3.75, "learning_rate": 0.00025111111111111113, "loss": 2.5981, "step": 3875 },
    { "epoch": 3.78, "learning_rate": 0.00024555555555555556, "loss": 2.5674, "step": 3900 },
    { "epoch": 3.8, "learning_rate": 0.00024, "loss": 2.653, "step": 3925 },
    { "epoch": 3.82, "learning_rate": 0.00023444444444444446, "loss": 2.5634, "step": 3950 },
    { "epoch": 3.85, "learning_rate": 0.0002288888888888889, "loss": 2.6516, "step": 3975 },
    { "epoch": 3.87, "learning_rate": 0.00022333333333333333, "loss": 2.6552, "step": 4000 },
    { "epoch": 3.87, "eval_loss": 2.692873954772949, "eval_runtime": 624.562, "eval_samples_per_second": 5.799, "eval_steps_per_second": 1.451, "eval_wer": 102.48907292385555, "step": 4000 },
    { "epoch": 3.9, "learning_rate": 0.00021777777777777776, "loss": 2.6111, "step": 4025 },
    { "epoch": 3.92, "learning_rate": 0.00021222222222222223, "loss": 2.5593, "step": 4050 },
    { "epoch": 3.94, "learning_rate": 0.00020666666666666666, "loss": 2.5781, "step": 4075 },
    { "epoch": 3.97, "learning_rate": 0.0002011111111111111, "loss": 2.5623, "step": 4100 },
    { "epoch": 3.99, "learning_rate": 0.00019555555555555556, "loss": 2.5678, "step": 4125 },
    { "epoch": 4.02, "learning_rate": 0.00019, "loss": 2.1888, "step": 4150 },
    { "epoch": 4.04, "learning_rate": 0.00018444444444444443, "loss": 1.9472, "step": 4175 },
    { "epoch": 4.07, "learning_rate": 0.00017888888888888889, "loss": 2.0722, "step": 4200 },
    { "epoch": 4.09, "learning_rate": 0.00017333333333333334, "loss": 2.0618, "step": 4225 },
    { "epoch": 4.11, "learning_rate": 0.0001677777777777778, "loss": 2.0658, "step": 4250 },
    { "epoch": 4.14, "learning_rate": 0.0001622222222222222, "loss": 1.9975, "step": 4275 },
    { "epoch": 4.16, "learning_rate": 0.0001566666666666667, "loss": 2.0241, "step": 4300 },
    { "epoch": 4.19, "learning_rate": 0.0001511111111111111, "loss": 2.094, "step": 4325 },
    { "epoch": 4.21, "learning_rate": 0.00014555555555555556, "loss": 2.059, "step": 4350 },
    { "epoch": 4.24, "learning_rate": 0.00014000000000000001, "loss": 2.0825, "step": 4375 },
    { "epoch": 4.26, "learning_rate": 0.00013444444444444447, "loss": 2.0594, "step": 4400 },
    { "epoch": 4.28, "learning_rate": 0.0001288888888888889, "loss": 2.0409, "step": 4425 },
    { "epoch": 4.31, "learning_rate": 0.00012333333333333334, "loss": 2.015, "step": 4450 },
    { "epoch": 4.33, "learning_rate": 0.00011777777777777778, "loss": 2.0112, "step": 4475 },
    { "epoch": 4.36, "learning_rate": 0.00011222222222222223, "loss": 2.0068, "step": 4500 },
    { "epoch": 4.38, "learning_rate": 0.00010666666666666668, "loss": 2.0239, "step": 4525 },
    { "epoch": 4.4, "learning_rate": 0.00010111111111111112, "loss": 2.0149, "step": 4550 },
    { "epoch": 4.43, "learning_rate": 9.555555555555557e-05, "loss": 2.0444, "step": 4575 },
    { "epoch": 4.45, "learning_rate": 8.999999999999999e-05, "loss": 2.0065, "step": 4600 },
    { "epoch": 4.48, "learning_rate": 8.444444444444444e-05, "loss": 2.0313, "step": 4625 },
    { "epoch": 4.5, "learning_rate": 7.888888888888888e-05, "loss": 2.0404, "step": 4650 },
    { "epoch": 4.53, "learning_rate": 7.333333333333333e-05, "loss": 1.9673, "step": 4675 },
    { "epoch": 4.55, "learning_rate": 6.777777777777778e-05, "loss": 1.9915, "step": 4700 },
    { "epoch": 4.57, "learning_rate": 6.222222222222222e-05, "loss": 1.9981, "step": 4725 },
    { "epoch": 4.6, "learning_rate": 5.6666666666666664e-05, "loss": 1.9701, "step": 4750 },
    { "epoch": 4.62, "learning_rate": 5.1111111111111115e-05, "loss": 1.991, "step": 4775 },
    { "epoch": 4.65, "learning_rate": 4.555555555555556e-05, "loss": 1.9827, "step": 4800 },
    { "epoch": 4.67, "learning_rate": 4e-05, "loss": 1.926, "step": 4825 },
    { "epoch": 4.7, "learning_rate": 3.444444444444445e-05, "loss": 1.953, "step": 4850 },
    { "epoch": 4.72, "learning_rate": 2.8888888888888888e-05, "loss": 2.0105, "step": 4875 },
    { "epoch": 4.74, "learning_rate": 2.3333333333333336e-05, "loss": 1.9334, "step": 4900 },
    { "epoch": 4.77, "learning_rate": 1.7777777777777777e-05, "loss": 1.9869, "step": 4925 },
    { "epoch": 4.79, "learning_rate": 1.2222222222222222e-05, "loss": 1.9123, "step": 4950 },
    { "epoch": 4.82, "learning_rate": 6.6666666666666675e-06, "loss": 1.9805, "step": 4975 },
    { "epoch": 4.84, "learning_rate": 1.111111111111111e-06, "loss": 1.9795, "step": 5000 },
    { "epoch": 4.84, "eval_loss": 2.7743773460388184, "eval_runtime": 588.3952, "eval_samples_per_second": 6.156, "eval_steps_per_second": 1.54, "eval_wer": 104.82631700023003, "step": 5000 },
    { "epoch": 4.84, "step": 5000, "total_flos": 1.153995298799616e+19, "train_loss": 3.0206046129226682, "train_runtime": 7578.8548, "train_samples_per_second": 5.278, "train_steps_per_second": 0.66 }
  ],
  "max_steps": 5000,
  "num_train_epochs": 5,
  "total_flos": 1.153995298799616e+19,
  "trial_name": null,
  "trial_params": null
}
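
The state above follows the standard Hugging Face `Trainer` state format (`trainer_state.json`). As a minimal sketch, and not part of the original file, the `log_history` list can be split into train-loss and eval-WER records and plotted; the filename `trainer_state.json` and the matplotlib dependency are assumptions for illustration:

```python
# Minimal sketch (assumed filename and plotting library, not part of the file):
# parse trainer_state.json and plot the train-loss and eval-WER curves.
import json

import matplotlib.pyplot as plt

# Assumption: the JSON above is saved next to this script as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step train records carry a "loss" key; evaluation records carry "eval_wer".
# The final summary record (train_loss, train_runtime, ...) matches neither filter.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

fig, (ax_loss, ax_wer) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(*zip(*train))
ax_loss.set(xlabel="step", ylabel="train loss")
ax_wer.plot(*zip(*evals), marker="o")
ax_wer.set(xlabel="step", ylabel="eval WER (%)")
fig.tight_layout()
plt.show()
```

Note that `best_metric` (99.20864964343225) is the `eval_wer` of the step-3000 evaluation, which is why `best_model_checkpoint` points at `checkpoint-3000`.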