{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 98.2905982905983,
  "global_step": 23000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.14,
      "learning_rate": 4.8933760683760685e-05,
      "loss": 1.1035,
      "step": 500
    },
    {
      "epoch": 2.14,
      "eval_loss": 0.7537529468536377,
      "eval_runtime": 164.1555,
      "eval_samples_per_second": 1072.605,
      "eval_steps_per_second": 33.523,
      "step": 500
    },
    {
      "epoch": 4.27,
      "learning_rate": 4.7865384615384615e-05,
      "loss": 0.7267,
      "step": 1000
    },
    {
      "epoch": 4.27,
      "eval_loss": 0.6768979430198669,
      "eval_runtime": 180.9639,
      "eval_samples_per_second": 972.979,
      "eval_steps_per_second": 30.409,
      "step": 1000
    },
    {
      "epoch": 6.41,
      "learning_rate": 4.6797008547008546e-05,
      "loss": 0.6744,
      "step": 1500
    },
    {
      "epoch": 6.41,
      "eval_loss": 0.6488664746284485,
      "eval_runtime": 190.376,
      "eval_samples_per_second": 924.875,
      "eval_steps_per_second": 28.906,
      "step": 1500
    },
    {
      "epoch": 8.55,
      "learning_rate": 4.572863247863248e-05,
      "loss": 0.6491,
      "step": 2000
    },
    {
      "epoch": 8.55,
      "eval_loss": 0.6347554326057434,
      "eval_runtime": 166.0432,
      "eval_samples_per_second": 1060.411,
      "eval_steps_per_second": 33.142,
      "step": 2000
    },
    {
      "epoch": 10.68,
      "learning_rate": 4.466025641025641e-05,
      "loss": 0.6334,
      "step": 2500
    },
    {
      "epoch": 10.68,
      "eval_loss": 0.6249234080314636,
      "eval_runtime": 165.6169,
      "eval_samples_per_second": 1063.141,
      "eval_steps_per_second": 33.227,
      "step": 2500
    },
    {
      "epoch": 12.82,
      "learning_rate": 4.359188034188034e-05,
      "loss": 0.6219,
      "step": 3000
    },
    {
      "epoch": 12.82,
      "eval_loss": 0.6181825995445251,
      "eval_runtime": 167.5877,
      "eval_samples_per_second": 1050.638,
      "eval_steps_per_second": 32.837,
      "step": 3000
    },
    {
      "epoch": 14.96,
      "learning_rate": 4.252350427350428e-05,
      "loss": 0.6131,
      "step": 3500
    },
    {
      "epoch": 14.96,
      "eval_loss": 0.6141465902328491,
      "eval_runtime": 177.8868,
      "eval_samples_per_second": 989.809,
      "eval_steps_per_second": 30.935,
      "step": 3500
    },
    {
      "epoch": 17.09,
      "learning_rate": 4.145512820512821e-05,
      "loss": 0.6058,
      "step": 4000
    },
    {
      "epoch": 17.09,
      "eval_loss": 0.6131681203842163,
      "eval_runtime": 163.4313,
      "eval_samples_per_second": 1077.358,
      "eval_steps_per_second": 33.672,
      "step": 4000
    },
    {
      "epoch": 19.23,
      "learning_rate": 4.0386752136752134e-05,
      "loss": 0.5995,
      "step": 4500
    },
    {
      "epoch": 19.23,
      "eval_loss": 0.6094414591789246,
      "eval_runtime": 178.3809,
      "eval_samples_per_second": 987.068,
      "eval_steps_per_second": 30.85,
      "step": 4500
    },
    {
      "epoch": 21.37,
      "learning_rate": 3.931837606837607e-05,
      "loss": 0.5941,
      "step": 5000
    },
    {
      "epoch": 21.37,
      "eval_loss": 0.6074140667915344,
      "eval_runtime": 169.9857,
      "eval_samples_per_second": 1035.817,
      "eval_steps_per_second": 32.373,
      "step": 5000
    },
    {
      "epoch": 23.5,
      "learning_rate": 3.825e-05,
      "loss": 0.5894,
      "step": 5500
    },
    {
      "epoch": 23.5,
      "eval_loss": 0.6062588691711426,
      "eval_runtime": 168.4324,
      "eval_samples_per_second": 1045.369,
      "eval_steps_per_second": 32.672,
      "step": 5500
    },
    {
      "epoch": 25.64,
      "learning_rate": 3.718162393162393e-05,
      "loss": 0.5851,
      "step": 6000
    },
    {
      "epoch": 25.64,
      "eval_loss": 0.6049907207489014,
      "eval_runtime": 169.165,
      "eval_samples_per_second": 1040.842,
      "eval_steps_per_second": 32.53,
      "step": 6000
    },
    {
      "epoch": 27.78,
      "learning_rate": 3.611324786324787e-05,
      "loss": 0.5813,
      "step": 6500
    },
    {
      "epoch": 27.78,
      "eval_loss": 0.6046043634414673,
      "eval_runtime": 164.2895,
      "eval_samples_per_second": 1071.73,
      "eval_steps_per_second": 33.496,
      "step": 6500
    },
    {
      "epoch": 29.91,
      "learning_rate": 3.50448717948718e-05,
      "loss": 0.5777,
      "step": 7000
    },
    {
      "epoch": 29.91,
      "eval_loss": 0.6043672561645508,
      "eval_runtime": 163.9442,
      "eval_samples_per_second": 1073.987,
      "eval_steps_per_second": 33.566,
      "step": 7000
    },
    {
      "epoch": 32.05,
      "learning_rate": 3.397649572649573e-05,
      "loss": 0.5744,
      "step": 7500
    },
    {
      "epoch": 32.05,
      "eval_loss": 0.6073368787765503,
      "eval_runtime": 182.0186,
      "eval_samples_per_second": 967.341,
      "eval_steps_per_second": 30.233,
      "step": 7500
    },
    {
      "epoch": 34.19,
      "learning_rate": 3.290811965811966e-05,
      "loss": 0.5712,
      "step": 8000
    },
    {
      "epoch": 34.19,
      "eval_loss": 0.6060159802436829,
      "eval_runtime": 164.583,
      "eval_samples_per_second": 1069.819,
      "eval_steps_per_second": 33.436,
      "step": 8000
    },
    {
      "epoch": 36.32,
      "learning_rate": 3.183974358974359e-05,
      "loss": 0.5684,
      "step": 8500
    },
    {
      "epoch": 36.32,
      "eval_loss": 0.6061359643936157,
      "eval_runtime": 165.5065,
      "eval_samples_per_second": 1063.849,
      "eval_steps_per_second": 33.249,
      "step": 8500
    },
    {
      "epoch": 38.46,
      "learning_rate": 3.077136752136752e-05,
      "loss": 0.5658,
      "step": 9000
    },
    {
      "epoch": 38.46,
      "eval_loss": 0.6064005494117737,
      "eval_runtime": 177.3855,
      "eval_samples_per_second": 992.607,
      "eval_steps_per_second": 31.023,
      "step": 9000
    },
    {
      "epoch": 40.6,
      "learning_rate": 2.9702991452991454e-05,
      "loss": 0.5634,
      "step": 9500
    },
    {
      "epoch": 40.6,
      "eval_loss": 0.6069360375404358,
      "eval_runtime": 164.0377,
      "eval_samples_per_second": 1073.375,
      "eval_steps_per_second": 33.547,
      "step": 9500
    },
    {
      "epoch": 42.74,
      "learning_rate": 2.8634615384615387e-05,
      "loss": 0.5612,
      "step": 10000
    },
    {
      "epoch": 42.74,
      "eval_loss": 0.6074803471565247,
      "eval_runtime": 184.5732,
      "eval_samples_per_second": 953.952,
      "eval_steps_per_second": 29.815,
      "step": 10000
    },
    {
      "epoch": 44.87,
      "learning_rate": 2.7566239316239318e-05,
      "loss": 0.5591,
      "step": 10500
    },
    {
      "epoch": 44.87,
      "eval_loss": 0.6080661416053772,
      "eval_runtime": 164.771,
      "eval_samples_per_second": 1068.598,
      "eval_steps_per_second": 33.398,
      "step": 10500
    },
    {
      "epoch": 47.01,
      "learning_rate": 2.6497863247863248e-05,
      "loss": 0.5571,
      "step": 11000
    },
    {
      "epoch": 47.01,
      "eval_loss": 0.610197901725769,
      "eval_runtime": 163.8142,
      "eval_samples_per_second": 1074.84,
      "eval_steps_per_second": 33.593,
      "step": 11000
    },
    {
      "epoch": 49.15,
      "learning_rate": 2.542948717948718e-05,
      "loss": 0.5549,
      "step": 11500
    },
    {
      "epoch": 49.15,
      "eval_loss": 0.6111814379692078,
      "eval_runtime": 172.4133,
      "eval_samples_per_second": 1021.232,
      "eval_steps_per_second": 31.917,
      "step": 11500
    },
    {
      "epoch": 51.28,
      "learning_rate": 2.4361111111111112e-05,
      "loss": 0.5532,
      "step": 12000
    },
    {
      "epoch": 51.28,
      "eval_loss": 0.611818790435791,
      "eval_runtime": 174.1127,
      "eval_samples_per_second": 1011.264,
      "eval_steps_per_second": 31.606,
      "step": 12000
    },
    {
      "epoch": 53.42,
      "learning_rate": 2.3292735042735045e-05,
      "loss": 0.5515,
      "step": 12500
    },
    {
      "epoch": 53.42,
      "eval_loss": 0.6132317781448364,
      "eval_runtime": 164.6297,
      "eval_samples_per_second": 1069.515,
      "eval_steps_per_second": 33.427,
      "step": 12500
    },
    {
      "epoch": 55.56,
      "learning_rate": 2.2224358974358976e-05,
      "loss": 0.55,
      "step": 13000
    },
    {
      "epoch": 55.56,
      "eval_loss": 0.6136063933372498,
      "eval_runtime": 164.5555,
      "eval_samples_per_second": 1069.998,
      "eval_steps_per_second": 33.442,
      "step": 13000
    },
    {
      "epoch": 57.69,
      "learning_rate": 2.1155982905982906e-05,
      "loss": 0.5486,
      "step": 13500
    },
    {
      "epoch": 57.69,
      "eval_loss": 0.6144915819168091,
      "eval_runtime": 203.4153,
      "eval_samples_per_second": 865.589,
      "eval_steps_per_second": 27.053,
      "step": 13500
    },
    {
      "epoch": 59.83,
      "learning_rate": 2.008760683760684e-05,
      "loss": 0.5472,
      "step": 14000
    },
    {
      "epoch": 59.83,
      "eval_loss": 0.6147069931030273,
      "eval_runtime": 199.2649,
      "eval_samples_per_second": 883.618,
      "eval_steps_per_second": 27.617,
      "step": 14000
    },
    {
      "epoch": 61.97,
      "learning_rate": 1.9021367521367522e-05,
      "loss": 0.5457,
      "step": 14500
    },
    {
      "epoch": 61.97,
      "eval_loss": 0.6158111095428467,
      "eval_runtime": 206.6245,
      "eval_samples_per_second": 852.145,
      "eval_steps_per_second": 26.633,
      "step": 14500
    },
    {
      "epoch": 64.1,
      "learning_rate": 1.7952991452991456e-05,
      "loss": 0.5445,
      "step": 15000
    },
    {
      "epoch": 64.1,
      "eval_loss": 0.6180585026741028,
      "eval_runtime": 181.3159,
      "eval_samples_per_second": 971.09,
      "eval_steps_per_second": 30.35,
      "step": 15000
    },
    {
      "epoch": 66.24,
      "learning_rate": 1.688675213675214e-05,
      "loss": 0.5431,
      "step": 15500
    },
    {
      "epoch": 66.24,
      "eval_loss": 0.6186466217041016,
      "eval_runtime": 175.3115,
      "eval_samples_per_second": 1004.349,
      "eval_steps_per_second": 31.39,
      "step": 15500
    },
    {
      "epoch": 68.38,
      "learning_rate": 1.581837606837607e-05,
      "loss": 0.5421,
      "step": 16000
    },
    {
      "epoch": 68.38,
      "eval_loss": 0.6195328831672668,
      "eval_runtime": 164.9282,
      "eval_samples_per_second": 1067.58,
      "eval_steps_per_second": 33.366,
      "step": 16000
    },
    {
      "epoch": 70.51,
      "learning_rate": 1.475e-05,
      "loss": 0.5411,
      "step": 16500
    },
    {
      "epoch": 70.51,
      "eval_loss": 0.6201679110527039,
      "eval_runtime": 197.124,
      "eval_samples_per_second": 893.215,
      "eval_steps_per_second": 27.916,
      "step": 16500
    },
    {
      "epoch": 72.65,
      "learning_rate": 1.3681623931623933e-05,
      "loss": 0.5401,
      "step": 17000
    },
    {
      "epoch": 72.65,
      "eval_loss": 0.6210618019104004,
      "eval_runtime": 173.1765,
      "eval_samples_per_second": 1016.732,
      "eval_steps_per_second": 31.777,
      "step": 17000
    },
    {
      "epoch": 74.79,
      "learning_rate": 1.2613247863247865e-05,
      "loss": 0.5391,
      "step": 17500
    },
    {
      "epoch": 74.79,
      "eval_loss": 0.6214122772216797,
      "eval_runtime": 196.4031,
      "eval_samples_per_second": 896.493,
      "eval_steps_per_second": 28.019,
      "step": 17500
    },
    {
      "epoch": 76.92,
      "learning_rate": 1.1544871794871795e-05,
      "loss": 0.5382,
      "step": 18000
    },
    {
      "epoch": 76.92,
      "eval_loss": 0.6220324635505676,
      "eval_runtime": 164.5331,
      "eval_samples_per_second": 1070.143,
      "eval_steps_per_second": 33.446,
      "step": 18000
    },
    {
      "epoch": 79.06,
      "learning_rate": 1.0476495726495727e-05,
      "loss": 0.5372,
      "step": 18500
    },
    {
      "epoch": 79.06,
      "eval_loss": 0.6238572001457214,
      "eval_runtime": 180.654,
      "eval_samples_per_second": 974.647,
      "eval_steps_per_second": 30.462,
      "step": 18500
    },
    {
      "epoch": 81.2,
      "learning_rate": 9.40811965811966e-06,
      "loss": 0.5363,
      "step": 19000
    },
    {
      "epoch": 81.2,
      "eval_loss": 0.6245262026786804,
      "eval_runtime": 163.6759,
      "eval_samples_per_second": 1075.748,
      "eval_steps_per_second": 33.621,
      "step": 19000
    },
    {
      "epoch": 83.33,
      "learning_rate": 8.341880341880342e-06,
      "loss": 0.5355,
      "step": 19500
    },
    {
      "epoch": 83.33,
      "eval_loss": 0.6251608729362488,
      "eval_runtime": 200.2426,
      "eval_samples_per_second": 879.303,
      "eval_steps_per_second": 27.482,
      "step": 19500
    },
    {
      "epoch": 85.47,
      "learning_rate": 7.273504273504274e-06,
      "loss": 0.5348,
      "step": 20000
    },
    {
      "epoch": 85.47,
      "eval_loss": 0.6253054738044739,
      "eval_runtime": 175.0233,
      "eval_samples_per_second": 1006.003,
      "eval_steps_per_second": 31.442,
      "step": 20000
    },
    {
      "epoch": 87.61,
      "learning_rate": 6.205128205128206e-06,
      "loss": 0.5341,
      "step": 20500
    },
    {
      "epoch": 87.61,
      "eval_loss": 0.6260212063789368,
      "eval_runtime": 165.1088,
      "eval_samples_per_second": 1066.412,
      "eval_steps_per_second": 33.33,
      "step": 20500
    },
    {
      "epoch": 89.74,
      "learning_rate": 5.136752136752137e-06,
      "loss": 0.5334,
      "step": 21000
    },
    {
      "epoch": 89.74,
      "eval_loss": 0.6266852021217346,
      "eval_runtime": 164.357,
      "eval_samples_per_second": 1071.29,
      "eval_steps_per_second": 33.482,
      "step": 21000
    },
    {
      "epoch": 91.88,
      "learning_rate": 4.068376068376069e-06,
      "loss": 0.5327,
      "step": 21500
    },
    {
      "epoch": 91.88,
      "eval_loss": 0.6268437504768372,
      "eval_runtime": 168.7818,
      "eval_samples_per_second": 1043.205,
      "eval_steps_per_second": 32.604,
      "step": 21500
    },
    {
      "epoch": 94.02,
      "learning_rate": 3e-06,
      "loss": 0.5321,
      "step": 22000
    },
    {
      "epoch": 94.02,
      "eval_loss": 0.6274420022964478,
      "eval_runtime": 193.8022,
      "eval_samples_per_second": 908.524,
      "eval_steps_per_second": 28.395,
      "step": 22000
    },
    {
      "epoch": 96.15,
      "learning_rate": 1.9316239316239316e-06,
      "loss": 0.5315,
      "step": 22500
    },
    {
      "epoch": 96.15,
      "eval_loss": 0.6278537511825562,
      "eval_runtime": 195.5578,
      "eval_samples_per_second": 900.368,
      "eval_steps_per_second": 28.14,
      "step": 22500
    },
    {
      "epoch": 98.29,
      "learning_rate": 8.632478632478633e-07,
      "loss": 0.531,
      "step": 23000
    },
    {
      "epoch": 98.29,
      "eval_loss": 0.6279672980308533,
      "eval_runtime": 167.092,
      "eval_samples_per_second": 1053.755,
      "eval_steps_per_second": 32.934,
      "step": 23000
    }
  ],
  "max_steps": 23400,
  "num_train_epochs": 100,
  "total_flos": 2.3846898985745777e+18,
  "trial_name": null,
  "trial_params": null
}