{
  "best_metric": 1.969947099685669,
  "best_model_checkpoint": "./outputs/checkpoint-4000",
  "epoch": 2.9143897996357016,
  "eval_steps": 100,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7201,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.618009567260742,
      "eval_runtime": 1145.9752,
      "eval_samples_per_second": 5.475,
      "eval_steps_per_second": 0.685,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5807,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.5655324459075928,
      "eval_runtime": 1107.0292,
      "eval_samples_per_second": 5.667,
      "eval_steps_per_second": 0.709,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5387,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.530600070953369,
      "eval_runtime": 1174.5089,
      "eval_samples_per_second": 5.342,
      "eval_steps_per_second": 0.668,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5124,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.496591806411743,
      "eval_runtime": 1111.3152,
      "eval_samples_per_second": 5.646,
      "eval_steps_per_second": 0.706,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4699,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.470156192779541,
      "eval_runtime": 1148.7316,
      "eval_samples_per_second": 5.462,
      "eval_steps_per_second": 0.683,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4445,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4447057247161865,
      "eval_runtime": 1011.8783,
      "eval_samples_per_second": 6.2,
      "eval_steps_per_second": 0.776,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4269,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.4192888736724854,
      "eval_runtime": 1320.0264,
      "eval_samples_per_second": 4.753,
      "eval_steps_per_second": 0.595,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4054,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.3991127014160156,
      "eval_runtime": 1042.8122,
      "eval_samples_per_second": 6.016,
      "eval_steps_per_second": 0.753,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3697,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3775434494018555,
      "eval_runtime": 1384.392,
      "eval_samples_per_second": 4.532,
      "eval_steps_per_second": 0.567,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3663,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.360269784927368,
      "eval_runtime": 205.5954,
      "eval_samples_per_second": 30.516,
      "eval_steps_per_second": 3.818,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3591,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.3368232250213623,
      "eval_runtime": 205.7042,
      "eval_samples_per_second": 30.5,
      "eval_steps_per_second": 3.816,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3156,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.3195290565490723,
      "eval_runtime": 205.8525,
      "eval_samples_per_second": 30.478,
      "eval_steps_per_second": 3.813,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3264,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3028838634490967,
      "eval_runtime": 205.7996,
      "eval_samples_per_second": 30.486,
      "eval_steps_per_second": 3.814,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2775,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.279730796813965,
      "eval_runtime": 206.0898,
      "eval_samples_per_second": 30.443,
      "eval_steps_per_second": 3.809,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2381,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.2633275985717773,
      "eval_runtime": 205.9739,
      "eval_samples_per_second": 30.46,
      "eval_steps_per_second": 3.811,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.23,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2453196048736572,
      "eval_runtime": 206.3463,
      "eval_samples_per_second": 30.405,
      "eval_steps_per_second": 3.804,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2304,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.2294440269470215,
      "eval_runtime": 205.8333,
      "eval_samples_per_second": 30.481,
      "eval_steps_per_second": 3.814,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.1919,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2156779766082764,
      "eval_runtime": 205.9059,
      "eval_samples_per_second": 30.47,
      "eval_steps_per_second": 3.812,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1769,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.1997225284576416,
      "eval_runtime": 206.368,
      "eval_samples_per_second": 30.402,
      "eval_steps_per_second": 3.804,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1705,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.1834230422973633,
      "eval_runtime": 206.1218,
      "eval_samples_per_second": 30.438,
      "eval_steps_per_second": 3.808,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.1549,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.171769618988037,
      "eval_runtime": 206.0843,
      "eval_samples_per_second": 30.444,
      "eval_steps_per_second": 3.809,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1373,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.1567013263702393,
      "eval_runtime": 206.3812,
      "eval_samples_per_second": 30.4,
      "eval_steps_per_second": 3.804,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1272,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.144631862640381,
      "eval_runtime": 206.1489,
      "eval_samples_per_second": 30.434,
      "eval_steps_per_second": 3.808,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1339,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.1313953399658203,
      "eval_runtime": 206.1249,
      "eval_samples_per_second": 30.438,
      "eval_steps_per_second": 3.808,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1011,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1183180809020996,
      "eval_runtime": 206.302,
      "eval_samples_per_second": 30.412,
      "eval_steps_per_second": 3.805,
      "step": 2500
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0002,
      "loss": 2.1031,
      "step": 2600
    },
    {
      "epoch": 1.89,
      "eval_loss": 2.107461929321289,
      "eval_runtime": 206.4059,
      "eval_samples_per_second": 30.396,
      "eval_steps_per_second": 3.803,
      "step": 2600
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002,
      "loss": 2.0873,
      "step": 2700
    },
    {
      "epoch": 1.97,
      "eval_loss": 2.09187388420105,
      "eval_runtime": 206.3333,
      "eval_samples_per_second": 30.407,
      "eval_steps_per_second": 3.805,
      "step": 2700
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002,
      "loss": 2.0424,
      "step": 2800
    },
    {
      "epoch": 2.04,
      "eval_loss": 2.0847010612487793,
      "eval_runtime": 206.387,
      "eval_samples_per_second": 30.399,
      "eval_steps_per_second": 3.804,
      "step": 2800
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 2.0159,
      "step": 2900
    },
    {
      "epoch": 2.11,
      "eval_loss": 2.07328462600708,
      "eval_runtime": 205.9829,
      "eval_samples_per_second": 30.459,
      "eval_steps_per_second": 3.811,
      "step": 2900
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0002,
      "loss": 2.0375,
      "step": 3000
    },
    {
      "epoch": 2.19,
      "eval_loss": 2.063817024230957,
      "eval_runtime": 206.2209,
      "eval_samples_per_second": 30.424,
      "eval_steps_per_second": 3.807,
      "step": 3000
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 2.0054,
      "step": 3100
    },
    {
      "epoch": 2.26,
      "eval_loss": 2.0530624389648438,
      "eval_runtime": 206.2013,
      "eval_samples_per_second": 30.427,
      "eval_steps_per_second": 3.807,
      "step": 3100
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002,
      "loss": 2.0202,
      "step": 3200
    },
    {
      "epoch": 2.33,
      "eval_loss": 2.041797637939453,
      "eval_runtime": 206.1064,
      "eval_samples_per_second": 30.441,
      "eval_steps_per_second": 3.809,
      "step": 3200
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.0002,
      "loss": 1.9871,
      "step": 3300
    },
    {
      "epoch": 2.4,
      "eval_loss": 2.0300114154815674,
      "eval_runtime": 205.777,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 3.815,
      "step": 3300
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0002,
      "loss": 1.9829,
      "step": 3400
    },
    {
      "epoch": 2.48,
      "eval_loss": 2.0251846313476562,
      "eval_runtime": 206.4461,
      "eval_samples_per_second": 30.39,
      "eval_steps_per_second": 3.802,
      "step": 3400
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0002,
      "loss": 1.9787,
      "step": 3500
    },
    {
      "epoch": 2.55,
      "eval_loss": 2.0121688842773438,
      "eval_runtime": 206.1187,
      "eval_samples_per_second": 30.439,
      "eval_steps_per_second": 3.808,
      "step": 3500
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 1.9764,
      "step": 3600
    },
    {
      "epoch": 2.62,
      "eval_loss": 2.0046775341033936,
      "eval_runtime": 206.1405,
      "eval_samples_per_second": 30.436,
      "eval_steps_per_second": 3.808,
      "step": 3600
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0002,
      "loss": 1.9565,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "eval_loss": 1.9959838390350342,
      "eval_runtime": 214.3631,
      "eval_samples_per_second": 29.268,
      "eval_steps_per_second": 3.662,
      "step": 3700
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 1.9494,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "eval_loss": 1.9870028495788574,
      "eval_runtime": 206.1419,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 3800
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0002,
      "loss": 1.9354,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "eval_loss": 1.9740982055664062,
      "eval_runtime": 206.2491,
      "eval_samples_per_second": 30.42,
      "eval_steps_per_second": 3.806,
      "step": 3900
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0002,
      "loss": 1.9383,
      "step": 4000
    },
    {
      "epoch": 2.91,
      "eval_loss": 1.969947099685669,
      "eval_runtime": 206.1194,
      "eval_samples_per_second": 30.439,
      "eval_steps_per_second": 3.808,
      "step": 4000
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.1744177835220992e+17,
  "trial_name": null,
  "trial_params": null
}