{
  "best_metric": 2.41491436958313,
  "best_model_checkpoint": "./gpt2_dolly_lite/checkpoint-2600",
  "epoch": 1.9996154585656605,
  "eval_steps": 500,
  "global_step": 2600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 0.0009871794871794872,
      "loss": 3.3078,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0009743589743589744,
      "loss": 3.1249,
      "step": 100
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0009615384615384616,
      "loss": 3.1026,
      "step": 150
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0009487179487179487,
      "loss": 3.0496,
      "step": 200
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.000935897435897436,
      "loss": 2.9706,
      "step": 250
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0009230769230769232,
      "loss": 3.0195,
      "step": 300
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0009102564102564102,
      "loss": 2.9747,
      "step": 350
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0008974358974358974,
      "loss": 2.9205,
      "step": 400
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0008846153846153846,
      "loss": 2.9495,
      "step": 450
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0008717948717948718,
      "loss": 2.8866,
      "step": 500
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0008589743589743589,
      "loss": 2.892,
      "step": 550
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0008461538461538462,
      "loss": 2.8849,
      "step": 600
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0008333333333333334,
      "loss": 2.8819,
      "step": 650
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0008205128205128205,
      "loss": 2.8561,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0008076923076923078,
      "loss": 2.8311,
      "step": 750
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0007948717948717948,
      "loss": 2.8176,
      "step": 800
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.000782051282051282,
      "loss": 2.7782,
      "step": 850
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0007692307692307693,
      "loss": 2.7273,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0007564102564102564,
      "loss": 2.7686,
      "step": 950
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0007435897435897436,
      "loss": 2.7274,
      "step": 1000
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0007307692307692307,
      "loss": 2.7514,
      "step": 1050
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.000717948717948718,
      "loss": 2.7232,
      "step": 1100
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0007051282051282052,
      "loss": 2.6937,
      "step": 1150
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0006923076923076923,
      "loss": 2.7209,
      "step": 1200
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0006794871794871796,
      "loss": 2.6832,
      "step": 1250
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0006666666666666666,
      "loss": 2.708,
      "step": 1300
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.561117649078369,
      "eval_runtime": 41.0726,
      "eval_samples_per_second": 253.235,
      "eval_steps_per_second": 7.937,
      "step": 1300
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0006538461538461538,
      "loss": 2.2119,
      "step": 1350
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0006410256410256411,
      "loss": 2.2279,
      "step": 1400
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0006282051282051282,
      "loss": 2.1996,
      "step": 1450
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0006153846153846154,
      "loss": 2.1886,
      "step": 1500
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0006025641025641026,
      "loss": 2.268,
      "step": 1550
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0005897435897435898,
      "loss": 2.2118,
      "step": 1600
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0005769230769230769,
      "loss": 2.2831,
      "step": 1650
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0005641025641025641,
      "loss": 2.2317,
      "step": 1700
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.0005512820512820514,
      "loss": 2.2504,
      "step": 1750
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0005384615384615384,
      "loss": 2.241,
      "step": 1800
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0005256410256410256,
      "loss": 2.2381,
      "step": 1850
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0005128205128205128,
      "loss": 2.2313,
      "step": 1900
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.0005,
      "loss": 2.2493,
      "step": 1950
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.0004871794871794872,
      "loss": 2.2056,
      "step": 2000
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00047435897435897434,
      "loss": 2.2178,
      "step": 2050
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.0004615384615384616,
      "loss": 2.2518,
      "step": 2100
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.0004487179487179487,
      "loss": 2.2354,
      "step": 2150
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.0004358974358974359,
      "loss": 2.2144,
      "step": 2200
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.0004230769230769231,
      "loss": 2.2197,
      "step": 2250
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00041025641025641023,
      "loss": 2.2006,
      "step": 2300
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.0003974358974358974,
      "loss": 2.1802,
      "step": 2350
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.00038461538461538467,
      "loss": 2.1656,
      "step": 2400
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.0003717948717948718,
      "loss": 2.1592,
      "step": 2450
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.000358974358974359,
      "loss": 2.1847,
      "step": 2500
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00034615384615384613,
      "loss": 2.1981,
      "step": 2550
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0003333333333333333,
      "loss": 2.1768,
      "step": 2600
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.41491436958313,
      "eval_runtime": 39.8227,
      "eval_samples_per_second": 261.183,
      "eval_steps_per_second": 8.186,
      "step": 2600
    }
  ],
  "logging_steps": 50,
  "max_steps": 3900,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 4433294636163072.0,
  "trial_name": null,
  "trial_params": null
}