{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0021253985122210413,
"eval_steps": 4,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.084661707403471e-05,
"grad_norm": 11.687026023864746,
"learning_rate": 2.0000000000000003e-06,
"loss": 8.409,
"step": 1
},
{
"epoch": 7.084661707403471e-05,
"eval_loss": 7.959962844848633,
"eval_runtime": 853.269,
"eval_samples_per_second": 6.966,
"eval_steps_per_second": 1.742,
"step": 1
},
{
"epoch": 0.00014169323414806942,
"grad_norm": 14.769333839416504,
"learning_rate": 4.000000000000001e-06,
"loss": 7.4806,
"step": 2
},
{
"epoch": 0.00021253985122210415,
"grad_norm": 16.0882625579834,
"learning_rate": 6e-06,
"loss": 8.2333,
"step": 3
},
{
"epoch": 0.00028338646829613885,
"grad_norm": 11.657814979553223,
"learning_rate": 8.000000000000001e-06,
"loss": 7.5361,
"step": 4
},
{
"epoch": 0.00028338646829613885,
"eval_loss": 7.9468793869018555,
"eval_runtime": 854.4406,
"eval_samples_per_second": 6.957,
"eval_steps_per_second": 1.739,
"step": 4
},
{
"epoch": 0.00035423308537017357,
"grad_norm": 10.549196243286133,
"learning_rate": 1e-05,
"loss": 8.4016,
"step": 5
},
{
"epoch": 0.0004250797024442083,
"grad_norm": 10.008234024047852,
"learning_rate": 9.960573506572391e-06,
"loss": 7.4369,
"step": 6
},
{
"epoch": 0.000495926319518243,
"grad_norm": 10.50498104095459,
"learning_rate": 9.842915805643156e-06,
"loss": 6.433,
"step": 7
},
{
"epoch": 0.0005667729365922777,
"grad_norm": 10.711443901062012,
"learning_rate": 9.648882429441258e-06,
"loss": 7.2974,
"step": 8
},
{
"epoch": 0.0005667729365922777,
"eval_loss": 7.829478740692139,
"eval_runtime": 854.4135,
"eval_samples_per_second": 6.957,
"eval_steps_per_second": 1.739,
"step": 8
},
{
"epoch": 0.0006376195536663124,
"grad_norm": 17.665964126586914,
"learning_rate": 9.381533400219319e-06,
"loss": 7.4679,
"step": 9
},
{
"epoch": 0.0007084661707403471,
"grad_norm": 17.61928939819336,
"learning_rate": 9.045084971874738e-06,
"loss": 7.2824,
"step": 10
},
{
"epoch": 0.0007793127878143819,
"grad_norm": 15.639145851135254,
"learning_rate": 8.644843137107058e-06,
"loss": 8.937,
"step": 11
},
{
"epoch": 0.0008501594048884166,
"grad_norm": 21.38347625732422,
"learning_rate": 8.18711994874345e-06,
"loss": 7.4862,
"step": 12
},
{
"epoch": 0.0008501594048884166,
"eval_loss": 7.602104187011719,
"eval_runtime": 854.3957,
"eval_samples_per_second": 6.957,
"eval_steps_per_second": 1.739,
"step": 12
},
{
"epoch": 0.0009210060219624513,
"grad_norm": 17.321067810058594,
"learning_rate": 7.679133974894984e-06,
"loss": 5.8041,
"step": 13
},
{
"epoch": 0.000991852639036486,
"grad_norm": 13.502540588378906,
"learning_rate": 7.128896457825364e-06,
"loss": 7.3902,
"step": 14
},
{
"epoch": 0.0010626992561105207,
"grad_norm": 12.128912925720215,
"learning_rate": 6.545084971874738e-06,
"loss": 7.7259,
"step": 15
},
{
"epoch": 0.0011335458731845554,
"grad_norm": 10.935140609741211,
"learning_rate": 5.936906572928625e-06,
"loss": 7.9965,
"step": 16
},
{
"epoch": 0.0011335458731845554,
"eval_loss": 7.300816535949707,
"eval_runtime": 854.0987,
"eval_samples_per_second": 6.959,
"eval_steps_per_second": 1.74,
"step": 16
},
{
"epoch": 0.0012043924902585901,
"grad_norm": 16.006664276123047,
"learning_rate": 5.3139525976465675e-06,
"loss": 6.2996,
"step": 17
},
{
"epoch": 0.0012752391073326248,
"grad_norm": 14.367422103881836,
"learning_rate": 4.686047402353433e-06,
"loss": 7.2675,
"step": 18
},
{
"epoch": 0.0013460857244066596,
"grad_norm": 12.350759506225586,
"learning_rate": 4.063093427071376e-06,
"loss": 6.3202,
"step": 19
},
{
"epoch": 0.0014169323414806943,
"grad_norm": 14.988066673278809,
"learning_rate": 3.4549150281252635e-06,
"loss": 7.5826,
"step": 20
},
{
"epoch": 0.0014169323414806943,
"eval_loss": 7.063891887664795,
"eval_runtime": 853.8676,
"eval_samples_per_second": 6.961,
"eval_steps_per_second": 1.74,
"step": 20
},
{
"epoch": 0.001487778958554729,
"grad_norm": 17.12288475036621,
"learning_rate": 2.871103542174637e-06,
"loss": 6.8697,
"step": 21
},
{
"epoch": 0.0015586255756287637,
"grad_norm": 12.52187442779541,
"learning_rate": 2.320866025105016e-06,
"loss": 5.9574,
"step": 22
},
{
"epoch": 0.0016294721927027985,
"grad_norm": 14.923164367675781,
"learning_rate": 1.8128800512565514e-06,
"loss": 8.2413,
"step": 23
},
{
"epoch": 0.0017003188097768332,
"grad_norm": 13.699613571166992,
"learning_rate": 1.3551568628929434e-06,
"loss": 7.6529,
"step": 24
},
{
"epoch": 0.0017003188097768332,
"eval_loss": 6.94155216217041,
"eval_runtime": 854.7337,
"eval_samples_per_second": 6.954,
"eval_steps_per_second": 1.739,
"step": 24
},
{
"epoch": 0.001771165426850868,
"grad_norm": 15.484549522399902,
"learning_rate": 9.549150281252633e-07,
"loss": 5.9348,
"step": 25
},
{
"epoch": 0.0018420120439249026,
"grad_norm": 15.122868537902832,
"learning_rate": 6.184665997806832e-07,
"loss": 6.1889,
"step": 26
},
{
"epoch": 0.0019128586609989374,
"grad_norm": 17.317827224731445,
"learning_rate": 3.511175705587433e-07,
"loss": 7.4455,
"step": 27
},
{
"epoch": 0.001983705278072972,
"grad_norm": 14.604246139526367,
"learning_rate": 1.5708419435684463e-07,
"loss": 5.7908,
"step": 28
},
{
"epoch": 0.001983705278072972,
"eval_loss": 6.888062000274658,
"eval_runtime": 853.8107,
"eval_samples_per_second": 6.962,
"eval_steps_per_second": 1.74,
"step": 28
},
{
"epoch": 0.002054551895147007,
"grad_norm": 14.61190414428711,
"learning_rate": 3.9426493427611177e-08,
"loss": 6.7001,
"step": 29
},
{
"epoch": 0.0021253985122210413,
"grad_norm": 12.80364990234375,
"learning_rate": 0.0,
"loss": 6.8942,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 20,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4466802029428736e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}