{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.21499596882558453,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010749798441279225,
"eval_loss": 2.633302688598633,
"eval_runtime": 31.7207,
"eval_samples_per_second": 12.358,
"eval_steps_per_second": 6.179,
"step": 1
},
{
"epoch": 0.005374899220639613,
"grad_norm": 1.0801095962524414,
"learning_rate": 5e-05,
"loss": 1.1635,
"step": 5
},
{
"epoch": 0.010749798441279226,
"grad_norm": 1.8692516088485718,
"learning_rate": 0.0001,
"loss": 1.4162,
"step": 10
},
{
"epoch": 0.01612469766191884,
"grad_norm": 2.035304069519043,
"learning_rate": 9.98292246503335e-05,
"loss": 0.9354,
"step": 15
},
{
"epoch": 0.021499596882558453,
"grad_norm": 2.7511253356933594,
"learning_rate": 9.931806517013612e-05,
"loss": 0.8112,
"step": 20
},
{
"epoch": 0.026874496103198066,
"grad_norm": 2.422313928604126,
"learning_rate": 9.847001329696653e-05,
"loss": 0.7849,
"step": 25
},
{
"epoch": 0.03224939532383768,
"grad_norm": 2.5819692611694336,
"learning_rate": 9.729086208503174e-05,
"loss": 0.6657,
"step": 30
},
{
"epoch": 0.03762429454447729,
"grad_norm": 2.4328465461730957,
"learning_rate": 9.578866633275288e-05,
"loss": 0.6872,
"step": 35
},
{
"epoch": 0.042999193765116905,
"grad_norm": 2.129849433898926,
"learning_rate": 9.397368756032445e-05,
"loss": 0.7661,
"step": 40
},
{
"epoch": 0.04837409298575652,
"grad_norm": 3.1652305126190186,
"learning_rate": 9.185832391312644e-05,
"loss": 0.7971,
"step": 45
},
{
"epoch": 0.05374899220639613,
"grad_norm": 2.1810810565948486,
"learning_rate": 8.945702546981969e-05,
"loss": 0.6101,
"step": 50
},
{
"epoch": 0.05374899220639613,
"eval_loss": 0.7575846910476685,
"eval_runtime": 32.089,
"eval_samples_per_second": 12.216,
"eval_steps_per_second": 6.108,
"step": 50
},
{
"epoch": 0.059123891427035745,
"grad_norm": 1.179375171661377,
"learning_rate": 8.678619553365659e-05,
"loss": 0.628,
"step": 55
},
{
"epoch": 0.06449879064767536,
"grad_norm": 1.4318596124649048,
"learning_rate": 8.386407858128706e-05,
"loss": 0.6071,
"step": 60
},
{
"epoch": 0.06987368986831496,
"grad_norm": 0.842193067073822,
"learning_rate": 8.07106356344834e-05,
"loss": 0.4833,
"step": 65
},
{
"epoch": 0.07524858908895458,
"grad_norm": 1.4781662225723267,
"learning_rate": 7.734740790612136e-05,
"loss": 0.6483,
"step": 70
},
{
"epoch": 0.08062348830959419,
"grad_norm": 1.721100926399231,
"learning_rate": 7.379736965185368e-05,
"loss": 0.6261,
"step": 75
},
{
"epoch": 0.08599838753023381,
"grad_norm": 1.579014539718628,
"learning_rate": 7.008477123264848e-05,
"loss": 0.6904,
"step": 80
},
{
"epoch": 0.09137328675087342,
"grad_norm": 2.411149740219116,
"learning_rate": 6.623497346023418e-05,
"loss": 0.5852,
"step": 85
},
{
"epoch": 0.09674818597151304,
"grad_norm": 2.7585904598236084,
"learning_rate": 6.227427435703997e-05,
"loss": 0.6734,
"step": 90
},
{
"epoch": 0.10212308519215264,
"grad_norm": 1.8563114404678345,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.6365,
"step": 95
},
{
"epoch": 0.10749798441279226,
"grad_norm": 3.247232437133789,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.7104,
"step": 100
},
{
"epoch": 0.10749798441279226,
"eval_loss": 0.6235585808753967,
"eval_runtime": 32.1467,
"eval_samples_per_second": 12.194,
"eval_steps_per_second": 6.097,
"step": 100
},
{
"epoch": 0.11287288363343187,
"grad_norm": 1.1242468357086182,
"learning_rate": 5e-05,
"loss": 0.5403,
"step": 105
},
{
"epoch": 0.11824778285407149,
"grad_norm": 1.0179481506347656,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.5151,
"step": 110
},
{
"epoch": 0.1236226820747111,
"grad_norm": 1.5832953453063965,
"learning_rate": 4.17702704859633e-05,
"loss": 0.5154,
"step": 115
},
{
"epoch": 0.12899758129535072,
"grad_norm": 0.951425313949585,
"learning_rate": 3.772572564296005e-05,
"loss": 0.4058,
"step": 120
},
{
"epoch": 0.13437248051599032,
"grad_norm": 1.8775848150253296,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.5965,
"step": 125
},
{
"epoch": 0.13974737973662993,
"grad_norm": 1.5794333219528198,
"learning_rate": 2.991522876735154e-05,
"loss": 0.5362,
"step": 130
},
{
"epoch": 0.14512227895726956,
"grad_norm": 1.7076222896575928,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.5668,
"step": 135
},
{
"epoch": 0.15049717817790917,
"grad_norm": 1.849480390548706,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.5461,
"step": 140
},
{
"epoch": 0.15587207739854878,
"grad_norm": 1.9041430950164795,
"learning_rate": 1.928936436551661e-05,
"loss": 0.5302,
"step": 145
},
{
"epoch": 0.16124697661918838,
"grad_norm": 2.6399660110473633,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.6175,
"step": 150
},
{
"epoch": 0.16124697661918838,
"eval_loss": 0.5478774309158325,
"eval_runtime": 32.2037,
"eval_samples_per_second": 12.173,
"eval_steps_per_second": 6.086,
"step": 150
},
{
"epoch": 0.16662187583982802,
"grad_norm": 0.9312940835952759,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.411,
"step": 155
},
{
"epoch": 0.17199677506046762,
"grad_norm": 1.047676920890808,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.4869,
"step": 160
},
{
"epoch": 0.17737167428110723,
"grad_norm": 1.1791578531265259,
"learning_rate": 8.141676086873572e-06,
"loss": 0.4584,
"step": 165
},
{
"epoch": 0.18274657350174683,
"grad_norm": 1.4493966102600098,
"learning_rate": 6.026312439675552e-06,
"loss": 0.4337,
"step": 170
},
{
"epoch": 0.18812147272238647,
"grad_norm": 1.671579122543335,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.4747,
"step": 175
},
{
"epoch": 0.19349637194302607,
"grad_norm": 2.335181474685669,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.592,
"step": 180
},
{
"epoch": 0.19887127116366568,
"grad_norm": 1.7437834739685059,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.5927,
"step": 185
},
{
"epoch": 0.2042461703843053,
"grad_norm": 1.8436033725738525,
"learning_rate": 6.819348298638839e-07,
"loss": 0.5768,
"step": 190
},
{
"epoch": 0.20962106960494492,
"grad_norm": 3.186182737350464,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.7015,
"step": 195
},
{
"epoch": 0.21499596882558453,
"grad_norm": 1.6149808168411255,
"learning_rate": 0.0,
"loss": 0.5627,
"step": 200
},
{
"epoch": 0.21499596882558453,
"eval_loss": 0.5310530662536621,
"eval_runtime": 32.1744,
"eval_samples_per_second": 12.184,
"eval_steps_per_second": 6.092,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.39825699258368e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}