{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1276324186343331,
"eval_steps": 13,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025526483726866626,
"grad_norm": 0.38045117259025574,
"learning_rate": 5e-06,
"loss": 4.6611,
"step": 1
},
{
"epoch": 0.0025526483726866626,
"eval_loss": 1.226615071296692,
"eval_runtime": 18.849,
"eval_samples_per_second": 8.754,
"eval_steps_per_second": 4.403,
"step": 1
},
{
"epoch": 0.005105296745373325,
"grad_norm": 0.506873369216919,
"learning_rate": 1e-05,
"loss": 5.3237,
"step": 2
},
{
"epoch": 0.007657945118059987,
"grad_norm": 0.4798293113708496,
"learning_rate": 1.5e-05,
"loss": 4.8629,
"step": 3
},
{
"epoch": 0.01021059349074665,
"grad_norm": 0.47835850715637207,
"learning_rate": 2e-05,
"loss": 5.1539,
"step": 4
},
{
"epoch": 0.012763241863433313,
"grad_norm": 0.5494751334190369,
"learning_rate": 2.5e-05,
"loss": 5.3459,
"step": 5
},
{
"epoch": 0.015315890236119975,
"grad_norm": 0.441150426864624,
"learning_rate": 3e-05,
"loss": 5.294,
"step": 6
},
{
"epoch": 0.017868538608806637,
"grad_norm": 0.4212581217288971,
"learning_rate": 3.5e-05,
"loss": 4.4479,
"step": 7
},
{
"epoch": 0.0204211869814933,
"grad_norm": 0.4588180184364319,
"learning_rate": 4e-05,
"loss": 5.1455,
"step": 8
},
{
"epoch": 0.02297383535417996,
"grad_norm": 0.45286649465560913,
"learning_rate": 4.5e-05,
"loss": 4.7709,
"step": 9
},
{
"epoch": 0.025526483726866625,
"grad_norm": 0.4099409580230713,
"learning_rate": 5e-05,
"loss": 5.016,
"step": 10
},
{
"epoch": 0.028079132099553285,
"grad_norm": 0.5061376094818115,
"learning_rate": 4.99229333433282e-05,
"loss": 4.831,
"step": 11
},
{
"epoch": 0.03063178047223995,
"grad_norm": 0.5543558597564697,
"learning_rate": 4.9692208514878444e-05,
"loss": 5.1186,
"step": 12
},
{
"epoch": 0.03318442884492661,
"grad_norm": 0.5037564039230347,
"learning_rate": 4.9309248009941914e-05,
"loss": 4.8973,
"step": 13
},
{
"epoch": 0.03318442884492661,
"eval_loss": 1.221695065498352,
"eval_runtime": 18.8578,
"eval_samples_per_second": 8.75,
"eval_steps_per_second": 4.401,
"step": 13
},
{
"epoch": 0.035737077217613274,
"grad_norm": 0.45682233572006226,
"learning_rate": 4.877641290737884e-05,
"loss": 4.858,
"step": 14
},
{
"epoch": 0.03828972559029994,
"grad_norm": 0.5056037306785583,
"learning_rate": 4.8096988312782174e-05,
"loss": 5.0241,
"step": 15
},
{
"epoch": 0.0408423739629866,
"grad_norm": 0.5200461745262146,
"learning_rate": 4.72751631047092e-05,
"loss": 4.9421,
"step": 16
},
{
"epoch": 0.04339502233567326,
"grad_norm": 0.5702153444290161,
"learning_rate": 4.6316004108852305e-05,
"loss": 5.3207,
"step": 17
},
{
"epoch": 0.04594767070835992,
"grad_norm": 0.5843238234519958,
"learning_rate": 4.522542485937369e-05,
"loss": 4.5834,
"step": 18
},
{
"epoch": 0.048500319081046586,
"grad_norm": 0.6220148801803589,
"learning_rate": 4.401014914000078e-05,
"loss": 5.3389,
"step": 19
},
{
"epoch": 0.05105296745373325,
"grad_norm": 0.5212782621383667,
"learning_rate": 4.267766952966369e-05,
"loss": 4.9851,
"step": 20
},
{
"epoch": 0.053605615826419914,
"grad_norm": 0.6411041617393494,
"learning_rate": 4.123620120825459e-05,
"loss": 5.4835,
"step": 21
},
{
"epoch": 0.05615826419910657,
"grad_norm": 0.509672224521637,
"learning_rate": 3.969463130731183e-05,
"loss": 4.7967,
"step": 22
},
{
"epoch": 0.058710912571793235,
"grad_norm": 6.348986625671387,
"learning_rate": 3.8062464117898724e-05,
"loss": 4.2609,
"step": 23
},
{
"epoch": 0.0612635609444799,
"grad_norm": 0.5605723261833191,
"learning_rate": 3.634976249348867e-05,
"loss": 4.7142,
"step": 24
},
{
"epoch": 0.06381620931716656,
"grad_norm": 0.5585960745811462,
"learning_rate": 3.456708580912725e-05,
"loss": 4.9871,
"step": 25
},
{
"epoch": 0.06636885768985322,
"grad_norm": 0.5804082155227661,
"learning_rate": 3.272542485937369e-05,
"loss": 4.8049,
"step": 26
},
{
"epoch": 0.06636885768985322,
"eval_loss": 1.200016975402832,
"eval_runtime": 18.8842,
"eval_samples_per_second": 8.737,
"eval_steps_per_second": 4.395,
"step": 26
},
{
"epoch": 0.06892150606253988,
"grad_norm": 0.5582651495933533,
"learning_rate": 3.083613409639764e-05,
"loss": 4.7835,
"step": 27
},
{
"epoch": 0.07147415443522655,
"grad_norm": 0.6535090208053589,
"learning_rate": 2.8910861626005776e-05,
"loss": 5.1362,
"step": 28
},
{
"epoch": 0.07402680280791321,
"grad_norm": 0.649197518825531,
"learning_rate": 2.6961477393196126e-05,
"loss": 5.1688,
"step": 29
},
{
"epoch": 0.07657945118059988,
"grad_norm": 0.6213428378105164,
"learning_rate": 2.5e-05,
"loss": 4.7665,
"step": 30
},
{
"epoch": 0.07913209955328654,
"grad_norm": 0.5954240560531616,
"learning_rate": 2.303852260680388e-05,
"loss": 4.6579,
"step": 31
},
{
"epoch": 0.0816847479259732,
"grad_norm": 0.6680808067321777,
"learning_rate": 2.1089138373994223e-05,
"loss": 4.7746,
"step": 32
},
{
"epoch": 0.08423739629865985,
"grad_norm": 0.8420075178146362,
"learning_rate": 1.9163865903602374e-05,
"loss": 4.3421,
"step": 33
},
{
"epoch": 0.08679004467134652,
"grad_norm": 0.6272104382514954,
"learning_rate": 1.7274575140626318e-05,
"loss": 5.0857,
"step": 34
},
{
"epoch": 0.08934269304403318,
"grad_norm": 0.6511965990066528,
"learning_rate": 1.5432914190872757e-05,
"loss": 4.2033,
"step": 35
},
{
"epoch": 0.09189534141671984,
"grad_norm": 0.6219887733459473,
"learning_rate": 1.3650237506511331e-05,
"loss": 5.0471,
"step": 36
},
{
"epoch": 0.09444798978940651,
"grad_norm": 0.6609371304512024,
"learning_rate": 1.1937535882101281e-05,
"loss": 5.1857,
"step": 37
},
{
"epoch": 0.09700063816209317,
"grad_norm": 0.6283613443374634,
"learning_rate": 1.0305368692688174e-05,
"loss": 4.5755,
"step": 38
},
{
"epoch": 0.09955328653477984,
"grad_norm": 0.6079770922660828,
"learning_rate": 8.763798791745411e-06,
"loss": 4.6142,
"step": 39
},
{
"epoch": 0.09955328653477984,
"eval_loss": 1.1924939155578613,
"eval_runtime": 18.8948,
"eval_samples_per_second": 8.733,
"eval_steps_per_second": 4.393,
"step": 39
},
{
"epoch": 0.1021059349074665,
"grad_norm": 0.742786705493927,
"learning_rate": 7.3223304703363135e-06,
"loss": 5.2024,
"step": 40
},
{
"epoch": 0.10465858328015316,
"grad_norm": 0.637015163898468,
"learning_rate": 5.989850859999227e-06,
"loss": 4.9553,
"step": 41
},
{
"epoch": 0.10721123165283983,
"grad_norm": 0.6206537485122681,
"learning_rate": 4.7745751406263165e-06,
"loss": 4.4764,
"step": 42
},
{
"epoch": 0.10976388002552648,
"grad_norm": 0.7534269690513611,
"learning_rate": 3.6839958911476957e-06,
"loss": 5.0251,
"step": 43
},
{
"epoch": 0.11231652839821314,
"grad_norm": 0.642451286315918,
"learning_rate": 2.7248368952908053e-06,
"loss": 4.9982,
"step": 44
},
{
"epoch": 0.1148691767708998,
"grad_norm": 0.6466948986053467,
"learning_rate": 1.9030116872178316e-06,
"loss": 5.0004,
"step": 45
},
{
"epoch": 0.11742182514358647,
"grad_norm": 0.6484655141830444,
"learning_rate": 1.2235870926211619e-06,
"loss": 4.6644,
"step": 46
},
{
"epoch": 0.11997447351627313,
"grad_norm": 0.6601917147636414,
"learning_rate": 6.907519900580861e-07,
"loss": 4.7506,
"step": 47
},
{
"epoch": 0.1225271218889598,
"grad_norm": 0.6565570831298828,
"learning_rate": 3.077914851215585e-07,
"loss": 5.1511,
"step": 48
},
{
"epoch": 0.12507977026164646,
"grad_norm": 0.6291937232017517,
"learning_rate": 7.706665667180091e-08,
"loss": 4.7335,
"step": 49
},
{
"epoch": 0.1276324186343331,
"grad_norm": 0.6736301183700562,
"learning_rate": 0.0,
"loss": 5.0993,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.37188711038976e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}