{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.016886187098953058,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016886187098953058,
"grad_norm": 13.738941192626953,
"learning_rate": 1e-05,
"loss": 6.6647,
"step": 1
},
{
"epoch": 0.00016886187098953058,
"eval_loss": 7.013807773590088,
"eval_runtime": 547.6222,
"eval_samples_per_second": 9.107,
"eval_steps_per_second": 1.139,
"step": 1
},
{
"epoch": 0.00033772374197906115,
"grad_norm": 11.927528381347656,
"learning_rate": 2e-05,
"loss": 6.6838,
"step": 2
},
{
"epoch": 0.0005065856129685917,
"grad_norm": 12.927790641784668,
"learning_rate": 3e-05,
"loss": 6.7555,
"step": 3
},
{
"epoch": 0.0006754474839581223,
"grad_norm": 16.575254440307617,
"learning_rate": 4e-05,
"loss": 6.658,
"step": 4
},
{
"epoch": 0.0008443093549476528,
"grad_norm": 16.825057983398438,
"learning_rate": 5e-05,
"loss": 6.912,
"step": 5
},
{
"epoch": 0.0010131712259371835,
"grad_norm": 19.55130958557129,
"learning_rate": 6e-05,
"loss": 7.1032,
"step": 6
},
{
"epoch": 0.001182033096926714,
"grad_norm": 18.538299560546875,
"learning_rate": 7e-05,
"loss": 5.4669,
"step": 7
},
{
"epoch": 0.0013508949679162446,
"grad_norm": 16.627212524414062,
"learning_rate": 8e-05,
"loss": 3.6652,
"step": 8
},
{
"epoch": 0.001519756838905775,
"grad_norm": 17.426902770996094,
"learning_rate": 9e-05,
"loss": 1.4218,
"step": 9
},
{
"epoch": 0.001519756838905775,
"eval_loss": 1.1324265003204346,
"eval_runtime": 547.8033,
"eval_samples_per_second": 9.104,
"eval_steps_per_second": 1.139,
"step": 9
},
{
"epoch": 0.0016886187098953055,
"grad_norm": 9.461427688598633,
"learning_rate": 0.0001,
"loss": 0.8645,
"step": 10
},
{
"epoch": 0.0018574805808848362,
"grad_norm": 14.465333938598633,
"learning_rate": 9.99695413509548e-05,
"loss": 0.5779,
"step": 11
},
{
"epoch": 0.002026342451874367,
"grad_norm": 4.43085241317749,
"learning_rate": 9.987820251299122e-05,
"loss": 0.2061,
"step": 12
},
{
"epoch": 0.002195204322863897,
"grad_norm": 3.272099018096924,
"learning_rate": 9.972609476841367e-05,
"loss": 0.2045,
"step": 13
},
{
"epoch": 0.002364066193853428,
"grad_norm": 6.308006763458252,
"learning_rate": 9.951340343707852e-05,
"loss": 0.151,
"step": 14
},
{
"epoch": 0.0025329280648429585,
"grad_norm": 2.1944222450256348,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0865,
"step": 15
},
{
"epoch": 0.002701789935832489,
"grad_norm": 3.657510280609131,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0805,
"step": 16
},
{
"epoch": 0.0028706518068220195,
"grad_norm": 7.360884189605713,
"learning_rate": 9.851478631379982e-05,
"loss": 0.2637,
"step": 17
},
{
"epoch": 0.00303951367781155,
"grad_norm": 7.242488384246826,
"learning_rate": 9.806308479691595e-05,
"loss": 0.2653,
"step": 18
},
{
"epoch": 0.00303951367781155,
"eval_loss": 0.1920052468776703,
"eval_runtime": 548.1102,
"eval_samples_per_second": 9.099,
"eval_steps_per_second": 1.138,
"step": 18
},
{
"epoch": 0.003208375548801081,
"grad_norm": 4.502208709716797,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0484,
"step": 19
},
{
"epoch": 0.003377237419790611,
"grad_norm": 22.75486946105957,
"learning_rate": 9.698463103929542e-05,
"loss": 0.4917,
"step": 20
},
{
"epoch": 0.0035460992907801418,
"grad_norm": 10.100854873657227,
"learning_rate": 9.635919272833938e-05,
"loss": 0.3209,
"step": 21
},
{
"epoch": 0.0037149611617696724,
"grad_norm": 7.107204437255859,
"learning_rate": 9.567727288213005e-05,
"loss": 0.1272,
"step": 22
},
{
"epoch": 0.003883823032759203,
"grad_norm": 8.472229957580566,
"learning_rate": 9.493970231495835e-05,
"loss": 0.1549,
"step": 23
},
{
"epoch": 0.004052684903748734,
"grad_norm": 7.11989164352417,
"learning_rate": 9.414737964294636e-05,
"loss": 0.3053,
"step": 24
},
{
"epoch": 0.0042215467747382645,
"grad_norm": 8.643906593322754,
"learning_rate": 9.330127018922194e-05,
"loss": 0.467,
"step": 25
},
{
"epoch": 0.004390408645727794,
"grad_norm": 11.489653587341309,
"learning_rate": 9.24024048078213e-05,
"loss": 0.6607,
"step": 26
},
{
"epoch": 0.004559270516717325,
"grad_norm": 10.87166690826416,
"learning_rate": 9.145187862775209e-05,
"loss": 0.251,
"step": 27
},
{
"epoch": 0.004559270516717325,
"eval_loss": 0.19702549278736115,
"eval_runtime": 549.5055,
"eval_samples_per_second": 9.075,
"eval_steps_per_second": 1.136,
"step": 27
},
{
"epoch": 0.004728132387706856,
"grad_norm": 4.551630973815918,
"learning_rate": 9.045084971874738e-05,
"loss": 0.1867,
"step": 28
},
{
"epoch": 0.004896994258696386,
"grad_norm": 4.584312915802002,
"learning_rate": 8.940053768033609e-05,
"loss": 0.1185,
"step": 29
},
{
"epoch": 0.005065856129685917,
"grad_norm": 3.1855695247650146,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0763,
"step": 30
},
{
"epoch": 0.005234718000675448,
"grad_norm": 3.6154963970184326,
"learning_rate": 8.715724127386972e-05,
"loss": 0.1514,
"step": 31
},
{
"epoch": 0.005403579871664978,
"grad_norm": 2.6439056396484375,
"learning_rate": 8.596699001693255e-05,
"loss": 0.106,
"step": 32
},
{
"epoch": 0.005572441742654508,
"grad_norm": 3.4792051315307617,
"learning_rate": 8.473291852294987e-05,
"loss": 0.2561,
"step": 33
},
{
"epoch": 0.005741303613644039,
"grad_norm": 2.5845413208007812,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0694,
"step": 34
},
{
"epoch": 0.00591016548463357,
"grad_norm": 5.633108615875244,
"learning_rate": 8.213938048432697e-05,
"loss": 0.2615,
"step": 35
},
{
"epoch": 0.0060790273556231,
"grad_norm": 1.69390070438385,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0489,
"step": 36
},
{
"epoch": 0.0060790273556231,
"eval_loss": 0.14112035930156708,
"eval_runtime": 549.905,
"eval_samples_per_second": 9.069,
"eval_steps_per_second": 1.135,
"step": 36
},
{
"epoch": 0.006247889226612631,
"grad_norm": 3.9095728397369385,
"learning_rate": 7.938926261462366e-05,
"loss": 0.177,
"step": 37
},
{
"epoch": 0.006416751097602162,
"grad_norm": 3.213510751724243,
"learning_rate": 7.795964517353735e-05,
"loss": 0.1018,
"step": 38
},
{
"epoch": 0.006585612968591692,
"grad_norm": 0.6583210825920105,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0182,
"step": 39
},
{
"epoch": 0.006754474839581222,
"grad_norm": 2.233595371246338,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0855,
"step": 40
},
{
"epoch": 0.006923336710570753,
"grad_norm": 4.077779769897461,
"learning_rate": 7.347357813929454e-05,
"loss": 0.2739,
"step": 41
},
{
"epoch": 0.0070921985815602835,
"grad_norm": 0.9433442950248718,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0213,
"step": 42
},
{
"epoch": 0.007261060452549814,
"grad_norm": 1.9460291862487793,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0612,
"step": 43
},
{
"epoch": 0.007429922323539345,
"grad_norm": 1.7257614135742188,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0572,
"step": 44
},
{
"epoch": 0.007598784194528876,
"grad_norm": 2.224824905395508,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0212,
"step": 45
},
{
"epoch": 0.007598784194528876,
"eval_loss": 0.11776414513587952,
"eval_runtime": 549.1937,
"eval_samples_per_second": 9.081,
"eval_steps_per_second": 1.136,
"step": 45
},
{
"epoch": 0.007767646065518406,
"grad_norm": 2.233121395111084,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0473,
"step": 46
},
{
"epoch": 0.007936507936507936,
"grad_norm": 5.550644874572754,
"learning_rate": 6.378186779084995e-05,
"loss": 0.3432,
"step": 47
},
{
"epoch": 0.008105369807497468,
"grad_norm": 1.9874426126480103,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0844,
"step": 48
},
{
"epoch": 0.008274231678486997,
"grad_norm": 0.4189305901527405,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0099,
"step": 49
},
{
"epoch": 0.008443093549476529,
"grad_norm": 4.122661590576172,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0626,
"step": 50
},
{
"epoch": 0.008611955420466059,
"grad_norm": 1.935459852218628,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0458,
"step": 51
},
{
"epoch": 0.008780817291455589,
"grad_norm": 4.436528205871582,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2329,
"step": 52
},
{
"epoch": 0.00894967916244512,
"grad_norm": 5.652027130126953,
"learning_rate": 5.348782368720626e-05,
"loss": 0.246,
"step": 53
},
{
"epoch": 0.00911854103343465,
"grad_norm": 2.375805616378784,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0326,
"step": 54
},
{
"epoch": 0.00911854103343465,
"eval_loss": 0.09233339130878448,
"eval_runtime": 549.7539,
"eval_samples_per_second": 9.071,
"eval_steps_per_second": 1.135,
"step": 54
},
{
"epoch": 0.009287402904424182,
"grad_norm": 3.6387789249420166,
"learning_rate": 5e-05,
"loss": 0.2236,
"step": 55
},
{
"epoch": 0.009456264775413711,
"grad_norm": 2.7353525161743164,
"learning_rate": 4.825502516487497e-05,
"loss": 0.137,
"step": 56
},
{
"epoch": 0.009625126646403243,
"grad_norm": 4.310062885284424,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.2697,
"step": 57
},
{
"epoch": 0.009793988517392773,
"grad_norm": 4.689754486083984,
"learning_rate": 4.477357683661734e-05,
"loss": 0.1093,
"step": 58
},
{
"epoch": 0.009962850388382303,
"grad_norm": 0.28570395708084106,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0058,
"step": 59
},
{
"epoch": 0.010131712259371834,
"grad_norm": 2.6869800090789795,
"learning_rate": 4.131759111665349e-05,
"loss": 0.1282,
"step": 60
},
{
"epoch": 0.010300574130361364,
"grad_norm": 3.4605836868286133,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0432,
"step": 61
},
{
"epoch": 0.010469436001350895,
"grad_norm": 2.410428047180176,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0395,
"step": 62
},
{
"epoch": 0.010638297872340425,
"grad_norm": 0.8962574005126953,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0161,
"step": 63
},
{
"epoch": 0.010638297872340425,
"eval_loss": 0.08070806413888931,
"eval_runtime": 548.9743,
"eval_samples_per_second": 9.084,
"eval_steps_per_second": 1.137,
"step": 63
},
{
"epoch": 0.010807159743329957,
"grad_norm": 2.4982762336730957,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.1033,
"step": 64
},
{
"epoch": 0.010976021614319487,
"grad_norm": 2.2008962631225586,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0379,
"step": 65
},
{
"epoch": 0.011144883485309016,
"grad_norm": 2.7710535526275635,
"learning_rate": 3.12696703292044e-05,
"loss": 0.056,
"step": 66
},
{
"epoch": 0.011313745356298548,
"grad_norm": 1.5169901847839355,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.1376,
"step": 67
},
{
"epoch": 0.011482607227288078,
"grad_norm": 4.770974636077881,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2865,
"step": 68
},
{
"epoch": 0.01165146909827761,
"grad_norm": 1.257779836654663,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0276,
"step": 69
},
{
"epoch": 0.01182033096926714,
"grad_norm": 1.8166325092315674,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0374,
"step": 70
},
{
"epoch": 0.01198919284025667,
"grad_norm": 2.774444103240967,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0976,
"step": 71
},
{
"epoch": 0.0121580547112462,
"grad_norm": 2.698269844055176,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.144,
"step": 72
},
{
"epoch": 0.0121580547112462,
"eval_loss": 0.0661754235625267,
"eval_runtime": 548.8204,
"eval_samples_per_second": 9.087,
"eval_steps_per_second": 1.137,
"step": 72
},
{
"epoch": 0.01232691658223573,
"grad_norm": 0.39094191789627075,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0049,
"step": 73
},
{
"epoch": 0.012495778453225262,
"grad_norm": 1.9535773992538452,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0903,
"step": 74
},
{
"epoch": 0.012664640324214792,
"grad_norm": 4.114951133728027,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.2374,
"step": 75
},
{
"epoch": 0.012833502195204323,
"grad_norm": 6.92727518081665,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0763,
"step": 76
},
{
"epoch": 0.013002364066193853,
"grad_norm": 2.045363187789917,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0438,
"step": 77
},
{
"epoch": 0.013171225937183385,
"grad_norm": 0.5398113131523132,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0111,
"step": 78
},
{
"epoch": 0.013340087808172914,
"grad_norm": 0.021576276049017906,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0005,
"step": 79
},
{
"epoch": 0.013508949679162444,
"grad_norm": 2.4949581623077393,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0727,
"step": 80
},
{
"epoch": 0.013677811550151976,
"grad_norm": 3.859738349914551,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.1469,
"step": 81
},
{
"epoch": 0.013677811550151976,
"eval_loss": 0.055368997156620026,
"eval_runtime": 549.2203,
"eval_samples_per_second": 9.08,
"eval_steps_per_second": 1.136,
"step": 81
},
{
"epoch": 0.013846673421141506,
"grad_norm": 2.325451135635376,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0345,
"step": 82
},
{
"epoch": 0.014015535292131037,
"grad_norm": 0.9638977646827698,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0166,
"step": 83
},
{
"epoch": 0.014184397163120567,
"grad_norm": 0.6836529970169067,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0114,
"step": 84
},
{
"epoch": 0.014353259034110099,
"grad_norm": 2.1207351684570312,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0357,
"step": 85
},
{
"epoch": 0.014522120905099628,
"grad_norm": 0.31320345401763916,
"learning_rate": 5.852620357053651e-06,
"loss": 0.004,
"step": 86
},
{
"epoch": 0.014690982776089158,
"grad_norm": 2.221935272216797,
"learning_rate": 5.060297685041659e-06,
"loss": 0.1703,
"step": 87
},
{
"epoch": 0.01485984464707869,
"grad_norm": 0.486160010099411,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0152,
"step": 88
},
{
"epoch": 0.01502870651806822,
"grad_norm": 0.8668288588523865,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.0133,
"step": 89
},
{
"epoch": 0.015197568389057751,
"grad_norm": 2.698561429977417,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.1563,
"step": 90
},
{
"epoch": 0.015197568389057751,
"eval_loss": 0.05165484547615051,
"eval_runtime": 549.0935,
"eval_samples_per_second": 9.082,
"eval_steps_per_second": 1.136,
"step": 90
},
{
"epoch": 0.015366430260047281,
"grad_norm": 1.8739296197891235,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0499,
"step": 91
},
{
"epoch": 0.015535292131036813,
"grad_norm": 1.749314546585083,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.1767,
"step": 92
},
{
"epoch": 0.015704154002026342,
"grad_norm": 2.652275800704956,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.1609,
"step": 93
},
{
"epoch": 0.015873015873015872,
"grad_norm": 2.8473572731018066,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.348,
"step": 94
},
{
"epoch": 0.016041877744005402,
"grad_norm": 1.411710500717163,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0292,
"step": 95
},
{
"epoch": 0.016210739614994935,
"grad_norm": 3.422239065170288,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0503,
"step": 96
},
{
"epoch": 0.016379601485984465,
"grad_norm": 0.6517059803009033,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0131,
"step": 97
},
{
"epoch": 0.016548463356973995,
"grad_norm": 1.2050694227218628,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0304,
"step": 98
},
{
"epoch": 0.016717325227963525,
"grad_norm": 2.157958984375,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0815,
"step": 99
},
{
"epoch": 0.016717325227963525,
"eval_loss": 0.05151946842670441,
"eval_runtime": 549.1548,
"eval_samples_per_second": 9.081,
"eval_steps_per_second": 1.136,
"step": 99
},
{
"epoch": 0.016886187098953058,
"grad_norm": 0.8211965560913086,
"learning_rate": 0.0,
"loss": 0.0152,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.41887283560448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}