{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.73972602739726,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0273972602739726,
"grad_norm": 0.34039291739463806,
"learning_rate": 1e-05,
"loss": 0.7873,
"step": 1
},
{
"epoch": 0.0273972602739726,
"eval_loss": 0.7825180888175964,
"eval_runtime": 3.7233,
"eval_samples_per_second": 33.035,
"eval_steps_per_second": 1.074,
"step": 1
},
{
"epoch": 0.0547945205479452,
"grad_norm": 0.33236923813819885,
"learning_rate": 2e-05,
"loss": 0.7409,
"step": 2
},
{
"epoch": 0.0821917808219178,
"grad_norm": 0.3145294487476349,
"learning_rate": 3e-05,
"loss": 0.7692,
"step": 3
},
{
"epoch": 0.1095890410958904,
"grad_norm": 0.3200077712535858,
"learning_rate": 4e-05,
"loss": 0.8006,
"step": 4
},
{
"epoch": 0.136986301369863,
"grad_norm": 0.35172683000564575,
"learning_rate": 5e-05,
"loss": 0.7617,
"step": 5
},
{
"epoch": 0.1643835616438356,
"grad_norm": 0.3168494403362274,
"learning_rate": 6e-05,
"loss": 0.6911,
"step": 6
},
{
"epoch": 0.1917808219178082,
"grad_norm": 0.3026105463504791,
"learning_rate": 7e-05,
"loss": 0.7288,
"step": 7
},
{
"epoch": 0.2191780821917808,
"grad_norm": 0.23557651042938232,
"learning_rate": 8e-05,
"loss": 0.6837,
"step": 8
},
{
"epoch": 0.2465753424657534,
"grad_norm": 0.2609310746192932,
"learning_rate": 9e-05,
"loss": 0.7528,
"step": 9
},
{
"epoch": 0.2465753424657534,
"eval_loss": 0.7097108364105225,
"eval_runtime": 3.7358,
"eval_samples_per_second": 32.925,
"eval_steps_per_second": 1.071,
"step": 9
},
{
"epoch": 0.273972602739726,
"grad_norm": 0.38081562519073486,
"learning_rate": 0.0001,
"loss": 0.6966,
"step": 10
},
{
"epoch": 0.3013698630136986,
"grad_norm": 0.3591172993183136,
"learning_rate": 9.99695413509548e-05,
"loss": 0.7356,
"step": 11
},
{
"epoch": 0.3287671232876712,
"grad_norm": 0.2671995460987091,
"learning_rate": 9.987820251299122e-05,
"loss": 0.6586,
"step": 12
},
{
"epoch": 0.3561643835616438,
"grad_norm": 0.23940521478652954,
"learning_rate": 9.972609476841367e-05,
"loss": 0.6827,
"step": 13
},
{
"epoch": 0.3835616438356164,
"grad_norm": 0.1851789802312851,
"learning_rate": 9.951340343707852e-05,
"loss": 0.6027,
"step": 14
},
{
"epoch": 0.410958904109589,
"grad_norm": 0.1938449591398239,
"learning_rate": 9.924038765061042e-05,
"loss": 0.6249,
"step": 15
},
{
"epoch": 0.4383561643835616,
"grad_norm": 0.20314064621925354,
"learning_rate": 9.890738003669029e-05,
"loss": 0.6649,
"step": 16
},
{
"epoch": 0.4657534246575342,
"grad_norm": 0.20224684476852417,
"learning_rate": 9.851478631379982e-05,
"loss": 0.6648,
"step": 17
},
{
"epoch": 0.4931506849315068,
"grad_norm": 0.1758587509393692,
"learning_rate": 9.806308479691595e-05,
"loss": 0.6387,
"step": 18
},
{
"epoch": 0.4931506849315068,
"eval_loss": 0.641433835029602,
"eval_runtime": 3.7417,
"eval_samples_per_second": 32.873,
"eval_steps_per_second": 1.069,
"step": 18
},
{
"epoch": 0.5205479452054794,
"grad_norm": 0.17420168220996857,
"learning_rate": 9.755282581475769e-05,
"loss": 0.5897,
"step": 19
},
{
"epoch": 0.547945205479452,
"grad_norm": 0.16646286845207214,
"learning_rate": 9.698463103929542e-05,
"loss": 0.6144,
"step": 20
},
{
"epoch": 0.5753424657534246,
"grad_norm": 0.17843681573867798,
"learning_rate": 9.635919272833938e-05,
"loss": 0.5987,
"step": 21
},
{
"epoch": 0.6027397260273972,
"grad_norm": 0.17376701533794403,
"learning_rate": 9.567727288213005e-05,
"loss": 0.6542,
"step": 22
},
{
"epoch": 0.6301369863013698,
"grad_norm": 0.17084181308746338,
"learning_rate": 9.493970231495835e-05,
"loss": 0.6578,
"step": 23
},
{
"epoch": 0.6575342465753424,
"grad_norm": 0.17303168773651123,
"learning_rate": 9.414737964294636e-05,
"loss": 0.6007,
"step": 24
},
{
"epoch": 0.684931506849315,
"grad_norm": 0.17795169353485107,
"learning_rate": 9.330127018922194e-05,
"loss": 0.6037,
"step": 25
},
{
"epoch": 0.7123287671232876,
"grad_norm": 0.18882305920124054,
"learning_rate": 9.24024048078213e-05,
"loss": 0.6477,
"step": 26
},
{
"epoch": 0.7397260273972602,
"grad_norm": 0.17540940642356873,
"learning_rate": 9.145187862775209e-05,
"loss": 0.5942,
"step": 27
},
{
"epoch": 0.7397260273972602,
"eval_loss": 0.6050936579704285,
"eval_runtime": 3.7452,
"eval_samples_per_second": 32.842,
"eval_steps_per_second": 1.068,
"step": 27
},
{
"epoch": 0.7671232876712328,
"grad_norm": 0.15383704006671906,
"learning_rate": 9.045084971874738e-05,
"loss": 0.5708,
"step": 28
},
{
"epoch": 0.7945205479452054,
"grad_norm": 0.1529732495546341,
"learning_rate": 8.940053768033609e-05,
"loss": 0.5649,
"step": 29
},
{
"epoch": 0.821917808219178,
"grad_norm": 0.17093701660633087,
"learning_rate": 8.83022221559489e-05,
"loss": 0.5892,
"step": 30
},
{
"epoch": 0.8493150684931506,
"grad_norm": 0.16270044445991516,
"learning_rate": 8.715724127386972e-05,
"loss": 0.6012,
"step": 31
},
{
"epoch": 0.8767123287671232,
"grad_norm": 0.16807973384857178,
"learning_rate": 8.596699001693255e-05,
"loss": 0.6265,
"step": 32
},
{
"epoch": 0.9041095890410958,
"grad_norm": 0.17066408693790436,
"learning_rate": 8.473291852294987e-05,
"loss": 0.6891,
"step": 33
},
{
"epoch": 0.9315068493150684,
"grad_norm": 0.16887181997299194,
"learning_rate": 8.345653031794292e-05,
"loss": 0.5854,
"step": 34
},
{
"epoch": 0.958904109589041,
"grad_norm": 0.151829794049263,
"learning_rate": 8.213938048432697e-05,
"loss": 0.5384,
"step": 35
},
{
"epoch": 0.9863013698630136,
"grad_norm": 0.1556522399187088,
"learning_rate": 8.07830737662829e-05,
"loss": 0.5408,
"step": 36
},
{
"epoch": 0.9863013698630136,
"eval_loss": 0.581146240234375,
"eval_runtime": 3.7475,
"eval_samples_per_second": 32.822,
"eval_steps_per_second": 1.067,
"step": 36
},
{
"epoch": 1.0136986301369864,
"grad_norm": 0.24949964880943298,
"learning_rate": 7.938926261462366e-05,
"loss": 1.0727,
"step": 37
},
{
"epoch": 1.0410958904109588,
"grad_norm": 0.15431559085845947,
"learning_rate": 7.795964517353735e-05,
"loss": 0.5255,
"step": 38
},
{
"epoch": 1.0684931506849316,
"grad_norm": 0.16149845719337463,
"learning_rate": 7.649596321166024e-05,
"loss": 0.5169,
"step": 39
},
{
"epoch": 1.095890410958904,
"grad_norm": 0.17814257740974426,
"learning_rate": 7.500000000000001e-05,
"loss": 0.5936,
"step": 40
},
{
"epoch": 1.1232876712328768,
"grad_norm": 0.17237083613872528,
"learning_rate": 7.347357813929454e-05,
"loss": 0.5709,
"step": 41
},
{
"epoch": 1.1506849315068493,
"grad_norm": 0.16471445560455322,
"learning_rate": 7.191855733945387e-05,
"loss": 0.5481,
"step": 42
},
{
"epoch": 1.178082191780822,
"grad_norm": 0.16372527182102203,
"learning_rate": 7.033683215379002e-05,
"loss": 0.6038,
"step": 43
},
{
"epoch": 1.2054794520547945,
"grad_norm": 0.16758929193019867,
"learning_rate": 6.873032967079561e-05,
"loss": 0.5288,
"step": 44
},
{
"epoch": 1.2328767123287672,
"grad_norm": 0.18296092748641968,
"learning_rate": 6.710100716628344e-05,
"loss": 0.5391,
"step": 45
},
{
"epoch": 1.2328767123287672,
"eval_loss": 0.563045859336853,
"eval_runtime": 3.7488,
"eval_samples_per_second": 32.81,
"eval_steps_per_second": 1.067,
"step": 45
},
{
"epoch": 1.2602739726027397,
"grad_norm": 0.17584213614463806,
"learning_rate": 6.545084971874738e-05,
"loss": 0.4915,
"step": 46
},
{
"epoch": 1.2876712328767124,
"grad_norm": 0.1747244894504547,
"learning_rate": 6.378186779084995e-05,
"loss": 0.5201,
"step": 47
},
{
"epoch": 1.3150684931506849,
"grad_norm": 0.1775994747877121,
"learning_rate": 6.209609477998338e-05,
"loss": 0.5486,
"step": 48
},
{
"epoch": 1.3424657534246576,
"grad_norm": 0.18910668790340424,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.5333,
"step": 49
},
{
"epoch": 1.36986301369863,
"grad_norm": 0.18840499222278595,
"learning_rate": 5.868240888334653e-05,
"loss": 0.5564,
"step": 50
},
{
"epoch": 1.3972602739726028,
"grad_norm": 0.19617432355880737,
"learning_rate": 5.695865504800327e-05,
"loss": 0.6264,
"step": 51
},
{
"epoch": 1.4246575342465753,
"grad_norm": 0.16905656456947327,
"learning_rate": 5.522642316338268e-05,
"loss": 0.4914,
"step": 52
},
{
"epoch": 1.452054794520548,
"grad_norm": 0.19357207417488098,
"learning_rate": 5.348782368720626e-05,
"loss": 0.5915,
"step": 53
},
{
"epoch": 1.4794520547945205,
"grad_norm": 0.1903381496667862,
"learning_rate": 5.174497483512506e-05,
"loss": 0.5207,
"step": 54
},
{
"epoch": 1.4794520547945205,
"eval_loss": 0.5485405921936035,
"eval_runtime": 3.7239,
"eval_samples_per_second": 33.03,
"eval_steps_per_second": 1.074,
"step": 54
},
{
"epoch": 1.5068493150684932,
"grad_norm": 0.17761348187923431,
"learning_rate": 5e-05,
"loss": 0.5346,
"step": 55
},
{
"epoch": 1.5342465753424657,
"grad_norm": 0.18968743085861206,
"learning_rate": 4.825502516487497e-05,
"loss": 0.5365,
"step": 56
},
{
"epoch": 1.5616438356164384,
"grad_norm": 0.1862426996231079,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.5685,
"step": 57
},
{
"epoch": 1.589041095890411,
"grad_norm": 0.1861899346113205,
"learning_rate": 4.477357683661734e-05,
"loss": 0.5087,
"step": 58
},
{
"epoch": 1.6164383561643836,
"grad_norm": 0.20097282528877258,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.5512,
"step": 59
},
{
"epoch": 1.643835616438356,
"grad_norm": 0.19234813749790192,
"learning_rate": 4.131759111665349e-05,
"loss": 0.5802,
"step": 60
},
{
"epoch": 1.6712328767123288,
"grad_norm": 0.1840270459651947,
"learning_rate": 3.960441545911204e-05,
"loss": 0.5111,
"step": 61
},
{
"epoch": 1.6986301369863015,
"grad_norm": 0.1915348917245865,
"learning_rate": 3.790390522001662e-05,
"loss": 0.5219,
"step": 62
},
{
"epoch": 1.726027397260274,
"grad_norm": 0.1915121227502823,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.5399,
"step": 63
},
{
"epoch": 1.726027397260274,
"eval_loss": 0.5395578742027283,
"eval_runtime": 3.7376,
"eval_samples_per_second": 32.909,
"eval_steps_per_second": 1.07,
"step": 63
},
{
"epoch": 1.7534246575342465,
"grad_norm": 0.20380911231040955,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.5214,
"step": 64
},
{
"epoch": 1.7808219178082192,
"grad_norm": 0.20791220664978027,
"learning_rate": 3.289899283371657e-05,
"loss": 0.5881,
"step": 65
},
{
"epoch": 1.808219178082192,
"grad_norm": 0.20161592960357666,
"learning_rate": 3.12696703292044e-05,
"loss": 0.5263,
"step": 66
},
{
"epoch": 1.8356164383561644,
"grad_norm": 0.18923571705818176,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.5327,
"step": 67
},
{
"epoch": 1.8630136986301369,
"grad_norm": 0.19650258123874664,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.5184,
"step": 68
},
{
"epoch": 1.8904109589041096,
"grad_norm": 0.19661547243595123,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.5911,
"step": 69
},
{
"epoch": 1.9178082191780823,
"grad_norm": 0.20530207455158234,
"learning_rate": 2.500000000000001e-05,
"loss": 0.5041,
"step": 70
},
{
"epoch": 1.9452054794520548,
"grad_norm": 0.21102431416511536,
"learning_rate": 2.350403678833976e-05,
"loss": 0.5269,
"step": 71
},
{
"epoch": 1.9726027397260273,
"grad_norm": 0.19871897995471954,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.5147,
"step": 72
},
{
"epoch": 1.9726027397260273,
"eval_loss": 0.5325475335121155,
"eval_runtime": 3.8106,
"eval_samples_per_second": 32.279,
"eval_steps_per_second": 1.05,
"step": 72
},
{
"epoch": 2.0,
"grad_norm": 0.3174644410610199,
"learning_rate": 2.061073738537635e-05,
"loss": 0.7421,
"step": 73
},
{
"epoch": 2.0273972602739727,
"grad_norm": 0.19568274915218353,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.6202,
"step": 74
},
{
"epoch": 2.0547945205479454,
"grad_norm": 0.20575644075870514,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.4887,
"step": 75
},
{
"epoch": 2.0821917808219177,
"grad_norm": 0.1924329251050949,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.5011,
"step": 76
},
{
"epoch": 2.1095890410958904,
"grad_norm": 0.20766966044902802,
"learning_rate": 1.526708147705013e-05,
"loss": 0.5311,
"step": 77
},
{
"epoch": 2.136986301369863,
"grad_norm": 0.194940447807312,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.438,
"step": 78
},
{
"epoch": 2.1643835616438354,
"grad_norm": 0.19695548713207245,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.506,
"step": 79
},
{
"epoch": 2.191780821917808,
"grad_norm": 0.2021200954914093,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.4969,
"step": 80
},
{
"epoch": 2.219178082191781,
"grad_norm": 0.20260506868362427,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.4825,
"step": 81
},
{
"epoch": 2.219178082191781,
"eval_loss": 0.5286130905151367,
"eval_runtime": 3.7501,
"eval_samples_per_second": 32.799,
"eval_steps_per_second": 1.067,
"step": 81
},
{
"epoch": 2.2465753424657535,
"grad_norm": 0.200306698679924,
"learning_rate": 9.549150281252633e-06,
"loss": 0.5857,
"step": 82
},
{
"epoch": 2.2739726027397262,
"grad_norm": 0.19307281076908112,
"learning_rate": 8.548121372247918e-06,
"loss": 0.4812,
"step": 83
},
{
"epoch": 2.3013698630136985,
"grad_norm": 0.19841904938220978,
"learning_rate": 7.597595192178702e-06,
"loss": 0.505,
"step": 84
},
{
"epoch": 2.328767123287671,
"grad_norm": 0.21751321852207184,
"learning_rate": 6.698729810778065e-06,
"loss": 0.5608,
"step": 85
},
{
"epoch": 2.356164383561644,
"grad_norm": 0.21146072447299957,
"learning_rate": 5.852620357053651e-06,
"loss": 0.4821,
"step": 86
},
{
"epoch": 2.383561643835616,
"grad_norm": 0.2212534099817276,
"learning_rate": 5.060297685041659e-06,
"loss": 0.5162,
"step": 87
},
{
"epoch": 2.410958904109589,
"grad_norm": 0.21350644528865814,
"learning_rate": 4.322727117869951e-06,
"loss": 0.51,
"step": 88
},
{
"epoch": 2.4383561643835616,
"grad_norm": 0.18542467057704926,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.4835,
"step": 89
},
{
"epoch": 2.4657534246575343,
"grad_norm": 0.21330738067626953,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.4642,
"step": 90
},
{
"epoch": 2.4657534246575343,
"eval_loss": 0.526906430721283,
"eval_runtime": 3.7465,
"eval_samples_per_second": 32.831,
"eval_steps_per_second": 1.068,
"step": 90
},
{
"epoch": 2.493150684931507,
"grad_norm": 0.21417373418807983,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.5203,
"step": 91
},
{
"epoch": 2.5205479452054793,
"grad_norm": 0.2159249186515808,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.5275,
"step": 92
},
{
"epoch": 2.547945205479452,
"grad_norm": 0.1969815343618393,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.4667,
"step": 93
},
{
"epoch": 2.5753424657534247,
"grad_norm": 0.2035369873046875,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.5494,
"step": 94
},
{
"epoch": 2.602739726027397,
"grad_norm": 0.19194987416267395,
"learning_rate": 7.596123493895991e-07,
"loss": 0.4834,
"step": 95
},
{
"epoch": 2.6301369863013697,
"grad_norm": 0.20009766519069672,
"learning_rate": 4.865965629214819e-07,
"loss": 0.4848,
"step": 96
},
{
"epoch": 2.6575342465753424,
"grad_norm": 0.22390450537204742,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.5222,
"step": 97
},
{
"epoch": 2.684931506849315,
"grad_norm": 0.19695667922496796,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.4638,
"step": 98
},
{
"epoch": 2.712328767123288,
"grad_norm": 0.21116042137145996,
"learning_rate": 3.04586490452119e-08,
"loss": 0.5376,
"step": 99
},
{
"epoch": 2.712328767123288,
"eval_loss": 0.5265399217605591,
"eval_runtime": 3.7835,
"eval_samples_per_second": 32.509,
"eval_steps_per_second": 1.057,
"step": 99
},
{
"epoch": 2.73972602739726,
"grad_norm": 0.22835801541805267,
"learning_rate": 0.0,
"loss": 0.4968,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.935098268483584e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
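
For reference, a minimal sketch of how this log might be inspected offline. It assumes the JSON above is saved under the usual checkpoint filename trainer_state.json and that matplotlib is installed; both are assumptions, not facts recorded in the file itself.

import json
import matplotlib.pyplot as plt

# Load the trainer state written alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training entries (keyed by "loss") and
# evaluation entries (keyed by "eval_loss"); split them by key.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("optimizer step")
plt.ylabel("loss")
plt.legend()
plt.title(f"epoch {state['epoch']:.2f}, global step {state['global_step']}")
plt.show()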