{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1362281822051937,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00034057045551298427,
"eval_loss": 1.8361178636550903,
"eval_runtime": 101.827,
"eval_samples_per_second": 12.148,
"eval_steps_per_second": 6.079,
"step": 1
},
{
"epoch": 0.0017028522775649213,
"grad_norm": 0.41594547033309937,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.0333,
"step": 5
},
{
"epoch": 0.0034057045551298426,
"grad_norm": 0.6465234160423279,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.0047,
"step": 10
},
{
"epoch": 0.005108556832694764,
"grad_norm": 0.8802253603935242,
"learning_rate": 5e-05,
"loss": 1.1382,
"step": 15
},
{
"epoch": 0.006811409110259685,
"grad_norm": 0.41058072447776794,
"learning_rate": 6.666666666666667e-05,
"loss": 1.1082,
"step": 20
},
{
"epoch": 0.008514261387824606,
"grad_norm": 0.5125563144683838,
"learning_rate": 8.333333333333334e-05,
"loss": 0.9712,
"step": 25
},
{
"epoch": 0.010217113665389528,
"grad_norm": 0.751390278339386,
"learning_rate": 0.0001,
"loss": 1.0366,
"step": 30
},
{
"epoch": 0.011919965942954448,
"grad_norm": 0.7571478486061096,
"learning_rate": 9.995494831023409e-05,
"loss": 1.0073,
"step": 35
},
{
"epoch": 0.01362281822051937,
"grad_norm": 0.6469959616661072,
"learning_rate": 9.981987442712633e-05,
"loss": 0.9567,
"step": 40
},
{
"epoch": 0.01532567049808429,
"grad_norm": 0.8713700771331787,
"learning_rate": 9.959502176294383e-05,
"loss": 1.1128,
"step": 45
},
{
"epoch": 0.017028522775649212,
"grad_norm": 1.3194971084594727,
"learning_rate": 9.928079551738543e-05,
"loss": 1.1412,
"step": 50
},
{
"epoch": 0.018731375053214133,
"grad_norm": 0.25489541888237,
"learning_rate": 9.887776194738432e-05,
"loss": 0.776,
"step": 55
},
{
"epoch": 0.020434227330779056,
"grad_norm": 0.3596874475479126,
"learning_rate": 9.838664734667495e-05,
"loss": 0.8018,
"step": 60
},
{
"epoch": 0.022137079608343976,
"grad_norm": 0.35467663407325745,
"learning_rate": 9.780833673696254e-05,
"loss": 0.8187,
"step": 65
},
{
"epoch": 0.023839931885908897,
"grad_norm": 0.36793845891952515,
"learning_rate": 9.714387227305422e-05,
"loss": 0.8099,
"step": 70
},
{
"epoch": 0.02554278416347382,
"grad_norm": 0.44260016083717346,
"learning_rate": 9.639445136482548e-05,
"loss": 0.7714,
"step": 75
},
{
"epoch": 0.02724563644103874,
"grad_norm": 0.6863613128662109,
"learning_rate": 9.55614245194068e-05,
"loss": 0.8211,
"step": 80
},
{
"epoch": 0.02894848871860366,
"grad_norm": 0.6633279323577881,
"learning_rate": 9.464629290747842e-05,
"loss": 0.8158,
"step": 85
},
{
"epoch": 0.03065134099616858,
"grad_norm": 1.0327296257019043,
"learning_rate": 9.365070565805941e-05,
"loss": 0.9717,
"step": 90
},
{
"epoch": 0.032354193273733504,
"grad_norm": 0.7854035496711731,
"learning_rate": 9.257645688666556e-05,
"loss": 0.9801,
"step": 95
},
{
"epoch": 0.034057045551298425,
"grad_norm": 1.9543837308883667,
"learning_rate": 9.142548246219212e-05,
"loss": 1.2958,
"step": 100
},
{
"epoch": 0.034057045551298425,
"eval_loss": 0.8909078240394592,
"eval_runtime": 103.3047,
"eval_samples_per_second": 11.974,
"eval_steps_per_second": 5.992,
"step": 100
},
{
"epoch": 0.035759897828863345,
"grad_norm": 0.23156222701072693,
"learning_rate": 9.019985651834703e-05,
"loss": 0.6506,
"step": 105
},
{
"epoch": 0.037462750106428265,
"grad_norm": 0.3277677297592163,
"learning_rate": 8.890178771592199e-05,
"loss": 0.8633,
"step": 110
},
{
"epoch": 0.039165602383993185,
"grad_norm": 0.33476242423057556,
"learning_rate": 8.753361526263621e-05,
"loss": 0.8823,
"step": 115
},
{
"epoch": 0.04086845466155811,
"grad_norm": 0.36144304275512695,
"learning_rate": 8.609780469772623e-05,
"loss": 0.7879,
"step": 120
},
{
"epoch": 0.04257130693912303,
"grad_norm": 0.3987506926059723,
"learning_rate": 8.459694344887732e-05,
"loss": 0.7091,
"step": 125
},
{
"epoch": 0.04427415921668795,
"grad_norm": 0.5698655843734741,
"learning_rate": 8.303373616950408e-05,
"loss": 0.82,
"step": 130
},
{
"epoch": 0.04597701149425287,
"grad_norm": 0.805975615978241,
"learning_rate": 8.141099986478212e-05,
"loss": 0.8761,
"step": 135
},
{
"epoch": 0.04767986377181779,
"grad_norm": 1.0833746194839478,
"learning_rate": 7.973165881521434e-05,
"loss": 1.0889,
"step": 140
},
{
"epoch": 0.04938271604938271,
"grad_norm": 0.7163301110267639,
"learning_rate": 7.799873930687978e-05,
"loss": 0.94,
"step": 145
},
{
"epoch": 0.05108556832694764,
"grad_norm": 2.4993340969085693,
"learning_rate": 7.621536417786159e-05,
"loss": 1.1311,
"step": 150
},
{
"epoch": 0.05278842060451256,
"grad_norm": 0.2643802762031555,
"learning_rate": 7.438474719068173e-05,
"loss": 0.7175,
"step": 155
},
{
"epoch": 0.05449127288207748,
"grad_norm": 0.3255314826965332,
"learning_rate": 7.251018724088367e-05,
"loss": 0.7516,
"step": 160
},
{
"epoch": 0.0561941251596424,
"grad_norm": 0.5198604464530945,
"learning_rate": 7.059506241219965e-05,
"loss": 0.767,
"step": 165
},
{
"epoch": 0.05789697743720732,
"grad_norm": 0.477650910615921,
"learning_rate": 6.864282388901544e-05,
"loss": 0.7519,
"step": 170
},
{
"epoch": 0.05959982971477224,
"grad_norm": 0.48475128412246704,
"learning_rate": 6.665698973710288e-05,
"loss": 0.7067,
"step": 175
},
{
"epoch": 0.06130268199233716,
"grad_norm": 0.7966631650924683,
"learning_rate": 6.464113856382752e-05,
"loss": 0.8934,
"step": 180
},
{
"epoch": 0.06300553426990209,
"grad_norm": 0.8111737966537476,
"learning_rate": 6.259890306925627e-05,
"loss": 0.7036,
"step": 185
},
{
"epoch": 0.06470838654746701,
"grad_norm": 0.6757014393806458,
"learning_rate": 6.0533963499786314e-05,
"loss": 0.7597,
"step": 190
},
{
"epoch": 0.06641123882503193,
"grad_norm": 0.7240155935287476,
"learning_rate": 5.8450041016092464e-05,
"loss": 0.7963,
"step": 195
},
{
"epoch": 0.06811409110259685,
"grad_norm": 2.1751976013183594,
"learning_rate": 5.6350890987343944e-05,
"loss": 1.195,
"step": 200
},
{
"epoch": 0.06811409110259685,
"eval_loss": 0.832435131072998,
"eval_runtime": 103.3366,
"eval_samples_per_second": 11.971,
"eval_steps_per_second": 5.99,
"step": 200
},
{
"epoch": 0.06981694338016177,
"grad_norm": 0.2670830488204956,
"learning_rate": 5.4240296223775465e-05,
"loss": 0.8186,
"step": 205
},
{
"epoch": 0.07151979565772669,
"grad_norm": 0.37276825308799744,
"learning_rate": 5.212206015980742e-05,
"loss": 0.6461,
"step": 210
},
{
"epoch": 0.07322264793529161,
"grad_norm": 0.4148285686969757,
"learning_rate": 5e-05,
"loss": 0.6849,
"step": 215
},
{
"epoch": 0.07492550021285653,
"grad_norm": 0.467510849237442,
"learning_rate": 4.78779398401926e-05,
"loss": 0.7479,
"step": 220
},
{
"epoch": 0.07662835249042145,
"grad_norm": 0.4516083002090454,
"learning_rate": 4.575970377622456e-05,
"loss": 0.7046,
"step": 225
},
{
"epoch": 0.07833120476798637,
"grad_norm": 0.6386833786964417,
"learning_rate": 4.364910901265606e-05,
"loss": 0.8149,
"step": 230
},
{
"epoch": 0.0800340570455513,
"grad_norm": 0.7090473771095276,
"learning_rate": 4.1549958983907555e-05,
"loss": 0.7141,
"step": 235
},
{
"epoch": 0.08173690932311622,
"grad_norm": 0.8289636969566345,
"learning_rate": 3.94660365002137e-05,
"loss": 0.7959,
"step": 240
},
{
"epoch": 0.08343976160068114,
"grad_norm": 0.7692727446556091,
"learning_rate": 3.740109693074375e-05,
"loss": 0.8913,
"step": 245
},
{
"epoch": 0.08514261387824607,
"grad_norm": 2.1364786624908447,
"learning_rate": 3.5358861436172485e-05,
"loss": 1.1669,
"step": 250
},
{
"epoch": 0.08684546615581099,
"grad_norm": 0.2958797812461853,
"learning_rate": 3.334301026289712e-05,
"loss": 0.5825,
"step": 255
},
{
"epoch": 0.0885483184333759,
"grad_norm": 0.3214460611343384,
"learning_rate": 3.135717611098458e-05,
"loss": 0.6469,
"step": 260
},
{
"epoch": 0.09025117071094083,
"grad_norm": 0.35879385471343994,
"learning_rate": 2.9404937587800375e-05,
"loss": 0.6802,
"step": 265
},
{
"epoch": 0.09195402298850575,
"grad_norm": 0.4895831048488617,
"learning_rate": 2.748981275911633e-05,
"loss": 0.6844,
"step": 270
},
{
"epoch": 0.09365687526607067,
"grad_norm": 0.5248720645904541,
"learning_rate": 2.5615252809318284e-05,
"loss": 0.816,
"step": 275
},
{
"epoch": 0.09535972754363559,
"grad_norm": 0.9332192540168762,
"learning_rate": 2.3784635822138424e-05,
"loss": 0.7685,
"step": 280
},
{
"epoch": 0.0970625798212005,
"grad_norm": 1.0168631076812744,
"learning_rate": 2.2001260693120233e-05,
"loss": 0.8865,
"step": 285
},
{
"epoch": 0.09876543209876543,
"grad_norm": 0.9966294169425964,
"learning_rate": 2.026834118478567e-05,
"loss": 0.7829,
"step": 290
},
{
"epoch": 0.10046828437633035,
"grad_norm": 0.7733042240142822,
"learning_rate": 1.858900013521788e-05,
"loss": 0.8039,
"step": 295
},
{
"epoch": 0.10217113665389528,
"grad_norm": 1.4958066940307617,
"learning_rate": 1.6966263830495936e-05,
"loss": 1.0362,
"step": 300
},
{
"epoch": 0.10217113665389528,
"eval_loss": 0.8057827949523926,
"eval_runtime": 103.3191,
"eval_samples_per_second": 11.973,
"eval_steps_per_second": 5.991,
"step": 300
},
{
"epoch": 0.1038739889314602,
"grad_norm": 0.2826021909713745,
"learning_rate": 1.5403056551122697e-05,
"loss": 0.6531,
"step": 305
},
{
"epoch": 0.10557684120902512,
"grad_norm": 0.3743998110294342,
"learning_rate": 1.3902195302273779e-05,
"loss": 0.6584,
"step": 310
},
{
"epoch": 0.10727969348659004,
"grad_norm": 0.3901960849761963,
"learning_rate": 1.246638473736378e-05,
"loss": 0.6357,
"step": 315
},
{
"epoch": 0.10898254576415496,
"grad_norm": 0.45998629927635193,
"learning_rate": 1.1098212284078036e-05,
"loss": 0.7242,
"step": 320
},
{
"epoch": 0.11068539804171988,
"grad_norm": 0.5637640357017517,
"learning_rate": 9.800143481652979e-06,
"loss": 0.7037,
"step": 325
},
{
"epoch": 0.1123882503192848,
"grad_norm": 0.8985954523086548,
"learning_rate": 8.574517537807897e-06,
"loss": 0.6539,
"step": 330
},
{
"epoch": 0.11409110259684972,
"grad_norm": 0.580285370349884,
"learning_rate": 7.423543113334436e-06,
"loss": 0.6758,
"step": 335
},
{
"epoch": 0.11579395487441464,
"grad_norm": 0.9392384886741638,
"learning_rate": 6.349294341940593e-06,
"loss": 0.6773,
"step": 340
},
{
"epoch": 0.11749680715197956,
"grad_norm": 0.8998203873634338,
"learning_rate": 5.353707092521582e-06,
"loss": 0.7652,
"step": 345
},
{
"epoch": 0.11919965942954448,
"grad_norm": 1.7155072689056396,
"learning_rate": 4.43857548059321e-06,
"loss": 0.9315,
"step": 350
},
{
"epoch": 0.1209025117071094,
"grad_norm": 0.24951836466789246,
"learning_rate": 3.605548635174533e-06,
"loss": 0.6951,
"step": 355
},
{
"epoch": 0.12260536398467432,
"grad_norm": 0.3586573600769043,
"learning_rate": 2.85612772694579e-06,
"loss": 0.6712,
"step": 360
},
{
"epoch": 0.12430821626223926,
"grad_norm": 0.39840635657310486,
"learning_rate": 2.191663263037458e-06,
"loss": 0.7289,
"step": 365
},
{
"epoch": 0.12601106853980418,
"grad_norm": 0.4486914277076721,
"learning_rate": 1.6133526533250565e-06,
"loss": 0.6964,
"step": 370
},
{
"epoch": 0.1277139208173691,
"grad_norm": 0.60421222448349,
"learning_rate": 1.1222380526156928e-06,
"loss": 0.6867,
"step": 375
},
{
"epoch": 0.12941677309493402,
"grad_norm": 0.6045652031898499,
"learning_rate": 7.192044826145771e-07,
"loss": 0.8006,
"step": 380
},
{
"epoch": 0.13111962537249894,
"grad_norm": 0.9322503209114075,
"learning_rate": 4.049782370561583e-07,
"loss": 0.8982,
"step": 385
},
{
"epoch": 0.13282247765006386,
"grad_norm": 0.9230653643608093,
"learning_rate": 1.8012557287367392e-07,
"loss": 0.726,
"step": 390
},
{
"epoch": 0.13452532992762878,
"grad_norm": 0.8531010150909424,
"learning_rate": 4.5051689765929214e-08,
"loss": 0.7812,
"step": 395
},
{
"epoch": 0.1362281822051937,
"grad_norm": 2.1816444396972656,
"learning_rate": 0.0,
"loss": 1.0379,
"step": 400
},
{
"epoch": 0.1362281822051937,
"eval_loss": 0.7936921119689941,
"eval_runtime": 103.1818,
"eval_samples_per_second": 11.989,
"eval_steps_per_second": 5.999,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3767560524726272e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}