{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.012007144250829244,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.0017860627073107e-05,
"eval_loss": 1.443517804145813,
"eval_runtime": 510.9532,
"eval_samples_per_second": 27.453,
"eval_steps_per_second": 13.727,
"step": 1
},
{
"epoch": 0.00015008930313536553,
"grad_norm": 0.3933664560317993,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.2961,
"step": 5
},
{
"epoch": 0.00030017860627073106,
"grad_norm": 0.7418891191482544,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.4393,
"step": 10
},
{
"epoch": 0.0004502679094060966,
"grad_norm": 0.9560197591781616,
"learning_rate": 5e-05,
"loss": 0.5289,
"step": 15
},
{
"epoch": 0.0006003572125414621,
"grad_norm": 0.680629551410675,
"learning_rate": 6.666666666666667e-05,
"loss": 0.5328,
"step": 20
},
{
"epoch": 0.0007504465156768277,
"grad_norm": 1.0983694791793823,
"learning_rate": 8.333333333333334e-05,
"loss": 0.4416,
"step": 25
},
{
"epoch": 0.0009005358188121932,
"grad_norm": 1.356644630432129,
"learning_rate": 0.0001,
"loss": 0.2916,
"step": 30
},
{
"epoch": 0.0010506251219475587,
"grad_norm": 1.1323988437652588,
"learning_rate": 9.995494831023409e-05,
"loss": 0.1692,
"step": 35
},
{
"epoch": 0.0012007144250829242,
"grad_norm": 0.3086409866809845,
"learning_rate": 9.981987442712633e-05,
"loss": 0.0314,
"step": 40
},
{
"epoch": 0.00135080372821829,
"grad_norm": 0.08165588974952698,
"learning_rate": 9.959502176294383e-05,
"loss": 0.0181,
"step": 45
},
{
"epoch": 0.0015008930313536555,
"grad_norm": 0.1854889690876007,
"learning_rate": 9.928079551738543e-05,
"loss": 0.0083,
"step": 50
},
{
"epoch": 0.001650982334489021,
"grad_norm": 0.049650534987449646,
"learning_rate": 9.887776194738432e-05,
"loss": 0.037,
"step": 55
},
{
"epoch": 0.0018010716376243865,
"grad_norm": 0.35255470871925354,
"learning_rate": 9.838664734667495e-05,
"loss": 0.0096,
"step": 60
},
{
"epoch": 0.001951160940759752,
"grad_norm": 0.16714519262313843,
"learning_rate": 9.780833673696254e-05,
"loss": 0.0045,
"step": 65
},
{
"epoch": 0.0021012502438951175,
"grad_norm": 0.05677841603755951,
"learning_rate": 9.714387227305422e-05,
"loss": 0.0107,
"step": 70
},
{
"epoch": 0.002251339547030483,
"grad_norm": 0.37986552715301514,
"learning_rate": 9.639445136482548e-05,
"loss": 0.0242,
"step": 75
},
{
"epoch": 0.0024014288501658485,
"grad_norm": 0.16469906270503998,
"learning_rate": 9.55614245194068e-05,
"loss": 0.0026,
"step": 80
},
{
"epoch": 0.002551518153301214,
"grad_norm": 0.03446728736162186,
"learning_rate": 9.464629290747842e-05,
"loss": 0.0125,
"step": 85
},
{
"epoch": 0.00270160745643658,
"grad_norm": 0.08204316347837448,
"learning_rate": 9.365070565805941e-05,
"loss": 0.0101,
"step": 90
},
{
"epoch": 0.002851696759571945,
"grad_norm": 0.133678138256073,
"learning_rate": 9.257645688666556e-05,
"loss": 0.0039,
"step": 95
},
{
"epoch": 0.003001786062707311,
"grad_norm": 0.03044506348669529,
"learning_rate": 9.142548246219212e-05,
"loss": 0.0094,
"step": 100
},
{
"epoch": 0.003001786062707311,
"eval_loss": 0.012762267142534256,
"eval_runtime": 510.9883,
"eval_samples_per_second": 27.451,
"eval_steps_per_second": 13.726,
"step": 100
},
{
"epoch": 0.003151875365842676,
"grad_norm": 0.28025349974632263,
"learning_rate": 9.019985651834703e-05,
"loss": 0.0103,
"step": 105
},
{
"epoch": 0.003301964668978042,
"grad_norm": 0.2238554209470749,
"learning_rate": 8.890178771592199e-05,
"loss": 0.0382,
"step": 110
},
{
"epoch": 0.0034520539721134076,
"grad_norm": 0.016901832073926926,
"learning_rate": 8.753361526263621e-05,
"loss": 0.0138,
"step": 115
},
{
"epoch": 0.003602143275248773,
"grad_norm": 0.008931729942560196,
"learning_rate": 8.609780469772623e-05,
"loss": 0.0128,
"step": 120
},
{
"epoch": 0.0037522325783841386,
"grad_norm": 0.07771596312522888,
"learning_rate": 8.459694344887732e-05,
"loss": 0.0017,
"step": 125
},
{
"epoch": 0.003902321881519504,
"grad_norm": 0.09958741813898087,
"learning_rate": 8.303373616950408e-05,
"loss": 0.0028,
"step": 130
},
{
"epoch": 0.00405241118465487,
"grad_norm": 0.3993448317050934,
"learning_rate": 8.141099986478212e-05,
"loss": 0.0142,
"step": 135
},
{
"epoch": 0.004202500487790235,
"grad_norm": 0.0050088949501514435,
"learning_rate": 7.973165881521434e-05,
"loss": 0.0008,
"step": 140
},
{
"epoch": 0.004352589790925601,
"grad_norm": 0.027677390724420547,
"learning_rate": 7.799873930687978e-05,
"loss": 0.0007,
"step": 145
},
{
"epoch": 0.004502679094060966,
"grad_norm": 1.8557262420654297,
"learning_rate": 7.621536417786159e-05,
"loss": 0.0486,
"step": 150
},
{
"epoch": 0.004652768397196332,
"grad_norm": 0.013031048700213432,
"learning_rate": 7.438474719068173e-05,
"loss": 0.0049,
"step": 155
},
{
"epoch": 0.004802857700331697,
"grad_norm": 0.013981903903186321,
"learning_rate": 7.251018724088367e-05,
"loss": 0.0006,
"step": 160
},
{
"epoch": 0.004952947003467063,
"grad_norm": 0.05604551360011101,
"learning_rate": 7.059506241219965e-05,
"loss": 0.0151,
"step": 165
},
{
"epoch": 0.005103036306602428,
"grad_norm": 0.00818183645606041,
"learning_rate": 6.864282388901544e-05,
"loss": 0.0046,
"step": 170
},
{
"epoch": 0.005253125609737794,
"grad_norm": 0.005792565643787384,
"learning_rate": 6.665698973710288e-05,
"loss": 0.0058,
"step": 175
},
{
"epoch": 0.00540321491287316,
"grad_norm": 0.008401204831898212,
"learning_rate": 6.464113856382752e-05,
"loss": 0.0006,
"step": 180
},
{
"epoch": 0.005553304216008525,
"grad_norm": 0.004655248951166868,
"learning_rate": 6.259890306925627e-05,
"loss": 0.0005,
"step": 185
},
{
"epoch": 0.00570339351914389,
"grad_norm": 0.40482771396636963,
"learning_rate": 6.0533963499786314e-05,
"loss": 0.0027,
"step": 190
},
{
"epoch": 0.005853482822279256,
"grad_norm": 0.005078907124698162,
"learning_rate": 5.8450041016092464e-05,
"loss": 0.0014,
"step": 195
},
{
"epoch": 0.006003572125414622,
"grad_norm": 4.6471452713012695,
"learning_rate": 5.6350890987343944e-05,
"loss": 0.078,
"step": 200
},
{
"epoch": 0.006003572125414622,
"eval_loss": 0.006731819361448288,
"eval_runtime": 510.7459,
"eval_samples_per_second": 27.464,
"eval_steps_per_second": 13.733,
"step": 200
},
{
"epoch": 0.0061536614285499875,
"grad_norm": 0.18328024446964264,
"learning_rate": 5.4240296223775465e-05,
"loss": 0.0073,
"step": 205
},
{
"epoch": 0.006303750731685352,
"grad_norm": 0.008907951414585114,
"learning_rate": 5.212206015980742e-05,
"loss": 0.0005,
"step": 210
},
{
"epoch": 0.006453840034820718,
"grad_norm": 0.18641109764575958,
"learning_rate": 5e-05,
"loss": 0.0078,
"step": 215
},
{
"epoch": 0.006603929337956084,
"grad_norm": 0.05424876883625984,
"learning_rate": 4.78779398401926e-05,
"loss": 0.0013,
"step": 220
},
{
"epoch": 0.0067540186410914495,
"grad_norm": 0.7451849579811096,
"learning_rate": 4.575970377622456e-05,
"loss": 0.0033,
"step": 225
},
{
"epoch": 0.006904107944226815,
"grad_norm": 0.0506892055273056,
"learning_rate": 4.364910901265606e-05,
"loss": 0.0056,
"step": 230
},
{
"epoch": 0.00705419724736218,
"grad_norm": 0.010004466399550438,
"learning_rate": 4.1549958983907555e-05,
"loss": 0.0009,
"step": 235
},
{
"epoch": 0.007204286550497546,
"grad_norm": 0.005493512377142906,
"learning_rate": 3.94660365002137e-05,
"loss": 0.0052,
"step": 240
},
{
"epoch": 0.0073543758536329116,
"grad_norm": 0.014798315241932869,
"learning_rate": 3.740109693074375e-05,
"loss": 0.0011,
"step": 245
},
{
"epoch": 0.007504465156768277,
"grad_norm": 0.019689688459038734,
"learning_rate": 3.5358861436172485e-05,
"loss": 0.0081,
"step": 250
},
{
"epoch": 0.007654554459903643,
"grad_norm": 0.0050315759144723415,
"learning_rate": 3.334301026289712e-05,
"loss": 0.0026,
"step": 255
},
{
"epoch": 0.007804643763039008,
"grad_norm": 0.20544902980327606,
"learning_rate": 3.135717611098458e-05,
"loss": 0.0047,
"step": 260
},
{
"epoch": 0.007954733066174374,
"grad_norm": 0.010924947448074818,
"learning_rate": 2.9404937587800375e-05,
"loss": 0.0011,
"step": 265
},
{
"epoch": 0.00810482236930974,
"grad_norm": 0.0026773347053676844,
"learning_rate": 2.748981275911633e-05,
"loss": 0.0024,
"step": 270
},
{
"epoch": 0.008254911672445105,
"grad_norm": 0.010959619656205177,
"learning_rate": 2.5615252809318284e-05,
"loss": 0.001,
"step": 275
},
{
"epoch": 0.00840500097558047,
"grad_norm": 0.005525967571884394,
"learning_rate": 2.3784635822138424e-05,
"loss": 0.0108,
"step": 280
},
{
"epoch": 0.008555090278715836,
"grad_norm": 0.0066901505924761295,
"learning_rate": 2.2001260693120233e-05,
"loss": 0.029,
"step": 285
},
{
"epoch": 0.008705179581851201,
"grad_norm": 0.006530699320137501,
"learning_rate": 2.026834118478567e-05,
"loss": 0.0013,
"step": 290
},
{
"epoch": 0.008855268884986566,
"grad_norm": 0.024501588195562363,
"learning_rate": 1.858900013521788e-05,
"loss": 0.0006,
"step": 295
},
{
"epoch": 0.009005358188121933,
"grad_norm": 0.005288040731102228,
"learning_rate": 1.6966263830495936e-05,
"loss": 0.0015,
"step": 300
},
{
"epoch": 0.009005358188121933,
"eval_loss": 0.005631438456475735,
"eval_runtime": 511.2808,
"eval_samples_per_second": 27.435,
"eval_steps_per_second": 13.718,
"step": 300
},
{
"epoch": 0.009155447491257298,
"grad_norm": 0.0063221510499715805,
"learning_rate": 1.5403056551122697e-05,
"loss": 0.005,
"step": 305
},
{
"epoch": 0.009305536794392664,
"grad_norm": 0.006659193895757198,
"learning_rate": 1.3902195302273779e-05,
"loss": 0.0024,
"step": 310
},
{
"epoch": 0.009455626097528029,
"grad_norm": 0.01631927862763405,
"learning_rate": 1.246638473736378e-05,
"loss": 0.0124,
"step": 315
},
{
"epoch": 0.009605715400663394,
"grad_norm": 0.007320760283619165,
"learning_rate": 1.1098212284078036e-05,
"loss": 0.0018,
"step": 320
},
{
"epoch": 0.00975580470379876,
"grad_norm": 0.007207963615655899,
"learning_rate": 9.800143481652979e-06,
"loss": 0.0005,
"step": 325
},
{
"epoch": 0.009905894006934125,
"grad_norm": 0.01264453399926424,
"learning_rate": 8.574517537807897e-06,
"loss": 0.0178,
"step": 330
},
{
"epoch": 0.010055983310069492,
"grad_norm": 0.010427780449390411,
"learning_rate": 7.423543113334436e-06,
"loss": 0.0009,
"step": 335
},
{
"epoch": 0.010206072613204857,
"grad_norm": 0.019130609929561615,
"learning_rate": 6.349294341940593e-06,
"loss": 0.015,
"step": 340
},
{
"epoch": 0.010356161916340222,
"grad_norm": 0.004735421389341354,
"learning_rate": 5.353707092521582e-06,
"loss": 0.0004,
"step": 345
},
{
"epoch": 0.010506251219475588,
"grad_norm": 0.004065056797116995,
"learning_rate": 4.43857548059321e-06,
"loss": 0.032,
"step": 350
},
{
"epoch": 0.010656340522610953,
"grad_norm": 0.01809353567659855,
"learning_rate": 3.605548635174533e-06,
"loss": 0.0105,
"step": 355
},
{
"epoch": 0.01080642982574632,
"grad_norm": 0.005520292557775974,
"learning_rate": 2.85612772694579e-06,
"loss": 0.002,
"step": 360
},
{
"epoch": 0.010956519128881684,
"grad_norm": 0.009921744465827942,
"learning_rate": 2.191663263037458e-06,
"loss": 0.0006,
"step": 365
},
{
"epoch": 0.01110660843201705,
"grad_norm": 0.015676943585276604,
"learning_rate": 1.6133526533250565e-06,
"loss": 0.0007,
"step": 370
},
{
"epoch": 0.011256697735152416,
"grad_norm": 0.001278752344660461,
"learning_rate": 1.1222380526156928e-06,
"loss": 0.0077,
"step": 375
},
{
"epoch": 0.01140678703828778,
"grad_norm": 0.038098469376564026,
"learning_rate": 7.192044826145771e-07,
"loss": 0.0006,
"step": 380
},
{
"epoch": 0.011556876341423147,
"grad_norm": 0.018751483410596848,
"learning_rate": 4.049782370561583e-07,
"loss": 0.0005,
"step": 385
},
{
"epoch": 0.011706965644558512,
"grad_norm": 0.012710168026387691,
"learning_rate": 1.8012557287367392e-07,
"loss": 0.0109,
"step": 390
},
{
"epoch": 0.011857054947693877,
"grad_norm": 0.010042375884950161,
"learning_rate": 4.5051689765929214e-08,
"loss": 0.0082,
"step": 395
},
{
"epoch": 0.012007144250829244,
"grad_norm": 0.014652569778263569,
"learning_rate": 0.0,
"loss": 0.0016,
"step": 400
},
{
"epoch": 0.012007144250829244,
"eval_loss": 0.005542390514165163,
"eval_runtime": 511.1894,
"eval_samples_per_second": 27.44,
"eval_steps_per_second": 13.721,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.6365448417181696e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}