{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03375171395422424,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.437928488556059e-05,
"eval_loss": 0.9957674145698547,
"eval_runtime": 419.6878,
"eval_samples_per_second": 11.89,
"eval_steps_per_second": 5.945,
"step": 1
},
{
"epoch": 0.00042189642442780296,
"grad_norm": 0.4106890559196472,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.8825,
"step": 5
},
{
"epoch": 0.0008437928488556059,
"grad_norm": 0.5586373209953308,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.9305,
"step": 10
},
{
"epoch": 0.0012656892732834089,
"grad_norm": 0.5272752046585083,
"learning_rate": 5e-05,
"loss": 0.9003,
"step": 15
},
{
"epoch": 0.0016875856977112118,
"grad_norm": 0.5173702836036682,
"learning_rate": 6.666666666666667e-05,
"loss": 0.7796,
"step": 20
},
{
"epoch": 0.002109482122139015,
"grad_norm": 0.5352601408958435,
"learning_rate": 8.333333333333334e-05,
"loss": 0.8669,
"step": 25
},
{
"epoch": 0.0025313785465668178,
"grad_norm": 0.48157429695129395,
"learning_rate": 0.0001,
"loss": 0.835,
"step": 30
},
{
"epoch": 0.0029532749709946207,
"grad_norm": 0.5189157724380493,
"learning_rate": 9.995494831023409e-05,
"loss": 0.7987,
"step": 35
},
{
"epoch": 0.0033751713954224237,
"grad_norm": 0.5436115860939026,
"learning_rate": 9.981987442712633e-05,
"loss": 0.9009,
"step": 40
},
{
"epoch": 0.0037970678198502267,
"grad_norm": 0.6219505071640015,
"learning_rate": 9.959502176294383e-05,
"loss": 0.8412,
"step": 45
},
{
"epoch": 0.00421896424427803,
"grad_norm": 1.6684471368789673,
"learning_rate": 9.928079551738543e-05,
"loss": 0.9159,
"step": 50
},
{
"epoch": 0.004640860668705833,
"grad_norm": 0.5012656450271606,
"learning_rate": 9.887776194738432e-05,
"loss": 0.7569,
"step": 55
},
{
"epoch": 0.0050627570931336355,
"grad_norm": 0.35097789764404297,
"learning_rate": 9.838664734667495e-05,
"loss": 0.7247,
"step": 60
},
{
"epoch": 0.005484653517561439,
"grad_norm": 0.4114624261856079,
"learning_rate": 9.780833673696254e-05,
"loss": 0.78,
"step": 65
},
{
"epoch": 0.0059065499419892415,
"grad_norm": 0.39524176716804504,
"learning_rate": 9.714387227305422e-05,
"loss": 0.7716,
"step": 70
},
{
"epoch": 0.006328446366417045,
"grad_norm": 0.39941051602363586,
"learning_rate": 9.639445136482548e-05,
"loss": 0.8346,
"step": 75
},
{
"epoch": 0.006750342790844847,
"grad_norm": 0.38641512393951416,
"learning_rate": 9.55614245194068e-05,
"loss": 0.7287,
"step": 80
},
{
"epoch": 0.007172239215272651,
"grad_norm": 0.5262604355812073,
"learning_rate": 9.464629290747842e-05,
"loss": 0.7636,
"step": 85
},
{
"epoch": 0.007594135639700453,
"grad_norm": 0.605768084526062,
"learning_rate": 9.365070565805941e-05,
"loss": 0.8944,
"step": 90
},
{
"epoch": 0.008016032064128256,
"grad_norm": 0.6336283683776855,
"learning_rate": 9.257645688666556e-05,
"loss": 0.8311,
"step": 95
},
{
"epoch": 0.00843792848855606,
"grad_norm": 1.458014965057373,
"learning_rate": 9.142548246219212e-05,
"loss": 0.8093,
"step": 100
},
{
"epoch": 0.00843792848855606,
"eval_loss": 0.7453532814979553,
"eval_runtime": 421.5346,
"eval_samples_per_second": 11.838,
"eval_steps_per_second": 5.919,
"step": 100
},
{
"epoch": 0.008859824912983863,
"grad_norm": 0.3544303774833679,
"learning_rate": 9.019985651834703e-05,
"loss": 0.6757,
"step": 105
},
{
"epoch": 0.009281721337411666,
"grad_norm": 0.31981468200683594,
"learning_rate": 8.890178771592199e-05,
"loss": 0.7116,
"step": 110
},
{
"epoch": 0.009703617761839468,
"grad_norm": 0.3641217350959778,
"learning_rate": 8.753361526263621e-05,
"loss": 0.704,
"step": 115
},
{
"epoch": 0.010125514186267271,
"grad_norm": 0.41695696115493774,
"learning_rate": 8.609780469772623e-05,
"loss": 0.6933,
"step": 120
},
{
"epoch": 0.010547410610695074,
"grad_norm": 0.40811964869499207,
"learning_rate": 8.459694344887732e-05,
"loss": 0.7293,
"step": 125
},
{
"epoch": 0.010969307035122878,
"grad_norm": 0.4640657901763916,
"learning_rate": 8.303373616950408e-05,
"loss": 0.8195,
"step": 130
},
{
"epoch": 0.01139120345955068,
"grad_norm": 0.4916837811470032,
"learning_rate": 8.141099986478212e-05,
"loss": 0.7061,
"step": 135
},
{
"epoch": 0.011813099883978483,
"grad_norm": 0.43793752789497375,
"learning_rate": 7.973165881521434e-05,
"loss": 0.8276,
"step": 140
},
{
"epoch": 0.012234996308406286,
"grad_norm": 0.6117736101150513,
"learning_rate": 7.799873930687978e-05,
"loss": 0.893,
"step": 145
},
{
"epoch": 0.01265689273283409,
"grad_norm": 1.3102891445159912,
"learning_rate": 7.621536417786159e-05,
"loss": 0.8688,
"step": 150
},
{
"epoch": 0.013078789157261891,
"grad_norm": 0.35164037346839905,
"learning_rate": 7.438474719068173e-05,
"loss": 0.5983,
"step": 155
},
{
"epoch": 0.013500685581689695,
"grad_norm": 0.32440951466560364,
"learning_rate": 7.251018724088367e-05,
"loss": 0.6319,
"step": 160
},
{
"epoch": 0.013922582006117498,
"grad_norm": 0.36062324047088623,
"learning_rate": 7.059506241219965e-05,
"loss": 0.6391,
"step": 165
},
{
"epoch": 0.014344478430545302,
"grad_norm": 0.38565579056739807,
"learning_rate": 6.864282388901544e-05,
"loss": 0.6643,
"step": 170
},
{
"epoch": 0.014766374854973103,
"grad_norm": 0.411285400390625,
"learning_rate": 6.665698973710288e-05,
"loss": 0.7558,
"step": 175
},
{
"epoch": 0.015188271279400907,
"grad_norm": 0.3739734888076782,
"learning_rate": 6.464113856382752e-05,
"loss": 0.6523,
"step": 180
},
{
"epoch": 0.01561016770382871,
"grad_norm": 0.4831923842430115,
"learning_rate": 6.259890306925627e-05,
"loss": 0.7802,
"step": 185
},
{
"epoch": 0.01603206412825651,
"grad_norm": 0.5024823546409607,
"learning_rate": 6.0533963499786314e-05,
"loss": 0.6872,
"step": 190
},
{
"epoch": 0.016453960552684315,
"grad_norm": 0.6557715535163879,
"learning_rate": 5.8450041016092464e-05,
"loss": 0.7473,
"step": 195
},
{
"epoch": 0.01687585697711212,
"grad_norm": 1.3833485841751099,
"learning_rate": 5.6350890987343944e-05,
"loss": 0.8261,
"step": 200
},
{
"epoch": 0.01687585697711212,
"eval_loss": 0.7295480370521545,
"eval_runtime": 421.3959,
"eval_samples_per_second": 11.842,
"eval_steps_per_second": 5.921,
"step": 200
},
{
"epoch": 0.017297753401539922,
"grad_norm": 0.4051777720451355,
"learning_rate": 5.4240296223775465e-05,
"loss": 0.6449,
"step": 205
},
{
"epoch": 0.017719649825967725,
"grad_norm": 0.36194536089897156,
"learning_rate": 5.212206015980742e-05,
"loss": 0.7147,
"step": 210
},
{
"epoch": 0.01814154625039553,
"grad_norm": 0.37661212682724,
"learning_rate": 5e-05,
"loss": 0.734,
"step": 215
},
{
"epoch": 0.018563442674823332,
"grad_norm": 0.37295767664909363,
"learning_rate": 4.78779398401926e-05,
"loss": 0.6716,
"step": 220
},
{
"epoch": 0.018985339099251135,
"grad_norm": 0.474931001663208,
"learning_rate": 4.575970377622456e-05,
"loss": 0.749,
"step": 225
},
{
"epoch": 0.019407235523678935,
"grad_norm": 0.5339241027832031,
"learning_rate": 4.364910901265606e-05,
"loss": 0.8035,
"step": 230
},
{
"epoch": 0.01982913194810674,
"grad_norm": 0.546904981136322,
"learning_rate": 4.1549958983907555e-05,
"loss": 0.7336,
"step": 235
},
{
"epoch": 0.020251028372534542,
"grad_norm": 0.5156819820404053,
"learning_rate": 3.94660365002137e-05,
"loss": 0.7587,
"step": 240
},
{
"epoch": 0.020672924796962346,
"grad_norm": 0.5965439677238464,
"learning_rate": 3.740109693074375e-05,
"loss": 0.9158,
"step": 245
},
{
"epoch": 0.02109482122139015,
"grad_norm": 1.0562564134597778,
"learning_rate": 3.5358861436172485e-05,
"loss": 1.0234,
"step": 250
},
{
"epoch": 0.021516717645817952,
"grad_norm": 0.33975425362586975,
"learning_rate": 3.334301026289712e-05,
"loss": 0.6781,
"step": 255
},
{
"epoch": 0.021938614070245756,
"grad_norm": 0.31108027696609497,
"learning_rate": 3.135717611098458e-05,
"loss": 0.6653,
"step": 260
},
{
"epoch": 0.02236051049467356,
"grad_norm": 0.3663179874420166,
"learning_rate": 2.9404937587800375e-05,
"loss": 0.6752,
"step": 265
},
{
"epoch": 0.02278240691910136,
"grad_norm": 0.5073758363723755,
"learning_rate": 2.748981275911633e-05,
"loss": 0.7357,
"step": 270
},
{
"epoch": 0.023204303343529162,
"grad_norm": 0.41087105870246887,
"learning_rate": 2.5615252809318284e-05,
"loss": 0.6715,
"step": 275
},
{
"epoch": 0.023626199767956966,
"grad_norm": 0.4009557068347931,
"learning_rate": 2.3784635822138424e-05,
"loss": 0.6759,
"step": 280
},
{
"epoch": 0.02404809619238477,
"grad_norm": 0.46906349062919617,
"learning_rate": 2.2001260693120233e-05,
"loss": 0.7395,
"step": 285
},
{
"epoch": 0.024469992616812573,
"grad_norm": 0.6131988763809204,
"learning_rate": 2.026834118478567e-05,
"loss": 0.8374,
"step": 290
},
{
"epoch": 0.024891889041240376,
"grad_norm": 0.735552191734314,
"learning_rate": 1.858900013521788e-05,
"loss": 0.9337,
"step": 295
},
{
"epoch": 0.02531378546566818,
"grad_norm": 1.2397476434707642,
"learning_rate": 1.6966263830495936e-05,
"loss": 0.8693,
"step": 300
},
{
"epoch": 0.02531378546566818,
"eval_loss": 0.718722939491272,
"eval_runtime": 421.5469,
"eval_samples_per_second": 11.837,
"eval_steps_per_second": 5.919,
"step": 300
},
{
"epoch": 0.025735681890095983,
"grad_norm": 0.2928362786769867,
"learning_rate": 1.5403056551122697e-05,
"loss": 0.6769,
"step": 305
},
{
"epoch": 0.026157578314523783,
"grad_norm": 0.35365402698516846,
"learning_rate": 1.3902195302273779e-05,
"loss": 0.6254,
"step": 310
},
{
"epoch": 0.026579474738951586,
"grad_norm": 0.3873966634273529,
"learning_rate": 1.246638473736378e-05,
"loss": 0.7157,
"step": 315
},
{
"epoch": 0.02700137116337939,
"grad_norm": 0.42512309551239014,
"learning_rate": 1.1098212284078036e-05,
"loss": 0.7382,
"step": 320
},
{
"epoch": 0.027423267587807193,
"grad_norm": 0.45126789808273315,
"learning_rate": 9.800143481652979e-06,
"loss": 0.6671,
"step": 325
},
{
"epoch": 0.027845164012234996,
"grad_norm": 0.4876130223274231,
"learning_rate": 8.574517537807897e-06,
"loss": 0.7558,
"step": 330
},
{
"epoch": 0.0282670604366628,
"grad_norm": 0.4966984689235687,
"learning_rate": 7.423543113334436e-06,
"loss": 0.7365,
"step": 335
},
{
"epoch": 0.028688956861090603,
"grad_norm": 0.4963556230068207,
"learning_rate": 6.349294341940593e-06,
"loss": 0.7839,
"step": 340
},
{
"epoch": 0.029110853285518407,
"grad_norm": 0.5703970193862915,
"learning_rate": 5.353707092521582e-06,
"loss": 0.6884,
"step": 345
},
{
"epoch": 0.029532749709946206,
"grad_norm": 1.9548819065093994,
"learning_rate": 4.43857548059321e-06,
"loss": 1.012,
"step": 350
},
{
"epoch": 0.02995464613437401,
"grad_norm": 0.31155750155448914,
"learning_rate": 3.605548635174533e-06,
"loss": 0.6154,
"step": 355
},
{
"epoch": 0.030376542558801813,
"grad_norm": 0.2996324896812439,
"learning_rate": 2.85612772694579e-06,
"loss": 0.733,
"step": 360
},
{
"epoch": 0.030798438983229617,
"grad_norm": 0.37856683135032654,
"learning_rate": 2.191663263037458e-06,
"loss": 0.6717,
"step": 365
},
{
"epoch": 0.03122033540765742,
"grad_norm": 0.37820345163345337,
"learning_rate": 1.6133526533250565e-06,
"loss": 0.6931,
"step": 370
},
{
"epoch": 0.03164223183208522,
"grad_norm": 0.39253878593444824,
"learning_rate": 1.1222380526156928e-06,
"loss": 0.6896,
"step": 375
},
{
"epoch": 0.03206412825651302,
"grad_norm": 0.4378221035003662,
"learning_rate": 7.192044826145771e-07,
"loss": 0.752,
"step": 380
},
{
"epoch": 0.03248602468094083,
"grad_norm": 0.5840653777122498,
"learning_rate": 4.049782370561583e-07,
"loss": 0.6758,
"step": 385
},
{
"epoch": 0.03290792110536863,
"grad_norm": 0.6441401839256287,
"learning_rate": 1.8012557287367392e-07,
"loss": 0.7952,
"step": 390
},
{
"epoch": 0.033329817529796434,
"grad_norm": 0.6340252161026001,
"learning_rate": 4.5051689765929214e-08,
"loss": 0.779,
"step": 395
},
{
"epoch": 0.03375171395422424,
"grad_norm": 1.275892734527588,
"learning_rate": 0.0,
"loss": 0.9144,
"step": 400
},
{
"epoch": 0.03375171395422424,
"eval_loss": 0.7176097631454468,
"eval_runtime": 421.8457,
"eval_samples_per_second": 11.829,
"eval_steps_per_second": 5.914,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5129435549833626e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
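
The log_history above interleaves training entries (loss, grad_norm, learning_rate, logged every 5 steps per logging_steps) with evaluation entries (eval_loss, eval_runtime, logged every 100 steps per eval_steps). As a minimal sketch of how one might separate the two series, assuming the file is saved locally as trainer_state.json (adjust the path to the actual checkpoint directory, e.g. checkpoint-400/trainer_state.json):

import json

# Load the checkpoint's trainer state (the path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

train_points, eval_points = [], []
for entry in state["log_history"]:
    if "loss" in entry:          # training log entries carry "loss"
        train_points.append((entry["step"], entry["loss"]))
    if "eval_loss" in entry:     # evaluation entries carry "eval_loss"
        eval_points.append((entry["step"], entry["eval_loss"]))

print("final train loss:", train_points[-1])  # (400, 0.9144) for this state
print("final eval loss: ", eval_points[-1])   # (400, 0.7176...) for this state

With this state file, the eval series shows the loss dropping from roughly 0.996 at step 1 to about 0.718 at step 400, while the learning rate follows a warmup to 1e-4 over the first 30 steps and then a cosine-style decay to 0.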