Llama-2-7b-ultrachat-syn200k-1e / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99968,
"eval_steps": 500,
"global_step": 781,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0128,
"grad_norm": 3.1302534958568193,
"learning_rate": 1.2738853503184715e-06,
"loss": 0.6569,
"step": 10
},
{
"epoch": 0.0256,
"grad_norm": 3.1468711921047627,
"learning_rate": 2.547770700636943e-06,
"loss": 0.5179,
"step": 20
},
{
"epoch": 0.0384,
"grad_norm": 0.5540977306160393,
"learning_rate": 3.821656050955415e-06,
"loss": 0.2369,
"step": 30
},
{
"epoch": 0.0512,
"grad_norm": 0.5002871919834374,
"learning_rate": 5.095541401273886e-06,
"loss": 0.1819,
"step": 40
},
{
"epoch": 0.064,
"grad_norm": 0.7150745925714665,
"learning_rate": 6.369426751592357e-06,
"loss": 0.1759,
"step": 50
},
{
"epoch": 0.0768,
"grad_norm": 0.7619036468756623,
"learning_rate": 7.64331210191083e-06,
"loss": 0.16,
"step": 60
},
{
"epoch": 0.0896,
"grad_norm": 0.44198933481881236,
"learning_rate": 8.9171974522293e-06,
"loss": 0.1608,
"step": 70
},
{
"epoch": 0.1024,
"grad_norm": 0.37478693328542767,
"learning_rate": 1.0191082802547772e-05,
"loss": 0.1617,
"step": 80
},
{
"epoch": 0.1152,
"grad_norm": 0.3696718954770576,
"learning_rate": 1.1464968152866242e-05,
"loss": 0.1596,
"step": 90
},
{
"epoch": 0.128,
"grad_norm": 0.5203159641478062,
"learning_rate": 1.2738853503184714e-05,
"loss": 0.1602,
"step": 100
},
{
"epoch": 0.1408,
"grad_norm": 0.37773491168605033,
"learning_rate": 1.4012738853503186e-05,
"loss": 0.1485,
"step": 110
},
{
"epoch": 0.1536,
"grad_norm": 0.24203685828143712,
"learning_rate": 1.528662420382166e-05,
"loss": 0.1555,
"step": 120
},
{
"epoch": 0.1664,
"grad_norm": 0.3898599912917371,
"learning_rate": 1.6560509554140128e-05,
"loss": 0.1571,
"step": 130
},
{
"epoch": 0.1792,
"grad_norm": 0.22025246679268023,
"learning_rate": 1.78343949044586e-05,
"loss": 0.1559,
"step": 140
},
{
"epoch": 0.192,
"grad_norm": 0.5943517884905586,
"learning_rate": 1.910828025477707e-05,
"loss": 0.1578,
"step": 150
},
{
"epoch": 0.2048,
"grad_norm": 0.4363975905376457,
"learning_rate": 1.999977501271127e-05,
"loss": 0.1468,
"step": 160
},
{
"epoch": 0.2176,
"grad_norm": 0.4874326999671375,
"learning_rate": 1.9995775520317924e-05,
"loss": 0.1519,
"step": 170
},
{
"epoch": 0.2304,
"grad_norm": 0.32242234734849035,
"learning_rate": 1.998677861199748e-05,
"loss": 0.1537,
"step": 180
},
{
"epoch": 0.2432,
"grad_norm": 0.338978065876802,
"learning_rate": 1.9972788785779404e-05,
"loss": 0.1535,
"step": 190
},
{
"epoch": 0.256,
"grad_norm": 0.5405845227789843,
"learning_rate": 1.9953813035916444e-05,
"loss": 0.1552,
"step": 200
},
{
"epoch": 0.2688,
"grad_norm": 0.3187325098825458,
"learning_rate": 1.9929860849387815e-05,
"loss": 0.1441,
"step": 210
},
{
"epoch": 0.2816,
"grad_norm": 0.181656187619335,
"learning_rate": 1.9900944201156164e-05,
"loss": 0.1489,
"step": 220
},
{
"epoch": 0.2944,
"grad_norm": 0.26409308919505864,
"learning_rate": 1.986707754818064e-05,
"loss": 0.1509,
"step": 230
},
{
"epoch": 0.3072,
"grad_norm": 0.23639855452929004,
"learning_rate": 1.982827782218912e-05,
"loss": 0.1525,
"step": 240
},
{
"epoch": 0.32,
"grad_norm": 0.42132507371171557,
"learning_rate": 1.9784564421213122e-05,
"loss": 0.1513,
"step": 250
},
{
"epoch": 0.3328,
"grad_norm": 0.30911958261458256,
"learning_rate": 1.9735959199889723e-05,
"loss": 0.1415,
"step": 260
},
{
"epoch": 0.3456,
"grad_norm": 0.286475064941514,
"learning_rate": 1.968248645853526e-05,
"loss": 0.1483,
"step": 270
},
{
"epoch": 0.3584,
"grad_norm": 0.3223686350575121,
"learning_rate": 1.9624172930996322e-05,
"loss": 0.1496,
"step": 280
},
{
"epoch": 0.3712,
"grad_norm": 0.33039144824318656,
"learning_rate": 1.956104777128409e-05,
"loss": 0.1496,
"step": 290
},
{
"epoch": 0.384,
"grad_norm": 0.3268436732392019,
"learning_rate": 1.9493142538998713e-05,
"loss": 0.1524,
"step": 300
},
{
"epoch": 0.3968,
"grad_norm": 0.3768976535567336,
"learning_rate": 1.9420491183550983e-05,
"loss": 0.1397,
"step": 310
},
{
"epoch": 0.4096,
"grad_norm": 0.344679435905093,
"learning_rate": 1.934313002718924e-05,
"loss": 0.1459,
"step": 320
},
{
"epoch": 0.4224,
"grad_norm": 0.1867877479920934,
"learning_rate": 1.9261097746839974e-05,
"loss": 0.149,
"step": 330
},
{
"epoch": 0.4352,
"grad_norm": 0.22826415693304572,
"learning_rate": 1.9174435354771167e-05,
"loss": 0.1515,
"step": 340
},
{
"epoch": 0.448,
"grad_norm": 0.2574654153325295,
"learning_rate": 1.9083186178088103e-05,
"loss": 0.1501,
"step": 350
},
{
"epoch": 0.4608,
"grad_norm": 0.23070296857068737,
"learning_rate": 1.898739583707187e-05,
"loss": 0.1383,
"step": 360
},
{
"epoch": 0.4736,
"grad_norm": 0.21129982050815863,
"learning_rate": 1.8887112222371363e-05,
"loss": 0.1437,
"step": 370
},
{
"epoch": 0.4864,
"grad_norm": 0.17026221614799503,
"learning_rate": 1.8782385471060217e-05,
"loss": 0.1495,
"step": 380
},
{
"epoch": 0.4992,
"grad_norm": 0.2103435712105906,
"learning_rate": 1.8673267941570646e-05,
"loss": 0.1495,
"step": 390
},
{
"epoch": 0.512,
"grad_norm": 0.2949165035198057,
"learning_rate": 1.8559814187516692e-05,
"loss": 0.1482,
"step": 400
},
{
"epoch": 0.5248,
"grad_norm": 0.2727792628609804,
"learning_rate": 1.844208093042e-05,
"loss": 0.1385,
"step": 410
},
{
"epoch": 0.5376,
"grad_norm": 0.19011236871362203,
"learning_rate": 1.8320127031351723e-05,
"loss": 0.1447,
"step": 420
},
{
"epoch": 0.5504,
"grad_norm": 0.20224724252134554,
"learning_rate": 1.8194013461504774e-05,
"loss": 0.1457,
"step": 430
},
{
"epoch": 0.5632,
"grad_norm": 0.21758912206027958,
"learning_rate": 1.806380327171111e-05,
"loss": 0.146,
"step": 440
},
{
"epoch": 0.576,
"grad_norm": 0.394244725563091,
"learning_rate": 1.792956156091928e-05,
"loss": 0.1473,
"step": 450
},
{
"epoch": 0.5888,
"grad_norm": 0.3292629665585757,
"learning_rate": 1.7791355443648045e-05,
"loss": 0.1378,
"step": 460
},
{
"epoch": 0.6016,
"grad_norm": 0.19521004688992183,
"learning_rate": 1.7649254016432247e-05,
"loss": 0.1456,
"step": 470
},
{
"epoch": 0.6144,
"grad_norm": 0.2316602793370239,
"learning_rate": 1.750332832327786e-05,
"loss": 0.1451,
"step": 480
},
{
"epoch": 0.6272,
"grad_norm": 0.17573541945620594,
"learning_rate": 1.735365132014329e-05,
"loss": 0.1486,
"step": 490
},
{
"epoch": 0.64,
"grad_norm": 0.3527896812147239,
"learning_rate": 1.7200297838464864e-05,
"loss": 0.1488,
"step": 500
},
{
"epoch": 0.6528,
"grad_norm": 0.21609855998849087,
"learning_rate": 1.7043344547744637e-05,
"loss": 0.138,
"step": 510
},
{
"epoch": 0.6656,
"grad_norm": 0.20743287144243716,
"learning_rate": 1.6882869917219265e-05,
"loss": 0.1432,
"step": 520
},
{
"epoch": 0.6784,
"grad_norm": 0.14306391155148201,
"learning_rate": 1.6718954176629088e-05,
"loss": 0.1458,
"step": 530
},
{
"epoch": 0.6912,
"grad_norm": 0.13082293792565916,
"learning_rate": 1.6551679276107047e-05,
"loss": 0.1468,
"step": 540
},
{
"epoch": 0.704,
"grad_norm": 0.36161924084937497,
"learning_rate": 1.638112884520748e-05,
"loss": 0.1469,
"step": 550
},
{
"epoch": 0.7168,
"grad_norm": 0.28617618689753893,
"learning_rate": 1.620738815109531e-05,
"loss": 0.1371,
"step": 560
},
{
"epoch": 0.7296,
"grad_norm": 0.13750653322717393,
"learning_rate": 1.6030544055916462e-05,
"loss": 0.1423,
"step": 570
},
{
"epoch": 0.7424,
"grad_norm": 0.15234113342700173,
"learning_rate": 1.5850684973370913e-05,
"loss": 0.1473,
"step": 580
},
{
"epoch": 0.7552,
"grad_norm": 0.1674605203651358,
"learning_rate": 1.5667900824510005e-05,
"loss": 0.1465,
"step": 590
},
{
"epoch": 0.768,
"grad_norm": 0.30120726166690853,
"learning_rate": 1.5482282992780155e-05,
"loss": 0.1459,
"step": 600
},
{
"epoch": 0.7808,
"grad_norm": 0.2556669288569797,
"learning_rate": 1.5293924278335475e-05,
"loss": 0.1372,
"step": 610
},
{
"epoch": 0.7936,
"grad_norm": 0.19878411049962907,
"learning_rate": 1.5102918851642035e-05,
"loss": 0.1416,
"step": 620
},
{
"epoch": 0.8064,
"grad_norm": 0.14869095864210355,
"learning_rate": 1.4909362206397114e-05,
"loss": 0.1435,
"step": 630
},
{
"epoch": 0.8192,
"grad_norm": 0.21996626003622385,
"learning_rate": 1.4713351111786824e-05,
"loss": 0.1462,
"step": 640
},
{
"epoch": 0.832,
"grad_norm": 0.29224476727899246,
"learning_rate": 1.4514983564106117e-05,
"loss": 0.1449,
"step": 650
},
{
"epoch": 0.8448,
"grad_norm": 0.2688310061351837,
"learning_rate": 1.431435873776524e-05,
"loss": 0.1374,
"step": 660
},
{
"epoch": 0.8576,
"grad_norm": 0.20038216433613684,
"learning_rate": 1.4111576935707214e-05,
"loss": 0.1426,
"step": 670
},
{
"epoch": 0.8704,
"grad_norm": 0.13687808263034185,
"learning_rate": 1.3906739539261115e-05,
"loss": 0.1457,
"step": 680
},
{
"epoch": 0.8832,
"grad_norm": 0.19032971439937477,
"learning_rate": 1.3699948957456176e-05,
"loss": 0.1455,
"step": 690
},
{
"epoch": 0.896,
"grad_norm": 0.3208508208806956,
"learning_rate": 1.3491308575822111e-05,
"loss": 0.1435,
"step": 700
},
{
"epoch": 0.9088,
"grad_norm": 0.19524278584922394,
"learning_rate": 1.3280922704701231e-05,
"loss": 0.1365,
"step": 710
},
{
"epoch": 0.9216,
"grad_norm": 0.17798035245935892,
"learning_rate": 1.3068896527098205e-05,
"loss": 0.1433,
"step": 720
},
{
"epoch": 0.9344,
"grad_norm": 0.17884600087369845,
"learning_rate": 1.2855336046093513e-05,
"loss": 0.1447,
"step": 730
},
{
"epoch": 0.9472,
"grad_norm": 0.14100954677816485,
"learning_rate": 1.2640348031846897e-05,
"loss": 0.1447,
"step": 740
},
{
"epoch": 0.96,
"grad_norm": 0.33304512484187876,
"learning_rate": 1.2424039968217361e-05,
"loss": 0.145,
"step": 750
},
{
"epoch": 0.9728,
"grad_norm": 0.2096407271139896,
"learning_rate": 1.220651999902627e-05,
"loss": 0.1355,
"step": 760
},
{
"epoch": 0.9856,
"grad_norm": 0.18948626555533854,
"learning_rate": 1.1987896873990572e-05,
"loss": 0.1422,
"step": 770
},
{
"epoch": 0.9984,
"grad_norm": 0.25576476392968256,
"learning_rate": 1.176827989435307e-05,
"loss": 0.1473,
"step": 780
}
],
"logging_steps": 10,
"max_steps": 1562,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 781,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 267046948257792.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
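
Appended note (not part of the JSON above): a minimal sketch of inspecting this trainer state with the Python standard library. The local filename `trainer_state.json` is an assumption that the file has been downloaded next to the checkpoint; only keys that actually appear above (`log_history`, `global_step`, `max_steps`, `step`, `loss`, `learning_rate`) are read.

```python
# Minimal sketch: load the trainer state and summarize the logged loss curve.
# Assumes trainer_state.json sits in the current working directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
print(f"logged entries: {len(history)}")
print(f"global step:    {state['global_step']} / {state['max_steps']}")

# Print the first and last few logged entries (step, training loss, learning rate).
for entry in history[:3] + history[-3:]:
    print(f"step {entry['step']:>4}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")
```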