zephyr-7b-hh-dpo / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9996020692399522,
"eval_steps": 1000,
"global_step": 628,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015917230401910067,
"grad_norm": 3.414436464235707,
"learning_rate": 7.936507936507936e-09,
"logits/chosen": -2.458916187286377,
"logits/rejected": -2.50296688079834,
"logps/chosen": -112.41877746582031,
"logps/rejected": -153.82745361328125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.01591723040191007,
"grad_norm": 3.47500960837261,
"learning_rate": 7.936507936507936e-08,
"logits/chosen": -2.5214052200317383,
"logits/rejected": -2.508702039718628,
"logps/chosen": -143.01979064941406,
"logps/rejected": -133.09210205078125,
"loss": 0.6932,
"rewards/accuracies": 0.4166666567325592,
"rewards/chosen": -0.00031594105530530214,
"rewards/margins": -0.00045727533870376647,
"rewards/rejected": 0.00014133438526187092,
"step": 10
},
{
"epoch": 0.03183446080382014,
"grad_norm": 3.343704534649061,
"learning_rate": 1.5873015873015872e-07,
"logits/chosen": -2.4521286487579346,
"logits/rejected": -2.448809862136841,
"logps/chosen": -151.37095642089844,
"logps/rejected": -162.80328369140625,
"loss": 0.6931,
"rewards/accuracies": 0.43437498807907104,
"rewards/chosen": 4.1505660192342475e-05,
"rewards/margins": -0.00036320873186923563,
"rewards/rejected": 0.000404714432079345,
"step": 20
},
{
"epoch": 0.0477516912057302,
"grad_norm": 3.366996896296212,
"learning_rate": 2.3809523809523806e-07,
"logits/chosen": -2.4904391765594482,
"logits/rejected": -2.479889392852783,
"logps/chosen": -147.56060791015625,
"logps/rejected": -150.96810913085938,
"loss": 0.6921,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.003838698612526059,
"rewards/margins": 0.002006606664508581,
"rewards/rejected": 0.001832091948017478,
"step": 30
},
{
"epoch": 0.06366892160764027,
"grad_norm": 3.3714607115747497,
"learning_rate": 3.1746031746031743e-07,
"logits/chosen": -2.503981590270996,
"logits/rejected": -2.486074447631836,
"logps/chosen": -153.0246124267578,
"logps/rejected": -145.2753448486328,
"loss": 0.6899,
"rewards/accuracies": 0.53125,
"rewards/chosen": 0.007592098321765661,
"rewards/margins": 0.00429057702422142,
"rewards/rejected": 0.0033015217632055283,
"step": 40
},
{
"epoch": 0.07958615200955034,
"grad_norm": 4.06423931986909,
"learning_rate": 3.968253968253968e-07,
"logits/chosen": -2.4848971366882324,
"logits/rejected": -2.4830713272094727,
"logps/chosen": -146.40438842773438,
"logps/rejected": -152.903564453125,
"loss": 0.6849,
"rewards/accuracies": 0.5531250238418579,
"rewards/chosen": -0.01682356372475624,
"rewards/margins": 0.0171933826059103,
"rewards/rejected": -0.03401694819331169,
"step": 50
},
{
"epoch": 0.0955033824114604,
"grad_norm": 3.933215748835345,
"learning_rate": 4.761904761904761e-07,
"logits/chosen": -2.4494073390960693,
"logits/rejected": -2.425264835357666,
"logps/chosen": -164.67039489746094,
"logps/rejected": -159.42379760742188,
"loss": 0.6811,
"rewards/accuracies": 0.528124988079071,
"rewards/chosen": -0.13071545958518982,
"rewards/margins": 0.01678677648305893,
"rewards/rejected": -0.14750224351882935,
"step": 60
},
{
"epoch": 0.11142061281337047,
"grad_norm": 6.465696654064857,
"learning_rate": 4.998106548810311e-07,
"logits/chosen": -2.4344656467437744,
"logits/rejected": -2.3922276496887207,
"logps/chosen": -171.0955352783203,
"logps/rejected": -158.62640380859375,
"loss": 0.6666,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -0.17716214060783386,
"rewards/margins": 0.07567773759365082,
"rewards/rejected": -0.2528398633003235,
"step": 70
},
{
"epoch": 0.12733784321528055,
"grad_norm": 9.086502750868021,
"learning_rate": 4.988839406031596e-07,
"logits/chosen": -2.384453058242798,
"logits/rejected": -2.3677101135253906,
"logps/chosen": -153.10414123535156,
"logps/rejected": -182.54360961914062,
"loss": 0.6529,
"rewards/accuracies": 0.690625011920929,
"rewards/chosen": -0.20188739895820618,
"rewards/margins": 0.12667986750602722,
"rewards/rejected": -0.3285672962665558,
"step": 80
},
{
"epoch": 0.14325507361719061,
"grad_norm": 14.480905623348013,
"learning_rate": 4.971879403278432e-07,
"logits/chosen": -2.2432703971862793,
"logits/rejected": -2.2313826084136963,
"logps/chosen": -185.1287384033203,
"logps/rejected": -201.3049774169922,
"loss": 0.6398,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.39208894968032837,
"rewards/margins": 0.144916832447052,
"rewards/rejected": -0.5370057821273804,
"step": 90
},
{
"epoch": 0.15917230401910068,
"grad_norm": 12.626265646656822,
"learning_rate": 4.947278962947386e-07,
"logits/chosen": -1.7865173816680908,
"logits/rejected": -1.7619308233261108,
"logps/chosen": -234.49526977539062,
"logps/rejected": -266.22235107421875,
"loss": 0.6186,
"rewards/accuracies": 0.659375011920929,
"rewards/chosen": -0.8486677408218384,
"rewards/margins": 0.2785636782646179,
"rewards/rejected": -1.127231478691101,
"step": 100
},
{
"epoch": 0.17508953442101075,
"grad_norm": 11.440443186979932,
"learning_rate": 4.915114123589732e-07,
"logits/chosen": -1.477772831916809,
"logits/rejected": -1.394840955734253,
"logps/chosen": -206.17166137695312,
"logps/rejected": -243.156005859375,
"loss": 0.6156,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -0.719870388507843,
"rewards/margins": 0.34280186891555786,
"rewards/rejected": -1.0626722574234009,
"step": 110
},
{
"epoch": 0.1910067648229208,
"grad_norm": 14.957458897670607,
"learning_rate": 4.875484304880629e-07,
"logits/chosen": -0.8948208093643188,
"logits/rejected": -0.773229718208313,
"logps/chosen": -234.4495086669922,
"logps/rejected": -270.4801330566406,
"loss": 0.5931,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.8048633337020874,
"rewards/margins": 0.34693923592567444,
"rewards/rejected": -1.151802659034729,
"step": 120
},
{
"epoch": 0.20692399522483088,
"grad_norm": 13.673290760971415,
"learning_rate": 4.828512000318616e-07,
"logits/chosen": -0.20325860381126404,
"logits/rejected": -0.022473735734820366,
"logps/chosen": -233.56399536132812,
"logps/rejected": -278.225830078125,
"loss": 0.5842,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -0.7881739139556885,
"rewards/margins": 0.4328451156616211,
"rewards/rejected": -1.2210190296173096,
"step": 130
},
{
"epoch": 0.22284122562674094,
"grad_norm": 15.252755388662429,
"learning_rate": 4.774342398605221e-07,
"logits/chosen": -0.4290715754032135,
"logits/rejected": -0.26301848888397217,
"logps/chosen": -245.796142578125,
"logps/rejected": -278.6325988769531,
"loss": 0.5803,
"rewards/accuracies": 0.6781250238418579,
"rewards/chosen": -0.9634370803833008,
"rewards/margins": 0.42050686478614807,
"rewards/rejected": -1.383944034576416,
"step": 140
},
{
"epoch": 0.238758456028651,
"grad_norm": 15.98257839847235,
"learning_rate": 4.713142934875005e-07,
"logits/chosen": 0.04155537486076355,
"logits/rejected": 0.4105464518070221,
"logps/chosen": -294.55078125,
"logps/rejected": -322.9659118652344,
"loss": 0.5928,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -1.3449639081954956,
"rewards/margins": 0.41515955328941345,
"rewards/rejected": -1.7601234912872314,
"step": 150
},
{
"epoch": 0.2546756864305611,
"grad_norm": 16.296020720117806,
"learning_rate": 4.64510277316316e-07,
"logits/chosen": 0.07806523889303207,
"logits/rejected": 0.4625118672847748,
"logps/chosen": -242.23171997070312,
"logps/rejected": -306.7259216308594,
"loss": 0.5619,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -1.0831429958343506,
"rewards/margins": 0.6153541803359985,
"rewards/rejected": -1.6984970569610596,
"step": 160
},
{
"epoch": 0.27059291683247116,
"grad_norm": 15.29787072223291,
"learning_rate": 4.570432221710314e-07,
"logits/chosen": 0.8908351063728333,
"logits/rejected": 1.1234996318817139,
"logps/chosen": -285.02685546875,
"logps/rejected": -332.22576904296875,
"loss": 0.5968,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -1.369902491569519,
"rewards/margins": 0.42481645941734314,
"rewards/rejected": -1.7947190999984741,
"step": 170
},
{
"epoch": 0.28651014723438123,
"grad_norm": 12.440638905265972,
"learning_rate": 4.4893620829118124e-07,
"logits/chosen": 0.45191067457199097,
"logits/rejected": 0.8097447156906128,
"logps/chosen": -267.55511474609375,
"logps/rejected": -303.0255432128906,
"loss": 0.5782,
"rewards/accuracies": 0.690625011920929,
"rewards/chosen": -1.130518913269043,
"rewards/margins": 0.47403573989868164,
"rewards/rejected": -1.6045547723770142,
"step": 180
},
{
"epoch": 0.3024273776362913,
"grad_norm": 14.662664269581565,
"learning_rate": 4.40214293992074e-07,
"logits/chosen": 0.07046165317296982,
"logits/rejected": 0.5015997886657715,
"logps/chosen": -276.8005676269531,
"logps/rejected": -317.6512451171875,
"loss": 0.5684,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.328792929649353,
"rewards/margins": 0.5068601369857788,
"rewards/rejected": -1.835653305053711,
"step": 190
},
{
"epoch": 0.31834460803820136,
"grad_norm": 13.757436126080059,
"learning_rate": 4.3090443821097566e-07,
"logits/chosen": 0.2187742292881012,
"logits/rejected": 0.5610963106155396,
"logps/chosen": -268.94757080078125,
"logps/rejected": -333.0284729003906,
"loss": 0.5596,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.2326629161834717,
"rewards/margins": 0.5554476976394653,
"rewards/rejected": -1.7881107330322266,
"step": 200
},
{
"epoch": 0.3342618384401114,
"grad_norm": 12.638546225207325,
"learning_rate": 4.210354171785795e-07,
"logits/chosen": 0.5555538535118103,
"logits/rejected": 0.9611243009567261,
"logps/chosen": -258.8009338378906,
"logps/rejected": -315.17242431640625,
"loss": 0.5627,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.1692590713500977,
"rewards/margins": 0.5407182574272156,
"rewards/rejected": -1.709977388381958,
"step": 210
},
{
"epoch": 0.3501790688420215,
"grad_norm": 12.533508219187622,
"learning_rate": 4.1063773547332584e-07,
"logits/chosen": 0.4853738248348236,
"logits/rejected": 0.7825923562049866,
"logps/chosen": -255.5019073486328,
"logps/rejected": -307.07843017578125,
"loss": 0.5684,
"rewards/accuracies": 0.6656249761581421,
"rewards/chosen": -1.0966681241989136,
"rewards/margins": 0.5020471215248108,
"rewards/rejected": -1.59871506690979,
"step": 220
},
{
"epoch": 0.36609629924393156,
"grad_norm": 13.14652988888945,
"learning_rate": 3.997435317334988e-07,
"logits/chosen": 1.3910820484161377,
"logits/rejected": 1.7489607334136963,
"logps/chosen": -285.7388916015625,
"logps/rejected": -342.02557373046875,
"loss": 0.5556,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.430323600769043,
"rewards/margins": 0.5691004991531372,
"rewards/rejected": -1.9994239807128906,
"step": 230
},
{
"epoch": 0.3820135296458416,
"grad_norm": 16.893312636051345,
"learning_rate": 3.8838647931853684e-07,
"logits/chosen": 0.9681981801986694,
"logits/rejected": 1.3831474781036377,
"logps/chosen": -274.5780029296875,
"logps/rejected": -334.71624755859375,
"loss": 0.565,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -1.331866979598999,
"rewards/margins": 0.6004709005355835,
"rewards/rejected": -1.932337999343872,
"step": 240
},
{
"epoch": 0.3979307600477517,
"grad_norm": 16.93758119869443,
"learning_rate": 3.7660168222660824e-07,
"logits/chosen": -0.0617893747985363,
"logits/rejected": 0.3982541561126709,
"logps/chosen": -291.5325012207031,
"logps/rejected": -349.10357666015625,
"loss": 0.546,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -1.3192743062973022,
"rewards/margins": 0.6018632650375366,
"rewards/rejected": -1.9211375713348389,
"step": 250
},
{
"epoch": 0.41384799044966175,
"grad_norm": 13.594747823748454,
"learning_rate": 3.6442556659016475e-07,
"logits/chosen": -0.1596679985523224,
"logits/rejected": 0.12459886074066162,
"logps/chosen": -283.2134704589844,
"logps/rejected": -335.0664367675781,
"loss": 0.5677,
"rewards/accuracies": 0.703125,
"rewards/chosen": -1.3012964725494385,
"rewards/margins": 0.5175098180770874,
"rewards/rejected": -1.8188062906265259,
"step": 260
},
{
"epoch": 0.4297652208515718,
"grad_norm": 13.029006886811343,
"learning_rate": 3.5189576808485404e-07,
"logits/chosen": 0.6624835729598999,
"logits/rejected": 0.867287278175354,
"logps/chosen": -290.3371887207031,
"logps/rejected": -335.45745849609375,
"loss": 0.5603,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -1.3652312755584717,
"rewards/margins": 0.44845834374427795,
"rewards/rejected": -1.8136895895004272,
"step": 270
},
{
"epoch": 0.4456824512534819,
"grad_norm": 14.229518064110062,
"learning_rate": 3.390510155998023e-07,
"logits/chosen": 1.0853196382522583,
"logits/rejected": 1.5426385402679443,
"logps/chosen": -301.244873046875,
"logps/rejected": -352.26214599609375,
"loss": 0.5576,
"rewards/accuracies": 0.721875011920929,
"rewards/chosen": -1.4498573541641235,
"rewards/margins": 0.5846196413040161,
"rewards/rejected": -2.0344767570495605,
"step": 280
},
{
"epoch": 0.46159968165539195,
"grad_norm": 13.247814891025222,
"learning_rate": 3.2593101152883795e-07,
"logits/chosen": 0.6425064206123352,
"logits/rejected": 1.0470354557037354,
"logps/chosen": -304.13165283203125,
"logps/rejected": -356.2009582519531,
"loss": 0.5699,
"rewards/accuracies": 0.6781250238418579,
"rewards/chosen": -1.4935743808746338,
"rewards/margins": 0.543701708316803,
"rewards/rejected": -2.037276029586792,
"step": 290
},
{
"epoch": 0.477516912057302,
"grad_norm": 14.136317593633118,
"learning_rate": 3.125763090526674e-07,
"logits/chosen": 1.010498285293579,
"logits/rejected": 1.5458686351776123,
"logps/chosen": -312.71893310546875,
"logps/rejected": -367.11956787109375,
"loss": 0.5526,
"rewards/accuracies": 0.6968749761581421,
"rewards/chosen": -1.607622742652893,
"rewards/margins": 0.5898184180259705,
"rewards/rejected": -2.1974411010742188,
"step": 300
},
{
"epoch": 0.4934341424592121,
"grad_norm": 13.599580845693339,
"learning_rate": 2.9902818679131775e-07,
"logits/chosen": 1.556601881980896,
"logits/rejected": 2.024049758911133,
"logps/chosen": -317.42938232421875,
"logps/rejected": -371.38623046875,
"loss": 0.5571,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.6657898426055908,
"rewards/margins": 0.5584100484848022,
"rewards/rejected": -2.2241997718811035,
"step": 310
},
{
"epoch": 0.5093513728611222,
"grad_norm": 15.486325517579422,
"learning_rate": 2.8532852121428733e-07,
"logits/chosen": 2.424159526824951,
"logits/rejected": 2.8681328296661377,
"logps/chosen": -326.8099670410156,
"logps/rejected": -373.8744201660156,
"loss": 0.5411,
"rewards/accuracies": 0.6656249761581421,
"rewards/chosen": -1.7966935634613037,
"rewards/margins": 0.5875706076622009,
"rewards/rejected": -2.3842644691467285,
"step": 320
},
{
"epoch": 0.5252686032630323,
"grad_norm": 13.394275013231457,
"learning_rate": 2.715196572027789e-07,
"logits/chosen": 3.215583324432373,
"logits/rejected": 3.725792646408081,
"logps/chosen": -343.72344970703125,
"logps/rejected": -432.32916259765625,
"loss": 0.5414,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.9976279735565186,
"rewards/margins": 0.806243896484375,
"rewards/rejected": -2.8038716316223145,
"step": 330
},
{
"epoch": 0.5411858336649423,
"grad_norm": 13.373618178706888,
"learning_rate": 2.5764427716409815e-07,
"logits/chosen": 3.000930070877075,
"logits/rejected": 3.6059279441833496,
"logps/chosen": -347.54022216796875,
"logps/rejected": -404.14654541015625,
"loss": 0.5507,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.9271198511123657,
"rewards/margins": 0.6116258502006531,
"rewards/rejected": -2.538745880126953,
"step": 340
},
{
"epoch": 0.5571030640668524,
"grad_norm": 14.475581092351593,
"learning_rate": 2.4374526910277886e-07,
"logits/chosen": 2.816096782684326,
"logits/rejected": 3.264390230178833,
"logps/chosen": -353.548828125,
"logps/rejected": -420.4794006347656,
"loss": 0.5564,
"rewards/accuracies": 0.671875,
"rewards/chosen": -2.0463950634002686,
"rewards/margins": 0.6025586128234863,
"rewards/rejected": -2.648953914642334,
"step": 350
},
{
"epoch": 0.5730202944687625,
"grad_norm": 14.682353004178683,
"learning_rate": 2.2986559405621886e-07,
"logits/chosen": 2.8994641304016113,
"logits/rejected": 3.608001232147217,
"logps/chosen": -360.53143310546875,
"logps/rejected": -417.3299865722656,
"loss": 0.5525,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.981393814086914,
"rewards/margins": 0.6567780375480652,
"rewards/rejected": -2.638171911239624,
"step": 360
},
{
"epoch": 0.5889375248706725,
"grad_norm": 18.068831076710342,
"learning_rate": 2.160481533045751e-07,
"logits/chosen": 2.5152053833007812,
"logits/rejected": 3.3655197620391846,
"logps/chosen": -339.1783752441406,
"logps/rejected": -407.2680358886719,
"loss": 0.5377,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.932498574256897,
"rewards/margins": 0.7526292204856873,
"rewards/rejected": -2.6851277351379395,
"step": 370
},
{
"epoch": 0.6048547552725826,
"grad_norm": 13.903302227889153,
"learning_rate": 2.0233565576536564e-07,
"logits/chosen": 2.7033469676971436,
"logits/rejected": 3.3517203330993652,
"logps/chosen": -352.88543701171875,
"logps/rejected": -414.46807861328125,
"loss": 0.5464,
"rewards/accuracies": 0.671875,
"rewards/chosen": -2.0058979988098145,
"rewards/margins": 0.6991748809814453,
"rewards/rejected": -2.7050728797912598,
"step": 380
},
{
"epoch": 0.6207719856744927,
"grad_norm": 14.437387596932032,
"learning_rate": 1.887704859826528e-07,
"logits/chosen": 2.677304744720459,
"logits/rejected": 3.3016350269317627,
"logps/chosen": -345.10186767578125,
"logps/rejected": -422.6465759277344,
"loss": 0.5441,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -1.9631824493408203,
"rewards/margins": 0.7392277717590332,
"rewards/rejected": -2.7024102210998535,
"step": 390
},
{
"epoch": 0.6366892160764027,
"grad_norm": 15.504689068952228,
"learning_rate": 1.7539457311884675e-07,
"logits/chosen": 2.6285808086395264,
"logits/rejected": 3.402756452560425,
"logps/chosen": -350.1034240722656,
"logps/rejected": -420.3218688964844,
"loss": 0.5387,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.9105831384658813,
"rewards/margins": 0.7946080565452576,
"rewards/rejected": -2.705191135406494,
"step": 400
},
{
"epoch": 0.6526064464783128,
"grad_norm": 12.730811962712194,
"learning_rate": 1.6224926135406693e-07,
"logits/chosen": 2.7364511489868164,
"logits/rejected": 3.1644163131713867,
"logps/chosen": -364.25469970703125,
"logps/rejected": -419.5701599121094,
"loss": 0.5618,
"rewards/accuracies": 0.690625011920929,
"rewards/chosen": -2.067072629928589,
"rewards/margins": 0.5998597741127014,
"rewards/rejected": -2.6669325828552246,
"step": 410
},
{
"epoch": 0.6685236768802229,
"grad_norm": 14.2356920381672,
"learning_rate": 1.4937518209365108e-07,
"logits/chosen": 2.5111005306243896,
"logits/rejected": 3.2514469623565674,
"logps/chosen": -381.5763244628906,
"logps/rejected": -433.4600524902344,
"loss": 0.5494,
"rewards/accuracies": 0.778124988079071,
"rewards/chosen": -2.099307060241699,
"rewards/margins": 0.6568456888198853,
"rewards/rejected": -2.756152629852295,
"step": 420
},
{
"epoch": 0.6844409072821329,
"grad_norm": 15.52832700397283,
"learning_rate": 1.3681212837880977e-07,
"logits/chosen": 2.709282398223877,
"logits/rejected": 3.0862138271331787,
"logps/chosen": -374.52288818359375,
"logps/rejected": -447.3203125,
"loss": 0.5539,
"rewards/accuracies": 0.71875,
"rewards/chosen": -2.311601161956787,
"rewards/margins": 0.6582018136978149,
"rewards/rejected": -2.9698030948638916,
"step": 430
},
{
"epoch": 0.700358137684043,
"grad_norm": 13.300949643909883,
"learning_rate": 1.2459893188861613e-07,
"logits/chosen": 2.896282196044922,
"logits/rejected": 3.553002119064331,
"logps/chosen": -382.75054931640625,
"logps/rejected": -458.756591796875,
"loss": 0.5426,
"rewards/accuracies": 0.765625,
"rewards/chosen": -2.2905333042144775,
"rewards/margins": 0.7688990831375122,
"rewards/rejected": -3.0594325065612793,
"step": 440
},
{
"epoch": 0.716275368085953,
"grad_norm": 14.064737088004772,
"learning_rate": 1.1277334291351145e-07,
"logits/chosen": 3.348231077194214,
"logits/rejected": 4.010863780975342,
"logps/chosen": -383.77569580078125,
"logps/rejected": -465.36767578125,
"loss": 0.5372,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -2.3260040283203125,
"rewards/margins": 0.8284826278686523,
"rewards/rejected": -3.1544864177703857,
"step": 450
},
{
"epoch": 0.7321925984878631,
"grad_norm": 13.977439772366749,
"learning_rate": 1.0137191367132078e-07,
"logits/chosen": 3.446108341217041,
"logits/rejected": 3.9590706825256348,
"logps/chosen": -375.79705810546875,
"logps/rejected": -461.6592712402344,
"loss": 0.5282,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -2.228874683380127,
"rewards/margins": 0.7711628675460815,
"rewards/rejected": -3.000037670135498,
"step": 460
},
{
"epoch": 0.7481098288897732,
"grad_norm": 15.466730145904025,
"learning_rate": 9.042988532644249e-08,
"logits/chosen": 3.792773485183716,
"logits/rejected": 4.424340724945068,
"logps/chosen": -395.2272033691406,
"logps/rejected": -461.55352783203125,
"loss": 0.5425,
"rewards/accuracies": 0.690625011920929,
"rewards/chosen": -2.369027614593506,
"rewards/margins": 0.6728731393814087,
"rewards/rejected": -3.041900157928467,
"step": 470
},
{
"epoch": 0.7640270592916832,
"grad_norm": 14.83331875533172,
"learning_rate": 7.998107906142839e-08,
"logits/chosen": 3.4053382873535156,
"logits/rejected": 3.8911843299865723,
"logps/chosen": -388.53155517578125,
"logps/rejected": -455.40185546875,
"loss": 0.5425,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -2.44258189201355,
"rewards/margins": 0.6656660437583923,
"rewards/rejected": -3.108247756958008,
"step": 480
},
{
"epoch": 0.7799442896935933,
"grad_norm": 15.620806793742632,
"learning_rate": 7.005779153764682e-08,
"logits/chosen": 3.4802298545837402,
"logits/rejected": 3.920941114425659,
"logps/chosen": -378.8732604980469,
"logps/rejected": -466.740478515625,
"loss": 0.5309,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -2.4023540019989014,
"rewards/margins": 0.7670234441757202,
"rewards/rejected": -3.169377565383911,
"step": 490
},
{
"epoch": 0.7958615200955034,
"grad_norm": 17.188173872991122,
"learning_rate": 6.069069506815325e-08,
"logits/chosen": 3.2549641132354736,
"logits/rejected": 4.011484146118164,
"logps/chosen": -376.5075988769531,
"logps/rejected": -464.5885314941406,
"loss": 0.5443,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -2.2434165477752686,
"rewards/margins": 0.8864636421203613,
"rewards/rejected": -3.129879951477051,
"step": 500
},
{
"epoch": 0.8117787504974134,
"grad_norm": 15.502718885907624,
"learning_rate": 5.190874281132851e-08,
"logits/chosen": 3.2985496520996094,
"logits/rejected": 3.910979747772217,
"logps/chosen": -390.97381591796875,
"logps/rejected": -462.67816162109375,
"loss": 0.5418,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -2.4295220375061035,
"rewards/margins": 0.7420149445533752,
"rewards/rejected": -3.171536922454834,
"step": 510
},
{
"epoch": 0.8276959808993235,
"grad_norm": 14.242739712121452,
"learning_rate": 4.373907927832513e-08,
"logits/chosen": 3.5766899585723877,
"logits/rejected": 4.207441806793213,
"logps/chosen": -382.2040710449219,
"logps/rejected": -460.5889587402344,
"loss": 0.5461,
"rewards/accuracies": 0.6968749761581421,
"rewards/chosen": -2.4501075744628906,
"rewards/margins": 0.7706823348999023,
"rewards/rejected": -3.220790147781372,
"step": 520
},
{
"epoch": 0.8436132113012336,
"grad_norm": 12.998296781925998,
"learning_rate": 3.620695643093924e-08,
"logits/chosen": 3.5845916271209717,
"logits/rejected": 4.0032148361206055,
"logps/chosen": -372.14898681640625,
"logps/rejected": -455.68212890625,
"loss": 0.5414,
"rewards/accuracies": 0.734375,
"rewards/chosen": -2.2561497688293457,
"rewards/margins": 0.7765234112739563,
"rewards/rejected": -3.032672643661499,
"step": 530
},
{
"epoch": 0.8595304417031436,
"grad_norm": 17.292649632838344,
"learning_rate": 2.9335655629243645e-08,
"logits/chosen": 3.523092269897461,
"logits/rejected": 3.9805259704589844,
"logps/chosen": -388.76800537109375,
"logps/rejected": -462.15625,
"loss": 0.5454,
"rewards/accuracies": 0.721875011920929,
"rewards/chosen": -2.4221224784851074,
"rewards/margins": 0.6886903643608093,
"rewards/rejected": -3.1108126640319824,
"step": 540
},
{
"epoch": 0.8754476721050537,
"grad_norm": 13.631324448665103,
"learning_rate": 2.31464156702382e-08,
"logits/chosen": 3.492096424102783,
"logits/rejected": 4.266209125518799,
"logps/chosen": -393.3036804199219,
"logps/rejected": -445.7330627441406,
"loss": 0.5519,
"rewards/accuracies": 0.671875,
"rewards/chosen": -2.4094600677490234,
"rewards/margins": 0.6608883738517761,
"rewards/rejected": -3.0703485012054443,
"step": 550
},
{
"epoch": 0.8913649025069638,
"grad_norm": 15.338497216975412,
"learning_rate": 1.7658367139945228e-08,
"logits/chosen": 3.498994827270508,
"logits/rejected": 3.9349846839904785,
"logps/chosen": -387.2663269042969,
"logps/rejected": -449.8990173339844,
"loss": 0.5364,
"rewards/accuracies": 0.659375011920929,
"rewards/chosen": -2.4634361267089844,
"rewards/margins": 0.5876127481460571,
"rewards/rejected": -3.051048755645752,
"step": 560
},
{
"epoch": 0.9072821329088738,
"grad_norm": 13.324839693428412,
"learning_rate": 1.2888473281864597e-08,
"logits/chosen": 3.275439500808716,
"logits/rejected": 3.773510694503784,
"logps/chosen": -398.3496398925781,
"logps/rejected": -472.8216247558594,
"loss": 0.5281,
"rewards/accuracies": 0.6968749761581421,
"rewards/chosen": -2.447859287261963,
"rewards/margins": 0.6992862820625305,
"rewards/rejected": -3.1471457481384277,
"step": 570
},
{
"epoch": 0.9231993633107839,
"grad_norm": 16.191483693504896,
"learning_rate": 8.851477564560061e-09,
"logits/chosen": 3.820493221282959,
"logits/rejected": 4.351908206939697,
"logps/chosen": -389.7720947265625,
"logps/rejected": -459.3812561035156,
"loss": 0.55,
"rewards/accuracies": 0.6968749761581421,
"rewards/chosen": -2.404622793197632,
"rewards/margins": 0.6956027746200562,
"rewards/rejected": -3.1002252101898193,
"step": 580
},
{
"epoch": 0.939116593712694,
"grad_norm": 12.583560577719238,
"learning_rate": 5.559858110443016e-09,
"logits/chosen": 3.1959145069122314,
"logits/rejected": 3.7669143676757812,
"logps/chosen": -405.40618896484375,
"logps/rejected": -479.1631774902344,
"loss": 0.5251,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -2.4700753688812256,
"rewards/margins": 0.7442026138305664,
"rewards/rejected": -3.214277982711792,
"step": 590
},
{
"epoch": 0.955033824114604,
"grad_norm": 15.78612631007661,
"learning_rate": 3.023789126611137e-09,
"logits/chosen": 3.5097384452819824,
"logits/rejected": 4.086105823516846,
"logps/chosen": -395.22027587890625,
"logps/rejected": -475.81787109375,
"loss": 0.5376,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -2.4702706336975098,
"rewards/margins": 0.8460418581962585,
"rewards/rejected": -3.316312313079834,
"step": 600
},
{
"epoch": 0.9709510545165141,
"grad_norm": 15.913083563945913,
"learning_rate": 1.2511094569571668e-09,
"logits/chosen": 3.64544939994812,
"logits/rejected": 4.29144811630249,
"logps/chosen": -404.07501220703125,
"logps/rejected": -467.01153564453125,
"loss": 0.5567,
"rewards/accuracies": 0.659375011920929,
"rewards/chosen": -2.5605225563049316,
"rewards/margins": 0.6661086082458496,
"rewards/rejected": -3.2266311645507812,
"step": 610
},
{
"epoch": 0.9868682849184242,
"grad_norm": 15.29692820991952,
"learning_rate": 2.4729835275189016e-10,
"logits/chosen": 3.532379150390625,
"logits/rejected": 3.938063859939575,
"logps/chosen": -392.1960754394531,
"logps/rejected": -481.100830078125,
"loss": 0.5331,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -2.4987053871154785,
"rewards/margins": 0.7970455288887024,
"rewards/rejected": -3.295750379562378,
"step": 620
},
{
"epoch": 0.9996020692399522,
"step": 628,
"total_flos": 0.0,
"train_loss": 0.5721431234080321,
"train_runtime": 15529.5131,
"train_samples_per_second": 10.354,
"train_steps_per_second": 0.04
}
],
"logging_steps": 10,
"max_steps": 628,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
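
The log_history above records DPO metrics (loss, rewards/chosen, rewards/rejected, rewards/margins, learning rate) every 10 optimizer steps, with a final aggregate entry at step 628. A minimal sketch of how one might load this trainer_state.json and plot the loss and reward margin curves; it assumes the file sits in the working directory and that matplotlib is installed, neither of which is stated in the file itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state dumped by the Hugging Face Trainer.
# Adjust the path if the file lives elsewhere.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final entry holds
# aggregate stats (train_loss, train_runtime) rather than step metrics.
logs = [e for e in state["log_history"] if "loss" in e and "rewards/margins" in e]

steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
margins = [e["rewards/margins"] for e in logs]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, loss)
ax1.set_xlabel("step")
ax1.set_ylabel("DPO loss")
ax2.plot(steps, margins)
ax2.set_xlabel("step")
ax2.set_ylabel("rewards/margins")
fig.tight_layout()
plt.show()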