zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020920502092050207,
"grad_norm": 9.169488248390985,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -2.7655434608459473,
"logits/rejected": -2.7158141136169434,
"logps/chosen": -269.8756103515625,
"logps/rejected": -360.6216125488281,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02092050209205021,
"grad_norm": 9.132666281035473,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -2.5923004150390625,
"logits/rejected": -2.5624728202819824,
"logps/chosen": -264.7728576660156,
"logps/rejected": -251.53372192382812,
"loss": 0.6931,
"rewards/accuracies": 0.3958333432674408,
"rewards/chosen": 0.00020155739912297577,
"rewards/margins": -0.00015585176879540086,
"rewards/rejected": 0.00035740918247029185,
"step": 10
},
{
"epoch": 0.04184100418410042,
"grad_norm": 8.379180253402911,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -2.652967929840088,
"logits/rejected": -2.6051251888275146,
"logps/chosen": -281.42034912109375,
"logps/rejected": -296.83856201171875,
"loss": 0.6924,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.000834028993267566,
"rewards/margins": 0.001213615294545889,
"rewards/rejected": -0.00037958630127832294,
"step": 20
},
{
"epoch": 0.06276150627615062,
"grad_norm": 8.329138184590803,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.664080858230591,
"logits/rejected": -2.5915699005126953,
"logps/chosen": -299.40093994140625,
"logps/rejected": -263.69439697265625,
"loss": 0.6885,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": 0.012552594766020775,
"rewards/margins": 0.010249468497931957,
"rewards/rejected": 0.0023031283635646105,
"step": 30
},
{
"epoch": 0.08368200836820083,
"grad_norm": 8.173707856343666,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.556405544281006,
"logits/rejected": -2.5214595794677734,
"logps/chosen": -263.1281433105469,
"logps/rejected": -245.1213836669922,
"loss": 0.6776,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.03967439383268356,
"rewards/margins": 0.04709646850824356,
"rewards/rejected": -0.0074220686219632626,
"step": 40
},
{
"epoch": 0.10460251046025104,
"grad_norm": 8.447504654209819,
"learning_rate": 4.999733114418725e-07,
"logits/chosen": -2.4980883598327637,
"logits/rejected": -2.458181381225586,
"logps/chosen": -279.6307678222656,
"logps/rejected": -297.44720458984375,
"loss": 0.6616,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.001089364057406783,
"rewards/margins": 0.04791159927845001,
"rewards/rejected": -0.04682223126292229,
"step": 50
},
{
"epoch": 0.12552301255230125,
"grad_norm": 11.395386111552323,
"learning_rate": 4.990398100856366e-07,
"logits/chosen": -2.5134081840515137,
"logits/rejected": -2.4417667388916016,
"logps/chosen": -273.7184143066406,
"logps/rejected": -312.87664794921875,
"loss": 0.6388,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.020547768101096153,
"rewards/margins": 0.12676255404949188,
"rewards/rejected": -0.14731034636497498,
"step": 60
},
{
"epoch": 0.14644351464435146,
"grad_norm": 11.067906107691849,
"learning_rate": 4.967775735898179e-07,
"logits/chosen": -2.3636722564697266,
"logits/rejected": -2.3617496490478516,
"logps/chosen": -270.6546936035156,
"logps/rejected": -283.1260681152344,
"loss": 0.616,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.04985763877630234,
"rewards/margins": 0.21395531296730042,
"rewards/rejected": -0.26381292939186096,
"step": 70
},
{
"epoch": 0.16736401673640167,
"grad_norm": 17.68754980709847,
"learning_rate": 4.931986719649298e-07,
"logits/chosen": -2.552147626876831,
"logits/rejected": -2.4690425395965576,
"logps/chosen": -336.59686279296875,
"logps/rejected": -304.60662841796875,
"loss": 0.6025,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.13819542527198792,
"rewards/margins": 0.26680219173431396,
"rewards/rejected": -0.40499764680862427,
"step": 80
},
{
"epoch": 0.18828451882845187,
"grad_norm": 15.872515076322841,
"learning_rate": 4.883222001996351e-07,
"logits/chosen": -1.9810800552368164,
"logits/rejected": -1.9242852926254272,
"logps/chosen": -292.8248596191406,
"logps/rejected": -333.0174560546875,
"loss": 0.5682,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.3367532193660736,
"rewards/margins": 0.4693824350833893,
"rewards/rejected": -0.8061355352401733,
"step": 90
},
{
"epoch": 0.20920502092050208,
"grad_norm": 17.67293052067369,
"learning_rate": 4.821741763807186e-07,
"logits/chosen": -0.9724929928779602,
"logits/rejected": -0.756173312664032,
"logps/chosen": -315.0499267578125,
"logps/rejected": -370.2469482421875,
"loss": 0.5644,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.546815037727356,
"rewards/margins": 0.5847707986831665,
"rewards/rejected": -1.1315858364105225,
"step": 100
},
{
"epoch": 0.20920502092050208,
"eval_logits/chosen": -0.6325794458389282,
"eval_logits/rejected": -0.42406219244003296,
"eval_logps/chosen": -330.3586120605469,
"eval_logps/rejected": -383.287353515625,
"eval_loss": 0.5695399045944214,
"eval_rewards/accuracies": 0.71875,
"eval_rewards/chosen": -0.6772869825363159,
"eval_rewards/margins": 0.5289655327796936,
"eval_rewards/rejected": -1.2062525749206543,
"eval_runtime": 93.9486,
"eval_samples_per_second": 21.288,
"eval_steps_per_second": 0.341,
"step": 100
},
{
"epoch": 0.2301255230125523,
"grad_norm": 19.504882215784345,
"learning_rate": 4.747874028753375e-07,
"logits/chosen": -0.6634742021560669,
"logits/rejected": -0.1376529037952423,
"logps/chosen": -370.84906005859375,
"logps/rejected": -380.2650146484375,
"loss": 0.5637,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.6632061004638672,
"rewards/margins": 0.5609452128410339,
"rewards/rejected": -1.224151372909546,
"step": 110
},
{
"epoch": 0.2510460251046025,
"grad_norm": 22.919441722362766,
"learning_rate": 4.662012913161997e-07,
"logits/chosen": -0.41334447264671326,
"logits/rejected": -0.056368906050920486,
"logps/chosen": -343.59808349609375,
"logps/rejected": -384.07171630859375,
"loss": 0.5526,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -0.6241872906684875,
"rewards/margins": 0.6175822019577026,
"rewards/rejected": -1.241769552230835,
"step": 120
},
{
"epoch": 0.2719665271966527,
"grad_norm": 23.51365798269679,
"learning_rate": 4.5646165232345103e-07,
"logits/chosen": 0.1640959084033966,
"logits/rejected": 0.5434903502464294,
"logps/chosen": -387.5578308105469,
"logps/rejected": -431.74737548828125,
"loss": 0.543,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.0557366609573364,
"rewards/margins": 0.5772033929824829,
"rewards/rejected": -1.6329400539398193,
"step": 130
},
{
"epoch": 0.2928870292887029,
"grad_norm": 21.966118387144043,
"learning_rate": 4.456204510851956e-07,
"logits/chosen": 0.21006186306476593,
"logits/rejected": 0.5628204345703125,
"logps/chosen": -439.6205139160156,
"logps/rejected": -486.50213623046875,
"loss": 0.547,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.2365432977676392,
"rewards/margins": 0.6220105886459351,
"rewards/rejected": -1.8585538864135742,
"step": 140
},
{
"epoch": 0.3138075313807531,
"grad_norm": 25.216734329837617,
"learning_rate": 4.337355301007335e-07,
"logits/chosen": -0.12071762979030609,
"logits/rejected": 0.32984182238578796,
"logps/chosen": -368.8601989746094,
"logps/rejected": -416.48101806640625,
"loss": 0.5444,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.8562356233596802,
"rewards/margins": 0.5465985536575317,
"rewards/rejected": -1.4028340578079224,
"step": 150
},
{
"epoch": 0.33472803347280333,
"grad_norm": 27.13158239567326,
"learning_rate": 4.2087030056579986e-07,
"logits/chosen": 0.2426595389842987,
"logits/rejected": 0.9978505373001099,
"logps/chosen": -358.2662353515625,
"logps/rejected": -418.1039123535156,
"loss": 0.5383,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.8522323369979858,
"rewards/margins": 0.7546002864837646,
"rewards/rejected": -1.60683274269104,
"step": 160
},
{
"epoch": 0.35564853556485354,
"grad_norm": 23.860913284499137,
"learning_rate": 4.070934040463998e-07,
"logits/chosen": 0.5063488483428955,
"logits/rejected": 0.9657643437385559,
"logps/chosen": -333.5655212402344,
"logps/rejected": -372.67657470703125,
"loss": 0.5413,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.8425030708312988,
"rewards/margins": 0.57291579246521,
"rewards/rejected": -1.4154188632965088,
"step": 170
},
{
"epoch": 0.37656903765690375,
"grad_norm": 22.58619850722941,
"learning_rate": 3.9247834624635404e-07,
"logits/chosen": 1.7443010807037354,
"logits/rejected": 2.3398690223693848,
"logps/chosen": -361.9225769042969,
"logps/rejected": -401.04473876953125,
"loss": 0.5088,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.0559453964233398,
"rewards/margins": 0.6787471771240234,
"rewards/rejected": -1.7346923351287842,
"step": 180
},
{
"epoch": 0.39748953974895396,
"grad_norm": 23.3743639513868,
"learning_rate": 3.7710310482256523e-07,
"logits/chosen": 1.0638327598571777,
"logits/rejected": 1.6086857318878174,
"logps/chosen": -352.91046142578125,
"logps/rejected": -408.85784912109375,
"loss": 0.5196,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.8161503076553345,
"rewards/margins": 0.6746509671211243,
"rewards/rejected": -1.490801215171814,
"step": 190
},
{
"epoch": 0.41841004184100417,
"grad_norm": 49.548116648788316,
"learning_rate": 3.610497133404795e-07,
"logits/chosen": 0.8589725494384766,
"logits/rejected": 1.4762446880340576,
"logps/chosen": -341.1542663574219,
"logps/rejected": -407.9635009765625,
"loss": 0.5445,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.8125879168510437,
"rewards/margins": 0.7465857267379761,
"rewards/rejected": -1.559173583984375,
"step": 200
},
{
"epoch": 0.41841004184100417,
"eval_logits/chosen": 0.19241900742053986,
"eval_logits/rejected": 0.969687283039093,
"eval_logps/chosen": -323.71527099609375,
"eval_logps/rejected": -402.0177307128906,
"eval_loss": 0.5353989005088806,
"eval_rewards/accuracies": 0.76953125,
"eval_rewards/chosen": -0.6108533143997192,
"eval_rewards/margins": 0.7827030420303345,
"eval_rewards/rejected": -1.3935563564300537,
"eval_runtime": 93.8143,
"eval_samples_per_second": 21.319,
"eval_steps_per_second": 0.341,
"step": 200
},
{
"epoch": 0.4393305439330544,
"grad_norm": 23.324141646005582,
"learning_rate": 3.4440382358952115e-07,
"logits/chosen": 0.8318442106246948,
"logits/rejected": 1.7365608215332031,
"logps/chosen": -374.9430847167969,
"logps/rejected": -405.85406494140625,
"loss": 0.5368,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.8527874946594238,
"rewards/margins": 0.6931684613227844,
"rewards/rejected": -1.5459558963775635,
"step": 210
},
{
"epoch": 0.4602510460251046,
"grad_norm": 31.425698930769954,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": 1.3936798572540283,
"logits/rejected": 2.1958301067352295,
"logps/chosen": -372.55242919921875,
"logps/rejected": -432.43658447265625,
"loss": 0.5238,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.071751356124878,
"rewards/margins": 0.7956344485282898,
"rewards/rejected": -1.8673858642578125,
"step": 220
},
{
"epoch": 0.4811715481171548,
"grad_norm": 23.84771859224052,
"learning_rate": 3.096924887558854e-07,
"logits/chosen": 1.1196578741073608,
"logits/rejected": 1.908739686012268,
"logps/chosen": -338.7588806152344,
"logps/rejected": -430.38177490234375,
"loss": 0.5365,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.8980579376220703,
"rewards/margins": 0.9280304908752441,
"rewards/rejected": -1.826088309288025,
"step": 230
},
{
"epoch": 0.502092050209205,
"grad_norm": 25.326708869779715,
"learning_rate": 2.9181224366319943e-07,
"logits/chosen": 0.7311414480209351,
"logits/rejected": 1.439819097518921,
"logps/chosen": -337.96356201171875,
"logps/rejected": -394.6842346191406,
"loss": 0.5039,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.74227374792099,
"rewards/margins": 0.7140385508537292,
"rewards/rejected": -1.4563124179840088,
"step": 240
},
{
"epoch": 0.5230125523012552,
"grad_norm": 23.27146742453404,
"learning_rate": 2.7370891215954565e-07,
"logits/chosen": 1.5978848934173584,
"logits/rejected": 2.462660551071167,
"logps/chosen": -394.6571044921875,
"logps/rejected": -444.3811950683594,
"loss": 0.5066,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.9645415544509888,
"rewards/margins": 0.8838118314743042,
"rewards/rejected": -1.848353385925293,
"step": 250
},
{
"epoch": 0.5439330543933054,
"grad_norm": 28.502041662438575,
"learning_rate": 2.55479083351317e-07,
"logits/chosen": 1.593186616897583,
"logits/rejected": 2.44622802734375,
"logps/chosen": -398.6466064453125,
"logps/rejected": -443.5439453125,
"loss": 0.5116,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.0434881448745728,
"rewards/margins": 0.8526838421821594,
"rewards/rejected": -1.8961719274520874,
"step": 260
},
{
"epoch": 0.5648535564853556,
"grad_norm": 21.02294818188671,
"learning_rate": 2.3722002126275822e-07,
"logits/chosen": 1.052922010421753,
"logits/rejected": 1.8219140768051147,
"logps/chosen": -361.7513122558594,
"logps/rejected": -412.5428771972656,
"loss": 0.521,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.847709059715271,
"rewards/margins": 0.7012378573417664,
"rewards/rejected": -1.5489470958709717,
"step": 270
},
{
"epoch": 0.5857740585774058,
"grad_norm": 25.834888960693267,
"learning_rate": 2.19029145890313e-07,
"logits/chosen": 1.7453187704086304,
"logits/rejected": 2.704537868499756,
"logps/chosen": -369.0428161621094,
"logps/rejected": -429.200927734375,
"loss": 0.5279,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.073177456855774,
"rewards/margins": 0.7966909408569336,
"rewards/rejected": -1.869868278503418,
"step": 280
},
{
"epoch": 0.606694560669456,
"grad_norm": 23.200570814556684,
"learning_rate": 2.0100351342479216e-07,
"logits/chosen": 1.8309924602508545,
"logits/rejected": 2.505741596221924,
"logps/chosen": -375.9858703613281,
"logps/rejected": -437.6570739746094,
"loss": 0.5144,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.2489399909973145,
"rewards/margins": 0.7080605030059814,
"rewards/rejected": -1.957000494003296,
"step": 290
},
{
"epoch": 0.6276150627615062,
"grad_norm": 27.865655196796528,
"learning_rate": 1.8323929841460178e-07,
"logits/chosen": 1.2169395685195923,
"logits/rejected": 2.430232048034668,
"logps/chosen": -399.8282165527344,
"logps/rejected": -438.07861328125,
"loss": 0.4909,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.987991452217102,
"rewards/margins": 0.7920428514480591,
"rewards/rejected": -1.7800343036651611,
"step": 300
},
{
"epoch": 0.6276150627615062,
"eval_logits/chosen": 1.4139740467071533,
"eval_logits/rejected": 2.1666512489318848,
"eval_logps/chosen": -355.1510009765625,
"eval_logps/rejected": -446.0438232421875,
"eval_loss": 0.5138201117515564,
"eval_rewards/accuracies": 0.7578125,
"eval_rewards/chosen": -0.9252106547355652,
"eval_rewards/margins": 0.9086061716079712,
"eval_rewards/rejected": -1.8338167667388916,
"eval_runtime": 93.7782,
"eval_samples_per_second": 21.327,
"eval_steps_per_second": 0.341,
"step": 300
},
{
"epoch": 0.6485355648535565,
"grad_norm": 29.586629493453568,
"learning_rate": 1.6583128063291573e-07,
"logits/chosen": 1.761592149734497,
"logits/rejected": 2.4888718128204346,
"logps/chosen": -405.75091552734375,
"logps/rejected": -444.1439514160156,
"loss": 0.4948,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -1.1044785976409912,
"rewards/margins": 0.7519679665565491,
"rewards/rejected": -1.8564465045928955,
"step": 310
},
{
"epoch": 0.6694560669456067,
"grad_norm": 28.12248662851152,
"learning_rate": 1.488723393865766e-07,
"logits/chosen": 2.008812427520752,
"logits/rejected": 2.907062530517578,
"logps/chosen": -408.342529296875,
"logps/rejected": -436.1858825683594,
"loss": 0.4912,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -1.1462968587875366,
"rewards/margins": 0.7669790387153625,
"rewards/rejected": -1.913275957107544,
"step": 320
},
{
"epoch": 0.6903765690376569,
"grad_norm": 29.887208796367812,
"learning_rate": 1.3245295796480788e-07,
"logits/chosen": 1.4809958934783936,
"logits/rejected": 2.334693193435669,
"logps/chosen": -368.56500244140625,
"logps/rejected": -438.9111328125,
"loss": 0.4951,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.0632619857788086,
"rewards/margins": 0.7305603623390198,
"rewards/rejected": -1.7938222885131836,
"step": 330
},
{
"epoch": 0.7112970711297071,
"grad_norm": 38.50152594632388,
"learning_rate": 1.1666074087171627e-07,
"logits/chosen": 1.5904924869537354,
"logits/rejected": 2.3698315620422363,
"logps/chosen": -392.74053955078125,
"logps/rejected": -485.83966064453125,
"loss": 0.5032,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.0266270637512207,
"rewards/margins": 1.030725121498108,
"rewards/rejected": -2.057352066040039,
"step": 340
},
{
"epoch": 0.7322175732217573,
"grad_norm": 25.77639486551186,
"learning_rate": 1.0157994641835734e-07,
"logits/chosen": 1.6322052478790283,
"logits/rejected": 2.4968695640563965,
"logps/chosen": -368.20428466796875,
"logps/rejected": -436.6559143066406,
"loss": 0.4763,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.1000165939331055,
"rewards/margins": 0.8595396876335144,
"rewards/rejected": -1.9595565795898438,
"step": 350
},
{
"epoch": 0.7531380753138075,
"grad_norm": 22.519856777137164,
"learning_rate": 8.729103716819111e-08,
"logits/chosen": 1.4749292135238647,
"logits/rejected": 2.6089377403259277,
"logps/chosen": -413.32354736328125,
"logps/rejected": -461.50213623046875,
"loss": 0.5153,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.1062767505645752,
"rewards/margins": 0.8961655497550964,
"rewards/rejected": -2.0024421215057373,
"step": 360
},
{
"epoch": 0.7740585774058577,
"grad_norm": 25.05710591816908,
"learning_rate": 7.387025063449081e-08,
"logits/chosen": 1.9852964878082275,
"logits/rejected": 2.7102103233337402,
"logps/chosen": -397.772216796875,
"logps/rejected": -431.29083251953125,
"loss": 0.5068,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.2243640422821045,
"rewards/margins": 0.7056778073310852,
"rewards/rejected": -1.930042028427124,
"step": 370
},
{
"epoch": 0.7949790794979079,
"grad_norm": 26.679151914355828,
"learning_rate": 6.138919252022435e-08,
"logits/chosen": 2.23157000541687,
"logits/rejected": 2.7516064643859863,
"logps/chosen": -376.5492248535156,
"logps/rejected": -489.54620361328125,
"loss": 0.5026,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.3706672191619873,
"rewards/margins": 0.9382585287094116,
"rewards/rejected": -2.3089256286621094,
"step": 380
},
{
"epoch": 0.8158995815899581,
"grad_norm": 24.84556158087066,
"learning_rate": 4.991445467064689e-08,
"logits/chosen": 1.709517240524292,
"logits/rejected": 2.2737374305725098,
"logps/chosen": -423.19622802734375,
"logps/rejected": -487.203857421875,
"loss": 0.4908,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.1757529973983765,
"rewards/margins": 0.8564424514770508,
"rewards/rejected": -2.0321953296661377,
"step": 390
},
{
"epoch": 0.8368200836820083,
"grad_norm": 25.903753753002636,
"learning_rate": 3.9507259776993954e-08,
"logits/chosen": 1.9856996536254883,
"logits/rejected": 2.878208637237549,
"logps/chosen": -387.4896240234375,
"logps/rejected": -474.1192932128906,
"loss": 0.5036,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -1.1694214344024658,
"rewards/margins": 0.9022589921951294,
"rewards/rejected": -2.0716805458068848,
"step": 400
},
{
"epoch": 0.8368200836820083,
"eval_logits/chosen": 1.5651072263717651,
"eval_logits/rejected": 2.3428351879119873,
"eval_logps/chosen": -362.6148681640625,
"eval_logps/rejected": -458.140380859375,
"eval_loss": 0.5066322088241577,
"eval_rewards/accuracies": 0.78125,
"eval_rewards/chosen": -0.9998489618301392,
"eval_rewards/margins": 0.9549338817596436,
"eval_rewards/rejected": -1.9547827243804932,
"eval_runtime": 93.7833,
"eval_samples_per_second": 21.326,
"eval_steps_per_second": 0.341,
"step": 400
},
{
"epoch": 0.8577405857740585,
"grad_norm": 27.223878961638523,
"learning_rate": 3.022313472693447e-08,
"logits/chosen": 1.7999566793441772,
"logits/rejected": 2.912705183029175,
"logps/chosen": -406.19830322265625,
"logps/rejected": -464.55419921875,
"loss": 0.5098,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -1.0726454257965088,
"rewards/margins": 0.8846432566642761,
"rewards/rejected": -1.9572887420654297,
"step": 410
},
{
"epoch": 0.8786610878661087,
"grad_norm": 24.576314036164316,
"learning_rate": 2.2111614344599684e-08,
"logits/chosen": 1.4636269807815552,
"logits/rejected": 2.3438045978546143,
"logps/chosen": -404.0235900878906,
"logps/rejected": -464.222900390625,
"loss": 0.4945,
"rewards/accuracies": 0.6875,
"rewards/chosen": -1.0694642066955566,
"rewards/margins": 0.8283600807189941,
"rewards/rejected": -1.8978245258331299,
"step": 420
},
{
"epoch": 0.899581589958159,
"grad_norm": 26.751044345392213,
"learning_rate": 1.521597710086439e-08,
"logits/chosen": 1.9449716806411743,
"logits/rejected": 2.7110419273376465,
"logps/chosen": -393.8571472167969,
"logps/rejected": -460.3021545410156,
"loss": 0.474,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -1.1481584310531616,
"rewards/margins": 0.8771858215332031,
"rewards/rejected": -2.0253443717956543,
"step": 430
},
{
"epoch": 0.9205020920502092,
"grad_norm": 30.050302790236575,
"learning_rate": 9.57301420397924e-09,
"logits/chosen": 1.566941499710083,
"logits/rejected": 2.495941400527954,
"logps/chosen": -384.67755126953125,
"logps/rejected": -449.6517639160156,
"loss": 0.4998,
"rewards/accuracies": 0.78125,
"rewards/chosen": -1.0034703016281128,
"rewards/margins": 0.8020319938659668,
"rewards/rejected": -1.8055026531219482,
"step": 440
},
{
"epoch": 0.9414225941422594,
"grad_norm": 27.265945171563892,
"learning_rate": 5.212833302556258e-09,
"logits/chosen": 1.5636577606201172,
"logits/rejected": 2.303377866744995,
"logps/chosen": -403.6795654296875,
"logps/rejected": -497.7889709472656,
"loss": 0.4996,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.0995432138442993,
"rewards/margins": 0.8072215914726257,
"rewards/rejected": -1.9067647457122803,
"step": 450
},
{
"epoch": 0.9623430962343096,
"grad_norm": 26.83929563886876,
"learning_rate": 2.158697848236607e-09,
"logits/chosen": 1.8378353118896484,
"logits/rejected": 2.7034897804260254,
"logps/chosen": -376.66876220703125,
"logps/rejected": -429.076904296875,
"loss": 0.5008,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.046795129776001,
"rewards/margins": 0.8175219297409058,
"rewards/rejected": -1.8643172979354858,
"step": 460
},
{
"epoch": 0.9832635983263598,
"grad_norm": 25.63144929511813,
"learning_rate": 4.269029751107489e-10,
"logits/chosen": 1.5859147310256958,
"logits/rejected": 2.591291666030884,
"logps/chosen": -382.67071533203125,
"logps/rejected": -471.57861328125,
"loss": 0.4918,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -1.0397284030914307,
"rewards/margins": 0.852301299571991,
"rewards/rejected": -1.8920300006866455,
"step": 470
},
{
"epoch": 1.0,
"step": 478,
"total_flos": 0.0,
"train_loss": 0.5401068107852377,
"train_runtime": 7660.5652,
"train_samples_per_second": 7.98,
"train_steps_per_second": 0.062
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
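
The log_history above interleaves training entries (logged every logging_steps=10, keyed by "loss" and "rewards/*") with evaluation entries (every eval_steps=100, keyed by "eval_loss" and "eval_rewards/*"), plus a final summary entry. Below is a minimal sketch of how one might extract the loss and reward-margin curves from this file; it assumes the file is saved locally as trainer_state.json (a hypothetical path), and uses only the keys visible in the JSON above.

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:  # assumed local path
    state = json.load(f)

train_steps, train_loss, train_margins = [], [], []
eval_steps, eval_loss = [], []

for entry in state["log_history"]:
    if "eval_loss" in entry:
        # Evaluation entries (here: steps 100, 200, 300, 400).
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])
    elif "loss" in entry:
        # Training entries; the final summary entry has "train_loss"
        # instead of "loss", so it is skipped by this branch.
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
        train_margins.append(entry["rewards/margins"])

print(f"final train loss (step {train_steps[-1]}): {train_loss[-1]:.4f}")
print(f"final eval loss  (step {eval_steps[-1]}): {eval_loss[-1]:.4f}")
print(f"final reward margin: {train_margins[-1]:.4f}")

The same lists can be handed to any plotting library to visualize how the DPO loss falls from ~0.693 (chance) toward ~0.49-0.50 while the reward margin grows over the single training epoch.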