|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 5.0, |
|
"eval_steps": 100, |
|
"global_step": 1250, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 4e-08, |
|
"logits/chosen": -1.8503975868225098, |
|
"logits/rejected": -1.8503975868225098, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.4075, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 4.0000000000000003e-07, |
|
"logits/chosen": -1.8588156700134277, |
|
"logits/rejected": -1.8588156700134277, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3636, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 8.000000000000001e-07, |
|
"logits/chosen": -1.970517873764038, |
|
"logits/rejected": -1.970517873764038, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3902, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.2000000000000002e-06, |
|
"logits/chosen": -1.9209930896759033, |
|
"logits/rejected": -1.9209930896759033, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3482, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.6000000000000001e-06, |
|
"logits/chosen": -1.883547067642212, |
|
"logits/rejected": -1.883547067642212, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3507, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 2.0000000000000003e-06, |
|
"logits/chosen": -1.9128715991973877, |
|
"logits/rejected": -1.9128715991973877, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3359, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 2.4000000000000003e-06, |
|
"logits/chosen": -2.0107295513153076, |
|
"logits/rejected": -2.0107295513153076, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3828, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 2.8000000000000003e-06, |
|
"logits/chosen": -1.9920228719711304, |
|
"logits/rejected": -1.9920228719711304, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3112, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 3.2000000000000003e-06, |
|
"logits/chosen": -1.8801155090332031, |
|
"logits/rejected": -1.8801155090332031, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3778, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 3.6000000000000003e-06, |
|
"logits/chosen": -2.050198793411255, |
|
"logits/rejected": -2.050198793411255, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3655, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 4.000000000000001e-06, |
|
"logits/chosen": -1.8852717876434326, |
|
"logits/rejected": -1.8852717876434326, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3803, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"eval_logits/chosen": -1.9617642164230347, |
|
"eval_logits/rejected": -1.8066532611846924, |
|
"eval_logps/chosen": -266.6976013183594, |
|
"eval_logps/rejected": -254.9398193359375, |
|
"eval_loss": 0.053734518587589264, |
|
"eval_rewards/accuracies": 0.0, |
|
"eval_rewards/chosen": 0.0, |
|
"eval_rewards/margins": 0.0, |
|
"eval_rewards/rejected": 0.0, |
|
"eval_runtime": 701.958, |
|
"eval_samples_per_second": 2.849, |
|
"eval_steps_per_second": 1.425, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 4.4e-06, |
|
"logits/chosen": -1.731688141822815, |
|
"logits/rejected": -1.731688141822815, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2717, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.800000000000001e-06, |
|
"logits/chosen": -1.8530235290527344, |
|
"logits/rejected": -1.8530235290527344, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3482, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 4.999756310023261e-06, |
|
"logits/chosen": -2.0225424766540527, |
|
"logits/rejected": -2.0225424766540527, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3507, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.997807075247147e-06, |
|
"logits/chosen": -1.8995482921600342, |
|
"logits/rejected": -1.8995482921600342, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3186, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.993910125649561e-06, |
|
"logits/chosen": -1.8709551095962524, |
|
"logits/rejected": -1.8500862121582031, |
|
"logps/chosen": -4.896004676818848, |
|
"logps/rejected": -1.6084611415863037, |
|
"loss": 0.3112, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.988068499954578e-06, |
|
"logits/chosen": -2.044778823852539, |
|
"logits/rejected": -2.044778823852539, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.980286753286196e-06, |
|
"logits/chosen": -1.8561241626739502, |
|
"logits/rejected": -1.8561241626739502, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3531, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 4.970570953616383e-06, |
|
"logits/chosen": -1.956254243850708, |
|
"logits/rejected": -1.922114372253418, |
|
"logps/chosen": -12.786833763122559, |
|
"logps/rejected": -5.852889060974121, |
|
"loss": 0.3401, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.015111232176423073, |
|
"rewards/margins": 0.0011773437727242708, |
|
"rewards/rejected": -0.016288574784994125, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 4.958928677033465e-06, |
|
"logits/chosen": -1.8586145639419556, |
|
"logits/rejected": -1.8586145639419556, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.9453690018345144e-06, |
|
"logits/chosen": -1.908278226852417, |
|
"logits/rejected": -1.9099280834197998, |
|
"logps/chosen": -5.618971824645996, |
|
"logps/rejected": -6.245630741119385, |
|
"loss": 0.2725, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": -0.00010124015534529462, |
|
"rewards/margins": -0.0011139239650219679, |
|
"rewards/rejected": 0.0010126838460564613, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_logits/chosen": -1.9946638345718384, |
|
"eval_logits/rejected": -1.8361071348190308, |
|
"eval_logps/chosen": -270.886474609375, |
|
"eval_logps/rejected": -259.3279724121094, |
|
"eval_loss": 0.054894037544727325, |
|
"eval_rewards/accuracies": 0.4410000145435333, |
|
"eval_rewards/chosen": -0.02094435505568981, |
|
"eval_rewards/margins": 0.0009965725475922227, |
|
"eval_rewards/rejected": -0.021940927952528, |
|
"eval_runtime": 702.6065, |
|
"eval_samples_per_second": 2.847, |
|
"eval_steps_per_second": 1.423, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.9299025014463665e-06, |
|
"logits/chosen": -1.8702771663665771, |
|
"logits/rejected": -1.8702771663665771, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3062, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.912541236180779e-06, |
|
"logits/chosen": -2.0095884799957275, |
|
"logits/rejected": -2.0095884799957275, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 4.893298743830168e-06, |
|
"logits/chosen": -1.931212067604065, |
|
"logits/rejected": -1.930567741394043, |
|
"logps/chosen": -2.1643919944763184, |
|
"logps/rejected": -2.322953462600708, |
|
"loss": 0.3507, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.003725755959749222, |
|
"rewards/margins": 6.57886266708374e-06, |
|
"rewards/rejected": -0.0037323348224163055, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.8721900291112415e-06, |
|
"logits/chosen": -2.0807600021362305, |
|
"logits/rejected": -2.0807600021362305, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3211, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.849231551964771e-06, |
|
"logits/chosen": -1.9053341150283813, |
|
"logits/rejected": -1.9053341150283813, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3581, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.824441214720629e-06, |
|
"logits/chosen": -1.7558910846710205, |
|
"logits/rejected": -1.7558910846710205, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3778, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 4.7978383481380865e-06, |
|
"logits/chosen": -1.8789889812469482, |
|
"logits/rejected": -1.847283959388733, |
|
"logps/chosen": -14.500409126281738, |
|
"logps/rejected": -22.52828598022461, |
|
"loss": 0.5408, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.023679107427597046, |
|
"rewards/margins": 0.07598645985126495, |
|
"rewards/rejected": -0.099665567278862, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.769443696332272e-06, |
|
"logits/chosen": -1.9984527826309204, |
|
"logits/rejected": -1.9984527826309204, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3754, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 4.7392794005985324e-06, |
|
"logits/chosen": -1.9221277236938477, |
|
"logits/rejected": -1.9221277236938477, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3087, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.707368982147318e-06, |
|
"logits/chosen": -2.0028936862945557, |
|
"logits/rejected": -2.0028936862945557, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3013, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"eval_logits/chosen": -1.990155816078186, |
|
"eval_logits/rejected": -1.8278305530548096, |
|
"eval_logps/chosen": -300.0851135253906, |
|
"eval_logps/rejected": -291.15228271484375, |
|
"eval_loss": 0.05316770076751709, |
|
"eval_rewards/accuracies": 0.4675000011920929, |
|
"eval_rewards/chosen": -0.1669376790523529, |
|
"eval_rewards/margins": 0.014124665409326553, |
|
"eval_rewards/rejected": -0.18106235563755035, |
|
"eval_runtime": 702.8753, |
|
"eval_samples_per_second": 2.845, |
|
"eval_steps_per_second": 1.423, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 4.673737323763048e-06, |
|
"logits/chosen": -1.927883505821228, |
|
"logits/rejected": -1.927883505821228, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.638410650401267e-06, |
|
"logits/chosen": -2.018455982208252, |
|
"logits/rejected": -2.018455982208252, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3951, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 4.601416508739211e-06, |
|
"logits/chosen": -2.0109939575195312, |
|
"logits/rejected": -2.0108282566070557, |
|
"logps/chosen": -2.7990431785583496, |
|
"logps/rejected": -4.229598045349121, |
|
"loss": 0.3279, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.006899011321365833, |
|
"rewards/margins": 0.006366544868797064, |
|
"rewards/rejected": -0.01326555572450161, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.562783745695738e-06, |
|
"logits/chosen": -1.74103581905365, |
|
"logits/rejected": -1.74103581905365, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.522542485937369e-06, |
|
"logits/chosen": -1.9381316900253296, |
|
"logits/rejected": -1.9381316900253296, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3754, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.4807241083879774e-06, |
|
"logits/chosen": -1.9683544635772705, |
|
"logits/rejected": -1.9683544635772705, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3778, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 4.437361221760449e-06, |
|
"logits/chosen": -1.944864273071289, |
|
"logits/rejected": -1.944864273071289, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3852, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.3924876391293915e-06, |
|
"logits/chosen": -1.887290358543396, |
|
"logits/rejected": -1.887290358543396, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3852, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.346138351564711e-06, |
|
"logits/chosen": -1.8793548345565796, |
|
"logits/rejected": -1.8793548345565796, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.2983495008466285e-06, |
|
"logits/chosen": -1.9503295421600342, |
|
"logits/rejected": -1.9503295421600342, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3433, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"eval_logits/chosen": -1.9909396171569824, |
|
"eval_logits/rejected": -1.8287452459335327, |
|
"eval_logps/chosen": -301.09478759765625, |
|
"eval_logps/rejected": -292.806884765625, |
|
"eval_loss": 0.05226544290781021, |
|
"eval_rewards/accuracies": 0.4779999852180481, |
|
"eval_rewards/chosen": -0.17198601365089417, |
|
"eval_rewards/margins": 0.017349353060126305, |
|
"eval_rewards/rejected": -0.18933536112308502, |
|
"eval_runtime": 702.207, |
|
"eval_samples_per_second": 2.848, |
|
"eval_steps_per_second": 1.424, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 4.249158351283414e-06, |
|
"logits/chosen": -1.9229347705841064, |
|
"logits/rejected": -1.9229347705841064, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3186, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.198603260653792e-06, |
|
"logits/chosen": -1.6426639556884766, |
|
"logits/rejected": -1.6426639556884766, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.284, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 4.146723650296701e-06, |
|
"logits/chosen": -1.8314998149871826, |
|
"logits/rejected": -1.8095579147338867, |
|
"logps/chosen": -7.950663089752197, |
|
"logps/rejected": -6.032578468322754, |
|
"loss": 0.2585, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.015273292548954487, |
|
"rewards/margins": 0.006847293581813574, |
|
"rewards/rejected": -0.022120585665106773, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 4.093559974371725e-06, |
|
"logits/chosen": -2.019379138946533, |
|
"logits/rejected": -2.0201730728149414, |
|
"logps/chosen": -3.879962205886841, |
|
"logps/rejected": -14.236474990844727, |
|
"loss": 0.3847, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": 0.008593808859586716, |
|
"rewards/margins": 0.04753534495830536, |
|
"rewards/rejected": -0.03894153609871864, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.039153688314146e-06, |
|
"logits/chosen": -1.8816545009613037, |
|
"logits/rejected": -1.8816545009613037, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 3.983547216509254e-06, |
|
"logits/chosen": -2.0326955318450928, |
|
"logits/rejected": -2.0326955318450928, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3211, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 3.92678391921108e-06, |
|
"logits/chosen": -1.758758783340454, |
|
"logits/rejected": -1.758758783340454, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3087, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 3.868908058731376e-06, |
|
"logits/chosen": -1.7623834609985352, |
|
"logits/rejected": -1.7623834609985352, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3606, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 3.8099647649251984e-06, |
|
"logits/chosen": -1.9364054203033447, |
|
"logits/rejected": -1.9364054203033447, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3062, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 3.7500000000000005e-06, |
|
"logits/chosen": -1.7947114706039429, |
|
"logits/rejected": -1.7947114706039429, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3606, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_logits/chosen": -2.01118803024292, |
|
"eval_logits/rejected": -1.84914231300354, |
|
"eval_logps/chosen": -296.96820068359375, |
|
"eval_logps/rejected": -284.247802734375, |
|
"eval_loss": 0.06230342388153076, |
|
"eval_rewards/accuracies": 0.4050000011920929, |
|
"eval_rewards/chosen": -0.1513528972864151, |
|
"eval_rewards/margins": -0.004812860395759344, |
|
"eval_rewards/rejected": -0.14654004573822021, |
|
"eval_runtime": 706.3052, |
|
"eval_samples_per_second": 2.832, |
|
"eval_steps_per_second": 1.416, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 3.689060522675689e-06, |
|
"logits/chosen": -1.8099037408828735, |
|
"logits/rejected": -1.809592843055725, |
|
"logps/chosen": -1.9884506464004517, |
|
"logps/rejected": -3.5382981300354004, |
|
"loss": 0.2955, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.0028460482135415077, |
|
"rewards/margins": 0.006963008549064398, |
|
"rewards/rejected": -0.009809056296944618, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 3.627193851723577e-06, |
|
"logits/chosen": -1.992038369178772, |
|
"logits/rejected": -1.992038369178772, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 3.564448228912682e-06, |
|
"logits/chosen": -2.0280678272247314, |
|
"logits/rejected": -2.0280678272247314, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 3.5008725813922383e-06, |
|
"logits/chosen": -1.9298874139785767, |
|
"logits/rejected": -1.9298874139785767, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3926, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 3.436516483539781e-06, |
|
"logits/chosen": -1.9765506982803345, |
|
"logits/rejected": -1.9765506982803345, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3235, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 3.3714301183045382e-06, |
|
"logits/chosen": -1.7808916568756104, |
|
"logits/rejected": -1.7808916568756104, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3334, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 3.3056642380762783e-06, |
|
"logits/chosen": -1.847675085067749, |
|
"logits/rejected": -1.847675085067749, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3581, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 3.2392701251101172e-06, |
|
"logits/chosen": -1.9916722774505615, |
|
"logits/rejected": -1.9916722774505615, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3334, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 3.1722995515381644e-06, |
|
"logits/chosen": -2.0442426204681396, |
|
"logits/rejected": -2.0442426204681396, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3754, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 3.1048047389991693e-06, |
|
"logits/chosen": -2.0616250038146973, |
|
"logits/rejected": -2.0616250038146973, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3038, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"eval_logits/chosen": -2.0113329887390137, |
|
"eval_logits/rejected": -1.8490314483642578, |
|
"eval_logps/chosen": -298.9019775390625, |
|
"eval_logps/rejected": -286.5705261230469, |
|
"eval_loss": 0.06159820035099983, |
|
"eval_rewards/accuracies": 0.4090000092983246, |
|
"eval_rewards/chosen": -0.16102181375026703, |
|
"eval_rewards/margins": -0.002868250710889697, |
|
"eval_rewards/rejected": -0.15815356373786926, |
|
"eval_runtime": 704.1706, |
|
"eval_samples_per_second": 2.84, |
|
"eval_steps_per_second": 1.42, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 3.0368383179176584e-06, |
|
"logits/chosen": -2.056675434112549, |
|
"logits/rejected": -2.056675434112549, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3211, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 2.9684532864643123e-06, |
|
"logits/chosen": -1.8173696994781494, |
|
"logits/rejected": -1.8173696994781494, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3062, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 2.8997029692295875e-06, |
|
"logits/chosen": -1.9147436618804932, |
|
"logits/rejected": -1.9147436618804932, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3457, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 2.8306409756428067e-06, |
|
"logits/chosen": -1.8020957708358765, |
|
"logits/rejected": -1.8020957708358765, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3359, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 2.761321158169134e-06, |
|
"logits/chosen": -1.6737282276153564, |
|
"logits/rejected": -1.6737282276153564, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.4099, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.6917975703170466e-06, |
|
"logits/chosen": -1.8559491634368896, |
|
"logits/rejected": -1.8559491634368896, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3902, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 2.6221244244890336e-06, |
|
"logits/chosen": -1.8110735416412354, |
|
"logits/rejected": -1.789332628250122, |
|
"logps/chosen": -5.0051374435424805, |
|
"logps/rejected": -3.856720447540283, |
|
"loss": 0.294, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.0005456657381728292, |
|
"rewards/margins": 0.010695630684494972, |
|
"rewards/rejected": -0.01124129630625248, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 2.5523560497083927e-06, |
|
"logits/chosen": -1.8780758380889893, |
|
"logits/rejected": -1.8780758380889893, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3112, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 2.482546849255096e-06, |
|
"logits/chosen": -1.8856204748153687, |
|
"logits/rejected": -1.8856204748153687, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3852, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 2.4127512582437486e-06, |
|
"logits/chosen": -1.9257001876831055, |
|
"logits/rejected": -1.9257001876831055, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3161, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"eval_logits/chosen": -2.0096490383148193, |
|
"eval_logits/rejected": -1.8473448753356934, |
|
"eval_logps/chosen": -299.4931640625, |
|
"eval_logps/rejected": -287.3162536621094, |
|
"eval_loss": 0.06128256395459175, |
|
"eval_rewards/accuracies": 0.4124999940395355, |
|
"eval_rewards/chosen": -0.1639777570962906, |
|
"eval_rewards/margins": -0.0020956520456820726, |
|
"eval_rewards/rejected": -0.16188210248947144, |
|
"eval_runtime": 702.8312, |
|
"eval_samples_per_second": 2.846, |
|
"eval_steps_per_second": 1.423, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 2.3430237011767166e-06, |
|
"logits/chosen": -2.005964756011963, |
|
"logits/rejected": -2.005964756011963, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3704, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 2.2734185495055503e-06, |
|
"logits/chosen": -1.7200000286102295, |
|
"logits/rejected": -1.6872419118881226, |
|
"logps/chosen": -21.240198135375977, |
|
"logps/rejected": -10.208666801452637, |
|
"loss": 0.418, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": -0.057378046214580536, |
|
"rewards/margins": -0.01931058242917061, |
|
"rewards/rejected": -0.038067467510700226, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 2.2039900792337477e-06, |
|
"logits/chosen": -1.8157488107681274, |
|
"logits/rejected": -1.8157488107681274, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3013, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 2.134792428593971e-06, |
|
"logits/chosen": -1.9513260126113892, |
|
"logits/rejected": -1.9513260126113892, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3531, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.0658795558326745e-06, |
|
"logits/chosen": -2.0181970596313477, |
|
"logits/rejected": -2.0197319984436035, |
|
"logps/chosen": -5.593586444854736, |
|
"logps/rejected": -8.769502639770508, |
|
"loss": 0.2891, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": 2.568817035353277e-05, |
|
"rewards/margins": 0.011632362380623817, |
|
"rewards/rejected": -0.011606673710048199, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 1.997305197135089e-06, |
|
"logits/chosen": -1.9442050457000732, |
|
"logits/rejected": -1.9442050457000732, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3087, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 1.9291228247233607e-06, |
|
"logits/chosen": -1.9681403636932373, |
|
"logits/rejected": -1.9681403636932373, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2939, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 1.8613856051605242e-06, |
|
"logits/chosen": -1.9257924556732178, |
|
"logits/rejected": -1.9257924556732178, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.4025, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.7941463578928088e-06, |
|
"logits/chosen": -1.9661931991577148, |
|
"logits/rejected": -1.9661931991577148, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3038, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 1.7274575140626318e-06, |
|
"logits/chosen": -2.049327850341797, |
|
"logits/rejected": -2.049327850341797, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3852, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"eval_logits/chosen": -2.015657663345337, |
|
"eval_logits/rejected": -1.853660225868225, |
|
"eval_logps/chosen": -293.5318603515625, |
|
"eval_logps/rejected": -282.0105895996094, |
|
"eval_loss": 0.057407207787036896, |
|
"eval_rewards/accuracies": 0.4259999990463257, |
|
"eval_rewards/chosen": -0.1341715157032013, |
|
"eval_rewards/margins": 0.0011823932873085141, |
|
"eval_rewards/rejected": -0.1353539079427719, |
|
"eval_runtime": 703.0551, |
|
"eval_samples_per_second": 2.845, |
|
"eval_steps_per_second": 1.422, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.661371075624363e-06, |
|
"logits/chosen": -1.9243803024291992, |
|
"logits/rejected": -1.9243803024291992, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2964, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 1.5959385747947697e-06, |
|
"logits/chosen": -2.015688419342041, |
|
"logits/rejected": -2.015688419342041, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3433, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.5312110338697427e-06, |
|
"logits/chosen": -1.9285329580307007, |
|
"logits/rejected": -1.9285329580307007, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3136, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 1.467238925438646e-06, |
|
"logits/chosen": -1.7059694528579712, |
|
"logits/rejected": -1.6732099056243896, |
|
"logps/chosen": -13.629135131835938, |
|
"logps/rejected": -13.050885200500488, |
|
"loss": 0.3573, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.01932273805141449, |
|
"rewards/margins": 0.03295581787824631, |
|
"rewards/rejected": -0.0522785559296608, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 1.4040721330273063e-06, |
|
"logits/chosen": -1.8047428131103516, |
|
"logits/rejected": -1.8059968948364258, |
|
"logps/chosen": -7.918952941894531, |
|
"logps/rejected": -12.563011169433594, |
|
"loss": 0.3272, |
|
"rewards/accuracies": 0.05000000074505806, |
|
"rewards/chosen": -0.00450493860989809, |
|
"rewards/margins": 0.018186846747994423, |
|
"rewards/rejected": -0.02269178442656994, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 1.3417599122003464e-06, |
|
"logits/chosen": -2.0053718090057373, |
|
"logits/rejected": -2.0053718090057373, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3828, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 1.280350852153168e-06, |
|
"logits/chosen": -2.047586441040039, |
|
"logits/rejected": -2.047586441040039, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3211, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 1.2198928378235717e-06, |
|
"logits/chosen": -1.888236403465271, |
|
"logits/rejected": -1.888236403465271, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 1.160433012552508e-06, |
|
"logits/chosen": -1.9232513904571533, |
|
"logits/rejected": -1.9232513904571533, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2791, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.1020177413231334e-06, |
|
"logits/chosen": -1.919179916381836, |
|
"logits/rejected": -1.919179916381836, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3359, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"eval_logits/chosen": -2.0115582942962646, |
|
"eval_logits/rejected": -1.8507084846496582, |
|
"eval_logps/chosen": -289.3095397949219, |
|
"eval_logps/rejected": -276.667236328125, |
|
"eval_loss": 0.059549059718847275, |
|
"eval_rewards/accuracies": 0.40049999952316284, |
|
"eval_rewards/chosen": -0.11305965483188629, |
|
"eval_rewards/margins": -0.004422400612384081, |
|
"eval_rewards/rejected": -0.1086372584104538, |
|
"eval_runtime": 701.696, |
|
"eval_samples_per_second": 2.85, |
|
"eval_steps_per_second": 1.425, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 1.0446925746067768e-06, |
|
"logits/chosen": -1.8642170429229736, |
|
"logits/rejected": -1.8642170429229736, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 9.88502212844063e-07, |
|
"logits/chosen": -1.916985273361206, |
|
"logits/rejected": -1.916985273361206, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3433, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 9.334904715888496e-07, |
|
"logits/chosen": -1.7782055139541626, |
|
"logits/rejected": -1.7782055139541626, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3606, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 8.797002473421729e-07, |
|
"logits/chosen": -1.8740730285644531, |
|
"logits/rejected": -1.8740730285644531, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2939, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 8.271734841028553e-07, |
|
"logits/chosen": -1.9757732152938843, |
|
"logits/rejected": -1.9757732152938843, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3334, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 7.759511406608255e-07, |
|
"logits/chosen": -2.042243480682373, |
|
"logits/rejected": -2.042243480682373, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3556, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 7.260731586586983e-07, |
|
"logits/chosen": -2.03684139251709, |
|
"logits/rejected": -2.03684139251709, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 6.775784314464717e-07, |
|
"logits/chosen": -1.9843076467514038, |
|
"logits/rejected": -1.9843076467514038, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.4149, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 6.305047737536707e-07, |
|
"logits/chosen": -1.831923484802246, |
|
"logits/rejected": -1.831923484802246, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3927, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 5.848888922025553e-07, |
|
"logits/chosen": -1.9579887390136719, |
|
"logits/rejected": -1.9367748498916626, |
|
"logps/chosen": -4.523745536804199, |
|
"logps/rejected": -3.9445228576660156, |
|
"loss": 0.3701, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": 0.00186129380017519, |
|
"rewards/margins": 0.013541601598262787, |
|
"rewards/rejected": -0.011680307798087597, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_logits/chosen": -2.0123445987701416, |
|
"eval_logits/rejected": -1.8512893915176392, |
|
"eval_logps/chosen": -289.3762512207031, |
|
"eval_logps/rejected": -276.7308654785156, |
|
"eval_loss": 0.05955318734049797, |
|
"eval_rewards/accuracies": 0.4000000059604645, |
|
"eval_rewards/chosen": -0.1133933812379837, |
|
"eval_rewards/margins": -0.004438146483153105, |
|
"eval_rewards/rejected": -0.10895523428916931, |
|
"eval_runtime": 701.4448, |
|
"eval_samples_per_second": 2.851, |
|
"eval_steps_per_second": 1.426, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 5.407663566854008e-07, |
|
"logits/chosen": -1.8984695672988892, |
|
"logits/rejected": -1.876189947128296, |
|
"logps/chosen": -15.62798023223877, |
|
"logps/rejected": -10.268167495727539, |
|
"loss": 0.3084, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.029316961765289307, |
|
"rewards/margins": 0.009048005566000938, |
|
"rewards/rejected": -0.038364969193935394, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 4.981715726281666e-07, |
|
"logits/chosen": -1.95705246925354, |
|
"logits/rejected": -1.95705246925354, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2618, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 4.5713775416217884e-07, |
|
"logits/chosen": -1.9425817728042603, |
|
"logits/rejected": -1.9425817728042603, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3655, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 4.1769689822475147e-07, |
|
"logits/chosen": -2.1460211277008057, |
|
"logits/rejected": -2.1460211277008057, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3359, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 3.798797596089351e-07, |
|
"logits/chosen": -2.158970594406128, |
|
"logits/rejected": -2.158970594406128, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3507, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 3.4371582698185636e-07, |
|
"logits/chosen": -1.8473154306411743, |
|
"logits/rejected": -1.8473154306411743, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3235, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 3.092332998903416e-07, |
|
"logits/chosen": -2.088589668273926, |
|
"logits/rejected": -2.088589668273926, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3606, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 2.764590667717562e-07, |
|
"logits/chosen": -2.165351390838623, |
|
"logits/rejected": -2.165351390838623, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3902, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 2.454186839872158e-07, |
|
"logits/chosen": -1.7897307872772217, |
|
"logits/rejected": -1.7897307872772217, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3803, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 2.1613635589349756e-07, |
|
"logits/chosen": -2.0501227378845215, |
|
"logits/rejected": -2.0501227378845215, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.4025, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"eval_logits/chosen": -2.012660264968872, |
|
"eval_logits/rejected": -1.8515597581863403, |
|
"eval_logps/chosen": -289.3721923828125, |
|
"eval_logps/rejected": -276.7074279785156, |
|
"eval_loss": 0.05955703556537628, |
|
"eval_rewards/accuracies": 0.40299999713897705, |
|
"eval_rewards/chosen": -0.11337299644947052, |
|
"eval_rewards/margins": -0.0045348722487688065, |
|
"eval_rewards/rejected": -0.10883813351392746, |
|
"eval_runtime": 702.6742, |
|
"eval_samples_per_second": 2.846, |
|
"eval_steps_per_second": 1.423, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 1.8863491596921745e-07, |
|
"logits/chosen": -1.87649667263031, |
|
"logits/rejected": -1.87649667263031, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3408, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 1.629358090099639e-07, |
|
"logits/chosen": -1.876665472984314, |
|
"logits/rejected": -1.876665472984314, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.2692, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 1.3905907440629752e-07, |
|
"logits/chosen": -2.0340044498443604, |
|
"logits/rejected": -2.0340044498443604, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3112, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 1.1702333051763271e-07, |
|
"logits/chosen": -2.0787806510925293, |
|
"logits/rejected": -2.0787806510925293, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3581, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 9.684576015420277e-08, |
|
"logits/chosen": -1.9570324420928955, |
|
"logits/rejected": -1.9570324420928955, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3852, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 7.854209717842231e-08, |
|
"logits/chosen": -1.659864068031311, |
|
"logits/rejected": -1.659592866897583, |
|
"logps/chosen": -1.9274085760116577, |
|
"logps/rejected": -4.110763072967529, |
|
"loss": 0.3386, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.002540838671848178, |
|
"rewards/margins": 0.010130545124411583, |
|
"rewards/rejected": -0.012671384029090405, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 6.212661423609184e-08, |
|
"logits/chosen": -1.8022902011871338, |
|
"logits/rejected": -1.8022902011871338, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3655, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 4.761211162702117e-08, |
|
"logits/chosen": -2.0399909019470215, |
|
"logits/rejected": -2.018575668334961, |
|
"logps/chosen": -4.519004821777344, |
|
"logps/rejected": -3.9544930458068848, |
|
"loss": 0.3454, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": 0.0018849981715902686, |
|
"rewards/margins": 0.013615156523883343, |
|
"rewards/rejected": -0.01173015870153904, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 3.5009907323737826e-08, |
|
"logits/chosen": -1.8716793060302734, |
|
"logits/rejected": -1.8716793060302734, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3334, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 2.4329828146074096e-08, |
|
"logits/chosen": -1.8407377004623413, |
|
"logits/rejected": -1.8407377004623413, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3754, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"eval_logits/chosen": -2.012519359588623, |
|
"eval_logits/rejected": -1.8515361547470093, |
|
"eval_logps/chosen": -289.41143798828125, |
|
"eval_logps/rejected": -276.7694091796875, |
|
"eval_loss": 0.05953500419855118, |
|
"eval_rewards/accuracies": 0.4009999930858612, |
|
"eval_rewards/chosen": -0.11356925219297409, |
|
"eval_rewards/margins": -0.004421224817633629, |
|
"eval_rewards/rejected": -0.1091480404138565, |
|
"eval_runtime": 705.2081, |
|
"eval_samples_per_second": 2.836, |
|
"eval_steps_per_second": 1.418, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 1.5580202098509078e-08, |
|
"logits/chosen": -2.0209155082702637, |
|
"logits/rejected": -2.022444486618042, |
|
"logps/chosen": -5.759219169616699, |
|
"logps/rejected": -8.652463912963867, |
|
"loss": 0.3135, |
|
"rewards/accuracies": 0.02500000037252903, |
|
"rewards/chosen": -0.0008024768903851509, |
|
"rewards/margins": 0.010219003073871136, |
|
"rewards/rejected": -0.011021479964256287, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 8.767851876239075e-09, |
|
"logits/chosen": -1.9093472957611084, |
|
"logits/rejected": -1.9093472957611084, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3112, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 3.8980895450474455e-09, |
|
"logits/chosen": -1.937591314315796, |
|
"logits/rejected": -1.937591314315796, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.284, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 4.96, |
|
"learning_rate": 9.747123991141193e-10, |
|
"logits/chosen": -2.029675006866455, |
|
"logits/rejected": -2.029675006866455, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3606, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 0.0, |
|
"logits/chosen": -1.9245744943618774, |
|
"logits/rejected": -1.9245744943618774, |
|
"logps/chosen": 0.0, |
|
"logps/rejected": 0.0, |
|
"loss": 0.3383, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"step": 1250, |
|
"total_flos": 0.0, |
|
"train_loss": 0.3429992333650589, |
|
"train_runtime": 12841.5285, |
|
"train_samples_per_second": 0.389, |
|
"train_steps_per_second": 0.097 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1250, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 5, |
|
"save_steps": 100, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |