|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018957345971563982,
      "grad_norm": 99.03995946284127,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 163.72256469726562,
      "logits/rejected": 157.14466857910156,
      "logps/chosen": -692.1337890625,
      "logps/rejected": -783.4187622070312,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 111.11388003021844,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 171.66250610351562,
      "logits/rejected": 172.8583221435547,
      "logps/chosen": -742.216064453125,
      "logps/rejected": -781.2522583007812,
      "loss": 0.7117,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": 0.023412303999066353,
      "rewards/margins": 0.02978678233921528,
      "rewards/rejected": -0.006374475546181202,
      "step": 10
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 82.52463810141795,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 163.12501525878906,
      "logits/rejected": 165.84164428710938,
      "logps/chosen": -709.082275390625,
      "logps/rejected": -749.2286376953125,
      "loss": 0.6534,
      "rewards/accuracies": 0.606249988079071,
      "rewards/chosen": 0.553031325340271,
      "rewards/margins": 0.20293028652668,
      "rewards/rejected": 0.35010096430778503,
      "step": 20
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 69.67853625499806,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 184.1649932861328,
      "logits/rejected": 176.99354553222656,
      "logps/chosen": -710.6904907226562,
      "logps/rejected": -736.7250366210938,
      "loss": 0.5795,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 1.0020567178726196,
      "rewards/margins": 0.513039767742157,
      "rewards/rejected": 0.4890168309211731,
      "step": 30
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 72.75906612350751,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 167.30398559570312,
      "logits/rejected": 155.48980712890625,
      "logps/chosen": -697.3994140625,
      "logps/rejected": -700.686767578125,
      "loss": 0.5672,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.41778701543807983,
      "rewards/margins": 0.858893871307373,
      "rewards/rejected": -0.44110679626464844,
      "step": 40
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 91.77276628667218,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 182.02633666992188,
      "logits/rejected": 182.9945068359375,
      "logps/chosen": -771.2996826171875,
      "logps/rejected": -821.66943359375,
      "loss": 0.4967,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.10474413633346558,
      "rewards/margins": 0.8699092864990234,
      "rewards/rejected": -0.7651651501655579,
      "step": 50
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 29.047916941624063,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 174.08924865722656,
      "logits/rejected": 179.21060180664062,
      "logps/chosen": -725.1399536132812,
      "logps/rejected": -794.7996215820312,
      "loss": 0.2763,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.714134693145752,
      "rewards/margins": 2.342179298400879,
      "rewards/rejected": -1.6280447244644165,
      "step": 60
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 28.169902323181958,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 169.7666778564453,
      "logits/rejected": 167.54342651367188,
      "logps/chosen": -683.6437377929688,
      "logps/rejected": -783.1070556640625,
      "loss": 0.1734,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 0.8705979585647583,
      "rewards/margins": 2.8242533206939697,
      "rewards/rejected": -1.9536556005477905,
      "step": 70
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 24.1551722040215,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 160.0974578857422,
      "logits/rejected": 172.2356414794922,
      "logps/chosen": -684.6734619140625,
      "logps/rejected": -806.4854125976562,
      "loss": 0.1605,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": 0.5792978405952454,
      "rewards/margins": 3.1283910274505615,
      "rewards/rejected": -2.549093246459961,
      "step": 80
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 25.406031573666652,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 158.98464965820312,
      "logits/rejected": 170.3443145751953,
      "logps/chosen": -717.3196411132812,
      "logps/rejected": -813.6456298828125,
      "loss": 0.1493,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": 0.6181727647781372,
      "rewards/margins": 3.2944443225860596,
      "rewards/rejected": -2.6762712001800537,
      "step": 90
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 22.259191659621163,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 163.0000762939453,
      "logits/rejected": 165.51370239257812,
      "logps/chosen": -720.561767578125,
      "logps/rejected": -794.7210693359375,
      "loss": 0.1424,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": 0.7237287759780884,
      "rewards/margins": 2.908165454864502,
      "rewards/rejected": -2.184436559677124,
      "step": 100
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 153.1355743408203,
      "eval_logits/rejected": 152.9660186767578,
      "eval_logps/chosen": -697.602294921875,
      "eval_logps/rejected": -720.2744750976562,
      "eval_loss": 0.47219032049179077,
      "eval_rewards/accuracies": 0.7395833134651184,
      "eval_rewards/chosen": -0.06579157710075378,
      "eval_rewards/margins": 1.2015198469161987,
      "eval_rewards/rejected": -1.267311453819275,
      "eval_runtime": 116.4984,
      "eval_samples_per_second": 6.438,
      "eval_steps_per_second": 0.206,
      "step": 100
    },
    {
      "epoch": 1.971563981042654,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.3875045489806395,
      "train_runtime": 2331.3598,
      "train_samples_per_second": 5.791,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|