|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9893390191897655,
  "eval_steps": 39,
  "global_step": 116,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_completion_length": 580.933360529403,
      "eval_kl": 0.0,
      "eval_loss": 1.623958087293431e-05,
      "eval_reward": 0.5275844605966878,
      "eval_reward_std": 0.35651993932434545,
      "eval_rewards/accuracy_reward": 0.5264149032080897,
      "eval_rewards/format_reward": 0.0011695573350396782,
      "eval_runtime": 4528.6778,
      "eval_samples_per_second": 1.104,
      "eval_steps_per_second": 0.01,
      "step": 0
    },
    {
      "completion_length": 604.259627532959,
      "epoch": 0.08528784648187633,
      "grad_norm": 1.9525558948516846,
      "kl": 0.00012731552124023438,
      "learning_rate": 1.25e-06,
      "loss": 0.0,
      "reward": 0.6582589574158192,
      "reward_std": 0.3278909016400576,
      "rewards/accuracy_reward": 0.6573660992085933,
      "rewards/format_reward": 0.0008928571827709675,
      "step": 5
    },
    {
      "completion_length": 616.6596267700195,
      "epoch": 0.17057569296375266,
      "grad_norm": 1.6776472330093384,
      "kl": 0.0016300678253173828,
      "learning_rate": 2.5e-06,
      "loss": 0.0001,
      "reward": 0.6910714641213417,
      "reward_std": 0.3150623720139265,
      "rewards/accuracy_reward": 0.6908482506871223,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 10
    },
    {
      "completion_length": 640.9160995483398,
      "epoch": 0.255863539445629,
      "grad_norm": 0.13862861692905426,
      "kl": 0.004425430297851562,
      "learning_rate": 2.9938448364256362e-06,
      "loss": 0.0002,
      "reward": 0.7236607506871223,
      "reward_std": 0.26740851644426583,
      "rewards/accuracy_reward": 0.7236607506871223,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 631.7879745483399,
      "epoch": 0.3411513859275053,
      "grad_norm": 0.13688388466835022,
      "kl": 0.007097053527832031,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.0003,
      "reward": 0.7348214671015739,
      "reward_std": 0.22995020002126693,
      "rewards/accuracy_reward": 0.7348214671015739,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 619.988638305664,
      "epoch": 0.42643923240938164,
      "grad_norm": 0.8423397541046143,
      "kl": 0.004146957397460937,
      "learning_rate": 2.88581929876693e-06,
      "loss": 0.0002,
      "reward": 0.7430803924798965,
      "reward_std": 0.22932490669190883,
      "rewards/accuracy_reward": 0.7430803924798965,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 606.4966804504395,
      "epoch": 0.511727078891258,
      "grad_norm": 0.11532779783010483,
      "kl": 0.00457305908203125,
      "learning_rate": 2.7836719084521715e-06,
      "loss": 0.0002,
      "reward": 0.752455385029316,
      "reward_std": 0.206120328605175,
      "rewards/accuracy_reward": 0.752455385029316,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 589.5859703063965,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.10156127065420151,
      "kl": 0.004618453979492188,
      "learning_rate": 2.652296367060421e-06,
      "loss": 0.0002,
      "reward": 0.783928607404232,
      "reward_std": 0.19030314590781927,
      "rewards/accuracy_reward": 0.783928607404232,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "epoch": 0.6652452025586354,
      "eval_completion_length": 582.3961823836617,
      "eval_kl": 0.005152472896852355,
      "eval_loss": 0.00021490754443220794,
      "eval_reward": 0.6949793272260306,
      "eval_reward_std": 0.20541082589306695,
      "eval_rewards/accuracy_reward": 0.6949793272260306,
      "eval_rewards/format_reward": 0.0,
      "eval_runtime": 4509.8983,
      "eval_samples_per_second": 1.109,
      "eval_steps_per_second": 0.01,
      "step": 39
    },
    {
      "completion_length": 591.9185485839844,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.12734617292881012,
      "kl": 0.005767822265625,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.0002,
      "reward": 0.7678571790456772,
      "reward_std": 0.2127251597121358,
      "rewards/accuracy_reward": 0.7678571790456772,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 592.6504753112793,
      "epoch": 0.767590618336887,
      "grad_norm": 0.08878223598003387,
      "kl": 0.00388336181640625,
      "learning_rate": 2.314423473302218e-06,
      "loss": 0.0002,
      "reward": 0.7566964656114579,
      "reward_std": 0.1911870390176773,
      "rewards/accuracy_reward": 0.7566964656114579,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 579.0308303833008,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.10042494535446167,
      "kl": 0.006903076171875,
      "learning_rate": 2.1156192081791355e-06,
      "loss": 0.0003,
      "reward": 0.7850446835160255,
      "reward_std": 0.17857401072978973,
      "rewards/accuracy_reward": 0.7850446835160255,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 574.0143119812012,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.10724026709794998,
      "kl": 0.00340118408203125,
      "learning_rate": 1.9027978012115653e-06,
      "loss": 0.0001,
      "reward": 0.7665178909897804,
      "reward_std": 0.16513762380927802,
      "rewards/accuracy_reward": 0.7665178909897804,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 575.9379244717685,
      "epoch": 1.0341151385927505,
      "grad_norm": 0.07216066867113113,
      "kl": 0.0033409812233664775,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0001,
      "reward": 0.7869318506934426,
      "reward_std": 0.18544116904112426,
      "rewards/accuracy_reward": 0.7869318506934426,
      "rewards/format_reward": 0.0,
      "step": 60
    },
    {
      "completion_length": 565.3899772644043,
      "epoch": 1.1194029850746268,
      "grad_norm": 0.09482983499765396,
      "kl": 0.0034759521484375,
      "learning_rate": 1.454695458298667e-06,
      "loss": 0.0001,
      "reward": 0.7991071745753289,
      "reward_std": 0.17083449736237527,
      "rewards/accuracy_reward": 0.7991071745753289,
      "rewards/format_reward": 0.0,
      "step": 65
    },
    {
      "completion_length": 571.7268089294433,
      "epoch": 1.2046908315565032,
      "grad_norm": 0.08009294420480728,
      "kl": 0.00347137451171875,
      "learning_rate": 1.2296174432791415e-06,
      "loss": 0.0001,
      "reward": 0.7839286103844643,
      "reward_std": 0.17667411090806126,
      "rewards/accuracy_reward": 0.7837053954601287,
      "rewards/format_reward": 0.00022321429569274187,
      "step": 70
    },
    {
      "completion_length": 565.4739059448242,
      "epoch": 1.2899786780383795,
      "grad_norm": 0.06680043786764145,
      "kl": 0.00329742431640625,
      "learning_rate": 1.0106958161686963e-06,
      "loss": 0.0001,
      "reward": 0.7879464671015739,
      "reward_std": 0.1701756412163377,
      "rewards/accuracy_reward": 0.7879464671015739,
      "rewards/format_reward": 0.0,
      "step": 75
    },
    {
      "epoch": 1.3411513859275053,
      "eval_completion_length": 560.9655020060101,
      "eval_kl": 0.004444744891158197,
      "eval_loss": 0.00021425970771815628,
      "eval_reward": 0.6994489503012921,
      "eval_reward_std": 0.208146112796903,
      "eval_rewards/accuracy_reward": 0.6994489503012921,
      "eval_rewards/format_reward": 0.0,
      "eval_runtime": 4435.898,
      "eval_samples_per_second": 1.127,
      "eval_steps_per_second": 0.01,
      "step": 78
    },
    {
      "completion_length": 551.9654197692871,
      "epoch": 1.375266524520256,
      "grad_norm": 0.1283424198627472,
      "kl": 0.0043430328369140625,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0002,
      "reward": 0.7924107536673546,
      "reward_std": 0.17128037009388208,
      "rewards/accuracy_reward": 0.7924107536673546,
      "rewards/format_reward": 0.0,
      "step": 80
    },
    {
      "completion_length": 541.5716796875,
      "epoch": 1.4605543710021323,
      "grad_norm": 0.3723917603492737,
      "kl": 0.003989791870117188,
      "learning_rate": 6.11006712953975e-07,
      "loss": 0.0002,
      "reward": 0.8220982536673546,
      "reward_std": 0.15211040936410428,
      "rewards/accuracy_reward": 0.8220982536673546,
      "rewards/format_reward": 0.0,
      "step": 85
    },
    {
      "completion_length": 563.2312767028809,
      "epoch": 1.5458422174840085,
      "grad_norm": 0.09019841253757477,
      "kl": 0.0033016204833984375,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0001,
      "reward": 0.7973214641213417,
      "reward_std": 0.17157677421346307,
      "rewards/accuracy_reward": 0.7973214641213417,
      "rewards/format_reward": 0.0,
      "step": 90
    },
    {
      "completion_length": 554.155606842041,
      "epoch": 1.6311300639658848,
      "grad_norm": 0.06605476140975952,
      "kl": 0.0035243988037109374,
      "learning_rate": 2.9182330117358096e-07,
      "loss": 0.0001,
      "reward": 0.799107177555561,
      "reward_std": 0.15023288973607124,
      "rewards/accuracy_reward": 0.799107177555561,
      "rewards/format_reward": 0.0,
      "step": 95
    },
    {
      "completion_length": 562.7194396972657,
      "epoch": 1.716417910447761,
      "grad_norm": 0.09438558667898178,
      "kl": 0.003635406494140625,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.0001,
      "reward": 0.7908482491970062,
      "reward_std": 0.1763416165485978,
      "rewards/accuracy_reward": 0.7908482491970062,
      "rewards/format_reward": 0.0,
      "step": 100
    },
    {
      "completion_length": 581.4640892028808,
      "epoch": 1.8017057569296375,
      "grad_norm": 0.08443931490182877,
      "kl": 0.003319549560546875,
      "learning_rate": 8.20502774480395e-08,
      "loss": 0.0001,
      "reward": 0.7691964648663998,
      "reward_std": 0.178659126162529,
      "rewards/accuracy_reward": 0.7691964648663998,
      "rewards/format_reward": 0.0,
      "step": 105
    },
    {
      "completion_length": 578.9288177490234,
      "epoch": 1.886993603411514,
      "grad_norm": 0.09037923067808151,
      "kl": 0.0033283233642578125,
      "learning_rate": 2.4570139579284723e-08,
      "loss": 0.0001,
      "reward": 0.7825893223285675,
      "reward_std": 0.16389768896624446,
      "rewards/accuracy_reward": 0.7825893223285675,
      "rewards/format_reward": 0.0,
      "step": 110
    },
    {
      "completion_length": 573.8774856567383,
      "epoch": 1.9722814498933903,
      "grad_norm": 0.06668173521757126,
      "kl": 0.0031665802001953126,
      "learning_rate": 6.843232656998933e-10,
      "loss": 0.0001,
      "reward": 0.7763393208384514,
      "reward_std": 0.15611171433702112,
      "rewards/accuracy_reward": 0.7763393208384514,
      "rewards/format_reward": 0.0,
      "step": 115
    },
    {
      "completion_length": 572.2143058776855,
      "epoch": 1.9893390191897655,
      "kl": 0.00342559814453125,
      "reward": 0.7700893357396126,
      "reward_std": 0.18100517243146896,
      "rewards/accuracy_reward": 0.7700893357396126,
      "rewards/format_reward": 0.0,
      "step": 116,
      "total_flos": 0.0,
      "train_loss": 0.0002975721653738533,
      "train_runtime": 28855.0053,
      "train_samples_per_second": 0.52,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 5,
  "max_steps": 116,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}