{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.002597672485453034,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.195344970906068e-05,
      "grad_norm": 0.3428809344768524,
      "learning_rate": 1e-05,
      "loss": 5.037,
      "step": 1
    },
    {
      "epoch": 0.00010390689941812137,
      "grad_norm": 0.3120958209037781,
      "learning_rate": 2e-05,
      "loss": 7.7427,
      "step": 2
    },
    {
      "epoch": 0.00015586034912718204,
      "grad_norm": 0.3821350932121277,
      "learning_rate": 3e-05,
      "loss": 4.9036,
      "step": 3
    },
    {
      "epoch": 0.00020781379883624273,
      "grad_norm": 0.22991156578063965,
      "learning_rate": 4e-05,
      "loss": 5.292,
      "step": 4
    },
    {
      "epoch": 0.0002597672485453034,
      "grad_norm": 0.3285081088542938,
      "learning_rate": 5e-05,
      "loss": 5.5576,
      "step": 5
    },
    {
      "epoch": 0.0003117206982543641,
      "grad_norm": 0.45770159363746643,
      "learning_rate": 4.993910125649561e-05,
      "loss": 5.3301,
      "step": 6
    },
    {
      "epoch": 0.0003636741479634248,
      "grad_norm": 0.3909569978713989,
      "learning_rate": 4.975670171853926e-05,
      "loss": 5.0764,
      "step": 7
    },
    {
      "epoch": 0.00041562759767248546,
      "grad_norm": 0.6831026077270508,
      "learning_rate": 4.9453690018345144e-05,
      "loss": 5.1683,
      "step": 8
    },
    {
      "epoch": 0.0004675810473815461,
      "grad_norm": 0.5983854532241821,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 5.1088,
      "step": 9
    },
    {
      "epoch": 0.0005195344970906068,
      "grad_norm": 0.45557770133018494,
      "learning_rate": 4.849231551964771e-05,
      "loss": 5.6262,
      "step": 10
    },
    {
      "epoch": 0.0005714879467996675,
      "grad_norm": 0.555141270160675,
      "learning_rate": 4.783863644106502e-05,
      "loss": 4.3214,
      "step": 11
    },
    {
      "epoch": 0.0006234413965087282,
      "grad_norm": 0.5098687410354614,
      "learning_rate": 4.707368982147318e-05,
      "loss": 4.7553,
      "step": 12
    },
    {
      "epoch": 0.0006753948462177888,
      "grad_norm": 0.7282283306121826,
      "learning_rate": 4.620120240391065e-05,
      "loss": 4.6829,
      "step": 13
    },
    {
      "epoch": 0.0007273482959268496,
      "grad_norm": 0.6713859438896179,
      "learning_rate": 4.522542485937369e-05,
      "loss": 4.2758,
      "step": 14
    },
    {
      "epoch": 0.0007793017456359103,
      "grad_norm": 0.7060822248458862,
      "learning_rate": 4.415111107797445e-05,
      "loss": 4.0086,
      "step": 15
    },
    {
      "epoch": 0.0008312551953449709,
      "grad_norm": 0.34293320775032043,
      "learning_rate": 4.2983495008466276e-05,
      "loss": 11.5521,
      "step": 16
    },
    {
      "epoch": 0.0008832086450540316,
      "grad_norm": 0.682274341583252,
      "learning_rate": 4.172826515897146e-05,
      "loss": 3.6475,
      "step": 17
    },
    {
      "epoch": 0.0009351620947630922,
      "grad_norm": 0.7243533730506897,
      "learning_rate": 4.039153688314145e-05,
      "loss": 4.0459,
      "step": 18
    },
    {
      "epoch": 0.000987115544472153,
      "grad_norm": 0.6419519186019897,
      "learning_rate": 3.897982258676867e-05,
      "loss": 6.2183,
      "step": 19
    },
    {
      "epoch": 0.0010390689941812137,
      "grad_norm": 0.8105183243751526,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 4.3719,
      "step": 20
    },
    {
      "epoch": 0.0010910224438902742,
      "grad_norm": 0.5565342307090759,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 3.3225,
      "step": 21
    },
    {
      "epoch": 0.001142975893599335,
      "grad_norm": 0.9197883009910583,
      "learning_rate": 3.436516483539781e-05,
      "loss": 4.0731,
      "step": 22
    },
    {
      "epoch": 0.0011949293433083958,
      "grad_norm": 0.710243284702301,
      "learning_rate": 3.272542485937369e-05,
      "loss": 3.0375,
      "step": 23
    },
    {
      "epoch": 0.0012468827930174563,
      "grad_norm": 0.7943474650382996,
      "learning_rate": 3.104804738999169e-05,
      "loss": 3.1489,
      "step": 24
    },
    {
      "epoch": 0.001298836242726517,
      "grad_norm": 0.9444668889045715,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 3.802,
      "step": 25
    },
    {
      "epoch": 0.0013507896924355777,
      "grad_norm": 0.7393707633018494,
      "learning_rate": 2.761321158169134e-05,
      "loss": 4.3187,
      "step": 26
    },
    {
      "epoch": 0.0014027431421446384,
      "grad_norm": 0.9460581541061401,
      "learning_rate": 2.587248741756253e-05,
      "loss": 4.1431,
      "step": 27
    },
    {
      "epoch": 0.0014546965918536992,
      "grad_norm": 0.8911897540092468,
      "learning_rate": 2.4127512582437485e-05,
      "loss": 3.6565,
      "step": 28
    },
    {
      "epoch": 0.0015066500415627597,
      "grad_norm": 0.9491592049598694,
      "learning_rate": 2.238678841830867e-05,
      "loss": 3.8242,
      "step": 29
    },
    {
      "epoch": 0.0015586034912718205,
      "grad_norm": 0.9719251394271851,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 3.4834,
      "step": 30
    },
    {
      "epoch": 0.001610556940980881,
      "grad_norm": 0.9136457443237305,
      "learning_rate": 1.895195261000831e-05,
      "loss": 5.4603,
      "step": 31
    },
    {
      "epoch": 0.0016625103906899418,
      "grad_norm": 1.1147466897964478,
      "learning_rate": 1.7274575140626318e-05,
      "loss": 2.776,
      "step": 32
    },
    {
      "epoch": 0.0017144638403990024,
      "grad_norm": 1.168950080871582,
      "learning_rate": 1.56348351646022e-05,
      "loss": 6.0062,
      "step": 33
    },
    {
      "epoch": 0.0017664172901080632,
      "grad_norm": 0.9625309109687805,
      "learning_rate": 1.4040721330273062e-05,
      "loss": 3.677,
      "step": 34
    },
    {
      "epoch": 0.001818370739817124,
      "grad_norm": 1.099805474281311,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 4.4438,
      "step": 35
    },
    {
      "epoch": 0.0018703241895261845,
      "grad_norm": 1.117089867591858,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 3.9246,
      "step": 36
    },
    {
      "epoch": 0.0019222776392352453,
      "grad_norm": 1.025370717048645,
      "learning_rate": 9.608463116858542e-06,
      "loss": 4.1575,
      "step": 37
    },
    {
      "epoch": 0.001974231088944306,
      "grad_norm": 0.6934594511985779,
      "learning_rate": 8.271734841028553e-06,
      "loss": 2.9733,
      "step": 38
    },
    {
      "epoch": 0.0020261845386533666,
      "grad_norm": 0.980570375919342,
      "learning_rate": 7.016504991533726e-06,
      "loss": 6.5256,
      "step": 39
    },
    {
      "epoch": 0.0020781379883624274,
      "grad_norm": 1.1663200855255127,
      "learning_rate": 5.848888922025553e-06,
      "loss": 4.696,
      "step": 40
    },
    {
      "epoch": 0.002130091438071488,
      "grad_norm": 1.1429848670959473,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 3.8524,
      "step": 41
    },
    {
      "epoch": 0.0021820448877805485,
      "grad_norm": 0.39903584122657776,
      "learning_rate": 3.798797596089351e-06,
      "loss": 8.6591,
      "step": 42
    },
    {
      "epoch": 0.0022339983374896092,
      "grad_norm": 0.8342665433883667,
      "learning_rate": 2.9263101785268254e-06,
      "loss": 5.6047,
      "step": 43
    },
    {
      "epoch": 0.00228595178719867,
      "grad_norm": 1.1964373588562012,
      "learning_rate": 2.1613635589349756e-06,
      "loss": 4.8146,
      "step": 44
    },
    {
      "epoch": 0.0023379052369077308,
      "grad_norm": 1.1753668785095215,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 4.9578,
      "step": 45
    },
    {
      "epoch": 0.0023898586866167915,
      "grad_norm": 0.7276975512504578,
      "learning_rate": 9.684576015420278e-07,
      "loss": 3.2606,
      "step": 46
    },
    {
      "epoch": 0.002441812136325852,
      "grad_norm": 0.7749328017234802,
      "learning_rate": 5.463099816548579e-07,
      "loss": 5.6076,
      "step": 47
    },
    {
      "epoch": 0.0024937655860349127,
      "grad_norm": 0.9183135032653809,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 2.5721,
      "step": 48
    },
    {
      "epoch": 0.0025457190357439734,
      "grad_norm": 0.9333743453025818,
      "learning_rate": 6.089874350439506e-08,
      "loss": 6.1596,
      "step": 49
    },
    {
      "epoch": 0.002597672485453034,
      "grad_norm": 0.874055027961731,
      "learning_rate": 0.0,
      "loss": 2.981,
      "step": 50
    },
    {
      "epoch": 0.002597672485453034,
      "eval_loss": NaN,
      "eval_runtime": 1586.5993,
      "eval_samples_per_second": 5.108,
      "eval_steps_per_second": 2.555,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.709931739086848e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}