{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4500.0,
  "eval_steps": 500,
  "global_step": 4500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 100.0,
      "grad_norm": 0.09399763494729996,
      "learning_rate": 0.00019975640502598244,
      "loss": 0.4328,
      "step": 100
    },
    {
      "epoch": 200.0,
      "grad_norm": 0.008415983989834785,
      "learning_rate": 0.00019902680687415705,
      "loss": 0.0012,
      "step": 200
    },
    {
      "epoch": 300.0,
      "grad_norm": 0.004644488915801048,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.0003,
      "step": 300
    },
    {
      "epoch": 400.0,
      "grad_norm": 0.002689788118004799,
      "learning_rate": 0.0001961261695938319,
      "loss": 0.0002,
      "step": 400
    },
    {
      "epoch": 500.0,
      "grad_norm": 0.0023731214459985495,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 600.0,
      "grad_norm": 0.0013438506284728646,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.0001,
      "step": 600
    },
    {
      "epoch": 700.0,
      "grad_norm": 0.0014795837923884392,
      "learning_rate": 0.00018829475928589271,
      "loss": 0.0001,
      "step": 700
    },
    {
      "epoch": 800.0,
      "grad_norm": 0.0012762774713337421,
      "learning_rate": 0.0001848048096156426,
      "loss": 0.0001,
      "step": 800
    },
    {
      "epoch": 900.0,
      "grad_norm": 0.0009800918633118272,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0001,
      "step": 900
    },
    {
      "epoch": 1000.0,
      "grad_norm": 0.001324972603470087,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 1100.0,
      "grad_norm": 0.0007171508623287082,
      "learning_rate": 0.0001719339800338651,
      "loss": 0.0,
      "step": 1100
    },
    {
      "epoch": 1200.0,
      "grad_norm": 0.0006033563986420631,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.0,
      "step": 1200
    },
    {
      "epoch": 1300.0,
      "grad_norm": 0.0005752713768742979,
      "learning_rate": 0.0001615661475325658,
      "loss": 0.0,
      "step": 1300
    },
    {
      "epoch": 1400.0,
      "grad_norm": 0.000505088537465781,
      "learning_rate": 0.0001559192903470747,
      "loss": 0.0,
      "step": 1400
    },
    {
      "epoch": 1500.0,
      "grad_norm": 0.0010271323844790459,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 1600.0,
      "grad_norm": 0.0004594104248099029,
      "learning_rate": 0.00014383711467890774,
      "loss": 0.0,
      "step": 1600
    },
    {
      "epoch": 1700.0,
      "grad_norm": 0.00037568225525319576,
      "learning_rate": 0.00013746065934159123,
      "loss": 0.0,
      "step": 1700
    },
    {
      "epoch": 1800.0,
      "grad_norm": 0.0006100877653807402,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0,
      "step": 1800
    },
    {
      "epoch": 1900.0,
      "grad_norm": 0.0006700914236716926,
      "learning_rate": 0.00012419218955996676,
      "loss": 0.0,
      "step": 1900
    },
    {
      "epoch": 2000.0,
      "grad_norm": 0.0004988115979358554,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 2100.0,
      "grad_norm": 0.0003406632167752832,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.0,
      "step": 2100
    },
    {
      "epoch": 2200.0,
      "grad_norm": 0.00035023069358430803,
      "learning_rate": 0.00010348994967025012,
      "loss": 0.0,
      "step": 2200
    },
    {
      "epoch": 2300.0,
      "grad_norm": 0.00035629895864985883,
      "learning_rate": 9.657982181634475e-05,
      "loss": 0.0003,
      "step": 2300
    },
    {
      "epoch": 2400.0,
      "grad_norm": 0.00036875385558232665,
      "learning_rate": 8.961658694111929e-05,
      "loss": 0.0,
      "step": 2400
    },
    {
      "epoch": 2500.0,
      "grad_norm": 0.0003993694263044745,
      "learning_rate": 8.270393901057964e-05,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 2600.0,
      "grad_norm": 0.00030426381272263825,
      "learning_rate": 7.587555575058649e-05,
      "loss": 0.0,
      "step": 2600
    },
    {
      "epoch": 2700.0,
      "grad_norm": 0.0005253420677036047,
      "learning_rate": 6.916470435799587e-05,
      "loss": 0.0,
      "step": 2700
    },
    {
      "epoch": 2800.0,
      "grad_norm": 0.000337547215167433,
      "learning_rate": 6.260407942621998e-05,
      "loss": 0.0,
      "step": 2800
    },
    {
      "epoch": 2900.0,
      "grad_norm": 0.00034444258199073374,
      "learning_rate": 5.622564366045472e-05,
      "loss": 0.0,
      "step": 2900
    },
    {
      "epoch": 3000.0,
      "grad_norm": 0.00015016530232969671,
      "learning_rate": 5.006047215859289e-05,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 3100.0,
      "grad_norm": 0.00021075463155284524,
      "learning_rate": 4.413860101647055e-05,
      "loss": 0.0,
      "step": 3100
    },
    {
      "epoch": 3200.0,
      "grad_norm": 0.00025653259945102036,
      "learning_rate": 3.8488880995027786e-05,
      "loss": 0.0,
      "step": 3200
    },
    {
      "epoch": 3300.0,
      "grad_norm": 0.00018334439664613456,
      "learning_rate": 3.313883696230119e-05,
      "loss": 0.0,
      "step": 3300
    },
    {
      "epoch": 3400.0,
      "grad_norm": 0.00026421225629746914,
      "learning_rate": 2.8114533795033683e-05,
      "loss": 0.0,
      "step": 3400
    },
    {
      "epoch": 3500.0,
      "grad_norm": 0.00012184748629806563,
      "learning_rate": 2.3440449393214948e-05,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 3600.0,
      "grad_norm": 0.0003666615521069616,
      "learning_rate": 1.9139355426213347e-05,
      "loss": 0.0,
      "step": 3600
    },
    {
      "epoch": 3700.0,
      "grad_norm": 0.00032034661853685975,
      "learning_rate": 1.5232206391491699e-05,
      "loss": 0.0,
      "step": 3700
    },
    {
      "epoch": 3800.0,
      "grad_norm": 0.000288996787276119,
      "learning_rate": 1.1738037526401857e-05,
      "loss": 0.0,
      "step": 3800
    },
    {
      "epoch": 3900.0,
      "grad_norm": 0.00018436899699736387,
      "learning_rate": 8.67387207042194e-06,
      "loss": 0.0,
      "step": 3900
    },
    {
      "epoch": 4000.0,
      "grad_norm": 0.0004207100428175181,
      "learning_rate": 6.054638329644657e-06,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 4100.0,
      "grad_norm": 0.0002757696493063122,
      "learning_rate": 3.893096947570618e-06,
      "loss": 0.0,
      "step": 4100
    },
    {
      "epoch": 4200.0,
      "grad_norm": 0.000379523349693045,
      "learning_rate": 2.1997787365358958e-06,
      "loss": 0.0,
      "step": 4200
    },
    {
      "epoch": 4300.0,
      "grad_norm": 0.0002130218199454248,
      "learning_rate": 9.829333726529056e-07,
      "loss": 0.0,
      "step": 4300
    },
    {
      "epoch": 4400.0,
      "grad_norm": 0.00015009118942543864,
      "learning_rate": 2.484892042176279e-07,
      "loss": 0.0,
      "step": 4400
    },
    {
      "epoch": 4500.0,
      "grad_norm": 0.00018362345872446895,
      "learning_rate": 2.4369392592760164e-11,
      "loss": 0.0,
      "step": 4500
    }
  ],
  "logging_steps": 100,
  "max_steps": 4500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4500,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 884950147584000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}