{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.06691201070592172,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00033456005352960856,
      "eval_loss": 0.528853178024292,
      "eval_runtime": 39.5233,
      "eval_samples_per_second": 127.393,
      "eval_steps_per_second": 15.94,
      "step": 1
    },
    {
      "epoch": 0.0016728002676480427,
      "grad_norm": 0.0893344134092331,
      "learning_rate": 5e-05,
      "loss": 0.5191,
      "step": 5
    },
    {
      "epoch": 0.0033456005352960855,
      "grad_norm": 0.12134993076324463,
      "learning_rate": 0.0001,
      "loss": 0.584,
      "step": 10
    },
    {
      "epoch": 0.005018400802944129,
      "grad_norm": 0.08858965337276459,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.5242,
      "step": 15
    },
    {
      "epoch": 0.006691201070592171,
      "grad_norm": 0.09778374433517456,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.5202,
      "step": 20
    },
    {
      "epoch": 0.008364001338240215,
      "grad_norm": 0.10649651288986206,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.4911,
      "step": 25
    },
    {
      "epoch": 0.010036801605888258,
      "grad_norm": 0.06970233470201492,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.3985,
      "step": 30
    },
    {
      "epoch": 0.0117096018735363,
      "grad_norm": 0.0955878272652626,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.43,
      "step": 35
    },
    {
      "epoch": 0.013382402141184342,
      "grad_norm": 0.060985565185546875,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.4198,
      "step": 40
    },
    {
      "epoch": 0.015055202408832385,
      "grad_norm": 0.0769026130437851,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.4067,
      "step": 45
    },
    {
      "epoch": 0.01672800267648043,
      "grad_norm": 0.1008349359035492,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.3662,
      "step": 50
    },
    {
      "epoch": 0.01672800267648043,
      "eval_loss": 0.34106191992759705,
      "eval_runtime": 39.432,
      "eval_samples_per_second": 127.688,
      "eval_steps_per_second": 15.977,
      "step": 50
    },
    {
      "epoch": 0.01840080294412847,
      "grad_norm": 0.07255814969539642,
      "learning_rate": 8.678619553365659e-05,
      "loss": 0.3496,
      "step": 55
    },
    {
      "epoch": 0.020073603211776515,
      "grad_norm": 0.10015455633401871,
      "learning_rate": 8.386407858128706e-05,
      "loss": 0.328,
      "step": 60
    },
    {
      "epoch": 0.021746403479424557,
      "grad_norm": 0.07114718109369278,
      "learning_rate": 8.07106356344834e-05,
      "loss": 0.3169,
      "step": 65
    },
    {
      "epoch": 0.0234192037470726,
      "grad_norm": 0.07913073152303696,
      "learning_rate": 7.734740790612136e-05,
      "loss": 0.3134,
      "step": 70
    },
    {
      "epoch": 0.025092004014720642,
      "grad_norm": 0.1750458925962448,
      "learning_rate": 7.379736965185368e-05,
      "loss": 0.2852,
      "step": 75
    },
    {
      "epoch": 0.026764804282368684,
      "grad_norm": 0.061932601034641266,
      "learning_rate": 7.008477123264848e-05,
      "loss": 0.2737,
      "step": 80
    },
    {
      "epoch": 0.02843760455001673,
      "grad_norm": 0.06881576776504517,
      "learning_rate": 6.623497346023418e-05,
      "loss": 0.2502,
      "step": 85
    },
    {
      "epoch": 0.03011040481766477,
      "grad_norm": 0.062322817742824554,
      "learning_rate": 6.227427435703997e-05,
      "loss": 0.2659,
      "step": 90
    },
    {
      "epoch": 0.031783205085312814,
      "grad_norm": 0.06230627000331879,
      "learning_rate": 5.8229729514036705e-05,
      "loss": 0.2645,
      "step": 95
    },
    {
      "epoch": 0.03345600535296086,
      "grad_norm": 0.11283061653375626,
      "learning_rate": 5.4128967273616625e-05,
      "loss": 0.2267,
      "step": 100
    },
    {
      "epoch": 0.03345600535296086,
      "eval_loss": 0.23386509716510773,
      "eval_runtime": 39.348,
      "eval_samples_per_second": 127.961,
      "eval_steps_per_second": 16.011,
      "step": 100
    },
    {
      "epoch": 0.0351288056206089,
      "grad_norm": 0.0672779530286789,
      "learning_rate": 5e-05,
      "loss": 0.2497,
      "step": 105
    },
    {
      "epoch": 0.03680160588825694,
      "grad_norm": 0.06097684055566788,
      "learning_rate": 4.5871032726383386e-05,
      "loss": 0.2516,
      "step": 110
    },
    {
      "epoch": 0.038474406155904986,
      "grad_norm": 0.06827884912490845,
      "learning_rate": 4.17702704859633e-05,
      "loss": 0.2279,
      "step": 115
    },
    {
      "epoch": 0.04014720642355303,
      "grad_norm": 0.0606272853910923,
      "learning_rate": 3.772572564296005e-05,
      "loss": 0.2541,
      "step": 120
    },
    {
      "epoch": 0.04182000669120107,
      "grad_norm": 0.14288191497325897,
      "learning_rate": 3.3765026539765834e-05,
      "loss": 0.1806,
      "step": 125
    },
    {
      "epoch": 0.04349280695884911,
      "grad_norm": 0.052643969655036926,
      "learning_rate": 2.991522876735154e-05,
      "loss": 0.246,
      "step": 130
    },
    {
      "epoch": 0.04516560722649716,
      "grad_norm": 0.06944818049669266,
      "learning_rate": 2.6202630348146324e-05,
      "loss": 0.2419,
      "step": 135
    },
    {
      "epoch": 0.0468384074941452,
      "grad_norm": 0.06182454898953438,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 0.2159,
      "step": 140
    },
    {
      "epoch": 0.04851120776179324,
      "grad_norm": 0.06247089058160782,
      "learning_rate": 1.928936436551661e-05,
      "loss": 0.2275,
      "step": 145
    },
    {
      "epoch": 0.050184008029441285,
      "grad_norm": 0.1229231059551239,
      "learning_rate": 1.6135921418712956e-05,
      "loss": 0.1912,
      "step": 150
    },
    {
      "epoch": 0.050184008029441285,
      "eval_loss": 0.2059282809495926,
      "eval_runtime": 39.4366,
      "eval_samples_per_second": 127.673,
      "eval_steps_per_second": 15.975,
      "step": 150
    },
    {
      "epoch": 0.05185680829708933,
      "grad_norm": 0.06170298531651497,
      "learning_rate": 1.3213804466343421e-05,
      "loss": 0.2107,
      "step": 155
    },
    {
      "epoch": 0.05352960856473737,
      "grad_norm": 0.06112481653690338,
      "learning_rate": 1.0542974530180327e-05,
      "loss": 0.2021,
      "step": 160
    },
    {
      "epoch": 0.05520240883238541,
      "grad_norm": 0.060423221439123154,
      "learning_rate": 8.141676086873572e-06,
      "loss": 0.2191,
      "step": 165
    },
    {
      "epoch": 0.05687520910003346,
      "grad_norm": 0.05647290125489235,
      "learning_rate": 6.026312439675552e-06,
      "loss": 0.2134,
      "step": 170
    },
    {
      "epoch": 0.0585480093676815,
      "grad_norm": 0.10841598361730576,
      "learning_rate": 4.2113336672471245e-06,
      "loss": 0.212,
      "step": 175
    },
    {
      "epoch": 0.06022080963532954,
      "grad_norm": 0.05532608553767204,
      "learning_rate": 2.7091379149682685e-06,
      "loss": 0.2154,
      "step": 180
    },
    {
      "epoch": 0.061893609902977584,
      "grad_norm": 0.06235940009355545,
      "learning_rate": 1.5299867030334814e-06,
      "loss": 0.1993,
      "step": 185
    },
    {
      "epoch": 0.06356641017062563,
      "grad_norm": 0.06464989483356476,
      "learning_rate": 6.819348298638839e-07,
      "loss": 0.1926,
      "step": 190
    },
    {
      "epoch": 0.06523921043827367,
      "grad_norm": 0.06490304321050644,
      "learning_rate": 1.7077534966650766e-07,
      "loss": 0.2289,
      "step": 195
    },
    {
      "epoch": 0.06691201070592172,
      "grad_norm": 0.10941293835639954,
      "learning_rate": 0.0,
      "loss": 0.1909,
      "step": 200
    },
    {
      "epoch": 0.06691201070592172,
      "eval_loss": 0.20260320603847504,
      "eval_runtime": 39.2566,
      "eval_samples_per_second": 128.259,
      "eval_steps_per_second": 16.048,
      "step": 200
    }
  ],
  "logging_steps": 5,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.670528771293184e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}