{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2881844380403458,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005763688760806916,
      "grad_norm": 2.4409587383270264,
      "learning_rate": 1e-05,
      "loss": 5.5006,
      "step": 1
    },
    {
      "epoch": 0.005763688760806916,
      "eval_loss": 5.976919174194336,
      "eval_runtime": 0.8589,
      "eval_samples_per_second": 678.789,
      "eval_steps_per_second": 22.122,
      "step": 1
    },
    {
      "epoch": 0.011527377521613832,
      "grad_norm": 2.5246589183807373,
      "learning_rate": 2e-05,
      "loss": 5.7539,
      "step": 2
    },
    {
      "epoch": 0.01729106628242075,
      "grad_norm": 5.136504173278809,
      "learning_rate": 3e-05,
      "loss": 5.7782,
      "step": 3
    },
    {
      "epoch": 0.023054755043227664,
      "grad_norm": 1.6736292839050293,
      "learning_rate": 4e-05,
      "loss": 5.7622,
      "step": 4
    },
    {
      "epoch": 0.02881844380403458,
      "grad_norm": 2.1508893966674805,
      "learning_rate": 5e-05,
      "loss": 5.9264,
      "step": 5
    },
    {
      "epoch": 0.0345821325648415,
      "grad_norm": 2.719834089279175,
      "learning_rate": 6e-05,
      "loss": 5.6231,
      "step": 6
    },
    {
      "epoch": 0.040345821325648415,
      "grad_norm": 1.862425684928894,
      "learning_rate": 7e-05,
      "loss": 5.7143,
      "step": 7
    },
    {
      "epoch": 0.04610951008645533,
      "grad_norm": 2.0583953857421875,
      "learning_rate": 8e-05,
      "loss": 5.7937,
      "step": 8
    },
    {
      "epoch": 0.05187319884726225,
      "grad_norm": 3.5244977474212646,
      "learning_rate": 9e-05,
      "loss": 5.5759,
      "step": 9
    },
    {
      "epoch": 0.05187319884726225,
      "eval_loss": 5.762571334838867,
      "eval_runtime": 0.8404,
      "eval_samples_per_second": 693.698,
      "eval_steps_per_second": 22.608,
      "step": 9
    },
    {
      "epoch": 0.05763688760806916,
      "grad_norm": 2.2905080318450928,
      "learning_rate": 0.0001,
      "loss": 5.6307,
      "step": 10
    },
    {
      "epoch": 0.06340057636887608,
      "grad_norm": 2.9346985816955566,
      "learning_rate": 9.99695413509548e-05,
      "loss": 5.5274,
      "step": 11
    },
    {
      "epoch": 0.069164265129683,
      "grad_norm": 3.4477925300598145,
      "learning_rate": 9.987820251299122e-05,
      "loss": 5.4946,
      "step": 12
    },
    {
      "epoch": 0.07492795389048991,
      "grad_norm": 2.5624496936798096,
      "learning_rate": 9.972609476841367e-05,
      "loss": 5.5983,
      "step": 13
    },
    {
      "epoch": 0.08069164265129683,
      "grad_norm": 2.30907940864563,
      "learning_rate": 9.951340343707852e-05,
      "loss": 4.965,
      "step": 14
    },
    {
      "epoch": 0.08645533141210375,
      "grad_norm": 3.060831308364868,
      "learning_rate": 9.924038765061042e-05,
      "loss": 5.0549,
      "step": 15
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 1.7213447093963623,
      "learning_rate": 9.890738003669029e-05,
      "loss": 5.1858,
      "step": 16
    },
    {
      "epoch": 0.09798270893371758,
      "grad_norm": 1.8246761560440063,
      "learning_rate": 9.851478631379982e-05,
      "loss": 5.1216,
      "step": 17
    },
    {
      "epoch": 0.1037463976945245,
      "grad_norm": 1.6783112287521362,
      "learning_rate": 9.806308479691595e-05,
      "loss": 5.0034,
      "step": 18
    },
    {
      "epoch": 0.1037463976945245,
      "eval_loss": 4.902072429656982,
      "eval_runtime": 0.8459,
      "eval_samples_per_second": 689.167,
      "eval_steps_per_second": 22.46,
      "step": 18
    },
    {
      "epoch": 0.10951008645533142,
      "grad_norm": 1.777565836906433,
      "learning_rate": 9.755282581475769e-05,
      "loss": 4.9386,
      "step": 19
    },
    {
      "epoch": 0.11527377521613832,
      "grad_norm": 2.9292523860931396,
      "learning_rate": 9.698463103929542e-05,
      "loss": 4.5237,
      "step": 20
    },
    {
      "epoch": 0.12103746397694524,
      "grad_norm": 1.343632459640503,
      "learning_rate": 9.635919272833938e-05,
      "loss": 4.4949,
      "step": 21
    },
    {
      "epoch": 0.12680115273775217,
      "grad_norm": 1.9242039918899536,
      "learning_rate": 9.567727288213005e-05,
      "loss": 4.7291,
      "step": 22
    },
    {
      "epoch": 0.13256484149855907,
      "grad_norm": 1.7752628326416016,
      "learning_rate": 9.493970231495835e-05,
      "loss": 4.6048,
      "step": 23
    },
    {
      "epoch": 0.138328530259366,
      "grad_norm": 1.608022689819336,
      "learning_rate": 9.414737964294636e-05,
      "loss": 4.3314,
      "step": 24
    },
    {
      "epoch": 0.1440922190201729,
      "grad_norm": 1.1817405223846436,
      "learning_rate": 9.330127018922194e-05,
      "loss": 4.163,
      "step": 25
    },
    {
      "epoch": 0.14985590778097982,
      "grad_norm": 1.5801705121994019,
      "learning_rate": 9.24024048078213e-05,
      "loss": 4.3659,
      "step": 26
    },
    {
      "epoch": 0.15561959654178675,
      "grad_norm": 3.451111316680908,
      "learning_rate": 9.145187862775209e-05,
      "loss": 4.3982,
      "step": 27
    },
    {
      "epoch": 0.15561959654178675,
      "eval_loss": 4.300004005432129,
      "eval_runtime": 0.8515,
      "eval_samples_per_second": 684.681,
      "eval_steps_per_second": 22.314,
      "step": 27
    },
    {
      "epoch": 0.16138328530259366,
      "grad_norm": 1.6136767864227295,
      "learning_rate": 9.045084971874738e-05,
      "loss": 4.3207,
      "step": 28
    },
    {
      "epoch": 0.16714697406340057,
      "grad_norm": 1.5284059047698975,
      "learning_rate": 8.940053768033609e-05,
      "loss": 4.2439,
      "step": 29
    },
    {
      "epoch": 0.1729106628242075,
      "grad_norm": 1.335671305656433,
      "learning_rate": 8.83022221559489e-05,
      "loss": 3.9352,
      "step": 30
    },
    {
      "epoch": 0.1786743515850144,
      "grad_norm": 2.7042202949523926,
      "learning_rate": 8.715724127386972e-05,
      "loss": 4.0291,
      "step": 31
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 2.1285061836242676,
      "learning_rate": 8.596699001693255e-05,
      "loss": 3.9501,
      "step": 32
    },
    {
      "epoch": 0.19020172910662825,
      "grad_norm": 1.8937512636184692,
      "learning_rate": 8.473291852294987e-05,
      "loss": 3.7774,
      "step": 33
    },
    {
      "epoch": 0.19596541786743515,
      "grad_norm": 1.931766152381897,
      "learning_rate": 8.345653031794292e-05,
      "loss": 3.7974,
      "step": 34
    },
    {
      "epoch": 0.2017291066282421,
      "grad_norm": 1.651713490486145,
      "learning_rate": 8.213938048432697e-05,
      "loss": 3.8174,
      "step": 35
    },
    {
      "epoch": 0.207492795389049,
      "grad_norm": 1.7000616788864136,
      "learning_rate": 8.07830737662829e-05,
      "loss": 3.8321,
      "step": 36
    },
    {
      "epoch": 0.207492795389049,
      "eval_loss": 3.770657777786255,
      "eval_runtime": 0.8553,
      "eval_samples_per_second": 681.651,
      "eval_steps_per_second": 22.215,
      "step": 36
    },
    {
      "epoch": 0.2132564841498559,
      "grad_norm": 1.4033437967300415,
      "learning_rate": 7.938926261462366e-05,
      "loss": 3.7747,
      "step": 37
    },
    {
      "epoch": 0.21902017291066284,
      "grad_norm": 2.2183494567871094,
      "learning_rate": 7.795964517353735e-05,
      "loss": 3.7118,
      "step": 38
    },
    {
      "epoch": 0.22478386167146974,
      "grad_norm": 1.5647684335708618,
      "learning_rate": 7.649596321166024e-05,
      "loss": 3.6716,
      "step": 39
    },
    {
      "epoch": 0.23054755043227665,
      "grad_norm": 1.6753904819488525,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.4879,
      "step": 40
    },
    {
      "epoch": 0.23631123919308358,
      "grad_norm": 1.5152138471603394,
      "learning_rate": 7.347357813929454e-05,
      "loss": 3.4866,
      "step": 41
    },
    {
      "epoch": 0.2420749279538905,
      "grad_norm": 1.228210210800171,
      "learning_rate": 7.191855733945387e-05,
      "loss": 3.4079,
      "step": 42
    },
    {
      "epoch": 0.2478386167146974,
      "grad_norm": 1.3091694116592407,
      "learning_rate": 7.033683215379002e-05,
      "loss": 3.7129,
      "step": 43
    },
    {
      "epoch": 0.25360230547550433,
      "grad_norm": 1.4786443710327148,
      "learning_rate": 6.873032967079561e-05,
      "loss": 3.3594,
      "step": 44
    },
    {
      "epoch": 0.25936599423631124,
      "grad_norm": 1.6930687427520752,
      "learning_rate": 6.710100716628344e-05,
      "loss": 3.4402,
      "step": 45
    },
    {
      "epoch": 0.25936599423631124,
      "eval_loss": 3.5291929244995117,
      "eval_runtime": 0.8391,
      "eval_samples_per_second": 694.79,
      "eval_steps_per_second": 22.643,
      "step": 45
    },
    {
      "epoch": 0.26512968299711814,
      "grad_norm": 1.3388841152191162,
      "learning_rate": 6.545084971874738e-05,
      "loss": 3.2864,
      "step": 46
    },
    {
      "epoch": 0.27089337175792505,
      "grad_norm": 1.3085576295852661,
      "learning_rate": 6.378186779084995e-05,
      "loss": 3.5233,
      "step": 47
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 1.5007169246673584,
      "learning_rate": 6.209609477998338e-05,
      "loss": 3.4159,
      "step": 48
    },
    {
      "epoch": 0.2824207492795389,
      "grad_norm": 1.2105474472045898,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 3.4101,
      "step": 49
    },
    {
      "epoch": 0.2881844380403458,
      "grad_norm": 1.4784791469573975,
      "learning_rate": 5.868240888334653e-05,
      "loss": 3.3902,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2776751721676800.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}