{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03566333808844508,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003566333808844508,
      "grad_norm": 9.774100303649902,
      "learning_rate": 1e-05,
      "loss": 4.3855,
      "step": 1
    },
    {
      "epoch": 0.0007132667617689016,
      "grad_norm": 6.548297882080078,
      "learning_rate": 2e-05,
      "loss": 3.1545,
      "step": 2
    },
    {
      "epoch": 0.0010699001426533524,
      "grad_norm": 7.257200241088867,
      "learning_rate": 3e-05,
      "loss": 3.3344,
      "step": 3
    },
    {
      "epoch": 0.0014265335235378032,
      "grad_norm": 6.974522113800049,
      "learning_rate": 4e-05,
      "loss": 3.152,
      "step": 4
    },
    {
      "epoch": 0.001783166904422254,
      "grad_norm": 8.030047416687012,
      "learning_rate": 5e-05,
      "loss": 3.5981,
      "step": 5
    },
    {
      "epoch": 0.0021398002853067048,
      "grad_norm": 8.43385124206543,
      "learning_rate": 6e-05,
      "loss": 3.3772,
      "step": 6
    },
    {
      "epoch": 0.0024964336661911554,
      "grad_norm": 6.553490161895752,
      "learning_rate": 7e-05,
      "loss": 2.7528,
      "step": 7
    },
    {
      "epoch": 0.0028530670470756064,
      "grad_norm": 5.480027675628662,
      "learning_rate": 8e-05,
      "loss": 2.5841,
      "step": 8
    },
    {
      "epoch": 0.003209700427960057,
      "grad_norm": 5.729024887084961,
      "learning_rate": 9e-05,
      "loss": 2.5566,
      "step": 9
    },
    {
      "epoch": 0.003566333808844508,
      "grad_norm": 6.299459934234619,
      "learning_rate": 0.0001,
      "loss": 2.656,
      "step": 10
    },
    {
      "epoch": 0.0039229671897289585,
      "grad_norm": 5.80184268951416,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.2856,
      "step": 11
    },
    {
      "epoch": 0.0042796005706134095,
      "grad_norm": 5.892313003540039,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.4129,
      "step": 12
    },
    {
      "epoch": 0.0046362339514978606,
      "grad_norm": 4.244318008422852,
      "learning_rate": 9.972609476841367e-05,
      "loss": 1.697,
      "step": 13
    },
    {
      "epoch": 0.004992867332382311,
      "grad_norm": 3.284872531890869,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.9739,
      "step": 14
    },
    {
      "epoch": 0.005349500713266762,
      "grad_norm": 4.807908058166504,
      "learning_rate": 9.924038765061042e-05,
      "loss": 1.6373,
      "step": 15
    },
    {
      "epoch": 0.005706134094151213,
      "grad_norm": 5.345113277435303,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.3676,
      "step": 16
    },
    {
      "epoch": 0.006062767475035664,
      "grad_norm": 3.9351136684417725,
      "learning_rate": 9.851478631379982e-05,
      "loss": 1.0759,
      "step": 17
    },
    {
      "epoch": 0.006419400855920114,
      "grad_norm": 4.3018412590026855,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.8651,
      "step": 18
    },
    {
      "epoch": 0.006776034236804565,
      "grad_norm": 3.912882089614868,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.6577,
      "step": 19
    },
    {
      "epoch": 0.007132667617689016,
      "grad_norm": 3.2970550060272217,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.7112,
      "step": 20
    },
    {
      "epoch": 0.007489300998573466,
      "grad_norm": 4.662540435791016,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.7851,
      "step": 21
    },
    {
      "epoch": 0.007845934379457917,
      "grad_norm": 4.208030700683594,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.8795,
      "step": 22
    },
    {
      "epoch": 0.008202567760342368,
      "grad_norm": 3.992008924484253,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.9006,
      "step": 23
    },
    {
      "epoch": 0.008559201141226819,
      "grad_norm": 3.2357306480407715,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.7336,
      "step": 24
    },
    {
      "epoch": 0.00891583452211127,
      "grad_norm": 4.444036483764648,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.7507,
      "step": 25
    },
    {
      "epoch": 0.009272467902995721,
      "grad_norm": 2.9058542251586914,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.5678,
      "step": 26
    },
    {
      "epoch": 0.00962910128388017,
      "grad_norm": 2.881979465484619,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.522,
      "step": 27
    },
    {
      "epoch": 0.009985734664764621,
      "grad_norm": 6.814059257507324,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.7406,
      "step": 28
    },
    {
      "epoch": 0.010342368045649072,
      "grad_norm": 2.85461163520813,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.535,
      "step": 29
    },
    {
      "epoch": 0.010699001426533523,
      "grad_norm": 4.374093532562256,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.918,
      "step": 30
    },
    {
      "epoch": 0.011055634807417974,
      "grad_norm": 4.2099080085754395,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.8811,
      "step": 31
    },
    {
      "epoch": 0.011412268188302425,
      "grad_norm": 3.8226873874664307,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.6557,
      "step": 32
    },
    {
      "epoch": 0.011768901569186876,
      "grad_norm": 4.4234700202941895,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.9053,
      "step": 33
    },
    {
      "epoch": 0.012125534950071327,
      "grad_norm": 3.8180363178253174,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.7339,
      "step": 34
    },
    {
      "epoch": 0.012482168330955777,
      "grad_norm": 3.6584701538085938,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.6399,
      "step": 35
    },
    {
      "epoch": 0.012838801711840228,
      "grad_norm": 3.0617523193359375,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.6536,
      "step": 36
    },
    {
      "epoch": 0.013195435092724679,
      "grad_norm": 3.4767324924468994,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.8274,
      "step": 37
    },
    {
      "epoch": 0.01355206847360913,
      "grad_norm": 3.2626168727874756,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.5745,
      "step": 38
    },
    {
      "epoch": 0.01390870185449358,
      "grad_norm": 2.738389492034912,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.6844,
      "step": 39
    },
    {
      "epoch": 0.014265335235378032,
      "grad_norm": 3.461836576461792,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.887,
      "step": 40
    },
    {
      "epoch": 0.014621968616262483,
      "grad_norm": 2.5409317016601562,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.6947,
      "step": 41
    },
    {
      "epoch": 0.014978601997146932,
      "grad_norm": 1.8447601795196533,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.3823,
      "step": 42
    },
    {
      "epoch": 0.015335235378031383,
      "grad_norm": 2.9046192169189453,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.7798,
      "step": 43
    },
    {
      "epoch": 0.015691868758915834,
      "grad_norm": 2.498103141784668,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.5778,
      "step": 44
    },
    {
      "epoch": 0.016048502139800285,
      "grad_norm": 2.751084566116333,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.4831,
      "step": 45
    },
    {
      "epoch": 0.016405135520684736,
      "grad_norm": 2.245828628540039,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.4149,
      "step": 46
    },
    {
      "epoch": 0.016761768901569187,
      "grad_norm": 3.826903820037842,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.8814,
      "step": 47
    },
    {
      "epoch": 0.017118402282453638,
      "grad_norm": 3.1380362510681152,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.5521,
      "step": 48
    },
    {
      "epoch": 0.01747503566333809,
      "grad_norm": 3.0199174880981445,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.7336,
      "step": 49
    },
    {
      "epoch": 0.01783166904422254,
      "grad_norm": 2.6297054290771484,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.6997,
      "step": 50
    },
    {
      "epoch": 0.01818830242510699,
      "grad_norm": 3.390153169631958,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.8219,
      "step": 51
    },
    {
      "epoch": 0.018544935805991442,
      "grad_norm": 3.1203787326812744,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.6488,
      "step": 52
    },
    {
      "epoch": 0.018901569186875893,
      "grad_norm": 2.848087787628174,
      "learning_rate": 5.348782368720626e-05,
      "loss": 0.6625,
      "step": 53
    },
    {
      "epoch": 0.01925820256776034,
      "grad_norm": 1.9604121446609497,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.3796,
      "step": 54
    },
    {
      "epoch": 0.019614835948644792,
      "grad_norm": 2.3012568950653076,
      "learning_rate": 5e-05,
      "loss": 0.5187,
      "step": 55
    },
    {
      "epoch": 0.019971469329529243,
      "grad_norm": 2.307490825653076,
      "learning_rate": 4.825502516487497e-05,
      "loss": 0.4771,
      "step": 56
    },
    {
      "epoch": 0.020328102710413694,
      "grad_norm": 3.150341033935547,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.8166,
      "step": 57
    },
    {
      "epoch": 0.020684736091298145,
      "grad_norm": 3.0458898544311523,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.7438,
      "step": 58
    },
    {
      "epoch": 0.021041369472182596,
      "grad_norm": 3.054081439971924,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 0.7589,
      "step": 59
    },
    {
      "epoch": 0.021398002853067047,
      "grad_norm": 3.3433187007904053,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.7532,
      "step": 60
    },
    {
      "epoch": 0.021754636233951498,
      "grad_norm": 1.860802412033081,
      "learning_rate": 3.960441545911204e-05,
      "loss": 0.2933,
      "step": 61
    },
    {
      "epoch": 0.02211126961483595,
      "grad_norm": 1.9675403833389282,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.4654,
      "step": 62
    },
    {
      "epoch": 0.0224679029957204,
      "grad_norm": 2.4809985160827637,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.5167,
      "step": 63
    },
    {
      "epoch": 0.02282453637660485,
      "grad_norm": 2.7916054725646973,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.6389,
      "step": 64
    },
    {
      "epoch": 0.023181169757489302,
      "grad_norm": 2.9464380741119385,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.8379,
      "step": 65
    },
    {
      "epoch": 0.023537803138373753,
      "grad_norm": 2.0889384746551514,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.6764,
      "step": 66
    },
    {
      "epoch": 0.023894436519258204,
      "grad_norm": 1.9802026748657227,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 0.4098,
      "step": 67
    },
    {
      "epoch": 0.024251069900142655,
      "grad_norm": 2.649928331375122,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.7426,
      "step": 68
    },
    {
      "epoch": 0.024607703281027102,
      "grad_norm": 2.019575357437134,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.4589,
      "step": 69
    },
    {
      "epoch": 0.024964336661911554,
      "grad_norm": 2.0410661697387695,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.4953,
      "step": 70
    },
    {
      "epoch": 0.025320970042796005,
      "grad_norm": 3.207294464111328,
      "learning_rate": 2.350403678833976e-05,
      "loss": 0.6211,
      "step": 71
    },
    {
      "epoch": 0.025677603423680456,
      "grad_norm": 2.813213348388672,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.769,
      "step": 72
    },
    {
      "epoch": 0.026034236804564907,
      "grad_norm": 2.10286283493042,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.5105,
      "step": 73
    },
    {
      "epoch": 0.026390870185449358,
      "grad_norm": 2.057298183441162,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.4323,
      "step": 74
    },
    {
      "epoch": 0.02674750356633381,
      "grad_norm": 2.6843760013580322,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.7312,
      "step": 75
    },
    {
      "epoch": 0.02710413694721826,
      "grad_norm": 1.9968925714492798,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.508,
      "step": 76
    },
    {
      "epoch": 0.02746077032810271,
      "grad_norm": 2.609607696533203,
      "learning_rate": 1.526708147705013e-05,
      "loss": 0.542,
      "step": 77
    },
    {
      "epoch": 0.02781740370898716,
      "grad_norm": 2.267988443374634,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.413,
      "step": 78
    },
    {
      "epoch": 0.028174037089871613,
      "grad_norm": 2.5811314582824707,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.7585,
      "step": 79
    },
    {
      "epoch": 0.028530670470756064,
      "grad_norm": 2.054327964782715,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.4524,
      "step": 80
    },
    {
      "epoch": 0.028887303851640515,
      "grad_norm": 2.4103705883026123,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.6176,
      "step": 81
    },
    {
      "epoch": 0.029243937232524966,
      "grad_norm": 2.5763814449310303,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.696,
      "step": 82
    },
    {
      "epoch": 0.029600570613409417,
      "grad_norm": 2.2559149265289307,
      "learning_rate": 8.548121372247918e-06,
      "loss": 0.4984,
      "step": 83
    },
    {
      "epoch": 0.029957203994293864,
      "grad_norm": 1.892920970916748,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.4141,
      "step": 84
    },
    {
      "epoch": 0.030313837375178315,
      "grad_norm": 2.494680404663086,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.5068,
      "step": 85
    },
    {
      "epoch": 0.030670470756062766,
      "grad_norm": 3.0079333782196045,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.8526,
      "step": 86
    },
    {
      "epoch": 0.031027104136947217,
      "grad_norm": 2.6471354961395264,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.6676,
      "step": 87
    },
    {
      "epoch": 0.03138373751783167,
      "grad_norm": 2.3440585136413574,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.5147,
      "step": 88
    },
    {
      "epoch": 0.03174037089871612,
      "grad_norm": 2.2918765544891357,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 0.5938,
      "step": 89
    },
    {
      "epoch": 0.03209700427960057,
      "grad_norm": 2.1986241340637207,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.4577,
      "step": 90
    },
    {
      "epoch": 0.03245363766048502,
      "grad_norm": 2.8510165214538574,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.571,
      "step": 91
    },
    {
      "epoch": 0.03281027104136947,
      "grad_norm": 2.722109079360962,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.4728,
      "step": 92
    },
    {
      "epoch": 0.03316690442225392,
      "grad_norm": 1.798582911491394,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.3732,
      "step": 93
    },
    {
      "epoch": 0.033523537803138374,
      "grad_norm": 1.8506275415420532,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.536,
      "step": 94
    },
    {
      "epoch": 0.033880171184022825,
      "grad_norm": 2.3318088054656982,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.6051,
      "step": 95
    },
    {
      "epoch": 0.034236804564907276,
      "grad_norm": 2.459251642227173,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.6138,
      "step": 96
    },
    {
      "epoch": 0.03459343794579173,
      "grad_norm": 2.914124011993408,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.5388,
      "step": 97
    },
    {
      "epoch": 0.03495007132667618,
      "grad_norm": 2.980940103530884,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 0.7465,
      "step": 98
    },
    {
      "epoch": 0.03530670470756063,
      "grad_norm": 2.870051145553589,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.7109,
      "step": 99
    },
    {
      "epoch": 0.03566333808844508,
      "grad_norm": 1.8577947616577148,
      "learning_rate": 0.0,
      "loss": 0.341,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5000973307084800.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}