{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006587615283267457,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 6.587615283267457e-05,
      "grad_norm": 6.192081928253174,
      "learning_rate": 1e-05,
      "loss": 12.3252,
      "step": 1
    },
    {
      "epoch": 6.587615283267457e-05,
      "eval_loss": 12.318095207214355,
      "eval_runtime": 184.4228,
      "eval_samples_per_second": 69.319,
      "eval_steps_per_second": 8.665,
      "step": 1
    },
    {
      "epoch": 0.00013175230566534913,
      "grad_norm": 6.010153770446777,
      "learning_rate": 2e-05,
      "loss": 12.2193,
      "step": 2
    },
    {
      "epoch": 0.0001976284584980237,
      "grad_norm": 5.602427959442139,
      "learning_rate": 3e-05,
      "loss": 12.4322,
      "step": 3
    },
    {
      "epoch": 0.00026350461133069827,
      "grad_norm": 5.586225509643555,
      "learning_rate": 4e-05,
      "loss": 12.0126,
      "step": 4
    },
    {
      "epoch": 0.00032938076416337287,
      "grad_norm": 5.574303150177002,
      "learning_rate": 5e-05,
      "loss": 12.1146,
      "step": 5
    },
    {
      "epoch": 0.0003952569169960474,
      "grad_norm": 5.685383319854736,
      "learning_rate": 6e-05,
      "loss": 12.0445,
      "step": 6
    },
    {
      "epoch": 0.000461133069828722,
      "grad_norm": 6.527904987335205,
      "learning_rate": 7e-05,
      "loss": 12.0914,
      "step": 7
    },
    {
      "epoch": 0.0005270092226613965,
      "grad_norm": 5.289760589599609,
      "learning_rate": 8e-05,
      "loss": 10.8712,
      "step": 8
    },
    {
      "epoch": 0.0005928853754940712,
      "grad_norm": 6.140544414520264,
      "learning_rate": 9e-05,
      "loss": 11.6504,
      "step": 9
    },
    {
      "epoch": 0.0005928853754940712,
      "eval_loss": 11.612039566040039,
      "eval_runtime": 184.6499,
      "eval_samples_per_second": 69.234,
      "eval_steps_per_second": 8.654,
      "step": 9
    },
    {
      "epoch": 0.0006587615283267457,
      "grad_norm": 6.362745761871338,
      "learning_rate": 0.0001,
      "loss": 11.7407,
      "step": 10
    },
    {
      "epoch": 0.0007246376811594203,
      "grad_norm": 5.845798015594482,
      "learning_rate": 9.99695413509548e-05,
      "loss": 11.0677,
      "step": 11
    },
    {
      "epoch": 0.0007905138339920949,
      "grad_norm": 6.456894397735596,
      "learning_rate": 9.987820251299122e-05,
      "loss": 11.3561,
      "step": 12
    },
    {
      "epoch": 0.0008563899868247694,
      "grad_norm": 5.976311206817627,
      "learning_rate": 9.972609476841367e-05,
      "loss": 10.7305,
      "step": 13
    },
    {
      "epoch": 0.000922266139657444,
      "grad_norm": 5.910308361053467,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.7511,
      "step": 14
    },
    {
      "epoch": 0.0009881422924901185,
      "grad_norm": 5.393200397491455,
      "learning_rate": 9.924038765061042e-05,
      "loss": 9.8402,
      "step": 15
    },
    {
      "epoch": 0.001054018445322793,
      "grad_norm": 6.385056495666504,
      "learning_rate": 9.890738003669029e-05,
      "loss": 9.648,
      "step": 16
    },
    {
      "epoch": 0.0011198945981554676,
      "grad_norm": 6.098160266876221,
      "learning_rate": 9.851478631379982e-05,
      "loss": 9.3232,
      "step": 17
    },
    {
      "epoch": 0.0011857707509881424,
      "grad_norm": 6.31523323059082,
      "learning_rate": 9.806308479691595e-05,
      "loss": 8.8209,
      "step": 18
    },
    {
      "epoch": 0.0011857707509881424,
      "eval_loss": 8.577109336853027,
      "eval_runtime": 184.3733,
      "eval_samples_per_second": 69.338,
      "eval_steps_per_second": 8.667,
      "step": 18
    },
    {
      "epoch": 0.001251646903820817,
      "grad_norm": 6.199258804321289,
      "learning_rate": 9.755282581475769e-05,
      "loss": 8.415,
      "step": 19
    },
    {
      "epoch": 0.0013175230566534915,
      "grad_norm": 6.069029331207275,
      "learning_rate": 9.698463103929542e-05,
      "loss": 7.9969,
      "step": 20
    },
    {
      "epoch": 0.001383399209486166,
      "grad_norm": 6.013519763946533,
      "learning_rate": 9.635919272833938e-05,
      "loss": 8.2204,
      "step": 21
    },
    {
      "epoch": 0.0014492753623188406,
      "grad_norm": 6.165053367614746,
      "learning_rate": 9.567727288213005e-05,
      "loss": 7.5931,
      "step": 22
    },
    {
      "epoch": 0.0015151515151515152,
      "grad_norm": 6.0091753005981445,
      "learning_rate": 9.493970231495835e-05,
      "loss": 6.9307,
      "step": 23
    },
    {
      "epoch": 0.0015810276679841897,
      "grad_norm": 6.555554389953613,
      "learning_rate": 9.414737964294636e-05,
      "loss": 6.9404,
      "step": 24
    },
    {
      "epoch": 0.0016469038208168643,
      "grad_norm": 6.848858833312988,
      "learning_rate": 9.330127018922194e-05,
      "loss": 6.8355,
      "step": 25
    },
    {
      "epoch": 0.0017127799736495388,
      "grad_norm": 6.512882709503174,
      "learning_rate": 9.24024048078213e-05,
      "loss": 5.8496,
      "step": 26
    },
    {
      "epoch": 0.0017786561264822134,
      "grad_norm": 6.7052001953125,
      "learning_rate": 9.145187862775209e-05,
      "loss": 5.7517,
      "step": 27
    },
    {
      "epoch": 0.0017786561264822134,
      "eval_loss": 5.528936862945557,
      "eval_runtime": 184.5038,
      "eval_samples_per_second": 69.289,
      "eval_steps_per_second": 8.661,
      "step": 27
    },
    {
      "epoch": 0.001844532279314888,
      "grad_norm": 6.696857929229736,
      "learning_rate": 9.045084971874738e-05,
      "loss": 5.744,
      "step": 28
    },
    {
      "epoch": 0.0019104084321475627,
      "grad_norm": 6.88131046295166,
      "learning_rate": 8.940053768033609e-05,
      "loss": 5.3715,
      "step": 29
    },
    {
      "epoch": 0.001976284584980237,
      "grad_norm": 7.05979061126709,
      "learning_rate": 8.83022221559489e-05,
      "loss": 4.748,
      "step": 30
    },
    {
      "epoch": 0.002042160737812912,
      "grad_norm": 7.165943145751953,
      "learning_rate": 8.715724127386972e-05,
      "loss": 4.3262,
      "step": 31
    },
    {
      "epoch": 0.002108036890645586,
      "grad_norm": 6.821268081665039,
      "learning_rate": 8.596699001693255e-05,
      "loss": 4.4215,
      "step": 32
    },
    {
      "epoch": 0.002173913043478261,
      "grad_norm": 5.98926305770874,
      "learning_rate": 8.473291852294987e-05,
      "loss": 3.6921,
      "step": 33
    },
    {
      "epoch": 0.0022397891963109352,
      "grad_norm": 5.945371150970459,
      "learning_rate": 8.345653031794292e-05,
      "loss": 3.7048,
      "step": 34
    },
    {
      "epoch": 0.00230566534914361,
      "grad_norm": 4.138054847717285,
      "learning_rate": 8.213938048432697e-05,
      "loss": 3.308,
      "step": 35
    },
    {
      "epoch": 0.0023715415019762848,
      "grad_norm": 4.569094181060791,
      "learning_rate": 8.07830737662829e-05,
      "loss": 3.4081,
      "step": 36
    },
    {
      "epoch": 0.0023715415019762848,
      "eval_loss": 3.0261588096618652,
      "eval_runtime": 184.3164,
      "eval_samples_per_second": 69.359,
      "eval_steps_per_second": 8.67,
      "step": 36
    },
    {
      "epoch": 0.002437417654808959,
      "grad_norm": 3.269929885864258,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.8406,
      "step": 37
    },
    {
      "epoch": 0.002503293807641634,
      "grad_norm": 3.704793930053711,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.7271,
      "step": 38
    },
    {
      "epoch": 0.002569169960474308,
      "grad_norm": 5.287662029266357,
      "learning_rate": 7.649596321166024e-05,
      "loss": 3.0801,
      "step": 39
    },
    {
      "epoch": 0.002635046113306983,
      "grad_norm": 3.6738574504852295,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.5241,
      "step": 40
    },
    {
      "epoch": 0.0027009222661396573,
      "grad_norm": 3.433509111404419,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.4624,
      "step": 41
    },
    {
      "epoch": 0.002766798418972332,
      "grad_norm": 3.7118277549743652,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.7331,
      "step": 42
    },
    {
      "epoch": 0.0028326745718050064,
      "grad_norm": 4.565731525421143,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.8115,
      "step": 43
    },
    {
      "epoch": 0.002898550724637681,
      "grad_norm": 2.9661378860473633,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.0928,
      "step": 44
    },
    {
      "epoch": 0.0029644268774703555,
      "grad_norm": 3.1142585277557373,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.0762,
      "step": 45
    },
    {
      "epoch": 0.0029644268774703555,
      "eval_loss": 2.1579360961914062,
      "eval_runtime": 184.323,
      "eval_samples_per_second": 69.356,
      "eval_steps_per_second": 8.67,
      "step": 45
    },
    {
      "epoch": 0.0030303030303030303,
      "grad_norm": 5.064971446990967,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.2238,
      "step": 46
    },
    {
      "epoch": 0.003096179183135705,
      "grad_norm": 4.904120922088623,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.1618,
      "step": 47
    },
    {
      "epoch": 0.0031620553359683794,
      "grad_norm": 3.264500141143799,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.8662,
      "step": 48
    },
    {
      "epoch": 0.003227931488801054,
      "grad_norm": 3.768419027328491,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 1.7912,
      "step": 49
    },
    {
      "epoch": 0.0032938076416337285,
      "grad_norm": 3.241663694381714,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.68,
      "step": 50
    },
    {
      "epoch": 0.0033596837944664033,
      "grad_norm": 3.5803892612457275,
      "learning_rate": 5.695865504800327e-05,
      "loss": 1.7835,
      "step": 51
    },
    {
      "epoch": 0.0034255599472990776,
      "grad_norm": 3.1176340579986572,
      "learning_rate": 5.522642316338268e-05,
      "loss": 1.5672,
      "step": 52
    },
    {
      "epoch": 0.0034914361001317524,
      "grad_norm": 2.987795114517212,
      "learning_rate": 5.348782368720626e-05,
      "loss": 1.4914,
      "step": 53
    },
    {
      "epoch": 0.0035573122529644267,
      "grad_norm": 6.246347427368164,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.5927,
      "step": 54
    },
    {
      "epoch": 0.0035573122529644267,
      "eval_loss": 1.4257876873016357,
      "eval_runtime": 184.4229,
      "eval_samples_per_second": 69.319,
      "eval_steps_per_second": 8.665,
      "step": 54
    },
    {
      "epoch": 0.0036231884057971015,
      "grad_norm": 3.4647183418273926,
      "learning_rate": 5e-05,
      "loss": 1.4436,
      "step": 55
    },
    {
      "epoch": 0.003689064558629776,
      "grad_norm": 3.349560499191284,
      "learning_rate": 4.825502516487497e-05,
      "loss": 1.3829,
      "step": 56
    },
    {
      "epoch": 0.0037549407114624506,
      "grad_norm": 3.176090955734253,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 1.2451,
      "step": 57
    },
    {
      "epoch": 0.0038208168642951254,
      "grad_norm": 3.0225908756256104,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.3579,
      "step": 58
    },
    {
      "epoch": 0.0038866930171277997,
      "grad_norm": 2.4458398818969727,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 1.101,
      "step": 59
    },
    {
      "epoch": 0.003952569169960474,
      "grad_norm": 3.876741886138916,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.3687,
      "step": 60
    },
    {
      "epoch": 0.004018445322793149,
      "grad_norm": 3.945711374282837,
      "learning_rate": 3.960441545911204e-05,
      "loss": 1.1523,
      "step": 61
    },
    {
      "epoch": 0.004084321475625824,
      "grad_norm": 3.1722943782806396,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.9701,
      "step": 62
    },
    {
      "epoch": 0.004150197628458498,
      "grad_norm": 4.902099609375,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.9159,
      "step": 63
    },
    {
      "epoch": 0.004150197628458498,
      "eval_loss": 1.0228184461593628,
      "eval_runtime": 184.3336,
      "eval_samples_per_second": 69.353,
      "eval_steps_per_second": 8.669,
      "step": 63
    },
    {
      "epoch": 0.004216073781291172,
      "grad_norm": 2.736250400543213,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.9313,
      "step": 64
    },
    {
      "epoch": 0.004281949934123847,
      "grad_norm": 2.595193862915039,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.9,
      "step": 65
    },
    {
      "epoch": 0.004347826086956522,
      "grad_norm": 1.9990233182907104,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.8328,
      "step": 66
    },
    {
      "epoch": 0.0044137022397891966,
      "grad_norm": 4.0702223777771,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 1.0872,
      "step": 67
    },
    {
      "epoch": 0.0044795783926218705,
      "grad_norm": 1.7851134538650513,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.8029,
      "step": 68
    },
    {
      "epoch": 0.004545454545454545,
      "grad_norm": 2.5434176921844482,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.835,
      "step": 69
    },
    {
      "epoch": 0.00461133069828722,
      "grad_norm": 3.252372980117798,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.8726,
      "step": 70
    },
    {
      "epoch": 0.004677206851119895,
      "grad_norm": 1.7027548551559448,
      "learning_rate": 2.350403678833976e-05,
      "loss": 0.7541,
      "step": 71
    },
    {
      "epoch": 0.0047430830039525695,
      "grad_norm": 2.5379581451416016,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.8231,
      "step": 72
    },
    {
      "epoch": 0.0047430830039525695,
      "eval_loss": 0.8483124375343323,
      "eval_runtime": 184.3066,
      "eval_samples_per_second": 69.363,
      "eval_steps_per_second": 8.67,
      "step": 72
    },
    {
      "epoch": 0.0048089591567852434,
      "grad_norm": 1.877734899520874,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.7158,
      "step": 73
    },
    {
      "epoch": 0.004874835309617918,
      "grad_norm": 1.7974519729614258,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.7645,
      "step": 74
    },
    {
      "epoch": 0.004940711462450593,
      "grad_norm": 4.693776607513428,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 1.0939,
      "step": 75
    },
    {
      "epoch": 0.005006587615283268,
      "grad_norm": 2.774831533432007,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.7907,
      "step": 76
    },
    {
      "epoch": 0.005072463768115942,
      "grad_norm": 1.4862602949142456,
      "learning_rate": 1.526708147705013e-05,
      "loss": 0.6863,
      "step": 77
    },
    {
      "epoch": 0.005138339920948616,
      "grad_norm": 2.5560407638549805,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.7421,
      "step": 78
    },
    {
      "epoch": 0.005204216073781291,
      "grad_norm": 3.1836204528808594,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.7768,
      "step": 79
    },
    {
      "epoch": 0.005270092226613966,
      "grad_norm": 1.9725606441497803,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.7135,
      "step": 80
    },
    {
      "epoch": 0.005335968379446641,
      "grad_norm": 1.8420541286468506,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.7451,
      "step": 81
    },
    {
      "epoch": 0.005335968379446641,
      "eval_loss": 0.7732530832290649,
      "eval_runtime": 184.4371,
      "eval_samples_per_second": 69.314,
      "eval_steps_per_second": 8.664,
      "step": 81
    },
    {
      "epoch": 0.005401844532279315,
      "grad_norm": 1.9438694715499878,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.7781,
      "step": 82
    },
    {
      "epoch": 0.005467720685111989,
      "grad_norm": 1.7922385931015015,
      "learning_rate": 8.548121372247918e-06,
      "loss": 0.6667,
      "step": 83
    },
    {
      "epoch": 0.005533596837944664,
      "grad_norm": 1.500645637512207,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.6971,
      "step": 84
    },
    {
      "epoch": 0.005599472990777339,
      "grad_norm": 3.3169381618499756,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.7024,
      "step": 85
    },
    {
      "epoch": 0.005665349143610013,
      "grad_norm": 2.3479697704315186,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.6829,
      "step": 86
    },
    {
      "epoch": 0.005731225296442688,
      "grad_norm": 1.7033846378326416,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.6208,
      "step": 87
    },
    {
      "epoch": 0.005797101449275362,
      "grad_norm": 2.391817569732666,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.7019,
      "step": 88
    },
    {
      "epoch": 0.005862977602108037,
      "grad_norm": 2.0309882164001465,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 0.7454,
      "step": 89
    },
    {
      "epoch": 0.005928853754940711,
      "grad_norm": 2.8557684421539307,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.763,
      "step": 90
    },
    {
      "epoch": 0.005928853754940711,
      "eval_loss": 0.748928427696228,
      "eval_runtime": 184.3653,
      "eval_samples_per_second": 69.341,
      "eval_steps_per_second": 8.668,
      "step": 90
    },
    {
      "epoch": 0.005994729907773386,
      "grad_norm": 1.738196611404419,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.6203,
      "step": 91
    },
    {
      "epoch": 0.006060606060606061,
      "grad_norm": 3.5670719146728516,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.8929,
      "step": 92
    },
    {
      "epoch": 0.006126482213438735,
      "grad_norm": 2.1231579780578613,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.6805,
      "step": 93
    },
    {
      "epoch": 0.00619235836627141,
      "grad_norm": 1.6460092067718506,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.7208,
      "step": 94
    },
    {
      "epoch": 0.006258234519104084,
      "grad_norm": 3.6686792373657227,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.7887,
      "step": 95
    },
    {
      "epoch": 0.006324110671936759,
      "grad_norm": 2.6409358978271484,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.7277,
      "step": 96
    },
    {
      "epoch": 0.006389986824769434,
      "grad_norm": 1.6902509927749634,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.5849,
      "step": 97
    },
    {
      "epoch": 0.006455862977602108,
      "grad_norm": 1.3296384811401367,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 0.5937,
      "step": 98
    },
    {
      "epoch": 0.006521739130434782,
      "grad_norm": 3.772615671157837,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.8282,
      "step": 99
    },
    {
      "epoch": 0.006521739130434782,
      "eval_loss": 0.7452791929244995,
      "eval_runtime": 184.1886,
      "eval_samples_per_second": 69.407,
      "eval_steps_per_second": 8.676,
      "step": 99
    },
    {
      "epoch": 0.006587615283267457,
      "grad_norm": 1.7011206150054932,
      "learning_rate": 0.0,
      "loss": 0.6518,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2047854394736640.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}