|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 11,
  "global_step": 108,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009259259259259259,
      "grad_norm": 2.362648319863932,
      "learning_rate": 5e-06,
      "loss": 0.5807,
      "step": 1
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 2.136281230840684,
      "learning_rate": 1e-05,
      "loss": 0.534,
      "step": 2
    },
    {
      "epoch": 0.027777777777777776,
      "grad_norm": 2.2915803466254743,
      "learning_rate": 9.997804182543973e-06,
      "loss": 0.6069,
      "step": 3
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 1.817521417636344,
      "learning_rate": 9.991218658821609e-06,
      "loss": 0.5863,
      "step": 4
    },
    {
      "epoch": 0.046296296296296294,
      "grad_norm": 1.4406196993263543,
      "learning_rate": 9.980249213076085e-06,
      "loss": 0.5952,
      "step": 5
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 1.098132624881383,
      "learning_rate": 9.964905480067585e-06,
      "loss": 0.5384,
      "step": 6
    },
    {
      "epoch": 0.06481481481481481,
      "grad_norm": 1.2038829059152325,
      "learning_rate": 9.945200936610821e-06,
      "loss": 0.5779,
      "step": 7
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 1.1568981182086557,
      "learning_rate": 9.921152889737985e-06,
      "loss": 0.5487,
      "step": 8
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 1.009464783024193,
      "learning_rate": 9.892782461497521e-06,
      "loss": 0.5162,
      "step": 9
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 1.0403262733706924,
      "learning_rate": 9.860114570402055e-06,
      "loss": 0.4825,
      "step": 10
    },
    {
      "epoch": 0.10185185185185185,
      "grad_norm": 1.017871214392586,
      "learning_rate": 9.823177909541795e-06,
      "loss": 0.4962,
      "step": 11
    },
    {
      "epoch": 0.10185185185185185,
      "eval_loss": 0.5000379085540771,
      "eval_runtime": 7.2133,
      "eval_samples_per_second": 1.248,
      "eval_steps_per_second": 0.277,
      "step": 11
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 1.0511034662384473,
      "learning_rate": 9.782004921382612e-06,
      "loss": 0.4456,
      "step": 12
    },
    {
      "epoch": 0.12037037037037036,
      "grad_norm": 0.8657886515614241,
      "learning_rate": 9.736631769270958e-06,
      "loss": 0.4288,
      "step": 13
    },
    {
      "epoch": 0.12962962962962962,
      "grad_norm": 1.040741797545574,
      "learning_rate": 9.687098305670606e-06,
      "loss": 0.5855,
      "step": 14
    },
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 1.3516228019504957,
      "learning_rate": 9.633448037159167e-06,
      "loss": 0.5184,
      "step": 15
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.9843907475003576,
      "learning_rate": 9.575728086215093e-06,
      "loss": 0.5568,
      "step": 16
    },
    {
      "epoch": 0.1574074074074074,
      "grad_norm": 0.8979836323930017,
      "learning_rate": 9.513989149828718e-06,
      "loss": 0.4272,
      "step": 17
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 1.1188974861045773,
      "learning_rate": 9.448285454973739e-06,
      "loss": 0.487,
      "step": 18
    },
    {
      "epoch": 0.17592592592592593,
      "grad_norm": 1.048669002845475,
      "learning_rate": 9.378674710978185e-06,
      "loss": 0.5321,
      "step": 19
    },
    {
      "epoch": 0.18518518518518517,
      "grad_norm": 1.2366432725685659,
      "learning_rate": 9.305218058836778e-06,
      "loss": 0.5396,
      "step": 20
    },
    {
      "epoch": 0.19444444444444445,
      "grad_norm": 1.0072812014692536,
      "learning_rate": 9.22798001750913e-06,
      "loss": 0.446,
      "step": 21
    },
    {
      "epoch": 0.2037037037037037,
      "grad_norm": 1.0654455593583398,
      "learning_rate": 9.14702842725101e-06,
      "loss": 0.5313,
      "step": 22
    },
    {
      "epoch": 0.2037037037037037,
      "eval_loss": 0.4791085124015808,
      "eval_runtime": 7.1652,
      "eval_samples_per_second": 1.256,
      "eval_steps_per_second": 0.279,
      "step": 22
    },
    {
      "epoch": 0.21296296296296297,
      "grad_norm": 1.0051504278398855,
      "learning_rate": 9.062434390028407e-06,
      "loss": 0.4983,
      "step": 23
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.9621751996571424,
      "learning_rate": 8.974272207066767e-06,
      "loss": 0.4642,
      "step": 24
    },
    {
      "epoch": 0.23148148148148148,
      "grad_norm": 0.9763151052182862,
      "learning_rate": 8.882619313590212e-06,
      "loss": 0.4832,
      "step": 25
    },
    {
      "epoch": 0.24074074074074073,
      "grad_norm": 0.934861026969845,
      "learning_rate": 8.787556210808101e-06,
      "loss": 0.441,
      "step": 26
    },
    {
      "epoch": 0.25,
      "grad_norm": 1.0362041830687405,
      "learning_rate": 8.689166395208638e-06,
      "loss": 0.4489,
      "step": 27
    },
    {
      "epoch": 0.25925925925925924,
      "grad_norm": 0.9049359816184893,
      "learning_rate": 8.587536285221656e-06,
      "loss": 0.4971,
      "step": 28
    },
    {
      "epoch": 0.26851851851851855,
      "grad_norm": 0.9982959368756156,
      "learning_rate": 8.482755145314987e-06,
      "loss": 0.5289,
      "step": 29
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 2.050950638692456,
      "learning_rate": 8.374915007591053e-06,
      "loss": 0.5496,
      "step": 30
    },
    {
      "epoch": 0.28703703703703703,
      "grad_norm": 1.0501945401080932,
      "learning_rate": 8.264110590952609e-06,
      "loss": 0.5261,
      "step": 31
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 0.9229687907732845,
      "learning_rate": 8.150439217908557e-06,
      "loss": 0.4814,
      "step": 32
    },
    {
      "epoch": 0.3055555555555556,
      "grad_norm": 1.4319914933917381,
      "learning_rate": 8.034000729092967e-06,
      "loss": 0.4692,
      "step": 33
    },
    {
      "epoch": 0.3055555555555556,
      "eval_loss": 0.46850860118865967,
      "eval_runtime": 7.148,
      "eval_samples_per_second": 1.259,
      "eval_steps_per_second": 0.28,
      "step": 33
    },
    {
      "epoch": 0.3148148148148148,
      "grad_norm": 0.8881275342675574,
      "learning_rate": 7.914897395572362e-06,
      "loss": 0.4372,
      "step": 34
    },
    {
      "epoch": 0.32407407407407407,
      "grad_norm": 0.9840343978467488,
      "learning_rate": 7.793233829018263e-06,
      "loss": 0.5235,
      "step": 35
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.8470332819900018,
      "learning_rate": 7.669116889823955e-06,
      "loss": 0.4339,
      "step": 36
    },
    {
      "epoch": 0.3425925925925926,
      "grad_norm": 0.8893330836178615,
      "learning_rate": 7.542655593246103e-06,
      "loss": 0.4426,
      "step": 37
    },
    {
      "epoch": 0.35185185185185186,
      "grad_norm": 1.0550698950573756,
      "learning_rate": 7.413961013653725e-06,
      "loss": 0.5744,
      "step": 38
    },
    {
      "epoch": 0.3611111111111111,
      "grad_norm": 0.8784333801568516,
      "learning_rate": 7.283146186968566e-06,
      "loss": 0.3849,
      "step": 39
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 0.9142401659654863,
      "learning_rate": 7.1503260113826035e-06,
      "loss": 0.4057,
      "step": 40
    },
    {
      "epoch": 0.37962962962962965,
      "grad_norm": 0.8743924562233322,
      "learning_rate": 7.015617146439863e-06,
      "loss": 0.4149,
      "step": 41
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.9155700365107549,
      "learning_rate": 6.879137910571191e-06,
      "loss": 0.4557,
      "step": 42
    },
    {
      "epoch": 0.39814814814814814,
      "grad_norm": 0.9844599172183693,
      "learning_rate": 6.741008177171995e-06,
      "loss": 0.5052,
      "step": 43
    },
    {
      "epoch": 0.4074074074074074,
      "grad_norm": 0.8432119339426061,
      "learning_rate": 6.601349269314188e-06,
      "loss": 0.3876,
      "step": 44
    },
    {
      "epoch": 0.4074074074074074,
      "eval_loss": 0.4595443606376648,
      "eval_runtime": 7.1719,
      "eval_samples_per_second": 1.255,
      "eval_steps_per_second": 0.279,
      "step": 44
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 0.9547733839048285,
      "learning_rate": 6.46028385318488e-06,
      "loss": 0.4381,
      "step": 45
    },
    {
      "epoch": 0.42592592592592593,
      "grad_norm": 0.9165610947735227,
      "learning_rate": 6.3179358303453386e-06,
      "loss": 0.433,
      "step": 46
    },
    {
      "epoch": 0.4351851851851852,
      "grad_norm": 0.9276788988821667,
      "learning_rate": 6.17443022890492e-06,
      "loss": 0.4699,
      "step": 47
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.9408707549804078,
      "learning_rate": 6.029893093705492e-06,
      "loss": 0.4584,
      "step": 48
    },
    {
      "epoch": 0.4537037037037037,
      "grad_norm": 0.9822969953152866,
      "learning_rate": 5.884451375612865e-06,
      "loss": 0.5243,
      "step": 49
    },
    {
      "epoch": 0.46296296296296297,
      "grad_norm": 1.0689960747907674,
      "learning_rate": 5.738232820012407e-06,
      "loss": 0.4329,
      "step": 50
    },
    {
      "epoch": 0.4722222222222222,
      "grad_norm": 1.0494906655322942,
      "learning_rate": 5.591365854606829e-06,
      "loss": 0.5633,
      "step": 51
    },
    {
      "epoch": 0.48148148148148145,
      "grad_norm": 0.8987830176611722,
      "learning_rate": 5.443979476614674e-06,
      "loss": 0.445,
      "step": 52
    },
    {
      "epoch": 0.49074074074074076,
      "grad_norm": 0.8746204581445627,
      "learning_rate": 5.296203139468572e-06,
      "loss": 0.3809,
      "step": 53
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.8125477508425051,
      "learning_rate": 5.148166639112799e-06,
      "loss": 0.3964,
      "step": 54
    },
    {
      "epoch": 0.5092592592592593,
      "grad_norm": 0.907249706367073,
      "learning_rate": 5e-06,
      "loss": 0.4768,
      "step": 55
    },
    {
      "epoch": 0.5092592592592593,
      "eval_loss": 0.4542139768600464,
      "eval_runtime": 7.1716,
      "eval_samples_per_second": 1.255,
      "eval_steps_per_second": 0.279,
      "step": 55
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 0.8641393129712072,
      "learning_rate": 4.8518333608872015e-06,
      "loss": 0.4238,
      "step": 56
    },
    {
      "epoch": 0.5277777777777778,
      "grad_norm": 1.3038267949545501,
      "learning_rate": 4.703796860531429e-06,
      "loss": 0.5484,
      "step": 57
    },
    {
      "epoch": 0.5370370370370371,
      "grad_norm": 1.0286161889066556,
      "learning_rate": 4.556020523385326e-06,
      "loss": 0.397,
      "step": 58
    },
    {
      "epoch": 0.5462962962962963,
      "grad_norm": 0.8754084053422794,
      "learning_rate": 4.408634145393172e-06,
      "loss": 0.4452,
      "step": 59
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.935284958978312,
      "learning_rate": 4.261767179987595e-06,
      "loss": 0.4896,
      "step": 60
    },
    {
      "epoch": 0.5648148148148148,
      "grad_norm": 0.9674393986889579,
      "learning_rate": 4.115548624387136e-06,
      "loss": 0.4887,
      "step": 61
    },
    {
      "epoch": 0.5740740740740741,
      "grad_norm": 0.9724892358854195,
      "learning_rate": 3.970106906294509e-06,
      "loss": 0.4997,
      "step": 62
    },
    {
      "epoch": 0.5833333333333334,
      "grad_norm": 0.8794971763479272,
      "learning_rate": 3.825569771095082e-06,
      "loss": 0.417,
      "step": 63
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 0.8881388944091057,
      "learning_rate": 3.682064169654663e-06,
      "loss": 0.4504,
      "step": 64
    },
    {
      "epoch": 0.6018518518518519,
      "grad_norm": 0.9193120996402042,
      "learning_rate": 3.539716146815122e-06,
      "loss": 0.4694,
      "step": 65
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 1.0073500011381085,
      "learning_rate": 3.398650730685813e-06,
      "loss": 0.4985,
      "step": 66
    },
    {
      "epoch": 0.6111111111111112,
      "eval_loss": 0.4495806097984314,
      "eval_runtime": 7.1798,
      "eval_samples_per_second": 1.254,
      "eval_steps_per_second": 0.279,
      "step": 66
    },
    {
      "epoch": 0.6203703703703703,
      "grad_norm": 0.824519734053682,
      "learning_rate": 3.258991822828007e-06,
      "loss": 0.4104,
      "step": 67
    },
    {
      "epoch": 0.6296296296296297,
      "grad_norm": 0.9368830049136432,
      "learning_rate": 3.1208620894288105e-06,
      "loss": 0.4926,
      "step": 68
    },
    {
      "epoch": 0.6388888888888888,
      "grad_norm": 1.1454803265014575,
      "learning_rate": 2.98438285356014e-06,
      "loss": 0.4655,
      "step": 69
    },
    {
      "epoch": 0.6481481481481481,
      "grad_norm": 0.8964782710358511,
      "learning_rate": 2.8496739886173994e-06,
      "loss": 0.4535,
      "step": 70
    },
    {
      "epoch": 0.6574074074074074,
      "grad_norm": 0.9529134597913587,
      "learning_rate": 2.716853813031435e-06,
      "loss": 0.5186,
      "step": 71
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.963788290306135,
      "learning_rate": 2.5860389863462765e-06,
      "loss": 0.522,
      "step": 72
    },
    {
      "epoch": 0.6759259259259259,
      "grad_norm": 0.8411162085924286,
      "learning_rate": 2.457344406753899e-06,
      "loss": 0.4245,
      "step": 73
    },
    {
      "epoch": 0.6851851851851852,
      "grad_norm": 0.8871221033397801,
      "learning_rate": 2.330883110176049e-06,
      "loss": 0.4806,
      "step": 74
    },
    {
      "epoch": 0.6944444444444444,
      "grad_norm": 0.9550800145418555,
      "learning_rate": 2.2067661709817384e-06,
      "loss": 0.5149,
      "step": 75
    },
    {
      "epoch": 0.7037037037037037,
      "grad_norm": 0.8501908519121222,
      "learning_rate": 2.0851026044276405e-06,
      "loss": 0.4107,
      "step": 76
    },
    {
      "epoch": 0.7129629629629629,
      "grad_norm": 0.8678719046436287,
      "learning_rate": 1.9659992709070346e-06,
      "loss": 0.4687,
      "step": 77
    },
    {
      "epoch": 0.7129629629629629,
      "eval_loss": 0.44653645157814026,
      "eval_runtime": 7.1486,
      "eval_samples_per_second": 1.259,
      "eval_steps_per_second": 0.28,
      "step": 77
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.876245605736801,
      "learning_rate": 1.8495607820914451e-06,
      "loss": 0.4197,
      "step": 78
    },
    {
      "epoch": 0.7314814814814815,
      "grad_norm": 0.7970757955872202,
      "learning_rate": 1.7358894090473928e-06,
      "loss": 0.3619,
      "step": 79
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.7955115388136204,
      "learning_rate": 1.6250849924089485e-06,
      "loss": 0.3587,
      "step": 80
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.7782242902212891,
      "learning_rate": 1.5172448546850166e-06,
      "loss": 0.3391,
      "step": 81
    },
    {
      "epoch": 0.7592592592592593,
      "grad_norm": 0.9995473034082181,
      "learning_rate": 1.4124637147783431e-06,
      "loss": 0.5386,
      "step": 82
    },
    {
      "epoch": 0.7685185185185185,
      "grad_norm": 0.8444693019666363,
      "learning_rate": 1.3108336047913633e-06,
      "loss": 0.4069,
      "step": 83
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.8808187390006551,
      "learning_rate": 1.2124437891918995e-06,
      "loss": 0.4371,
      "step": 84
    },
    {
      "epoch": 0.7870370370370371,
      "grad_norm": 0.9813077333772956,
      "learning_rate": 1.1173806864097885e-06,
      "loss": 0.4374,
      "step": 85
    },
    {
      "epoch": 0.7962962962962963,
      "grad_norm": 0.8064326056384253,
      "learning_rate": 1.0257277929332332e-06,
      "loss": 0.3986,
      "step": 86
    },
    {
      "epoch": 0.8055555555555556,
      "grad_norm": 0.8710631137320209,
      "learning_rate": 9.375656099715935e-07,
      "loss": 0.4473,
      "step": 87
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.887311644170428,
      "learning_rate": 8.529715727489912e-07,
      "loss": 0.4484,
      "step": 88
    },
    {
      "epoch": 0.8148148148148148,
      "eval_loss": 0.4449349045753479,
      "eval_runtime": 7.1634,
      "eval_samples_per_second": 1.256,
      "eval_steps_per_second": 0.279,
      "step": 88
    },
    {
      "epoch": 0.8240740740740741,
      "grad_norm": 0.7647806015755853,
      "learning_rate": 7.720199824908692e-07,
      "loss": 0.3425,
      "step": 89
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.8976917694082569,
      "learning_rate": 6.947819411632223e-07,
      "loss": 0.4269,
      "step": 90
    },
    {
      "epoch": 0.8425925925925926,
      "grad_norm": 0.9316801386853545,
      "learning_rate": 6.213252890218163e-07,
      "loss": 0.5674,
      "step": 91
    },
    {
      "epoch": 0.8518518518518519,
      "grad_norm": 0.8870482831956321,
      "learning_rate": 5.517145450262639e-07,
      "loss": 0.454,
      "step": 92
    },
    {
      "epoch": 0.8611111111111112,
      "grad_norm": 0.7768323848063172,
      "learning_rate": 4.860108501712824e-07,
      "loss": 0.3412,
      "step": 93
    },
    {
      "epoch": 0.8703703703703703,
      "grad_norm": 0.8658069877384894,
      "learning_rate": 4.242719137849077e-07,
      "loss": 0.4292,
      "step": 94
    },
    {
      "epoch": 0.8796296296296297,
      "grad_norm": 0.9112502068381936,
      "learning_rate": 3.665519628408332e-07,
      "loss": 0.4765,
      "step": 95
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.8505617937603335,
      "learning_rate": 3.1290169432939556e-07,
      "loss": 0.428,
      "step": 96
    },
    {
      "epoch": 0.8981481481481481,
      "grad_norm": 0.8868085158482631,
      "learning_rate": 2.6336823072904305e-07,
      "loss": 0.4488,
      "step": 97
    },
    {
      "epoch": 0.9074074074074074,
      "grad_norm": 0.8291365248164002,
      "learning_rate": 2.179950786173879e-07,
      "loss": 0.3881,
      "step": 98
    },
    {
      "epoch": 0.9166666666666666,
      "grad_norm": 0.9100636810782011,
      "learning_rate": 1.7682209045820687e-07,
      "loss": 0.4809,
      "step": 99
    },
    {
      "epoch": 0.9166666666666666,
      "eval_loss": 0.4441840946674347,
      "eval_runtime": 7.1692,
      "eval_samples_per_second": 1.255,
      "eval_steps_per_second": 0.279,
      "step": 99
    },
    {
      "epoch": 0.9259259259259259,
      "grad_norm": 0.8985637565061618,
      "learning_rate": 1.3988542959794627e-07,
      "loss": 0.4533,
      "step": 100
    },
    {
      "epoch": 0.9351851851851852,
      "grad_norm": 0.9612580974302548,
      "learning_rate": 1.0721753850247984e-07,
      "loss": 0.5236,
      "step": 101
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 0.8120796428633341,
      "learning_rate": 7.884711026201586e-08,
      "loss": 0.3763,
      "step": 102
    },
    {
      "epoch": 0.9537037037037037,
      "grad_norm": 0.8644849276991213,
      "learning_rate": 5.479906338917984e-08,
      "loss": 0.4101,
      "step": 103
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 0.8946182268462048,
      "learning_rate": 3.5094519932415417e-08,
      "loss": 0.4942,
      "step": 104
    },
    {
      "epoch": 0.9722222222222222,
      "grad_norm": 0.9166159846522884,
      "learning_rate": 1.975078692391552e-08,
      "loss": 0.4778,
      "step": 105
    },
    {
      "epoch": 0.9814814814814815,
      "grad_norm": 0.9473852601201989,
      "learning_rate": 8.781341178393244e-09,
      "loss": 0.4537,
      "step": 106
    },
    {
      "epoch": 0.9907407407407407,
      "grad_norm": 0.8895854364679423,
      "learning_rate": 2.19581745602826e-09,
      "loss": 0.4192,
      "step": 107
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.7849733548858487,
      "learning_rate": 0.0,
      "loss": 0.3589,
      "step": 108
    },
    {
      "epoch": 1.0,
      "step": 108,
      "total_flos": 17851226259456.0,
      "train_loss": 0.46821982275556634,
      "train_runtime": 1249.8096,
      "train_samples_per_second": 0.69,
      "train_steps_per_second": 0.086
    }
  ],
  "logging_steps": 1,
  "max_steps": 108,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 108,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 17851226259456.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|