{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.506649778340722, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01013299556681444, "grad_norm": 2.299077033996582, "learning_rate": 1e-05, "loss": 3.7257, "step": 1 }, { "epoch": 0.01013299556681444, "eval_loss": 3.523265838623047, "eval_runtime": 98.4632, "eval_samples_per_second": 1.696, "eval_steps_per_second": 0.853, "step": 1 }, { "epoch": 0.02026599113362888, "grad_norm": 2.1457676887512207, "learning_rate": 2e-05, "loss": 3.3609, "step": 2 }, { "epoch": 0.030398986700443317, "grad_norm": 2.0729289054870605, "learning_rate": 3e-05, "loss": 3.2415, "step": 3 }, { "epoch": 0.04053198226725776, "grad_norm": 2.152341365814209, "learning_rate": 4e-05, "loss": 3.0918, "step": 4 }, { "epoch": 0.0506649778340722, "grad_norm": 2.4330685138702393, "learning_rate": 5e-05, "loss": 3.1871, "step": 5 }, { "epoch": 0.060797973400886635, "grad_norm": 2.830127000808716, "learning_rate": 6e-05, "loss": 3.007, "step": 6 }, { "epoch": 0.07093096896770108, "grad_norm": 2.7601826190948486, "learning_rate": 7e-05, "loss": 2.7736, "step": 7 }, { "epoch": 0.08106396453451552, "grad_norm": 3.2441565990448, "learning_rate": 8e-05, "loss": 2.668, "step": 8 }, { "epoch": 0.09119696010132995, "grad_norm": 3.553675651550293, "learning_rate": 9e-05, "loss": 2.3073, "step": 9 }, { "epoch": 0.1013299556681444, "grad_norm": 3.3283772468566895, "learning_rate": 0.0001, "loss": 2.017, "step": 10 }, { "epoch": 0.11146295123495883, "grad_norm": 3.136801242828369, "learning_rate": 9.98458666866564e-05, "loss": 1.7767, "step": 11 }, { "epoch": 0.12159594680177327, "grad_norm": 2.8050150871276855, "learning_rate": 9.938441702975689e-05, "loss": 1.563, "step": 12 }, { "epoch": 0.1317289423685877, "grad_norm": 2.3837151527404785, "learning_rate": 9.861849601988383e-05, "loss": 1.1287, "step": 13 }, { "epoch": 0.1317289423685877, "eval_loss": 1.2567861080169678, "eval_runtime": 3.9408, "eval_samples_per_second": 42.377, "eval_steps_per_second": 21.315, "step": 13 }, { "epoch": 0.14186193793540217, "grad_norm": 2.615652084350586, "learning_rate": 9.755282581475769e-05, "loss": 1.1747, "step": 14 }, { "epoch": 0.1519949335022166, "grad_norm": 3.228269338607788, "learning_rate": 9.619397662556435e-05, "loss": 1.1195, "step": 15 }, { "epoch": 0.16212792906903103, "grad_norm": 3.2835893630981445, "learning_rate": 9.45503262094184e-05, "loss": 0.7324, "step": 16 }, { "epoch": 0.17226092463584547, "grad_norm": 3.4717910289764404, "learning_rate": 9.263200821770461e-05, "loss": 0.4257, "step": 17 }, { "epoch": 0.1823939202026599, "grad_norm": 1.8152432441711426, "learning_rate": 9.045084971874738e-05, "loss": 0.2427, "step": 18 }, { "epoch": 0.19252691576947434, "grad_norm": 1.5609540939331055, "learning_rate": 8.802029828000156e-05, "loss": 0.1929, "step": 19 }, { "epoch": 0.2026599113362888, "grad_norm": 1.3951321840286255, "learning_rate": 8.535533905932738e-05, "loss": 0.0777, "step": 20 }, { "epoch": 0.21279290690310323, "grad_norm": 1.6223750114440918, "learning_rate": 8.247240241650918e-05, "loss": 0.061, "step": 21 }, { "epoch": 0.22292590246991767, "grad_norm": 0.5461512804031372, "learning_rate": 7.938926261462366e-05, "loss": 0.0276, "step": 22 }, { "epoch": 0.2330588980367321, "grad_norm": 2.7356250286102295, "learning_rate": 7.612492823579745e-05, "loss": 0.258, "step": 23 }, { "epoch": 0.24319189360354654, "grad_norm": 1.6690704822540283, 
"learning_rate": 7.269952498697734e-05, "loss": 0.0822, "step": 24 }, { "epoch": 0.253324889170361, "grad_norm": 1.9990379810333252, "learning_rate": 6.91341716182545e-05, "loss": 0.1948, "step": 25 }, { "epoch": 0.2634578847371754, "grad_norm": 1.84889817237854, "learning_rate": 6.545084971874738e-05, "loss": 0.0953, "step": 26 }, { "epoch": 0.2634578847371754, "eval_loss": 0.1430395096540451, "eval_runtime": 3.934, "eval_samples_per_second": 42.45, "eval_steps_per_second": 21.352, "step": 26 }, { "epoch": 0.27359088030398987, "grad_norm": 2.240417718887329, "learning_rate": 6.167226819279528e-05, "loss": 0.2279, "step": 27 }, { "epoch": 0.28372387587080433, "grad_norm": 1.6692705154418945, "learning_rate": 5.782172325201155e-05, "loss": 0.1103, "step": 28 }, { "epoch": 0.29385687143761874, "grad_norm": 1.1334484815597534, "learning_rate": 5.392295478639225e-05, "loss": 0.107, "step": 29 }, { "epoch": 0.3039898670044332, "grad_norm": 1.8195340633392334, "learning_rate": 5e-05, "loss": 0.0558, "step": 30 }, { "epoch": 0.3141228625712476, "grad_norm": 1.8854806423187256, "learning_rate": 4.607704521360776e-05, "loss": 0.3479, "step": 31 }, { "epoch": 0.32425585813806207, "grad_norm": 2.1316421031951904, "learning_rate": 4.2178276747988446e-05, "loss": 0.1326, "step": 32 }, { "epoch": 0.3343888537048765, "grad_norm": 2.0381174087524414, "learning_rate": 3.832773180720475e-05, "loss": 0.3254, "step": 33 }, { "epoch": 0.34452184927169094, "grad_norm": 1.3313711881637573, "learning_rate": 3.4549150281252636e-05, "loss": 0.1059, "step": 34 }, { "epoch": 0.3546548448385054, "grad_norm": 0.8397698998451233, "learning_rate": 3.086582838174551e-05, "loss": 0.0336, "step": 35 }, { "epoch": 0.3647878404053198, "grad_norm": 1.6760809421539307, "learning_rate": 2.7300475013022663e-05, "loss": 0.0713, "step": 36 }, { "epoch": 0.37492083597213427, "grad_norm": 0.7013064622879028, "learning_rate": 2.3875071764202563e-05, "loss": 0.0364, "step": 37 }, { "epoch": 0.3850538315389487, "grad_norm": 0.7092528939247131, "learning_rate": 2.061073738537635e-05, "loss": 0.1465, "step": 38 }, { "epoch": 0.39518682710576314, "grad_norm": 0.9758922457695007, "learning_rate": 1.7527597583490822e-05, "loss": 0.1129, "step": 39 }, { "epoch": 0.39518682710576314, "eval_loss": 0.10418615490198135, "eval_runtime": 3.9348, "eval_samples_per_second": 42.441, "eval_steps_per_second": 21.348, "step": 39 }, { "epoch": 0.4053198226725776, "grad_norm": 1.2004624605178833, "learning_rate": 1.4644660940672627e-05, "loss": 0.1043, "step": 40 }, { "epoch": 0.415452818239392, "grad_norm": 0.6554136872291565, "learning_rate": 1.1979701719998453e-05, "loss": 0.059, "step": 41 }, { "epoch": 0.42558581380620647, "grad_norm": 2.516598701477051, "learning_rate": 9.549150281252633e-06, "loss": 0.2211, "step": 42 }, { "epoch": 0.4357188093730209, "grad_norm": 0.694494366645813, "learning_rate": 7.367991782295391e-06, "loss": 0.0519, "step": 43 }, { "epoch": 0.44585180493983534, "grad_norm": 0.9121596217155457, "learning_rate": 5.449673790581611e-06, "loss": 0.0693, "step": 44 }, { "epoch": 0.4559848005066498, "grad_norm": 0.816668689250946, "learning_rate": 3.8060233744356633e-06, "loss": 0.0688, "step": 45 }, { "epoch": 0.4661177960734642, "grad_norm": 0.7210935354232788, "learning_rate": 2.4471741852423237e-06, "loss": 0.0747, "step": 46 }, { "epoch": 0.47625079164027867, "grad_norm": 0.6753107309341431, "learning_rate": 1.3815039801161721e-06, "loss": 0.0283, "step": 47 }, { "epoch": 0.4863837872070931, "grad_norm": 0.4177698791027069, 
"learning_rate": 6.15582970243117e-07, "loss": 0.0249, "step": 48 }, { "epoch": 0.49651678277390754, "grad_norm": 1.1643296480178833, "learning_rate": 1.5413331334360182e-07, "loss": 0.1529, "step": 49 }, { "epoch": 0.506649778340722, "grad_norm": 1.1032949686050415, "learning_rate": 0.0, "loss": 0.0554, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.2956458467328e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }