{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 2.636066198348999,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.5447,
      "step": 1
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 1.7816333770751953,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.1323,
      "step": 2
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 2.2745041847229004,
      "learning_rate": 1e-05,
      "loss": 1.2092,
      "step": 3
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 2.6683919429779053,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.5569,
      "step": 4
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 2.3222100734710693,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.4135,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 2.067908525466919,
      "learning_rate": 2e-05,
      "loss": 1.2277,
      "step": 6
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 2.5608789920806885,
      "learning_rate": 1.9984815164333163e-05,
      "loss": 1.4595,
      "step": 7
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 2.3394458293914795,
      "learning_rate": 1.9939306773179498e-05,
      "loss": 1.0971,
      "step": 8
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 2.624566078186035,
      "learning_rate": 1.9863613034027224e-05,
      "loss": 1.3249,
      "step": 9
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 3.019270420074463,
      "learning_rate": 1.9757963826274357e-05,
      "loss": 1.1663,
      "step": 10
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 4.040117263793945,
      "learning_rate": 1.9622680003092503e-05,
      "loss": 1.3519,
      "step": 11
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 5.076770782470703,
      "learning_rate": 1.9458172417006347e-05,
      "loss": 1.212,
      "step": 12
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 3.281177282333374,
      "learning_rate": 1.9264940672148018e-05,
      "loss": 0.8725,
      "step": 13
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 3.5001425743103027,
      "learning_rate": 1.9043571606975776e-05,
      "loss": 0.6642,
      "step": 14
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 2.4256069660186768,
      "learning_rate": 1.879473751206489e-05,
      "loss": 0.5313,
      "step": 15
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 2.3582112789154053,
      "learning_rate": 1.851919408838327e-05,
      "loss": 0.73,
      "step": 16
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 1.449530005455017,
      "learning_rate": 1.821777815225245e-05,
      "loss": 0.3238,
      "step": 17
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 2.057817220687866,
      "learning_rate": 1.789140509396394e-05,
      "loss": 0.3367,
      "step": 18
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 1.7016539573669434,
      "learning_rate": 1.7541066097768965e-05,
      "loss": 0.3914,
      "step": 19
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.2818858623504639,
      "learning_rate": 1.7167825131684516e-05,
      "loss": 0.1852,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.9452447891235352,
      "learning_rate": 1.6772815716257414e-05,
      "loss": 0.1722,
      "step": 21
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.5685144662857056,
      "learning_rate": 1.6357237482099682e-05,
      "loss": 0.0953,
      "step": 22
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 0.6435515880584717,
      "learning_rate": 1.5922352526649803e-05,
      "loss": 0.1832,
      "step": 23
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 1.049676537513733,
      "learning_rate": 1.5469481581224274e-05,
      "loss": 0.402,
      "step": 24
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 0.9065538048744202,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.2622,
      "step": 25
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 0.5284762978553772,
      "learning_rate": 1.4515333583108896e-05,
      "loss": 0.1086,
      "step": 26
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.7903798222541809,
      "learning_rate": 1.4016954246529697e-05,
      "loss": 0.0996,
      "step": 27
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.2656748294830322,
      "learning_rate": 1.3506375551927546e-05,
      "loss": 0.0154,
      "step": 28
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 1.171624779701233,
      "learning_rate": 1.2985148110016947e-05,
      "loss": 0.2659,
      "step": 29
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.31690454483032227,
      "learning_rate": 1.2454854871407993e-05,
      "loss": 0.0673,
      "step": 30
    },
    {
      "epoch": 1.4761904761904763,
      "grad_norm": 0.7295234799385071,
      "learning_rate": 1.1917106319237386e-05,
      "loss": 0.2074,
      "step": 31
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.6296242475509644,
      "learning_rate": 1.1373535578184083e-05,
      "loss": 0.1208,
      "step": 32
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 1.0464074611663818,
      "learning_rate": 1.0825793454723325e-05,
      "loss": 0.2525,
      "step": 33
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.4286152422428131,
      "learning_rate": 1.0275543423681622e-05,
      "loss": 0.1035,
      "step": 34
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.5248931646347046,
      "learning_rate": 9.724456576318383e-06,
      "loss": 0.1614,
      "step": 35
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.40034690499305725,
      "learning_rate": 9.174206545276678e-06,
      "loss": 0.1218,
      "step": 36
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.4369080662727356,
      "learning_rate": 8.626464421815919e-06,
      "loss": 0.06,
      "step": 37
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 1.110698938369751,
      "learning_rate": 8.082893680762619e-06,
      "loss": 0.3501,
      "step": 38
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.314057320356369,
      "learning_rate": 7.545145128592009e-06,
      "loss": 0.0499,
      "step": 39
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.9931963086128235,
      "learning_rate": 7.014851889983058e-06,
      "loss": 0.1744,
      "step": 40
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 0.31384727358818054,
      "learning_rate": 6.4936244480724575e-06,
      "loss": 0.0713,
      "step": 41
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.4129275977611542,
      "learning_rate": 5.983045753470308e-06,
      "loss": 0.0776,
      "step": 42
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 0.31003817915916443,
      "learning_rate": 5.484666416891109e-06,
      "loss": 0.043,
      "step": 43
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.5723528265953064,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.0901,
      "step": 44
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.6738455295562744,
      "learning_rate": 4.530518418775734e-06,
      "loss": 0.1227,
      "step": 45
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.5331130027770996,
      "learning_rate": 4.077647473350201e-06,
      "loss": 0.1491,
      "step": 46
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 0.4636727571487427,
      "learning_rate": 3.6427625179003223e-06,
      "loss": 0.0874,
      "step": 47
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.5442101359367371,
      "learning_rate": 3.2271842837425917e-06,
      "loss": 0.1945,
      "step": 48
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.3890051543712616,
      "learning_rate": 2.8321748683154893e-06,
      "loss": 0.1032,
      "step": 49
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.4027661085128784,
      "learning_rate": 2.4589339022310386e-06,
      "loss": 0.1239,
      "step": 50
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.5832112431526184,
      "learning_rate": 2.1085949060360654e-06,
      "loss": 0.1892,
      "step": 51
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.29090118408203125,
      "learning_rate": 1.7822218477475496e-06,
      "loss": 0.0538,
      "step": 52
    },
    {
      "epoch": 2.5238095238095237,
      "grad_norm": 1.1339997053146362,
      "learning_rate": 1.4808059116167306e-06,
      "loss": 0.256,
      "step": 53
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.41949254274368286,
      "learning_rate": 1.2052624879351105e-06,
      "loss": 0.0908,
      "step": 54
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.7024145126342773,
      "learning_rate": 9.564283930242258e-07,
      "loss": 0.1081,
      "step": 55
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.236419215798378,
      "learning_rate": 7.350593278519824e-07,
      "loss": 0.0255,
      "step": 56
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.49600574374198914,
      "learning_rate": 5.418275829936537e-07,
      "loss": 0.1057,
      "step": 57
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.2218826860189438,
      "learning_rate": 3.773199969074959e-07,
      "loss": 0.0306,
      "step": 58
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 0.793448269367218,
      "learning_rate": 2.420361737256438e-07,
      "loss": 0.0991,
      "step": 59
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.2536832094192505,
      "learning_rate": 1.3638696597277678e-07,
      "loss": 0.0382,
      "step": 60
    },
    {
      "epoch": 2.9047619047619047,
      "grad_norm": 0.38577455282211304,
      "learning_rate": 6.069322682050516e-08,
      "loss": 0.0753,
      "step": 61
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.2848428785800934,
      "learning_rate": 1.518483566683826e-08,
      "loss": 0.0502,
      "step": 62
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.2466929852962494,
      "learning_rate": 0.0,
      "loss": 0.032,
      "step": 63
    }
  ],
  "logging_steps": 1,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2094121699246080.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}