{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 63,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 2.8511476516723633,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 1.0485,
      "step": 1
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 2.6251747608184814,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 1.0247,
      "step": 2
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 2.595073699951172,
      "learning_rate": 8.571428571428571e-06,
      "loss": 1.0005,
      "step": 3
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 2.1916372776031494,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 0.9607,
      "step": 4
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 1.1861807107925415,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.9474,
      "step": 5
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 1.0186238288879395,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.9091,
      "step": 6
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 1.270335078239441,
      "learning_rate": 2e-05,
      "loss": 0.9359,
      "step": 7
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 1.0229175090789795,
      "learning_rate": 1.998426815017817e-05,
      "loss": 0.9072,
      "step": 8
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.9742695093154907,
      "learning_rate": 1.9937122098932428e-05,
      "loss": 0.9051,
      "step": 9
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 0.7967286109924316,
      "learning_rate": 1.985871018518236e-05,
      "loss": 0.8882,
      "step": 10
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 0.759879469871521,
      "learning_rate": 1.9749279121818235e-05,
      "loss": 0.894,
      "step": 11
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.6994519829750061,
      "learning_rate": 1.9609173219450998e-05,
      "loss": 0.8808,
      "step": 12
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 0.7012657523155212,
      "learning_rate": 1.9438833303083677e-05,
      "loss": 0.8738,
      "step": 13
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.7178092002868652,
      "learning_rate": 1.9238795325112867e-05,
      "loss": 0.9113,
      "step": 14
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.6520066261291504,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.8581,
      "step": 15
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.6900270581245422,
      "learning_rate": 1.8752234219087538e-05,
      "loss": 0.8969,
      "step": 16
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 0.6860277056694031,
      "learning_rate": 1.8467241992282842e-05,
      "loss": 0.906,
      "step": 17
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.6491706371307373,
      "learning_rate": 1.8155608689592604e-05,
      "loss": 0.8859,
      "step": 18
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 0.6634758710861206,
      "learning_rate": 1.78183148246803e-05,
      "loss": 0.8931,
      "step": 19
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.6383916139602661,
      "learning_rate": 1.7456421648831658e-05,
      "loss": 0.8946,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.7505225539207458,
      "learning_rate": 1.7071067811865477e-05,
      "loss": 0.8441,
      "step": 21
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 0.9270818829536438,
      "learning_rate": 1.6663465779520042e-05,
      "loss": 0.6841,
      "step": 22
    },
    {
      "epoch": 1.0952380952380953,
      "grad_norm": 0.9149900674819946,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.6804,
      "step": 23
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.8434793949127197,
      "learning_rate": 1.578671296179806e-05,
      "loss": 0.6712,
      "step": 24
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 0.8060764670372009,
      "learning_rate": 1.5320320765153367e-05,
      "loss": 0.6592,
      "step": 25
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 0.8197386264801025,
      "learning_rate": 1.4837188871052399e-05,
      "loss": 0.6547,
      "step": 26
    },
    {
      "epoch": 1.2857142857142856,
      "grad_norm": 0.7730692625045776,
      "learning_rate": 1.4338837391175582e-05,
      "loss": 0.6555,
      "step": 27
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.7399494647979736,
      "learning_rate": 1.3826834323650899e-05,
      "loss": 0.6326,
      "step": 28
    },
    {
      "epoch": 1.380952380952381,
      "grad_norm": 0.7800103425979614,
      "learning_rate": 1.3302790619551673e-05,
      "loss": 0.6659,
      "step": 29
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.6559262871742249,
      "learning_rate": 1.2768355114248493e-05,
      "loss": 0.6172,
      "step": 30
    },
    {
      "epoch": 1.4761904761904763,
      "grad_norm": 0.7337315678596497,
      "learning_rate": 1.2225209339563144e-05,
      "loss": 0.6394,
      "step": 31
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 0.6704586744308472,
      "learning_rate": 1.1675062233047365e-05,
      "loss": 0.6278,
      "step": 32
    },
    {
      "epoch": 1.5714285714285714,
      "grad_norm": 0.6687719225883484,
      "learning_rate": 1.1119644761033079e-05,
      "loss": 0.6447,
      "step": 33
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.674872100353241,
      "learning_rate": 1.0560704472371919e-05,
      "loss": 0.6293,
      "step": 34
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 0.63092041015625,
      "learning_rate": 1e-05,
      "loss": 0.6135,
      "step": 35
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 0.6361706256866455,
      "learning_rate": 9.439295527628083e-06,
      "loss": 0.5951,
      "step": 36
    },
    {
      "epoch": 1.7619047619047619,
      "grad_norm": 0.6201922297477722,
      "learning_rate": 8.880355238966923e-06,
      "loss": 0.6291,
      "step": 37
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.6166723370552063,
      "learning_rate": 8.324937766952638e-06,
      "loss": 0.6515,
      "step": 38
    },
    {
      "epoch": 1.8571428571428572,
      "grad_norm": 0.6040306687355042,
      "learning_rate": 7.774790660436857e-06,
      "loss": 0.6229,
      "step": 39
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.5973318815231323,
      "learning_rate": 7.2316448857515076e-06,
      "loss": 0.634,
      "step": 40
    },
    {
      "epoch": 1.9523809523809523,
      "grad_norm": 0.6414107084274292,
      "learning_rate": 6.697209380448333e-06,
      "loss": 0.6351,
      "step": 41
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.7759326696395874,
      "learning_rate": 6.173165676349103e-06,
      "loss": 0.5002,
      "step": 42
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 0.9337152242660522,
      "learning_rate": 5.66116260882442e-06,
      "loss": 0.4895,
      "step": 43
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.6880698204040527,
      "learning_rate": 5.1628111289476025e-06,
      "loss": 0.4941,
      "step": 44
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.7491559386253357,
      "learning_rate": 4.679679234846636e-06,
      "loss": 0.4848,
      "step": 45
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.9208627939224243,
      "learning_rate": 4.213287038201943e-06,
      "loss": 0.4795,
      "step": 46
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 0.8678593039512634,
      "learning_rate": 3.7651019814126656e-06,
      "loss": 0.4889,
      "step": 47
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.680047869682312,
      "learning_rate": 3.3365342204799613e-06,
      "loss": 0.4689,
      "step": 48
    },
    {
      "epoch": 2.3333333333333335,
      "grad_norm": 0.6818625926971436,
      "learning_rate": 2.9289321881345257e-06,
      "loss": 0.4845,
      "step": 49
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.691443145275116,
      "learning_rate": 2.5435783511683444e-06,
      "loss": 0.4718,
      "step": 50
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 0.6897311806678772,
      "learning_rate": 2.1816851753197023e-06,
      "loss": 0.4781,
      "step": 51
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.6840229034423828,
      "learning_rate": 1.8443913104073984e-06,
      "loss": 0.459,
      "step": 52
    },
    {
      "epoch": 2.5238095238095237,
      "grad_norm": 0.6606849431991577,
      "learning_rate": 1.5327580077171589e-06,
      "loss": 0.4772,
      "step": 53
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.6099680066108704,
      "learning_rate": 1.2477657809124632e-06,
      "loss": 0.4674,
      "step": 54
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.6196358799934387,
      "learning_rate": 9.903113209758098e-07,
      "loss": 0.4697,
      "step": 55
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.6188323497772217,
      "learning_rate": 7.612046748871327e-07,
      "loss": 0.4661,
      "step": 56
    },
    {
      "epoch": 2.7142857142857144,
      "grad_norm": 0.5998156070709229,
      "learning_rate": 5.611666969163243e-07,
      "loss": 0.4517,
      "step": 57
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.6303263902664185,
      "learning_rate": 3.908267805490051e-07,
      "loss": 0.4688,
      "step": 58
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 0.61989825963974,
      "learning_rate": 2.507208781817638e-07,
      "loss": 0.4674,
      "step": 59
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.639975905418396,
      "learning_rate": 1.4128981481764115e-07,
      "loss": 0.4591,
      "step": 60
    },
    {
      "epoch": 2.9047619047619047,
      "grad_norm": 0.6036660671234131,
      "learning_rate": 6.287790106757396e-08,
      "loss": 0.4704,
      "step": 61
    },
    {
      "epoch": 2.9523809523809526,
      "grad_norm": 0.6197323203086853,
      "learning_rate": 1.5731849821833955e-08,
      "loss": 0.4716,
      "step": 62
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.6455105543136597,
      "learning_rate": 0.0,
      "loss": 0.4279,
      "step": 63
    },
    {
      "epoch": 3.0,
      "step": 63,
      "total_flos": 3.895140077000458e+17,
      "train_loss": 0.674688827896875,
      "train_runtime": 982.3694,
      "train_samples_per_second": 15.88,
      "train_steps_per_second": 0.064
    }
  ],
  "logging_steps": 1,
  "max_steps": 63,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.895140077000458e+17,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}