{
  "best_metric": 1.3272449970245361,
  "best_model_checkpoint": "data/Gemma-2-2B_task-1_120-samples_config-1/checkpoint-55",
  "epoch": 12.0,
  "eval_steps": 500,
  "global_step": 132,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 2.7654104232788086,
      "learning_rate": 1.818181818181818e-06,
      "loss": 2.4749,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 2.517627000808716,
      "learning_rate": 3.636363636363636e-06,
      "loss": 2.5354,
      "step": 2
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 2.8291656970977783,
      "learning_rate": 7.272727272727272e-06,
      "loss": 2.4329,
      "step": 4
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 2.6653389930725098,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 2.3833,
      "step": 6
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 2.7465708255767822,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 2.3999,
      "step": 8
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 2.671473503112793,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 2.311,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.237502098083496,
      "eval_runtime": 2.2179,
      "eval_samples_per_second": 10.821,
      "eval_steps_per_second": 10.821,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 2.5977914333343506,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 2.2169,
      "step": 12
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 2.3312153816223145,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 2.1782,
      "step": 14
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 2.1294496059417725,
      "learning_rate": 2.909090909090909e-05,
      "loss": 2.0556,
      "step": 16
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 2.0936222076416016,
      "learning_rate": 3.272727272727273e-05,
      "loss": 1.8615,
      "step": 18
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 1.9292643070220947,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 1.7459,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.2698487043380737,
      "learning_rate": 4e-05,
      "loss": 1.6775,
      "step": 22
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.6417160034179688,
      "eval_runtime": 2.1987,
      "eval_samples_per_second": 10.916,
      "eval_steps_per_second": 10.916,
      "step": 22
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 1.4562098979949951,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 1.6336,
      "step": 24
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 1.1218253374099731,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 1.571,
      "step": 26
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 1.04109525680542,
      "learning_rate": 5.090909090909091e-05,
      "loss": 1.4032,
      "step": 28
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 1.1949294805526733,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 1.3756,
      "step": 30
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 1.2722885608673096,
      "learning_rate": 5.818181818181818e-05,
      "loss": 1.4086,
      "step": 32
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.4307798147201538,
      "eval_runtime": 2.2389,
      "eval_samples_per_second": 10.72,
      "eval_steps_per_second": 10.72,
      "step": 33
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 1.1219353675842285,
      "learning_rate": 6.181818181818182e-05,
      "loss": 1.4083,
      "step": 34
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.9904714226722717,
      "learning_rate": 6.545454545454546e-05,
      "loss": 1.3082,
      "step": 36
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 1.2756577730178833,
      "learning_rate": 6.90909090909091e-05,
      "loss": 1.3081,
      "step": 38
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 1.146028995513916,
      "learning_rate": 7.272727272727273e-05,
      "loss": 1.2388,
      "step": 40
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 1.2157866954803467,
      "learning_rate": 7.636363636363637e-05,
      "loss": 1.1917,
      "step": 42
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.1651657819747925,
      "learning_rate": 8e-05,
      "loss": 1.1316,
      "step": 44
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.337244987487793,
      "eval_runtime": 2.1906,
      "eval_samples_per_second": 10.956,
      "eval_steps_per_second": 10.956,
      "step": 44
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 1.262305498123169,
      "learning_rate": 8.363636363636364e-05,
      "loss": 1.0936,
      "step": 46
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 1.339064121246338,
      "learning_rate": 8.727272727272727e-05,
      "loss": 1.0332,
      "step": 48
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 1.416332721710205,
      "learning_rate": 9.090909090909092e-05,
      "loss": 1.0611,
      "step": 50
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 1.3856735229492188,
      "learning_rate": 9.454545454545455e-05,
      "loss": 1.035,
      "step": 52
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 1.427396297454834,
      "learning_rate": 9.818181818181818e-05,
      "loss": 0.9745,
      "step": 54
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.3272449970245361,
      "eval_runtime": 2.2368,
      "eval_samples_per_second": 10.73,
      "eval_steps_per_second": 10.73,
      "step": 55
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 1.4408196210861206,
      "learning_rate": 9.999899300364532e-05,
      "loss": 0.9447,
      "step": 56
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 1.4390223026275635,
      "learning_rate": 9.99909372761763e-05,
      "loss": 0.7821,
      "step": 58
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 1.4396119117736816,
      "learning_rate": 9.997482711915927e-05,
      "loss": 0.7663,
      "step": 60
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 1.6289987564086914,
      "learning_rate": 9.99506651282272e-05,
      "loss": 0.8362,
      "step": 62
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 1.8272826671600342,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.7933,
      "step": 64
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.8998042345046997,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.7135,
      "step": 66
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.3980852365493774,
      "eval_runtime": 2.1963,
      "eval_samples_per_second": 10.927,
      "eval_steps_per_second": 10.927,
      "step": 66
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 1.608075499534607,
      "learning_rate": 9.982991356370404e-05,
      "loss": 0.6363,
      "step": 68
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 1.8586798906326294,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.5902,
      "step": 70
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 1.8311554193496704,
      "learning_rate": 9.970925928158274e-05,
      "loss": 0.4982,
      "step": 72
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 2.392566442489624,
      "learning_rate": 9.963691338830044e-05,
      "loss": 0.548,
      "step": 74
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 2.681428909301758,
      "learning_rate": 9.955657010501806e-05,
      "loss": 0.4228,
      "step": 76
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.7707710266113281,
      "eval_runtime": 2.1916,
      "eval_samples_per_second": 10.951,
      "eval_steps_per_second": 10.951,
      "step": 77
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 1.9553114175796509,
      "learning_rate": 9.946824237646824e-05,
      "loss": 0.3491,
      "step": 78
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 2.017584800720215,
      "learning_rate": 9.937194443381972e-05,
      "loss": 0.3254,
      "step": 80
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 2.6024396419525146,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.2919,
      "step": 82
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 2.637301445007324,
      "learning_rate": 9.915550124911866e-05,
      "loss": 0.2924,
      "step": 84
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 2.9900031089782715,
      "learning_rate": 9.903539087991462e-05,
      "loss": 0.3155,
      "step": 86
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.341743230819702,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.3143,
      "step": 88
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.152059316635132,
      "eval_runtime": 2.1986,
      "eval_samples_per_second": 10.916,
      "eval_steps_per_second": 10.916,
      "step": 88
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 1.73684561252594,
      "learning_rate": 9.877148934427037e-05,
      "loss": 0.1885,
      "step": 90
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 2.3624324798583984,
      "learning_rate": 9.862774069706346e-05,
      "loss": 0.159,
      "step": 92
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 2.342822551727295,
      "learning_rate": 9.847615725553456e-05,
      "loss": 0.123,
      "step": 94
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 2.090047836303711,
      "learning_rate": 9.831676344247342e-05,
      "loss": 0.1231,
      "step": 96
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 3.0720016956329346,
      "learning_rate": 9.814958493905963e-05,
      "loss": 0.1454,
      "step": 98
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.689652442932129,
      "eval_runtime": 2.1925,
      "eval_samples_per_second": 10.947,
      "eval_steps_per_second": 10.947,
      "step": 99
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 2.5275495052337646,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.11,
      "step": 100
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 2.512793779373169,
      "learning_rate": 9.779198285281325e-05,
      "loss": 0.0796,
      "step": 102
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 3.505725383758545,
      "learning_rate": 9.760161688604008e-05,
      "loss": 0.0786,
      "step": 104
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 3.8895909786224365,
      "learning_rate": 9.740358145174998e-05,
      "loss": 0.0944,
      "step": 106
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 2.5016000270843506,
      "learning_rate": 9.719790845697533e-05,
      "loss": 0.0798,
      "step": 108
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.5594393014907837,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0857,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.6891584396362305,
      "eval_runtime": 2.2175,
      "eval_samples_per_second": 10.823,
      "eval_steps_per_second": 10.823,
      "step": 110
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 1.6706209182739258,
      "learning_rate": 9.676378356149734e-05,
      "loss": 0.0373,
      "step": 112
    },
    {
      "epoch": 10.363636363636363,
      "grad_norm": 1.8536970615386963,
      "learning_rate": 9.653540160603956e-05,
      "loss": 0.0545,
      "step": 114
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 2.2033114433288574,
      "learning_rate": 9.629952196931901e-05,
      "loss": 0.0719,
      "step": 116
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 2.138977527618408,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.0587,
      "step": 118
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 3.207634210586548,
      "learning_rate": 9.580542287160348e-05,
      "loss": 0.06,
      "step": 120
    },
    {
      "epoch": 11.0,
      "eval_loss": 2.8942203521728516,
      "eval_runtime": 2.2274,
      "eval_samples_per_second": 10.775,
      "eval_steps_per_second": 10.775,
      "step": 121
    },
    {
      "epoch": 11.090909090909092,
      "grad_norm": 2.0782763957977295,
      "learning_rate": 9.554728301876526e-05,
      "loss": 0.0442,
      "step": 122
    },
    {
      "epoch": 11.272727272727273,
      "grad_norm": 1.5070956945419312,
      "learning_rate": 9.528180468815155e-05,
      "loss": 0.0264,
      "step": 124
    },
    {
      "epoch": 11.454545454545455,
      "grad_norm": 2.6724226474761963,
      "learning_rate": 9.50090306530454e-05,
      "loss": 0.0485,
      "step": 126
    },
    {
      "epoch": 11.636363636363637,
      "grad_norm": 2.257985830307007,
      "learning_rate": 9.472900486219769e-05,
      "loss": 0.0457,
      "step": 128
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 2.384425163269043,
      "learning_rate": 9.444177243274618e-05,
      "loss": 0.0572,
      "step": 130
    },
    {
      "epoch": 12.0,
      "grad_norm": 3.0404624938964844,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0671,
      "step": 132
    },
    {
      "epoch": 12.0,
      "eval_loss": 2.884262800216675,
      "eval_runtime": 2.1852,
      "eval_samples_per_second": 10.983,
      "eval_steps_per_second": 10.983,
      "step": 132
    },
    {
      "epoch": 12.0,
      "step": 132,
      "total_flos": 4216951435952128.0,
      "train_loss": 0.836406874667966,
      "train_runtime": 459.9231,
      "train_samples_per_second": 9.567,
      "train_steps_per_second": 1.196
    }
  ],
  "logging_steps": 2,
  "max_steps": 550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4216951435952128.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}