{
  "best_metric": 0.08122972398996353,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.05336535254486025,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00071153803393147,
      "grad_norm": 4.1395392417907715,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.1572,
      "step": 1
    },
    {
      "epoch": 0.00071153803393147,
      "eval_loss": 4.378438949584961,
      "eval_runtime": 3.4572,
      "eval_samples_per_second": 14.463,
      "eval_steps_per_second": 3.76,
      "step": 1
    },
    {
      "epoch": 0.00142307606786294,
      "grad_norm": 4.174035549163818,
      "learning_rate": 6.666666666666667e-05,
      "loss": 3.5344,
      "step": 2
    },
    {
      "epoch": 0.00213461410179441,
      "grad_norm": 4.1275458335876465,
      "learning_rate": 0.0001,
      "loss": 3.6334,
      "step": 3
    },
    {
      "epoch": 0.00284615213572588,
      "grad_norm": 3.8864400386810303,
      "learning_rate": 9.99571699711836e-05,
      "loss": 3.1984,
      "step": 4
    },
    {
      "epoch": 0.00355769016965735,
      "grad_norm": 5.862198829650879,
      "learning_rate": 9.982876141412856e-05,
      "loss": 2.5223,
      "step": 5
    },
    {
      "epoch": 0.00426922820358882,
      "grad_norm": 4.417128562927246,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.8482,
      "step": 6
    },
    {
      "epoch": 0.00498076623752029,
      "grad_norm": 3.6660585403442383,
      "learning_rate": 9.931634888554937e-05,
      "loss": 1.3222,
      "step": 7
    },
    {
      "epoch": 0.00569230427145176,
      "grad_norm": 2.8245842456817627,
      "learning_rate": 9.893332032039701e-05,
      "loss": 0.9788,
      "step": 8
    },
    {
      "epoch": 0.00640384230538323,
      "grad_norm": 2.523778200149536,
      "learning_rate": 9.846666218300807e-05,
      "loss": 0.7001,
      "step": 9
    },
    {
      "epoch": 0.0071153803393147,
      "grad_norm": 2.617717742919922,
      "learning_rate": 9.791726278367022e-05,
      "loss": 0.5022,
      "step": 10
    },
    {
      "epoch": 0.00782691837324617,
      "grad_norm": 2.721231698989868,
      "learning_rate": 9.728616793536588e-05,
      "loss": 0.4812,
      "step": 11
    },
    {
      "epoch": 0.00853845640717764,
      "grad_norm": 2.917170524597168,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.407,
      "step": 12
    },
    {
      "epoch": 0.00924999444110911,
      "grad_norm": 4.139233112335205,
      "learning_rate": 9.578385041664925e-05,
      "loss": 0.965,
      "step": 13
    },
    {
      "epoch": 0.00996153247504058,
      "grad_norm": 3.3612873554229736,
      "learning_rate": 9.491548749301997e-05,
      "loss": 0.7595,
      "step": 14
    },
    {
      "epoch": 0.01067307050897205,
      "grad_norm": 3.3063018321990967,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.5902,
      "step": 15
    },
    {
      "epoch": 0.01138460854290352,
      "grad_norm": 3.0980429649353027,
      "learning_rate": 9.295261506157986e-05,
      "loss": 0.4998,
      "step": 16
    },
    {
      "epoch": 0.01209614657683499,
      "grad_norm": 1.7593947649002075,
      "learning_rate": 9.186184199300464e-05,
      "loss": 0.3775,
      "step": 17
    },
    {
      "epoch": 0.01280768461076646,
      "grad_norm": 0.964061439037323,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.3036,
      "step": 18
    },
    {
      "epoch": 0.01351922264469793,
      "grad_norm": 1.0408120155334473,
      "learning_rate": 8.947199994035401e-05,
      "loss": 0.2508,
      "step": 19
    },
    {
      "epoch": 0.0142307606786294,
      "grad_norm": 0.7718386650085449,
      "learning_rate": 8.817748015645558e-05,
      "loss": 0.1979,
      "step": 20
    },
    {
      "epoch": 0.01494229871256087,
      "grad_norm": 1.073320746421814,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.1971,
      "step": 21
    },
    {
      "epoch": 0.01565383674649234,
      "grad_norm": 0.6882893443107605,
      "learning_rate": 8.540155934270471e-05,
      "loss": 0.1843,
      "step": 22
    },
    {
      "epoch": 0.01636537478042381,
      "grad_norm": 0.8627312183380127,
      "learning_rate": 8.392544243589427e-05,
      "loss": 0.1888,
      "step": 23
    },
    {
      "epoch": 0.01707691281435528,
      "grad_norm": 0.8838256597518921,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.1508,
      "step": 24
    },
    {
      "epoch": 0.01778845084828675,
      "grad_norm": 1.3713014125823975,
      "learning_rate": 8.081093963579707e-05,
      "loss": 0.1561,
      "step": 25
    },
    {
      "epoch": 0.01778845084828675,
      "eval_loss": 0.30335336923599243,
      "eval_runtime": 3.496,
      "eval_samples_per_second": 14.302,
      "eval_steps_per_second": 3.719,
      "step": 25
    },
    {
      "epoch": 0.01849998888221822,
      "grad_norm": 2.975804090499878,
      "learning_rate": 7.917848237560709e-05,
      "loss": 0.5658,
      "step": 26
    },
    {
      "epoch": 0.01921152691614969,
      "grad_norm": 2.311894416809082,
      "learning_rate": 7.75e-05,
      "loss": 0.429,
      "step": 27
    },
    {
      "epoch": 0.01992306495008116,
      "grad_norm": 1.3672640323638916,
      "learning_rate": 7.577868759557654e-05,
      "loss": 0.2389,
      "step": 28
    },
    {
      "epoch": 0.02063460298401263,
      "grad_norm": 0.7648161053657532,
      "learning_rate": 7.401782177833148e-05,
      "loss": 0.2071,
      "step": 29
    },
    {
      "epoch": 0.0213461410179441,
      "grad_norm": 1.1158180236816406,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.1953,
      "step": 30
    },
    {
      "epoch": 0.02205767905187557,
      "grad_norm": 1.5425399541854858,
      "learning_rate": 7.03909064496551e-05,
      "loss": 0.1899,
      "step": 31
    },
    {
      "epoch": 0.02276921708580704,
      "grad_norm": 1.3230940103530884,
      "learning_rate": 6.853176097769229e-05,
      "loss": 0.131,
      "step": 32
    },
    {
      "epoch": 0.02348075511973851,
      "grad_norm": 0.7612606287002563,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.112,
      "step": 33
    },
    {
      "epoch": 0.02419229315366998,
      "grad_norm": 0.4409436285495758,
      "learning_rate": 6.473978262721463e-05,
      "loss": 0.107,
      "step": 34
    },
    {
      "epoch": 0.02490383118760145,
      "grad_norm": 0.4337151050567627,
      "learning_rate": 6.281416799501188e-05,
      "loss": 0.088,
      "step": 35
    },
    {
      "epoch": 0.02561536922153292,
      "grad_norm": 0.5711820125579834,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.1323,
      "step": 36
    },
    {
      "epoch": 0.02632690725546439,
      "grad_norm": 0.6534641981124878,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 0.1032,
      "step": 37
    },
    {
      "epoch": 0.02703844528939586,
      "grad_norm": 1.1616266965866089,
      "learning_rate": 5.696287243144013e-05,
      "loss": 0.3308,
      "step": 38
    },
    {
      "epoch": 0.02774998332332733,
      "grad_norm": 1.08228600025177,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.2675,
      "step": 39
    },
    {
      "epoch": 0.0284615213572588,
      "grad_norm": 0.8149510025978088,
      "learning_rate": 5.303712756855988e-05,
      "loss": 0.177,
      "step": 40
    },
    {
      "epoch": 0.02917305939119027,
      "grad_norm": 0.7890299558639526,
      "learning_rate": 5.107799157635538e-05,
      "loss": 0.1581,
      "step": 41
    },
    {
      "epoch": 0.02988459742512174,
      "grad_norm": 0.46428173780441284,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.1306,
      "step": 42
    },
    {
      "epoch": 0.03059613545905321,
      "grad_norm": 0.4688973128795624,
      "learning_rate": 4.718583200498814e-05,
      "loss": 0.0909,
      "step": 43
    },
    {
      "epoch": 0.03130767349298468,
      "grad_norm": 0.5706167817115784,
      "learning_rate": 4.526021737278538e-05,
      "loss": 0.0766,
      "step": 44
    },
    {
      "epoch": 0.03201921152691615,
      "grad_norm": 0.7713613510131836,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.093,
      "step": 45
    },
    {
      "epoch": 0.03273074956084762,
      "grad_norm": 0.7292242646217346,
      "learning_rate": 4.146823902230772e-05,
      "loss": 0.0818,
      "step": 46
    },
    {
      "epoch": 0.03344228759477909,
      "grad_norm": 0.745477557182312,
      "learning_rate": 3.960909355034491e-05,
      "loss": 0.0861,
      "step": 47
    },
    {
      "epoch": 0.03415382562871056,
      "grad_norm": 0.8162257671356201,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.1041,
      "step": 48
    },
    {
      "epoch": 0.03486536366264203,
      "grad_norm": 0.8203452229499817,
      "learning_rate": 3.598217822166854e-05,
      "loss": 0.0862,
      "step": 49
    },
    {
      "epoch": 0.0355769016965735,
      "grad_norm": 0.8991295695304871,
      "learning_rate": 3.422131240442349e-05,
      "loss": 0.0849,
      "step": 50
    },
    {
      "epoch": 0.0355769016965735,
      "eval_loss": 0.12278789281845093,
      "eval_runtime": 3.5088,
      "eval_samples_per_second": 14.25,
      "eval_steps_per_second": 3.705,
      "step": 50
    },
    {
      "epoch": 0.03628843973050497,
      "grad_norm": 1.0250176191329956,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.2113,
      "step": 51
    },
    {
      "epoch": 0.03699997776443644,
      "grad_norm": 0.8808900713920593,
      "learning_rate": 3.082151762439293e-05,
      "loss": 0.1611,
      "step": 52
    },
    {
      "epoch": 0.03771151579836791,
      "grad_norm": 0.8555270433425903,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 0.127,
      "step": 53
    },
    {
      "epoch": 0.03842305383229938,
      "grad_norm": 0.9593107104301453,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.1143,
      "step": 54
    },
    {
      "epoch": 0.03913459186623085,
      "grad_norm": 0.8327104449272156,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 0.0989,
      "step": 55
    },
    {
      "epoch": 0.03984612990016232,
      "grad_norm": 0.6464086174964905,
      "learning_rate": 2.459844065729529e-05,
      "loss": 0.0817,
      "step": 56
    },
    {
      "epoch": 0.04055766793409379,
      "grad_norm": 0.49059954285621643,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.0748,
      "step": 57
    },
    {
      "epoch": 0.04126920596802526,
      "grad_norm": 0.3070560097694397,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 0.0581,
      "step": 58
    },
    {
      "epoch": 0.04198074400195673,
      "grad_norm": 0.3088557720184326,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 0.051,
      "step": 59
    },
    {
      "epoch": 0.0426922820358882,
      "grad_norm": 0.3666839301586151,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.0588,
      "step": 60
    },
    {
      "epoch": 0.04340382006981967,
      "grad_norm": 0.4506279230117798,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 0.0734,
      "step": 61
    },
    {
      "epoch": 0.04411535810375114,
      "grad_norm": 0.6894549131393433,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 0.0656,
      "step": 62
    },
    {
      "epoch": 0.044826896137682606,
      "grad_norm": 2.1693735122680664,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.1742,
      "step": 63
    },
    {
      "epoch": 0.04553843417161408,
      "grad_norm": 0.7992150783538818,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 0.1506,
      "step": 64
    },
    {
      "epoch": 0.046249972205545546,
      "grad_norm": 0.5185897350311279,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 0.113,
      "step": 65
    },
    {
      "epoch": 0.04696151023947702,
      "grad_norm": 0.43015819787979126,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.0864,
      "step": 66
    },
    {
      "epoch": 0.04767304827340849,
      "grad_norm": 0.38855844736099243,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 0.0895,
      "step": 67
    },
    {
      "epoch": 0.04838458630733996,
      "grad_norm": 0.2907514274120331,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 0.0589,
      "step": 68
    },
    {
      "epoch": 0.04909612434127143,
      "grad_norm": 0.34849977493286133,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.0473,
      "step": 69
    },
    {
      "epoch": 0.0498076623752029,
      "grad_norm": 0.2817358672618866,
      "learning_rate": 1.1066679679603e-05,
      "loss": 0.0527,
      "step": 70
    },
    {
      "epoch": 0.05051920040913437,
      "grad_norm": 0.3551785349845886,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 0.0596,
      "step": 71
    },
    {
      "epoch": 0.05123073844306584,
      "grad_norm": 0.3679645359516144,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.0543,
      "step": 72
    },
    {
      "epoch": 0.05194227647699731,
      "grad_norm": 0.4032980799674988,
      "learning_rate": 1.017123858587145e-05,
      "loss": 0.0499,
      "step": 73
    },
    {
      "epoch": 0.05265381451092878,
      "grad_norm": 0.48960503935813904,
      "learning_rate": 1.00428300288164e-05,
      "loss": 0.0701,
      "step": 74
    },
    {
      "epoch": 0.05336535254486025,
      "grad_norm": 0.8739669322967529,
      "learning_rate": 1e-05,
      "loss": 0.0798,
      "step": 75
    },
    {
      "epoch": 0.05336535254486025,
      "eval_loss": 0.08122972398996353,
      "eval_runtime": 3.517,
      "eval_samples_per_second": 14.217,
      "eval_steps_per_second": 3.696,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.952125425975296e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}