|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.036697247706422,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.00019919156561236823,
      "loss": 2.1538,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019835812809934576,
      "loss": 2.1787,
      "step": 2
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0001975246905863233,
      "loss": 1.769,
      "step": 3
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019669125307330082,
      "loss": 1.4897,
      "step": 4
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019585781556027837,
      "loss": 1.3456,
      "step": 5
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001950243780472559,
      "loss": 1.3128,
      "step": 6
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019419094053423346,
      "loss": 1.1232,
      "step": 7
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000193357503021211,
      "loss": 1.0925,
      "step": 8
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019252406550818854,
      "loss": 0.9432,
      "step": 9
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019169062799516607,
      "loss": 1.1487,
      "step": 10
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001908571904821436,
      "loss": 0.8783,
      "step": 11
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019002375296912116,
      "loss": 1.0987,
      "step": 12
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018919031545609868,
      "loss": 1.1263,
      "step": 13
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018835687794307624,
      "loss": 1.0026,
      "step": 14
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018752344043005377,
      "loss": 0.9925,
      "step": 15
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018669000291703133,
      "loss": 0.9272,
      "step": 16
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00018585656540400885,
      "loss": 1.1142,
      "step": 17
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018502312789098638,
      "loss": 0.8863,
      "step": 18
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001841896903779639,
      "loss": 1.4011,
      "step": 19
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00018335625286494147,
      "loss": 1.1558,
      "step": 20
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.000182522815351919,
      "loss": 1.1528,
      "step": 21
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00018168937783889655,
      "loss": 0.9246,
      "step": 22
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00018085594032587408,
      "loss": 0.8869,
      "step": 23
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00018002250281285164,
      "loss": 0.9948,
      "step": 24
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00017918906529982914,
      "loss": 1.1422,
      "step": 25
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001783556277868067,
      "loss": 1.0643,
      "step": 26
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00017752219027378422,
      "loss": 1.0188,
      "step": 27
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00017668875276076178,
      "loss": 1.2852,
      "step": 28
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001758553152477393,
      "loss": 1.1224,
      "step": 29
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00017502187773471686,
      "loss": 0.9911,
      "step": 30
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0001741884402216944,
      "loss": 1.0483,
      "step": 31
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00017335500270867192,
      "loss": 1.0113,
      "step": 32
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00017252156519564945,
      "loss": 0.9042,
      "step": 33
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.000171688127682627,
      "loss": 0.747,
      "step": 34
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00017085469016960453,
      "loss": 1.0973,
      "step": 35
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0001700212526565821,
      "loss": 1.2896,
      "step": 36
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00016918781514355962,
      "loss": 0.7344,
      "step": 37
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00016835437763053717,
      "loss": 1.1291,
      "step": 38
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001675209401175147,
      "loss": 0.9825,
      "step": 39
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00016668750260449223,
      "loss": 1.1524,
      "step": 40
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00016585406509146976,
      "loss": 0.8895,
      "step": 41
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00016502062757844731,
      "loss": 0.8906,
      "step": 42
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016418719006542484,
      "loss": 0.9501,
      "step": 43
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0001633537525524024,
      "loss": 1.215,
      "step": 44
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00016252031503937993,
      "loss": 1.1934,
      "step": 45
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00016168687752635748,
      "loss": 1.138,
      "step": 46
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.000160853440013335,
      "loss": 1.147,
      "step": 47
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00016002000250031254,
      "loss": 1.1176,
      "step": 48
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015918656498729007,
      "loss": 1.2923,
      "step": 49
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015835312747426763,
      "loss": 0.6327,
      "step": 50
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00015751968996124515,
      "loss": 1.1538,
      "step": 51
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0001566862524482227,
      "loss": 0.9916,
      "step": 52
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015585281493520024,
      "loss": 1.088,
      "step": 53
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015501937742217777,
      "loss": 1.1032,
      "step": 54
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00015418593990915532,
      "loss": 0.9807,
      "step": 55
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00015335250239613285,
      "loss": 0.992,
      "step": 56
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0001525190648831104,
      "loss": 0.8244,
      "step": 57
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00015168562737008794,
      "loss": 0.6069,
      "step": 58
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001508521898570655,
      "loss": 0.5618,
      "step": 59
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00015001875234404302,
      "loss": 0.7973,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00014918531483102055,
      "loss": 0.7905,
      "step": 61
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00014835187731799808,
      "loss": 0.557,
      "step": 62
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00014751843980497563,
      "loss": 0.5413,
      "step": 63
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014668500229195316,
      "loss": 0.5814,
      "step": 64
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00014585156477893072,
      "loss": 0.4426,
      "step": 65
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00014501812726590825,
      "loss": 0.962,
      "step": 66
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0001441846897528858,
      "loss": 0.6668,
      "step": 67
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00014335125223986333,
      "loss": 0.5324,
      "step": 68
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00014251781472684086,
      "loss": 0.5355,
      "step": 69
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001416843772138184,
      "loss": 0.8187,
      "step": 70
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00014085093970079594,
      "loss": 0.8092,
      "step": 71
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00014001750218777347,
      "loss": 0.5255,
      "step": 72
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00013918406467475103,
      "loss": 0.4591,
      "step": 73
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00013835062716172856,
      "loss": 0.9451,
      "step": 74
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00013751718964870611,
      "loss": 1.0429,
      "step": 75
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00013668375213568364,
      "loss": 0.9241,
      "step": 76
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00013585031462266117,
      "loss": 0.8724,
      "step": 77
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.0001350168771096387,
      "loss": 0.5596,
      "step": 78
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00013418343959661626,
      "loss": 0.8192,
      "step": 79
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00013335000208359378,
      "loss": 0.9415,
      "step": 80
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00013251656457057134,
      "loss": 0.739,
      "step": 81
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00013168312705754887,
      "loss": 0.8788,
      "step": 82
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0001308496895445264,
      "loss": 0.6318,
      "step": 83
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00013001625203150393,
      "loss": 0.5367,
      "step": 84
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.00012918281451848148,
      "loss": 0.7263,
      "step": 85
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.000128349377005459,
      "loss": 0.9614,
      "step": 86
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00012751593949243657,
      "loss": 0.8175,
      "step": 87
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.0001266825019794141,
      "loss": 0.7122,
      "step": 88
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00012584906446639165,
      "loss": 0.5881,
      "step": 89
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00012501562695336918,
      "loss": 0.7257,
      "step": 90
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.0001241821894403467,
      "loss": 0.7989,
      "step": 91
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00012334875192732424,
      "loss": 0.6186,
      "step": 92
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.0001225153144143018,
      "loss": 0.3911,
      "step": 93
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00012168187690127932,
      "loss": 0.5717,
      "step": 94
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00012084843938825688,
      "loss": 0.6305,
      "step": 95
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.0001200150018752344,
      "loss": 0.6012,
      "step": 96
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00011918156436221195,
      "loss": 0.7495,
      "step": 97
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00011834812684918948,
      "loss": 0.7502,
      "step": 98
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00011751468933616703,
      "loss": 0.85,
      "step": 99
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00011668125182314456,
      "loss": 0.604,
      "step": 100
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001158478143101221,
      "loss": 0.9405,
      "step": 101
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00011501437679709966,
      "loss": 0.4737,
      "step": 102
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00011418093928407719,
      "loss": 0.8025,
      "step": 103
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00011334750177105473,
      "loss": 0.7759,
      "step": 104
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00011251406425803226,
      "loss": 0.7529,
      "step": 105
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00011168062674500981,
      "loss": 0.6858,
      "step": 106
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00011084718923198733,
      "loss": 0.8112,
      "step": 107
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00011001375171896489,
      "loss": 0.6097,
      "step": 108
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00010918031420594241,
      "loss": 0.7296,
      "step": 109
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00010834687669291996,
      "loss": 0.5612,
      "step": 110
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.00010751343917989749,
      "loss": 0.4,
      "step": 111
    },
    {
      "epoch": 2.06,
      "learning_rate": 0.00010668000166687504,
      "loss": 0.3471,
      "step": 112
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.00010584656415385257,
      "loss": 0.5078,
      "step": 113
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00010501312664083011,
      "loss": 0.2617,
      "step": 114
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.00010417968912780764,
      "loss": 0.4909,
      "step": 115
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.0001033462516147852,
      "loss": 0.4184,
      "step": 116
    },
    {
      "epoch": 2.15,
      "learning_rate": 0.00010251281410176273,
      "loss": 0.2669,
      "step": 117
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.00010167937658874027,
      "loss": 0.3185,
      "step": 118
    },
    {
      "epoch": 2.18,
      "learning_rate": 0.0001008459390757178,
      "loss": 0.4198,
      "step": 119
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.00010001250156269535,
      "loss": 0.3946,
      "step": 120
    },
    {
      "epoch": 2.22,
      "learning_rate": 9.917906404967288e-05,
      "loss": 0.3378,
      "step": 121
    },
    {
      "epoch": 2.24,
      "learning_rate": 9.834562653665041e-05,
      "loss": 0.287,
      "step": 122
    },
    {
      "epoch": 2.26,
      "learning_rate": 9.751218902362795e-05,
      "loss": 0.2577,
      "step": 123
    },
    {
      "epoch": 2.28,
      "learning_rate": 9.66787515106055e-05,
      "loss": 0.3326,
      "step": 124
    },
    {
      "epoch": 2.29,
      "learning_rate": 9.584531399758304e-05,
      "loss": 0.3724,
      "step": 125
    },
    {
      "epoch": 2.31,
      "learning_rate": 9.501187648456058e-05,
      "loss": 0.1924,
      "step": 126
    },
    {
      "epoch": 2.33,
      "learning_rate": 9.417843897153812e-05,
      "loss": 0.2683,
      "step": 127
    },
    {
      "epoch": 2.35,
      "learning_rate": 9.334500145851566e-05,
      "loss": 0.368,
      "step": 128
    },
    {
      "epoch": 2.37,
      "learning_rate": 9.251156394549319e-05,
      "loss": 0.3479,
      "step": 129
    },
    {
      "epoch": 2.39,
      "learning_rate": 9.167812643247073e-05,
      "loss": 0.4842,
      "step": 130
    },
    {
      "epoch": 2.4,
      "learning_rate": 9.084468891944828e-05,
      "loss": 0.404,
      "step": 131
    },
    {
      "epoch": 2.42,
      "learning_rate": 9.001125140642582e-05,
      "loss": 0.5347,
      "step": 132
    },
    {
      "epoch": 2.44,
      "learning_rate": 8.917781389340335e-05,
      "loss": 0.1655,
      "step": 133
    },
    {
      "epoch": 2.46,
      "learning_rate": 8.834437638038089e-05,
      "loss": 0.3897,
      "step": 134
    },
    {
      "epoch": 2.48,
      "learning_rate": 8.751093886735843e-05,
      "loss": 0.4423,
      "step": 135
    },
    {
      "epoch": 2.5,
      "learning_rate": 8.667750135433596e-05,
      "loss": 0.5997,
      "step": 136
    },
    {
      "epoch": 2.51,
      "learning_rate": 8.58440638413135e-05,
      "loss": 0.4646,
      "step": 137
    },
    {
      "epoch": 2.53,
      "learning_rate": 8.501062632829104e-05,
      "loss": 0.3572,
      "step": 138
    },
    {
      "epoch": 2.55,
      "learning_rate": 8.417718881526859e-05,
      "loss": 0.1949,
      "step": 139
    },
    {
      "epoch": 2.57,
      "learning_rate": 8.334375130224612e-05,
      "loss": 0.2445,
      "step": 140
    },
    {
      "epoch": 2.59,
      "learning_rate": 8.251031378922366e-05,
      "loss": 0.1282,
      "step": 141
    },
    {
      "epoch": 2.61,
      "learning_rate": 8.16768762762012e-05,
      "loss": 0.3019,
      "step": 142
    },
    {
      "epoch": 2.62,
      "learning_rate": 8.084343876317874e-05,
      "loss": 0.5425,
      "step": 143
    },
    {
      "epoch": 2.64,
      "learning_rate": 8.001000125015627e-05,
      "loss": 0.2889,
      "step": 144
    },
    {
      "epoch": 2.66,
      "learning_rate": 7.917656373713381e-05,
      "loss": 0.5389,
      "step": 145
    },
    {
      "epoch": 2.68,
      "learning_rate": 7.834312622411136e-05,
      "loss": 0.3105,
      "step": 146
    },
    {
      "epoch": 2.7,
      "learning_rate": 7.750968871108888e-05,
      "loss": 0.2775,
      "step": 147
    },
    {
      "epoch": 2.72,
      "learning_rate": 7.667625119806643e-05,
      "loss": 0.2566,
      "step": 148
    },
    {
      "epoch": 2.73,
      "learning_rate": 7.584281368504397e-05,
      "loss": 0.4733,
      "step": 149
    },
    {
      "epoch": 2.75,
      "learning_rate": 7.500937617202151e-05,
      "loss": 0.5824,
      "step": 150
    },
    {
      "epoch": 2.77,
      "learning_rate": 7.417593865899904e-05,
      "loss": 0.4396,
      "step": 151
    },
    {
      "epoch": 2.79,
      "learning_rate": 7.334250114597658e-05,
      "loss": 0.3624,
      "step": 152
    },
    {
      "epoch": 2.81,
      "learning_rate": 7.250906363295412e-05,
      "loss": 0.3028,
      "step": 153
    },
    {
      "epoch": 2.83,
      "learning_rate": 7.167562611993167e-05,
      "loss": 0.4637,
      "step": 154
    },
    {
      "epoch": 2.84,
      "learning_rate": 7.08421886069092e-05,
      "loss": 0.4665,
      "step": 155
    },
    {
      "epoch": 2.86,
      "learning_rate": 7.000875109388674e-05,
      "loss": 0.395,
      "step": 156
    },
    {
      "epoch": 2.88,
      "learning_rate": 6.917531358086428e-05,
      "loss": 0.349,
      "step": 157
    },
    {
      "epoch": 2.9,
      "learning_rate": 6.834187606784182e-05,
      "loss": 0.308,
      "step": 158
    },
    {
      "epoch": 2.92,
      "learning_rate": 6.750843855481935e-05,
      "loss": 0.3379,
      "step": 159
    },
    {
      "epoch": 2.94,
      "learning_rate": 6.667500104179689e-05,
      "loss": 0.3393,
      "step": 160
    },
    {
      "epoch": 2.95,
      "learning_rate": 6.584156352877443e-05,
      "loss": 0.265,
      "step": 161
    },
    {
      "epoch": 2.97,
      "learning_rate": 6.500812601575196e-05,
      "loss": 0.3515,
      "step": 162
    },
    {
      "epoch": 2.99,
      "learning_rate": 6.41746885027295e-05,
      "loss": 0.3806,
      "step": 163
    },
    {
      "epoch": 3.01,
      "learning_rate": 6.334125098970705e-05,
      "loss": 0.1495,
      "step": 164
    },
    {
      "epoch": 3.03,
      "learning_rate": 6.250781347668459e-05,
      "loss": 0.1339,
      "step": 165
    },
    {
      "epoch": 3.05,
      "learning_rate": 6.167437596366212e-05,
      "loss": 0.1661,
      "step": 166
    },
    {
      "epoch": 3.06,
      "learning_rate": 6.084093845063966e-05,
      "loss": 0.1169,
      "step": 167
    },
    {
      "epoch": 3.08,
      "learning_rate": 6.00075009376172e-05,
      "loss": 0.1587,
      "step": 168
    },
    {
      "epoch": 3.1,
      "learning_rate": 5.917406342459474e-05,
      "loss": 0.1224,
      "step": 169
    },
    {
      "epoch": 3.12,
      "learning_rate": 5.834062591157228e-05,
      "loss": 0.1495,
      "step": 170
    },
    {
      "epoch": 3.14,
      "learning_rate": 5.750718839854983e-05,
      "loss": 0.1137,
      "step": 171
    },
    {
      "epoch": 3.16,
      "learning_rate": 5.6673750885527365e-05,
      "loss": 0.1997,
      "step": 172
    },
    {
      "epoch": 3.17,
      "learning_rate": 5.584031337250491e-05,
      "loss": 0.1012,
      "step": 173
    },
    {
      "epoch": 3.19,
      "learning_rate": 5.500687585948244e-05,
      "loss": 0.0897,
      "step": 174
    },
    {
      "epoch": 3.21,
      "learning_rate": 5.417343834645998e-05,
      "loss": 0.0843,
      "step": 175
    },
    {
      "epoch": 3.23,
      "learning_rate": 5.334000083343752e-05,
      "loss": 0.1603,
      "step": 176
    },
    {
      "epoch": 3.25,
      "learning_rate": 5.2506563320415056e-05,
      "loss": 0.1169,
      "step": 177
    },
    {
      "epoch": 3.27,
      "learning_rate": 5.16731258073926e-05,
      "loss": 0.0993,
      "step": 178
    },
    {
      "epoch": 3.28,
      "learning_rate": 5.0839688294370134e-05,
      "loss": 0.1018,
      "step": 179
    },
    {
      "epoch": 3.3,
      "learning_rate": 5.0006250781347676e-05,
      "loss": 0.1279,
      "step": 180
    },
    {
      "epoch": 3.32,
      "learning_rate": 4.9172813268325205e-05,
      "loss": 0.27,
      "step": 181
    },
    {
      "epoch": 3.34,
      "learning_rate": 4.833937575530275e-05,
      "loss": 0.2169,
      "step": 182
    },
    {
      "epoch": 3.36,
      "learning_rate": 4.750593824228029e-05,
      "loss": 0.0919,
      "step": 183
    },
    {
      "epoch": 3.38,
      "learning_rate": 4.667250072925783e-05,
      "loss": 0.1618,
      "step": 184
    },
    {
      "epoch": 3.39,
      "learning_rate": 4.583906321623537e-05,
      "loss": 0.0735,
      "step": 185
    },
    {
      "epoch": 3.41,
      "learning_rate": 4.500562570321291e-05,
      "loss": 0.0924,
      "step": 186
    },
    {
      "epoch": 3.43,
      "learning_rate": 4.4172188190190444e-05,
      "loss": 0.1398,
      "step": 187
    },
    {
      "epoch": 3.45,
      "learning_rate": 4.333875067716798e-05,
      "loss": 0.3616,
      "step": 188
    },
    {
      "epoch": 3.47,
      "learning_rate": 4.250531316414552e-05,
      "loss": 0.2796,
      "step": 189
    },
    {
      "epoch": 3.49,
      "learning_rate": 4.167187565112306e-05,
      "loss": 0.1794,
      "step": 190
    },
    {
      "epoch": 3.5,
      "learning_rate": 4.08384381381006e-05,
      "loss": 0.2183,
      "step": 191
    },
    {
      "epoch": 3.52,
      "learning_rate": 4.0005000625078135e-05,
      "loss": 0.2381,
      "step": 192
    },
    {
      "epoch": 3.54,
      "learning_rate": 3.917156311205568e-05,
      "loss": 0.1094,
      "step": 193
    },
    {
      "epoch": 3.56,
      "learning_rate": 3.833812559903321e-05,
      "loss": 0.1113,
      "step": 194
    },
    {
      "epoch": 3.58,
      "learning_rate": 3.7504688086010755e-05,
      "loss": 0.1681,
      "step": 195
    },
    {
      "epoch": 3.6,
      "learning_rate": 3.667125057298829e-05,
      "loss": 0.1226,
      "step": 196
    },
    {
      "epoch": 3.61,
      "learning_rate": 3.583781305996583e-05,
      "loss": 0.2821,
      "step": 197
    },
    {
      "epoch": 3.63,
      "learning_rate": 3.500437554694337e-05,
      "loss": 0.1742,
      "step": 198
    },
    {
      "epoch": 3.65,
      "learning_rate": 3.417093803392091e-05,
      "loss": 0.289,
      "step": 199
    },
    {
      "epoch": 3.67,
      "learning_rate": 3.3337500520898446e-05,
      "loss": 0.2191,
      "step": 200
    },
    {
      "epoch": 3.69,
      "learning_rate": 3.250406300787598e-05,
      "loss": 0.1286,
      "step": 201
    },
    {
      "epoch": 3.71,
      "learning_rate": 3.1670625494853524e-05,
      "loss": 0.2275,
      "step": 202
    },
    {
      "epoch": 3.72,
      "learning_rate": 3.083718798183106e-05,
      "loss": 0.0647,
      "step": 203
    },
    {
      "epoch": 3.74,
      "learning_rate": 3.00037504688086e-05,
      "loss": 0.0477,
      "step": 204
    },
    {
      "epoch": 3.76,
      "learning_rate": 2.917031295578614e-05,
      "loss": 0.2203,
      "step": 205
    },
    {
      "epoch": 3.78,
      "learning_rate": 2.8336875442763683e-05,
      "loss": 0.2017,
      "step": 206
    },
    {
      "epoch": 3.8,
      "learning_rate": 2.750343792974122e-05,
      "loss": 0.1703,
      "step": 207
    },
    {
      "epoch": 3.82,
      "learning_rate": 2.667000041671876e-05,
      "loss": 0.2045,
      "step": 208
    },
    {
      "epoch": 3.83,
      "learning_rate": 2.58365629036963e-05,
      "loss": 0.0735,
      "step": 209
    },
    {
      "epoch": 3.85,
      "learning_rate": 2.5003125390673838e-05,
      "loss": 0.143,
      "step": 210
    },
    {
      "epoch": 3.87,
      "learning_rate": 2.4169687877651373e-05,
      "loss": 0.0957,
      "step": 211
    },
    {
      "epoch": 3.89,
      "learning_rate": 2.3336250364628916e-05,
      "loss": 0.1068,
      "step": 212
    },
    {
      "epoch": 3.91,
      "learning_rate": 2.2502812851606454e-05,
      "loss": 0.0779,
      "step": 213
    },
    {
      "epoch": 3.93,
      "learning_rate": 2.166937533858399e-05,
      "loss": 0.1485,
      "step": 214
    },
    {
      "epoch": 3.94,
      "learning_rate": 2.083593782556153e-05,
      "loss": 0.1665,
      "step": 215
    },
    {
      "epoch": 3.96,
      "learning_rate": 2.0002500312539068e-05,
      "loss": 0.1002,
      "step": 216
    },
    {
      "epoch": 3.98,
      "learning_rate": 1.9169062799516606e-05,
      "loss": 0.0731,
      "step": 217
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.8335625286494145e-05,
      "loss": 0.2159,
      "step": 218
    },
    {
      "epoch": 4.02,
      "learning_rate": 1.7502187773471684e-05,
      "loss": 0.1446,
      "step": 219
    },
    {
      "epoch": 4.04,
      "learning_rate": 1.6668750260449223e-05,
      "loss": 0.0548,
      "step": 220
    }
  ],
  "logging_steps": 1,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 20,
  "total_flos": 1.3658971558649856e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|