|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03592276605298608,
  "eval_steps": 100,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 8.98069151324652e-05,
      "eval_loss": 3.1951746940612793,
      "eval_runtime": 353.9742,
      "eval_samples_per_second": 13.247,
      "eval_steps_per_second": 6.625,
      "step": 1
    },
    {
      "epoch": 0.000449034575662326,
      "grad_norm": 0.28459519147872925,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 2.6336,
      "step": 5
    },
    {
      "epoch": 0.000898069151324652,
      "grad_norm": 0.35409072041511536,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.6537,
      "step": 10
    },
    {
      "epoch": 0.001347103726986978,
      "grad_norm": 0.49534764885902405,
      "learning_rate": 5e-05,
      "loss": 2.809,
      "step": 15
    },
    {
      "epoch": 0.001796138302649304,
      "grad_norm": 0.5985381007194519,
      "learning_rate": 6.666666666666667e-05,
      "loss": 2.9052,
      "step": 20
    },
    {
      "epoch": 0.00224517287831163,
      "grad_norm": 0.9438591003417969,
      "learning_rate": 8.333333333333334e-05,
      "loss": 2.8994,
      "step": 25
    },
    {
      "epoch": 0.002694207453973956,
      "grad_norm": 1.277062177658081,
      "learning_rate": 0.0001,
      "loss": 2.8801,
      "step": 30
    },
    {
      "epoch": 0.003143242029636282,
      "grad_norm": 1.6340757608413696,
      "learning_rate": 9.995494831023409e-05,
      "loss": 2.7491,
      "step": 35
    },
    {
      "epoch": 0.003592276605298608,
      "grad_norm": 1.941489338874817,
      "learning_rate": 9.981987442712633e-05,
      "loss": 2.8053,
      "step": 40
    },
    {
      "epoch": 0.004041311180960934,
      "grad_norm": 2.2817068099975586,
      "learning_rate": 9.959502176294383e-05,
      "loss": 2.5856,
      "step": 45
    },
    {
      "epoch": 0.00449034575662326,
      "grad_norm": 2.839555501937866,
      "learning_rate": 9.928079551738543e-05,
      "loss": 2.9146,
      "step": 50
    },
    {
      "epoch": 0.004939380332285586,
      "grad_norm": 0.7805606722831726,
      "learning_rate": 9.887776194738432e-05,
      "loss": 2.4364,
      "step": 55
    },
    {
      "epoch": 0.005388414907947912,
      "grad_norm": 0.6951726078987122,
      "learning_rate": 9.838664734667495e-05,
      "loss": 2.3357,
      "step": 60
    },
    {
      "epoch": 0.0058374494836102376,
      "grad_norm": 0.7993314862251282,
      "learning_rate": 9.780833673696254e-05,
      "loss": 2.2939,
      "step": 65
    },
    {
      "epoch": 0.006286484059272564,
      "grad_norm": 1.141896367073059,
      "learning_rate": 9.714387227305422e-05,
      "loss": 2.3438,
      "step": 70
    },
    {
      "epoch": 0.00673551863493489,
      "grad_norm": 1.4399296045303345,
      "learning_rate": 9.639445136482548e-05,
      "loss": 2.4126,
      "step": 75
    },
    {
      "epoch": 0.007184553210597216,
      "grad_norm": 1.676002025604248,
      "learning_rate": 9.55614245194068e-05,
      "loss": 2.4796,
      "step": 80
    },
    {
      "epoch": 0.007633587786259542,
      "grad_norm": 2.033027410507202,
      "learning_rate": 9.464629290747842e-05,
      "loss": 2.5079,
      "step": 85
    },
    {
      "epoch": 0.008082622361921869,
      "grad_norm": 2.0834078788757324,
      "learning_rate": 9.365070565805941e-05,
      "loss": 2.4775,
      "step": 90
    },
    {
      "epoch": 0.008531656937584195,
      "grad_norm": 2.7009525299072266,
      "learning_rate": 9.257645688666556e-05,
      "loss": 2.5451,
      "step": 95
    },
    {
      "epoch": 0.00898069151324652,
      "grad_norm": 3.329258918762207,
      "learning_rate": 9.142548246219212e-05,
      "loss": 2.6497,
      "step": 100
    },
    {
      "epoch": 0.00898069151324652,
      "eval_loss": 2.4617698192596436,
      "eval_runtime": 356.5654,
      "eval_samples_per_second": 13.15,
      "eval_steps_per_second": 6.577,
      "step": 100
    },
    {
      "epoch": 0.009429726088908847,
      "grad_norm": 0.7947559952735901,
      "learning_rate": 9.019985651834703e-05,
      "loss": 2.3265,
      "step": 105
    },
    {
      "epoch": 0.009878760664571173,
      "grad_norm": 0.9079984426498413,
      "learning_rate": 8.890178771592199e-05,
      "loss": 2.3397,
      "step": 110
    },
    {
      "epoch": 0.010327795240233499,
      "grad_norm": 1.0366816520690918,
      "learning_rate": 8.753361526263621e-05,
      "loss": 2.2172,
      "step": 115
    },
    {
      "epoch": 0.010776829815895825,
      "grad_norm": 1.396443486213684,
      "learning_rate": 8.609780469772623e-05,
      "loss": 2.3083,
      "step": 120
    },
    {
      "epoch": 0.01122586439155815,
      "grad_norm": 1.6878368854522705,
      "learning_rate": 8.459694344887732e-05,
      "loss": 2.3579,
      "step": 125
    },
    {
      "epoch": 0.011674898967220475,
      "grad_norm": 2.2573277950286865,
      "learning_rate": 8.303373616950408e-05,
      "loss": 2.4,
      "step": 130
    },
    {
      "epoch": 0.012123933542882801,
      "grad_norm": 2.4351515769958496,
      "learning_rate": 8.141099986478212e-05,
      "loss": 2.557,
      "step": 135
    },
    {
      "epoch": 0.012572968118545127,
      "grad_norm": 2.702453851699829,
      "learning_rate": 7.973165881521434e-05,
      "loss": 2.485,
      "step": 140
    },
    {
      "epoch": 0.013022002694207453,
      "grad_norm": 2.9839582443237305,
      "learning_rate": 7.799873930687978e-05,
      "loss": 2.3986,
      "step": 145
    },
    {
      "epoch": 0.01347103726986978,
      "grad_norm": 3.9269957542419434,
      "learning_rate": 7.621536417786159e-05,
      "loss": 2.6132,
      "step": 150
    },
    {
      "epoch": 0.013920071845532105,
      "grad_norm": 0.9305656552314758,
      "learning_rate": 7.438474719068173e-05,
      "loss": 2.2556,
      "step": 155
    },
    {
      "epoch": 0.014369106421194431,
      "grad_norm": 1.079910159111023,
      "learning_rate": 7.251018724088367e-05,
      "loss": 2.1759,
      "step": 160
    },
    {
      "epoch": 0.014818140996856757,
      "grad_norm": 1.2208424806594849,
      "learning_rate": 7.059506241219965e-05,
      "loss": 2.2838,
      "step": 165
    },
    {
      "epoch": 0.015267175572519083,
      "grad_norm": 1.546343207359314,
      "learning_rate": 6.864282388901544e-05,
      "loss": 2.2865,
      "step": 170
    },
    {
      "epoch": 0.01571621014818141,
      "grad_norm": 1.7968132495880127,
      "learning_rate": 6.665698973710288e-05,
      "loss": 2.3424,
      "step": 175
    },
    {
      "epoch": 0.016165244723843737,
      "grad_norm": 2.363734006881714,
      "learning_rate": 6.464113856382752e-05,
      "loss": 2.3408,
      "step": 180
    },
    {
      "epoch": 0.01661427929950606,
      "grad_norm": 2.706956624984741,
      "learning_rate": 6.259890306925627e-05,
      "loss": 2.2287,
      "step": 185
    },
    {
      "epoch": 0.01706331387516839,
      "grad_norm": 2.653686285018921,
      "learning_rate": 6.0533963499786314e-05,
      "loss": 2.3489,
      "step": 190
    },
    {
      "epoch": 0.017512348450830714,
      "grad_norm": 3.6265406608581543,
      "learning_rate": 5.8450041016092464e-05,
      "loss": 2.3393,
      "step": 195
    },
    {
      "epoch": 0.01796138302649304,
      "grad_norm": 4.439182281494141,
      "learning_rate": 5.6350890987343944e-05,
      "loss": 2.4922,
      "step": 200
    },
    {
      "epoch": 0.01796138302649304,
      "eval_loss": 2.3509719371795654,
      "eval_runtime": 356.6582,
      "eval_samples_per_second": 13.147,
      "eval_steps_per_second": 6.575,
      "step": 200
    },
    {
      "epoch": 0.018410417602155366,
      "grad_norm": 1.0145143270492554,
      "learning_rate": 5.4240296223775465e-05,
      "loss": 2.114,
      "step": 205
    },
    {
      "epoch": 0.018859452177817693,
      "grad_norm": 1.121131420135498,
      "learning_rate": 5.212206015980742e-05,
      "loss": 2.1236,
      "step": 210
    },
    {
      "epoch": 0.019308486753480018,
      "grad_norm": 1.5506993532180786,
      "learning_rate": 5e-05,
      "loss": 2.2213,
      "step": 215
    },
    {
      "epoch": 0.019757521329142345,
      "grad_norm": 1.7119569778442383,
      "learning_rate": 4.78779398401926e-05,
      "loss": 2.3372,
      "step": 220
    },
    {
      "epoch": 0.02020655590480467,
      "grad_norm": 2.3294599056243896,
      "learning_rate": 4.575970377622456e-05,
      "loss": 2.348,
      "step": 225
    },
    {
      "epoch": 0.020655590480466997,
      "grad_norm": 2.8338170051574707,
      "learning_rate": 4.364910901265606e-05,
      "loss": 2.361,
      "step": 230
    },
    {
      "epoch": 0.021104625056129322,
      "grad_norm": 2.876967668533325,
      "learning_rate": 4.1549958983907555e-05,
      "loss": 2.275,
      "step": 235
    },
    {
      "epoch": 0.02155365963179165,
      "grad_norm": 2.95035719871521,
      "learning_rate": 3.94660365002137e-05,
      "loss": 2.3335,
      "step": 240
    },
    {
      "epoch": 0.022002694207453974,
      "grad_norm": 4.696662425994873,
      "learning_rate": 3.740109693074375e-05,
      "loss": 2.4499,
      "step": 245
    },
    {
      "epoch": 0.0224517287831163,
      "grad_norm": 4.856019496917725,
      "learning_rate": 3.5358861436172485e-05,
      "loss": 2.6149,
      "step": 250
    },
    {
      "epoch": 0.022900763358778626,
      "grad_norm": 1.1213271617889404,
      "learning_rate": 3.334301026289712e-05,
      "loss": 2.1865,
      "step": 255
    },
    {
      "epoch": 0.02334979793444095,
      "grad_norm": 1.2899647951126099,
      "learning_rate": 3.135717611098458e-05,
      "loss": 2.2324,
      "step": 260
    },
    {
      "epoch": 0.023798832510103278,
      "grad_norm": 1.463903546333313,
      "learning_rate": 2.9404937587800375e-05,
      "loss": 2.1198,
      "step": 265
    },
    {
      "epoch": 0.024247867085765602,
      "grad_norm": 1.8549535274505615,
      "learning_rate": 2.748981275911633e-05,
      "loss": 2.3003,
      "step": 270
    },
    {
      "epoch": 0.02469690166142793,
      "grad_norm": 2.2730584144592285,
      "learning_rate": 2.5615252809318284e-05,
      "loss": 2.3141,
      "step": 275
    },
    {
      "epoch": 0.025145936237090254,
      "grad_norm": 2.8046603202819824,
      "learning_rate": 2.3784635822138424e-05,
      "loss": 2.2976,
      "step": 280
    },
    {
      "epoch": 0.025594970812752582,
      "grad_norm": 3.0348989963531494,
      "learning_rate": 2.2001260693120233e-05,
      "loss": 2.3604,
      "step": 285
    },
    {
      "epoch": 0.026044005388414906,
      "grad_norm": 3.0083751678466797,
      "learning_rate": 2.026834118478567e-05,
      "loss": 2.2966,
      "step": 290
    },
    {
      "epoch": 0.026493039964077234,
      "grad_norm": 3.9714059829711914,
      "learning_rate": 1.858900013521788e-05,
      "loss": 2.4667,
      "step": 295
    },
    {
      "epoch": 0.02694207453973956,
      "grad_norm": 4.601074695587158,
      "learning_rate": 1.6966263830495936e-05,
      "loss": 2.5235,
      "step": 300
    },
    {
      "epoch": 0.02694207453973956,
      "eval_loss": 2.3027594089508057,
      "eval_runtime": 356.3728,
      "eval_samples_per_second": 13.158,
      "eval_steps_per_second": 6.58,
      "step": 300
    },
    {
      "epoch": 0.027391109115401886,
      "grad_norm": 1.0686273574829102,
      "learning_rate": 1.5403056551122697e-05,
      "loss": 2.3023,
      "step": 305
    },
    {
      "epoch": 0.02784014369106421,
      "grad_norm": 1.3059085607528687,
      "learning_rate": 1.3902195302273779e-05,
      "loss": 2.1677,
      "step": 310
    },
    {
      "epoch": 0.02828917826672654,
      "grad_norm": 1.4381647109985352,
      "learning_rate": 1.246638473736378e-05,
      "loss": 2.282,
      "step": 315
    },
    {
      "epoch": 0.028738212842388863,
      "grad_norm": 1.7172763347625732,
      "learning_rate": 1.1098212284078036e-05,
      "loss": 2.0908,
      "step": 320
    },
    {
      "epoch": 0.02918724741805119,
      "grad_norm": 2.1044275760650635,
      "learning_rate": 9.800143481652979e-06,
      "loss": 2.322,
      "step": 325
    },
    {
      "epoch": 0.029636281993713515,
      "grad_norm": 2.5188639163970947,
      "learning_rate": 8.574517537807897e-06,
      "loss": 2.2956,
      "step": 330
    },
    {
      "epoch": 0.030085316569375842,
      "grad_norm": 3.0851542949676514,
      "learning_rate": 7.423543113334436e-06,
      "loss": 2.2503,
      "step": 335
    },
    {
      "epoch": 0.030534351145038167,
      "grad_norm": 3.5301318168640137,
      "learning_rate": 6.349294341940593e-06,
      "loss": 2.3532,
      "step": 340
    },
    {
      "epoch": 0.030983385720700495,
      "grad_norm": 3.9213593006134033,
      "learning_rate": 5.353707092521582e-06,
      "loss": 2.6252,
      "step": 345
    },
    {
      "epoch": 0.03143242029636282,
      "grad_norm": 4.902388572692871,
      "learning_rate": 4.43857548059321e-06,
      "loss": 2.5284,
      "step": 350
    },
    {
      "epoch": 0.03188145487202514,
      "grad_norm": 1.0451959371566772,
      "learning_rate": 3.605548635174533e-06,
      "loss": 2.2551,
      "step": 355
    },
    {
      "epoch": 0.032330489447687474,
      "grad_norm": 1.2155513763427734,
      "learning_rate": 2.85612772694579e-06,
      "loss": 2.139,
      "step": 360
    },
    {
      "epoch": 0.0327795240233498,
      "grad_norm": 1.5189533233642578,
      "learning_rate": 2.191663263037458e-06,
      "loss": 2.2051,
      "step": 365
    },
    {
      "epoch": 0.03322855859901212,
      "grad_norm": 1.8762248754501343,
      "learning_rate": 1.6133526533250565e-06,
      "loss": 2.2809,
      "step": 370
    },
    {
      "epoch": 0.03367759317467445,
      "grad_norm": 2.338930130004883,
      "learning_rate": 1.1222380526156928e-06,
      "loss": 2.2125,
      "step": 375
    },
    {
      "epoch": 0.03412662775033678,
      "grad_norm": 2.407233476638794,
      "learning_rate": 7.192044826145771e-07,
      "loss": 2.3318,
      "step": 380
    },
    {
      "epoch": 0.0345756623259991,
      "grad_norm": 2.822512149810791,
      "learning_rate": 4.049782370561583e-07,
      "loss": 2.2985,
      "step": 385
    },
    {
      "epoch": 0.03502469690166143,
      "grad_norm": 3.503061294555664,
      "learning_rate": 1.8012557287367392e-07,
      "loss": 2.4015,
      "step": 390
    },
    {
      "epoch": 0.03547373147732375,
      "grad_norm": 3.7975668907165527,
      "learning_rate": 4.5051689765929214e-08,
      "loss": 2.4348,
      "step": 395
    },
    {
      "epoch": 0.03592276605298608,
      "grad_norm": 4.950035095214844,
      "learning_rate": 0.0,
      "loss": 2.444,
      "step": 400
    },
    {
      "epoch": 0.03592276605298608,
      "eval_loss": 2.294800281524658,
      "eval_runtime": 356.4397,
      "eval_samples_per_second": 13.155,
      "eval_steps_per_second": 6.579,
      "step": 400
    }
  ],
  "logging_steps": 5,
  "max_steps": 400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4341894659322675e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|