{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6797488226059656,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01674515960230246,
      "eval_loss": 1.4724388122558594,
      "eval_runtime": 73.0963,
      "eval_samples_per_second": 2.763,
      "eval_steps_per_second": 1.382,
      "step": 1
    },
    {
      "epoch": 0.05023547880690738,
      "grad_norm": 102.90454864501953,
      "learning_rate": 3e-05,
      "loss": 45.0108,
      "step": 3
    },
    {
      "epoch": 0.10047095761381476,
      "grad_norm": 66.23411560058594,
      "learning_rate": 6e-05,
      "loss": 42.1127,
      "step": 6
    },
    {
      "epoch": 0.15070643642072212,
      "grad_norm": 49.03892517089844,
      "learning_rate": 9e-05,
      "loss": 25.533,
      "step": 9
    },
    {
      "epoch": 0.15070643642072212,
      "eval_loss": 0.48065412044525146,
      "eval_runtime": 73.1001,
      "eval_samples_per_second": 2.763,
      "eval_steps_per_second": 1.382,
      "step": 9
    },
    {
      "epoch": 0.20094191522762953,
      "grad_norm": 40.06981658935547,
      "learning_rate": 0.00012,
      "loss": 12.9041,
      "step": 12
    },
    {
      "epoch": 0.25117739403453687,
      "grad_norm": 18.526214599609375,
      "learning_rate": 0.00015000000000000001,
      "loss": 8.4587,
      "step": 15
    },
    {
      "epoch": 0.30141287284144425,
      "grad_norm": 26.23703384399414,
      "learning_rate": 0.00018,
      "loss": 6.8537,
      "step": 18
    },
    {
      "epoch": 0.30141287284144425,
      "eval_loss": 0.16974753141403198,
      "eval_runtime": 73.0714,
      "eval_samples_per_second": 2.764,
      "eval_steps_per_second": 1.382,
      "step": 18
    },
    {
      "epoch": 0.3516483516483517,
      "grad_norm": 25.50685691833496,
      "learning_rate": 0.0001999229036240723,
      "loss": 5.2731,
      "step": 21
    },
    {
      "epoch": 0.40188383045525905,
      "grad_norm": 16.794132232666016,
      "learning_rate": 0.00019876883405951377,
      "loss": 3.9976,
      "step": 24
    },
    {
      "epoch": 0.4521193092621664,
      "grad_norm": 16.295429229736328,
      "learning_rate": 0.00019624552364536473,
      "loss": 3.9352,
      "step": 27
    },
    {
      "epoch": 0.4521193092621664,
      "eval_loss": 0.1071891188621521,
      "eval_runtime": 73.2333,
      "eval_samples_per_second": 2.758,
      "eval_steps_per_second": 1.379,
      "step": 27
    },
    {
      "epoch": 0.5023547880690737,
      "grad_norm": 12.938387870788574,
      "learning_rate": 0.0001923879532511287,
      "loss": 3.7022,
      "step": 30
    },
    {
      "epoch": 0.5525902668759811,
      "grad_norm": 11.224324226379395,
      "learning_rate": 0.00018724960070727972,
      "loss": 2.9652,
      "step": 33
    },
    {
      "epoch": 0.6028257456828885,
      "grad_norm": 8.89415454864502,
      "learning_rate": 0.00018090169943749476,
      "loss": 3.0822,
      "step": 36
    },
    {
      "epoch": 0.6028257456828885,
      "eval_loss": 0.09062672406435013,
      "eval_runtime": 73.1812,
      "eval_samples_per_second": 2.76,
      "eval_steps_per_second": 1.38,
      "step": 36
    },
    {
      "epoch": 0.6530612244897959,
      "grad_norm": 9.960721969604492,
      "learning_rate": 0.00017343225094356855,
      "loss": 3.1298,
      "step": 39
    },
    {
      "epoch": 0.7032967032967034,
      "grad_norm": 8.25685977935791,
      "learning_rate": 0.00016494480483301836,
      "loss": 2.9661,
      "step": 42
    },
    {
      "epoch": 0.7535321821036107,
      "grad_norm": 9.187298774719238,
      "learning_rate": 0.00015555702330196023,
      "loss": 2.7679,
      "step": 45
    },
    {
      "epoch": 0.7535321821036107,
      "eval_loss": 0.073814257979393,
      "eval_runtime": 73.168,
      "eval_samples_per_second": 2.761,
      "eval_steps_per_second": 1.38,
      "step": 45
    },
    {
      "epoch": 0.8037676609105181,
      "grad_norm": 8.129315376281738,
      "learning_rate": 0.00014539904997395468,
      "loss": 2.3404,
      "step": 48
    },
    {
      "epoch": 0.8540031397174255,
      "grad_norm": 9.582749366760254,
      "learning_rate": 0.0001346117057077493,
      "loss": 2.556,
      "step": 51
    },
    {
      "epoch": 0.9042386185243328,
      "grad_norm": 8.937057495117188,
      "learning_rate": 0.00012334453638559057,
      "loss": 2.3485,
      "step": 54
    },
    {
      "epoch": 0.9042386185243328,
      "eval_loss": 0.06895963847637177,
      "eval_runtime": 72.5181,
      "eval_samples_per_second": 2.786,
      "eval_steps_per_second": 1.393,
      "step": 54
    },
    {
      "epoch": 0.9544740973312402,
      "grad_norm": 7.194057941436768,
      "learning_rate": 0.00011175373974578378,
      "loss": 2.2831,
      "step": 57
    },
    {
      "epoch": 1.0099424385138671,
      "grad_norm": 6.12868070602417,
      "learning_rate": 0.0001,
      "loss": 2.2803,
      "step": 60
    },
    {
      "epoch": 1.0601779173207744,
      "grad_norm": 6.514719009399414,
      "learning_rate": 8.824626025421626e-05,
      "loss": 1.8185,
      "step": 63
    },
    {
      "epoch": 1.0601779173207744,
      "eval_loss": 0.058350082486867905,
      "eval_runtime": 73.106,
      "eval_samples_per_second": 2.763,
      "eval_steps_per_second": 1.382,
      "step": 63
    },
    {
      "epoch": 1.1104133961276819,
      "grad_norm": 5.304825782775879,
      "learning_rate": 7.66554636144095e-05,
      "loss": 1.6933,
      "step": 66
    },
    {
      "epoch": 1.1606488749345891,
      "grad_norm": 5.396218776702881,
      "learning_rate": 6.538829429225069e-05,
      "loss": 1.6576,
      "step": 69
    },
    {
      "epoch": 1.2108843537414966,
      "grad_norm": 7.664739608764648,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 1.566,
      "step": 72
    },
    {
      "epoch": 1.2108843537414966,
      "eval_loss": 0.05733713135123253,
      "eval_runtime": 73.1797,
      "eval_samples_per_second": 2.76,
      "eval_steps_per_second": 1.38,
      "step": 72
    },
    {
      "epoch": 1.2611198325484039,
      "grad_norm": 8.14849853515625,
      "learning_rate": 4.444297669803981e-05,
      "loss": 1.6062,
      "step": 75
    },
    {
      "epoch": 1.3113553113553114,
      "grad_norm": 6.613561153411865,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 1.7725,
      "step": 78
    },
    {
      "epoch": 1.3615907901622188,
      "grad_norm": 6.738033771514893,
      "learning_rate": 2.6567749056431467e-05,
      "loss": 1.4988,
      "step": 81
    },
    {
      "epoch": 1.3615907901622188,
      "eval_loss": 0.052346888929605484,
      "eval_runtime": 73.2318,
      "eval_samples_per_second": 2.758,
      "eval_steps_per_second": 1.379,
      "step": 81
    },
    {
      "epoch": 1.411826268969126,
      "grad_norm": 5.425446033477783,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 1.4703,
      "step": 84
    },
    {
      "epoch": 1.4620617477760334,
      "grad_norm": 5.082719802856445,
      "learning_rate": 1.2750399292720283e-05,
      "loss": 1.6151,
      "step": 87
    },
    {
      "epoch": 1.5122972265829409,
      "grad_norm": 5.419787406921387,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.6353,
      "step": 90
    },
    {
      "epoch": 1.5122972265829409,
      "eval_loss": 0.0507192499935627,
      "eval_runtime": 73.2046,
      "eval_samples_per_second": 2.759,
      "eval_steps_per_second": 1.38,
      "step": 90
    },
    {
      "epoch": 1.5625327053898483,
      "grad_norm": 5.491358757019043,
      "learning_rate": 3.7544763546352834e-06,
      "loss": 1.5038,
      "step": 93
    },
    {
      "epoch": 1.6127681841967556,
      "grad_norm": 4.776278495788574,
      "learning_rate": 1.231165940486234e-06,
      "loss": 1.4482,
      "step": 96
    },
    {
      "epoch": 1.6630036630036629,
      "grad_norm": 4.944296836853027,
      "learning_rate": 7.709637592770991e-08,
      "loss": 1.5689,
      "step": 99
    },
    {
      "epoch": 1.6630036630036629,
      "eval_loss": 0.05006199702620506,
      "eval_runtime": 73.1223,
      "eval_samples_per_second": 2.762,
      "eval_steps_per_second": 1.381,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.259104775831552e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}