|
{
  "best_metric": 1.8761990070343018,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0755287009063444,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
"log_history": [ |
|
{ |
|
"epoch": 0.0015105740181268882, |
|
"grad_norm": 38.372501373291016, |
|
"learning_rate": 5e-05, |
|
"loss": 2.0533, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0015105740181268882, |
|
"eval_loss": 2.11383318901062, |
|
"eval_runtime": 379.0141, |
|
"eval_samples_per_second": 23.535, |
|
"eval_steps_per_second": 2.942, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0030211480362537764, |
|
"grad_norm": 45.164649963378906, |
|
"learning_rate": 0.0001, |
|
"loss": 2.0999, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.004531722054380665, |
|
"grad_norm": 22.255449295043945, |
|
"learning_rate": 9.989294616193017e-05, |
|
"loss": 2.0574, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.006042296072507553, |
|
"grad_norm": 15.518555641174316, |
|
"learning_rate": 9.957224306869053e-05, |
|
"loss": 1.9927, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.0075528700906344415, |
|
"grad_norm": 12.416341781616211, |
|
"learning_rate": 9.903926402016153e-05, |
|
"loss": 1.9789, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.00906344410876133, |
|
"grad_norm": 11.008262634277344, |
|
"learning_rate": 9.829629131445342e-05, |
|
"loss": 1.9897, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.010574018126888218, |
|
"grad_norm": 10.411260604858398, |
|
"learning_rate": 9.73465064747553e-05, |
|
"loss": 1.9794, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.012084592145015106, |
|
"grad_norm": 10.190176010131836, |
|
"learning_rate": 9.619397662556435e-05, |
|
"loss": 1.9458, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.013595166163141994, |
|
"grad_norm": 10.882474899291992, |
|
"learning_rate": 9.484363707663442e-05, |
|
"loss": 1.9277, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.015105740181268883, |
|
"grad_norm": 12.2539701461792, |
|
"learning_rate": 9.330127018922194e-05, |
|
"loss": 1.9583, |
|
"step": 10 |
|
}, |
|
    {
      "epoch": 0.01661631419939577,
      "grad_norm": 13.870675086975098,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.9288,
      "step": 11
    },
    {
      "epoch": 0.01812688821752266,
      "grad_norm": 16.904359817504883,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.8735,
      "step": 12
    },
    {
      "epoch": 0.019637462235649546,
      "grad_norm": 20.735435485839844,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.9198,
      "step": 13
    },
    {
      "epoch": 0.021148036253776436,
      "grad_norm": 18.89630699157715,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.9836,
      "step": 14
    },
    {
      "epoch": 0.022658610271903322,
      "grad_norm": 17.90799903869629,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.9534,
      "step": 15
    },
    {
      "epoch": 0.02416918429003021,
      "grad_norm": 11.054027557373047,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.9361,
      "step": 16
    },
    {
      "epoch": 0.0256797583081571,
      "grad_norm": 12.436798095703125,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.9447,
      "step": 17
    },
    {
      "epoch": 0.027190332326283987,
      "grad_norm": 14.795900344848633,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.9125,
      "step": 18
    },
    {
      "epoch": 0.028700906344410877,
      "grad_norm": 14.699880599975586,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.941,
      "step": 19
    },
    {
      "epoch": 0.030211480362537766,
      "grad_norm": 9.646371841430664,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.9222,
      "step": 20
    },
    {
      "epoch": 0.03172205438066465,
      "grad_norm": 9.113815307617188,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.8951,
      "step": 21
    },
    {
      "epoch": 0.03323262839879154,
      "grad_norm": 73.9245376586914,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.8928,
      "step": 22
    },
    {
      "epoch": 0.03474320241691843,
      "grad_norm": 56.74176025390625,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.8689,
      "step": 23
    },
    {
      "epoch": 0.03625377643504532,
      "grad_norm": 10.495920181274414,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.8297,
      "step": 24
    },
    {
      "epoch": 0.0377643504531722,
      "grad_norm": 10.864616394042969,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.7874,
      "step": 25
    },
    {
      "epoch": 0.0377643504531722,
      "eval_loss": 1.894762635231018,
      "eval_runtime": 367.8915,
      "eval_samples_per_second": 24.246,
      "eval_steps_per_second": 3.031,
      "step": 25
    },
    {
      "epoch": 0.03927492447129909,
      "grad_norm": 13.991535186767578,
      "learning_rate": 5e-05,
      "loss": 1.9516,
      "step": 26
    },
    {
      "epoch": 0.04078549848942598,
      "grad_norm": 12.654444694519043,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.9724,
      "step": 27
    },
    {
      "epoch": 0.04229607250755287,
      "grad_norm": 9.985688209533691,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.9129,
      "step": 28
    },
    {
      "epoch": 0.04380664652567976,
      "grad_norm": 8.882885932922363,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.9091,
      "step": 29
    },
    {
      "epoch": 0.045317220543806644,
      "grad_norm": 8.36329460144043,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.9237,
      "step": 30
    },
    {
      "epoch": 0.04682779456193353,
      "grad_norm": 8.64007568359375,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.886,
      "step": 31
    },
    {
      "epoch": 0.04833836858006042,
      "grad_norm": 9.498104095458984,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.9173,
      "step": 32
    },
    {
      "epoch": 0.04984894259818731,
      "grad_norm": 9.14793586730957,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.9047,
      "step": 33
    },
    {
      "epoch": 0.0513595166163142,
      "grad_norm": 9.84379768371582,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.8504,
      "step": 34
    },
    {
      "epoch": 0.052870090634441085,
      "grad_norm": 10.553916931152344,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.8816,
      "step": 35
    },
    {
      "epoch": 0.054380664652567974,
      "grad_norm": 11.11711311340332,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.852,
      "step": 36
    },
    {
      "epoch": 0.055891238670694864,
      "grad_norm": 12.357012748718262,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.8347,
      "step": 37
    },
    {
      "epoch": 0.05740181268882175,
      "grad_norm": 10.349846839904785,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.8391,
      "step": 38
    },
    {
      "epoch": 0.05891238670694864,
      "grad_norm": 7.760517597198486,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.9363,
      "step": 39
    },
    {
      "epoch": 0.06042296072507553,
      "grad_norm": 8.133441925048828,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.9542,
      "step": 40
    },
    {
      "epoch": 0.061933534743202415,
      "grad_norm": 7.650514125823975,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.927,
      "step": 41
    },
    {
      "epoch": 0.0634441087613293,
      "grad_norm": 7.582345008850098,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.9362,
      "step": 42
    },
    {
      "epoch": 0.0649546827794562,
      "grad_norm": 8.583966255187988,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.9035,
      "step": 43
    },
    {
      "epoch": 0.06646525679758308,
      "grad_norm": 8.344738960266113,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.8908,
      "step": 44
    },
    {
      "epoch": 0.06797583081570997,
      "grad_norm": 7.929678440093994,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.871,
      "step": 45
    },
    {
      "epoch": 0.06948640483383686,
      "grad_norm": 7.730828285217285,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.8825,
      "step": 46
    },
    {
      "epoch": 0.07099697885196375,
      "grad_norm": 8.460161209106445,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.8743,
      "step": 47
    },
    {
      "epoch": 0.07250755287009064,
      "grad_norm": 8.845512390136719,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.8665,
      "step": 48
    },
    {
      "epoch": 0.07401812688821752,
      "grad_norm": 9.84435749053955,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.8485,
      "step": 49
    },
    {
      "epoch": 0.0755287009063444,
      "grad_norm": 12.877066612243652,
      "learning_rate": 0.0,
      "loss": 1.7575,
      "step": 50
    },
    {
      "epoch": 0.0755287009063444,
      "eval_loss": 1.8761990070343018,
      "eval_runtime": 367.8841,
      "eval_samples_per_second": 24.247,
      "eval_steps_per_second": 3.031,
      "step": 50
    }
  ],
"logging_steps": 1, |
|
"max_steps": 50, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 1, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.1316042676568064e+18, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|