{
  "best_metric": 0.3359534740447998,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 2.477064220183486,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04892966360856269,
      "grad_norm": 860.2722778320312,
      "learning_rate": 5e-05,
      "loss": 8.2572,
      "step": 1
    },
    {
      "epoch": 0.04892966360856269,
      "eval_loss": 7.375894069671631,
      "eval_runtime": 9.3559,
      "eval_samples_per_second": 14.75,
      "eval_steps_per_second": 1.924,
      "step": 1
    },
    {
      "epoch": 0.09785932721712538,
      "grad_norm": 805.065185546875,
      "learning_rate": 0.0001,
      "loss": 8.5249,
      "step": 2
    },
    {
      "epoch": 0.14678899082568808,
      "grad_norm": 521.0182495117188,
      "learning_rate": 9.989294616193017e-05,
      "loss": 4.8112,
      "step": 3
    },
    {
      "epoch": 0.19571865443425077,
      "grad_norm": 288.8603820800781,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.0428,
      "step": 4
    },
    {
      "epoch": 0.24464831804281345,
      "grad_norm": 291.7355041503906,
      "learning_rate": 9.903926402016153e-05,
      "loss": 2.0672,
      "step": 5
    },
    {
      "epoch": 0.29357798165137616,
      "grad_norm": 205.27493286132812,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.1652,
      "step": 6
    },
    {
      "epoch": 0.3425076452599388,
      "grad_norm": 125.26005554199219,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.8289,
      "step": 7
    },
    {
      "epoch": 0.39143730886850153,
      "grad_norm": 83.22518920898438,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.5171,
      "step": 8
    },
    {
      "epoch": 0.44036697247706424,
      "grad_norm": 156.4967041015625,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.4905,
      "step": 9
    },
    {
      "epoch": 0.4892966360856269,
      "grad_norm": 159.6315460205078,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.4991,
      "step": 10
    },
    {
      "epoch": 0.5382262996941896,
      "grad_norm": 90.45294189453125,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.6751,
      "step": 11
    },
    {
      "epoch": 0.5871559633027523,
      "grad_norm": 65.43336486816406,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.4683,
      "step": 12
    },
    {
      "epoch": 0.636085626911315,
      "grad_norm": 73.60994720458984,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.448,
      "step": 13
    },
    {
      "epoch": 0.6850152905198776,
      "grad_norm": 51.45248031616211,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.4062,
      "step": 14
    },
    {
      "epoch": 0.7339449541284404,
      "grad_norm": 55.65335464477539,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.3956,
      "step": 15
    },
    {
      "epoch": 0.7828746177370031,
      "grad_norm": 55.53349304199219,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.5444,
      "step": 16
    },
    {
      "epoch": 0.8318042813455657,
      "grad_norm": 38.786128997802734,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.4223,
      "step": 17
    },
    {
      "epoch": 0.8807339449541285,
      "grad_norm": 38.95067596435547,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.3362,
      "step": 18
    },
    {
      "epoch": 0.9296636085626911,
      "grad_norm": 30.522335052490234,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.3267,
      "step": 19
    },
    {
      "epoch": 0.9785932721712538,
      "grad_norm": 77.3564224243164,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.4163,
      "step": 20
    },
    {
      "epoch": 1.0428134556574924,
      "grad_norm": 81.9765853881836,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.5787,
      "step": 21
    },
    {
      "epoch": 1.091743119266055,
      "grad_norm": 91.37242126464844,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.4669,
      "step": 22
    },
    {
      "epoch": 1.1406727828746177,
      "grad_norm": 53.006507873535156,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.3037,
      "step": 23
    },
    {
      "epoch": 1.1896024464831805,
      "grad_norm": 31.78158187866211,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.268,
      "step": 24
    },
    {
      "epoch": 1.238532110091743,
      "grad_norm": 58.46190643310547,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.2589,
      "step": 25
    },
    {
      "epoch": 1.238532110091743,
      "eval_loss": 0.344561368227005,
      "eval_runtime": 9.3281,
      "eval_samples_per_second": 14.794,
      "eval_steps_per_second": 1.93,
      "step": 25
    },
    {
      "epoch": 1.2874617737003058,
      "grad_norm": 73.28158569335938,
      "learning_rate": 5e-05,
      "loss": 0.5206,
      "step": 26
    },
    {
      "epoch": 1.3363914373088686,
      "grad_norm": 84.08252716064453,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.4166,
      "step": 27
    },
    {
      "epoch": 1.385321100917431,
      "grad_norm": 95.0881118774414,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.3284,
      "step": 28
    },
    {
      "epoch": 1.4342507645259939,
      "grad_norm": 84.19164276123047,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.3914,
      "step": 29
    },
    {
      "epoch": 1.4831804281345566,
      "grad_norm": 86.7741470336914,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.3869,
      "step": 30
    },
    {
      "epoch": 1.5321100917431192,
      "grad_norm": 67.30615997314453,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.5661,
      "step": 31
    },
    {
      "epoch": 1.581039755351682,
      "grad_norm": 38.15361404418945,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.363,
      "step": 32
    },
    {
      "epoch": 1.6299694189602447,
      "grad_norm": 62.01075744628906,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.3029,
      "step": 33
    },
    {
      "epoch": 1.6788990825688073,
      "grad_norm": 23.574642181396484,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.289,
      "step": 34
    },
    {
      "epoch": 1.72782874617737,
      "grad_norm": 39.977779388427734,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.2959,
      "step": 35
    },
    {
      "epoch": 1.7767584097859328,
      "grad_norm": 53.3297004699707,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.4495,
      "step": 36
    },
    {
      "epoch": 1.8256880733944953,
      "grad_norm": 35.10194778442383,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.3355,
      "step": 37
    },
    {
      "epoch": 1.8746177370030581,
      "grad_norm": 26.82624626159668,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.2645,
      "step": 38
    },
    {
      "epoch": 1.9235474006116209,
      "grad_norm": 57.93483352661133,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.2688,
      "step": 39
    },
    {
      "epoch": 1.9724770642201834,
      "grad_norm": 33.57123565673828,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.2798,
      "step": 40
    },
    {
      "epoch": 2.036697247706422,
      "grad_norm": 44.15616989135742,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.4424,
      "step": 41
    },
    {
      "epoch": 2.085626911314985,
      "grad_norm": 36.59967041015625,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.3808,
      "step": 42
    },
    {
      "epoch": 2.1345565749235473,
      "grad_norm": 58.102577209472656,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.2978,
      "step": 43
    },
    {
      "epoch": 2.18348623853211,
      "grad_norm": 26.42946434020996,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.2555,
      "step": 44
    },
    {
      "epoch": 2.232415902140673,
      "grad_norm": 69.52690124511719,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.2367,
      "step": 45
    },
    {
      "epoch": 2.2813455657492354,
      "grad_norm": 53.76517105102539,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.5083,
      "step": 46
    },
    {
      "epoch": 2.330275229357798,
      "grad_norm": 37.09881591796875,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.3288,
      "step": 47
    },
    {
      "epoch": 2.379204892966361,
      "grad_norm": 33.916202545166016,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.3584,
      "step": 48
    },
    {
      "epoch": 2.4281345565749235,
      "grad_norm": 34.20186233520508,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.2662,
      "step": 49
    },
    {
      "epoch": 2.477064220183486,
      "grad_norm": 34.20256805419922,
      "learning_rate": 0.0,
      "loss": 0.2307,
      "step": 50
    },
    {
      "epoch": 2.477064220183486,
      "eval_loss": 0.3359534740447998,
      "eval_runtime": 9.3234,
      "eval_samples_per_second": 14.801,
      "eval_steps_per_second": 1.931,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.660495239446528e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}