{
  "best_metric": 2.859468698501587,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1949317738791423,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003898635477582846,
      "grad_norm": 0.3461493253707886,
      "learning_rate": 5e-05,
      "loss": 2.8801,
      "step": 1
    },
    {
      "epoch": 0.003898635477582846,
      "eval_loss": 3.456559419631958,
      "eval_runtime": 1.7721,
      "eval_samples_per_second": 28.215,
      "eval_steps_per_second": 7.336,
      "step": 1
    },
    {
      "epoch": 0.007797270955165692,
      "grad_norm": 0.4024673104286194,
      "learning_rate": 0.0001,
      "loss": 2.96,
      "step": 2
    },
    {
      "epoch": 0.011695906432748537,
      "grad_norm": 0.4904806613922119,
      "learning_rate": 9.990365154573717e-05,
      "loss": 2.9874,
      "step": 3
    },
    {
      "epoch": 0.015594541910331383,
      "grad_norm": 0.4809294641017914,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.8788,
      "step": 4
    },
    {
      "epoch": 0.01949317738791423,
      "grad_norm": 0.5154537558555603,
      "learning_rate": 9.913533761814537e-05,
      "loss": 2.8999,
      "step": 5
    },
    {
      "epoch": 0.023391812865497075,
      "grad_norm": 0.544384241104126,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.8836,
      "step": 6
    },
    {
      "epoch": 0.02729044834307992,
      "grad_norm": 0.5780109167098999,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.8689,
      "step": 7
    },
    {
      "epoch": 0.031189083820662766,
      "grad_norm": 0.5785645246505737,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.8486,
      "step": 8
    },
    {
      "epoch": 0.03508771929824561,
      "grad_norm": 0.619056761264801,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.8994,
      "step": 9
    },
    {
      "epoch": 0.03898635477582846,
      "grad_norm": 0.5879754424095154,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.8553,
      "step": 10
    },
    {
      "epoch": 0.042884990253411304,
      "grad_norm": 0.7349144220352173,
      "learning_rate": 9.241613255361455e-05,
      "loss": 2.8687,
      "step": 11
    },
    {
      "epoch": 0.04678362573099415,
      "grad_norm": 2.103039503097534,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.3661,
      "step": 12
    },
    {
      "epoch": 0.050682261208576995,
      "grad_norm": 0.9148193597793579,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.8528,
      "step": 13
    },
    {
      "epoch": 0.05458089668615984,
      "grad_norm": 0.8443722128868103,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.8349,
      "step": 14
    },
    {
      "epoch": 0.05847953216374269,
      "grad_norm": 0.6842778921127319,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.7925,
      "step": 15
    },
    {
      "epoch": 0.06237816764132553,
      "grad_norm": 0.5673577785491943,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.765,
      "step": 16
    },
    {
      "epoch": 0.06627680311890838,
      "grad_norm": 0.4525413513183594,
      "learning_rate": 8.000066048588211e-05,
      "loss": 2.7274,
      "step": 17
    },
    {
      "epoch": 0.07017543859649122,
      "grad_norm": 0.39243853092193604,
      "learning_rate": 7.75e-05,
      "loss": 2.7372,
      "step": 18
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 0.4517950713634491,
      "learning_rate": 7.490299105985507e-05,
      "loss": 2.7865,
      "step": 19
    },
    {
      "epoch": 0.07797270955165692,
      "grad_norm": 0.5114383697509766,
      "learning_rate": 7.222075445642904e-05,
      "loss": 2.8224,
      "step": 20
    },
    {
      "epoch": 0.08187134502923976,
      "grad_norm": 0.5781399011611938,
      "learning_rate": 6.946477593864228e-05,
      "loss": 2.7982,
      "step": 21
    },
    {
      "epoch": 0.08576998050682261,
      "grad_norm": 0.6390553116798401,
      "learning_rate": 6.664685702961344e-05,
      "loss": 2.8437,
      "step": 22
    },
    {
      "epoch": 0.08966861598440545,
      "grad_norm": 0.7765193581581116,
      "learning_rate": 6.377906449072578e-05,
      "loss": 2.7538,
      "step": 23
    },
    {
      "epoch": 0.0935672514619883,
      "grad_norm": 1.0726369619369507,
      "learning_rate": 6.087367864990233e-05,
      "loss": 2.5752,
      "step": 24
    },
    {
      "epoch": 0.09746588693957114,
      "grad_norm": 3.1702678203582764,
      "learning_rate": 5.794314081535644e-05,
      "loss": 2.8117,
      "step": 25
    },
    {
      "epoch": 0.09746588693957114,
      "eval_loss": 2.9354588985443115,
      "eval_runtime": 1.759,
      "eval_samples_per_second": 28.425,
      "eval_steps_per_second": 7.39,
      "step": 25
    },
    {
      "epoch": 0.10136452241715399,
      "grad_norm": 0.28435713052749634,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.7583,
      "step": 26
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 0.3302833139896393,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.7186,
      "step": 27
    },
    {
      "epoch": 0.10916179337231968,
      "grad_norm": 0.37060415744781494,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.7439,
      "step": 28
    },
    {
      "epoch": 0.11306042884990253,
      "grad_norm": 0.37979695200920105,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 2.7777,
      "step": 29
    },
    {
      "epoch": 0.11695906432748537,
      "grad_norm": 0.36751940846443176,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 2.719,
      "step": 30
    },
    {
      "epoch": 0.12085769980506822,
      "grad_norm": 0.3829987943172455,
      "learning_rate": 4.053522406135775e-05,
      "loss": 2.7896,
      "step": 31
    },
    {
      "epoch": 0.12475633528265107,
      "grad_norm": 0.4021106958389282,
      "learning_rate": 3.777924554357096e-05,
      "loss": 2.731,
      "step": 32
    },
    {
      "epoch": 0.1286549707602339,
      "grad_norm": 0.44854679703712463,
      "learning_rate": 3.509700894014496e-05,
      "loss": 2.7024,
      "step": 33
    },
    {
      "epoch": 0.13255360623781676,
      "grad_norm": 0.49962687492370605,
      "learning_rate": 3.250000000000001e-05,
      "loss": 2.7642,
      "step": 34
    },
    {
      "epoch": 0.1364522417153996,
      "grad_norm": 0.5894955992698669,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 2.7013,
      "step": 35
    },
    {
      "epoch": 0.14035087719298245,
      "grad_norm": 0.7012891173362732,
      "learning_rate": 2.760573569460757e-05,
      "loss": 2.7387,
      "step": 36
    },
    {
      "epoch": 0.1442495126705653,
      "grad_norm": 1.5239250659942627,
      "learning_rate": 2.53294383204969e-05,
      "loss": 2.2031,
      "step": 37
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.5231661796569824,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 2.7886,
      "step": 38
    },
    {
      "epoch": 0.15204678362573099,
      "grad_norm": 0.3127061128616333,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.6986,
      "step": 39
    },
    {
      "epoch": 0.15594541910331383,
      "grad_norm": 0.30186259746551514,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.7358,
      "step": 40
    },
    {
      "epoch": 0.15984405458089668,
      "grad_norm": 0.32524609565734863,
      "learning_rate": 1.758386744638546e-05,
      "loss": 2.7544,
      "step": 41
    },
    {
      "epoch": 0.16374269005847952,
      "grad_norm": 0.3468033969402313,
      "learning_rate": 1.602885682970026e-05,
      "loss": 2.8003,
      "step": 42
    },
    {
      "epoch": 0.16764132553606237,
      "grad_norm": 0.35150423645973206,
      "learning_rate": 1.464072663102903e-05,
      "loss": 2.7102,
      "step": 43
    },
    {
      "epoch": 0.17153996101364521,
      "grad_norm": 0.36665645241737366,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 2.7094,
      "step": 44
    },
    {
      "epoch": 0.17543859649122806,
      "grad_norm": 0.3968718945980072,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 2.6526,
      "step": 45
    },
    {
      "epoch": 0.1793372319688109,
      "grad_norm": 0.44515466690063477,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 2.725,
      "step": 46
    },
    {
      "epoch": 0.18323586744639375,
      "grad_norm": 0.488920658826828,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 2.739,
      "step": 47
    },
    {
      "epoch": 0.1871345029239766,
      "grad_norm": 0.6259700059890747,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 2.8396,
      "step": 48
    },
    {
      "epoch": 0.19103313840155944,
      "grad_norm": 0.9088934659957886,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 2.5351,
      "step": 49
    },
    {
      "epoch": 0.1949317738791423,
      "grad_norm": 4.3960280418396,
      "learning_rate": 1e-05,
      "loss": 2.3839,
      "step": 50
    },
    {
      "epoch": 0.1949317738791423,
      "eval_loss": 2.859468698501587,
      "eval_runtime": 1.7859,
      "eval_samples_per_second": 27.997,
      "eval_steps_per_second": 7.279,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.059536353886208e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}