{
  "best_metric": 2.4627528190612793,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.02857959416976279,
  "eval_steps": 5,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011431837667905116,
      "grad_norm": 4.984986782073975,
      "learning_rate": 2e-05,
      "loss": 16.5546,
      "step": 1
    },
    {
      "epoch": 0.0011431837667905116,
      "eval_loss": 4.0086774826049805,
      "eval_runtime": 69.5577,
      "eval_samples_per_second": 5.305,
      "eval_steps_per_second": 2.66,
      "step": 1
    },
    {
      "epoch": 0.002286367533581023,
      "grad_norm": 4.664716720581055,
      "learning_rate": 4e-05,
      "loss": 14.4938,
      "step": 2
    },
    {
      "epoch": 0.003429551300371535,
      "grad_norm": 3.985492706298828,
      "learning_rate": 6e-05,
      "loss": 15.3814,
      "step": 3
    },
    {
      "epoch": 0.004572735067162046,
      "grad_norm": 5.596559524536133,
      "learning_rate": 8e-05,
      "loss": 14.7979,
      "step": 4
    },
    {
      "epoch": 0.005715918833952558,
      "grad_norm": 3.7223613262176514,
      "learning_rate": 0.0001,
      "loss": 14.6139,
      "step": 5
    },
    {
      "epoch": 0.005715918833952558,
      "eval_loss": 3.9355335235595703,
      "eval_runtime": 68.1578,
      "eval_samples_per_second": 5.414,
      "eval_steps_per_second": 2.714,
      "step": 5
    },
    {
      "epoch": 0.00685910260074307,
      "grad_norm": 6.831216335296631,
      "learning_rate": 0.00012,
      "loss": 17.7024,
      "step": 6
    },
    {
      "epoch": 0.00800228636753358,
      "grad_norm": 9.130233764648438,
      "learning_rate": 0.00014,
      "loss": 17.1566,
      "step": 7
    },
    {
      "epoch": 0.009145470134324093,
      "grad_norm": 9.352496147155762,
      "learning_rate": 0.00016,
      "loss": 14.3919,
      "step": 8
    },
    {
      "epoch": 0.010288653901114605,
      "grad_norm": 6.8752617835998535,
      "learning_rate": 0.00018,
      "loss": 11.42,
      "step": 9
    },
    {
      "epoch": 0.011431837667905115,
      "grad_norm": 8.414678573608398,
      "learning_rate": 0.0002,
      "loss": 11.5911,
      "step": 10
    },
    {
      "epoch": 0.011431837667905115,
      "eval_loss": 2.95451283454895,
      "eval_runtime": 68.9337,
      "eval_samples_per_second": 5.353,
      "eval_steps_per_second": 2.684,
      "step": 10
    },
    {
      "epoch": 0.012575021434695627,
      "grad_norm": 9.174803733825684,
      "learning_rate": 0.00019781476007338058,
      "loss": 11.5496,
      "step": 11
    },
    {
      "epoch": 0.01371820520148614,
      "grad_norm": 10.449328422546387,
      "learning_rate": 0.0001913545457642601,
      "loss": 11.4081,
      "step": 12
    },
    {
      "epoch": 0.014861388968276651,
      "grad_norm": 9.23154354095459,
      "learning_rate": 0.00018090169943749476,
      "loss": 12.0689,
      "step": 13
    },
    {
      "epoch": 0.01600457273506716,
      "grad_norm": 8.31028938293457,
      "learning_rate": 0.00016691306063588583,
      "loss": 11.5999,
      "step": 14
    },
    {
      "epoch": 0.017147756501857674,
      "grad_norm": 6.680941581726074,
      "learning_rate": 0.00015000000000000001,
      "loss": 11.2596,
      "step": 15
    },
    {
      "epoch": 0.017147756501857674,
      "eval_loss": 2.5652663707733154,
      "eval_runtime": 69.4645,
      "eval_samples_per_second": 5.312,
      "eval_steps_per_second": 2.663,
      "step": 15
    },
    {
      "epoch": 0.018290940268648186,
      "grad_norm": 6.815301895141602,
      "learning_rate": 0.00013090169943749476,
      "loss": 11.2358,
      "step": 16
    },
    {
      "epoch": 0.019434124035438698,
      "grad_norm": 9.210680961608887,
      "learning_rate": 0.00011045284632676536,
      "loss": 11.0398,
      "step": 17
    },
    {
      "epoch": 0.02057730780222921,
      "grad_norm": 11.145197868347168,
      "learning_rate": 8.954715367323468e-05,
      "loss": 10.5009,
      "step": 18
    },
    {
      "epoch": 0.021720491569019718,
      "grad_norm": 10.375300407409668,
      "learning_rate": 6.909830056250527e-05,
      "loss": 12.2334,
      "step": 19
    },
    {
      "epoch": 0.02286367533581023,
      "grad_norm": 10.878546714782715,
      "learning_rate": 5.000000000000002e-05,
      "loss": 9.7952,
      "step": 20
    },
    {
      "epoch": 0.02286367533581023,
      "eval_loss": 2.4870645999908447,
      "eval_runtime": 68.3821,
      "eval_samples_per_second": 5.396,
      "eval_steps_per_second": 2.705,
      "step": 20
    },
    {
      "epoch": 0.024006859102600742,
      "grad_norm": 7.396235466003418,
      "learning_rate": 3.308693936411421e-05,
      "loss": 8.5751,
      "step": 21
    },
    {
      "epoch": 0.025150042869391254,
      "grad_norm": 7.178986072540283,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 9.0461,
      "step": 22
    },
    {
      "epoch": 0.026293226636181766,
      "grad_norm": 8.065658569335938,
      "learning_rate": 8.645454235739903e-06,
      "loss": 9.4133,
      "step": 23
    },
    {
      "epoch": 0.02743641040297228,
      "grad_norm": 10.194811820983887,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 8.1633,
      "step": 24
    },
    {
      "epoch": 0.02857959416976279,
      "grad_norm": 7.041247844696045,
      "learning_rate": 0.0,
      "loss": 8.6386,
      "step": 25
    },
    {
      "epoch": 0.02857959416976279,
      "eval_loss": 2.4627528190612793,
      "eval_runtime": 68.7471,
      "eval_samples_per_second": 5.368,
      "eval_steps_per_second": 2.691,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7915560763392000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}