{
  "best_metric": 2.4592623710632324,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.02857959416976279,
  "eval_steps": 5,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0011431837667905116,
      "grad_norm": 5.08370304107666,
      "learning_rate": 2e-05,
      "loss": 16.5546,
      "step": 1
    },
    {
      "epoch": 0.0011431837667905116,
      "eval_loss": 4.0086774826049805,
      "eval_runtime": 68.1057,
      "eval_samples_per_second": 5.418,
      "eval_steps_per_second": 2.716,
      "step": 1
    },
    {
      "epoch": 0.002286367533581023,
      "grad_norm": 4.6169304847717285,
      "learning_rate": 4e-05,
      "loss": 14.4938,
      "step": 2
    },
    {
      "epoch": 0.003429551300371535,
      "grad_norm": 3.9980452060699463,
      "learning_rate": 6e-05,
      "loss": 15.4817,
      "step": 3
    },
    {
      "epoch": 0.004572735067162046,
      "grad_norm": 5.561739444732666,
      "learning_rate": 8e-05,
      "loss": 14.7683,
      "step": 4
    },
    {
      "epoch": 0.005715918833952558,
      "grad_norm": 3.723816394805908,
      "learning_rate": 0.0001,
      "loss": 14.5941,
      "step": 5
    },
    {
      "epoch": 0.005715918833952558,
      "eval_loss": 3.934452772140503,
      "eval_runtime": 67.1504,
      "eval_samples_per_second": 5.495,
      "eval_steps_per_second": 2.755,
      "step": 5
    },
    {
      "epoch": 0.00685910260074307,
      "grad_norm": 7.259289741516113,
      "learning_rate": 0.00012,
      "loss": 17.6895,
      "step": 6
    },
    {
      "epoch": 0.00800228636753358,
      "grad_norm": 9.807666778564453,
      "learning_rate": 0.00014,
      "loss": 17.1144,
      "step": 7
    },
    {
      "epoch": 0.009145470134324093,
      "grad_norm": 8.382555961608887,
      "learning_rate": 0.00016,
      "loss": 14.5067,
      "step": 8
    },
    {
      "epoch": 0.010288653901114605,
      "grad_norm": 6.742790222167969,
      "learning_rate": 0.00018,
      "loss": 11.457,
      "step": 9
    },
    {
      "epoch": 0.011431837667905115,
      "grad_norm": 8.512904167175293,
      "learning_rate": 0.0002,
      "loss": 11.6258,
      "step": 10
    },
    {
      "epoch": 0.011431837667905115,
      "eval_loss": 2.9601454734802246,
      "eval_runtime": 68.7087,
      "eval_samples_per_second": 5.37,
      "eval_steps_per_second": 2.693,
      "step": 10
    },
    {
      "epoch": 0.012575021434695627,
      "grad_norm": 9.215521812438965,
      "learning_rate": 0.00019781476007338058,
      "loss": 11.6118,
      "step": 11
    },
    {
      "epoch": 0.01371820520148614,
      "grad_norm": 10.720466613769531,
      "learning_rate": 0.0001913545457642601,
      "loss": 11.387,
      "step": 12
    },
    {
      "epoch": 0.014861388968276651,
      "grad_norm": 9.58012866973877,
      "learning_rate": 0.00018090169943749476,
      "loss": 12.0335,
      "step": 13
    },
    {
      "epoch": 0.01600457273506716,
      "grad_norm": 8.0549955368042,
      "learning_rate": 0.00016691306063588583,
      "loss": 11.5604,
      "step": 14
    },
    {
      "epoch": 0.017147756501857674,
      "grad_norm": 6.788261413574219,
      "learning_rate": 0.00015000000000000001,
      "loss": 11.255,
      "step": 15
    },
    {
      "epoch": 0.017147756501857674,
      "eval_loss": 2.5551671981811523,
      "eval_runtime": 67.9449,
      "eval_samples_per_second": 5.431,
      "eval_steps_per_second": 2.723,
      "step": 15
    },
    {
      "epoch": 0.018290940268648186,
      "grad_norm": 6.838973522186279,
      "learning_rate": 0.00013090169943749476,
      "loss": 11.1901,
      "step": 16
    },
    {
      "epoch": 0.019434124035438698,
      "grad_norm": 8.572029113769531,
      "learning_rate": 0.00011045284632676536,
      "loss": 10.9187,
      "step": 17
    },
    {
      "epoch": 0.02057730780222921,
      "grad_norm": 11.447615623474121,
      "learning_rate": 8.954715367323468e-05,
      "loss": 10.5933,
      "step": 18
    },
    {
      "epoch": 0.021720491569019718,
      "grad_norm": 11.014549255371094,
      "learning_rate": 6.909830056250527e-05,
      "loss": 12.2246,
      "step": 19
    },
    {
      "epoch": 0.02286367533581023,
      "grad_norm": 10.644768714904785,
      "learning_rate": 5.000000000000002e-05,
      "loss": 9.7867,
      "step": 20
    },
    {
      "epoch": 0.02286367533581023,
      "eval_loss": 2.4806277751922607,
      "eval_runtime": 67.9043,
      "eval_samples_per_second": 5.434,
      "eval_steps_per_second": 2.724,
      "step": 20
    },
    {
      "epoch": 0.024006859102600742,
      "grad_norm": 7.599691390991211,
      "learning_rate": 3.308693936411421e-05,
      "loss": 8.5645,
      "step": 21
    },
    {
      "epoch": 0.025150042869391254,
      "grad_norm": 7.398324966430664,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 9.0756,
      "step": 22
    },
    {
      "epoch": 0.026293226636181766,
      "grad_norm": 8.772193908691406,
      "learning_rate": 8.645454235739903e-06,
      "loss": 9.3727,
      "step": 23
    },
    {
      "epoch": 0.02743641040297228,
      "grad_norm": 11.05912971496582,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 8.183,
      "step": 24
    },
    {
      "epoch": 0.02857959416976279,
      "grad_norm": 6.729010105133057,
      "learning_rate": 0.0,
      "loss": 8.6281,
      "step": 25
    },
    {
      "epoch": 0.02857959416976279,
      "eval_loss": 2.4592623710632324,
      "eval_runtime": 67.5037,
      "eval_samples_per_second": 5.466,
      "eval_steps_per_second": 2.741,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7915560763392000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}