{
  "best_metric": 0.9468892216682434,
  "best_model_checkpoint": "miner_id_24/checkpoint-20",
  "epoch": 0.002733454060887689,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00013667270304438446,
      "grad_norm": 0.50577712059021,
      "learning_rate": 1e-05,
      "loss": 1.0777,
      "step": 1
    },
    {
      "epoch": 0.00013667270304438446,
      "eval_loss": 1.2357397079467773,
      "eval_runtime": 652.1609,
      "eval_samples_per_second": 4.724,
      "eval_steps_per_second": 2.363,
      "step": 1
    },
    {
      "epoch": 0.00027334540608876893,
      "grad_norm": 0.3678644299507141,
      "learning_rate": 2e-05,
      "loss": 1.2261,
      "step": 2
    },
    {
      "epoch": 0.00041001810913315336,
      "grad_norm": 0.49455997347831726,
      "learning_rate": 3e-05,
      "loss": 1.0898,
      "step": 3
    },
    {
      "epoch": 0.0005466908121775379,
      "grad_norm": 0.4921368658542633,
      "learning_rate": 4e-05,
      "loss": 1.069,
      "step": 4
    },
    {
      "epoch": 0.0006833635152219223,
      "grad_norm": 0.43720000982284546,
      "learning_rate": 5e-05,
      "loss": 1.1512,
      "step": 5
    },
    {
      "epoch": 0.0006833635152219223,
      "eval_loss": 1.2310501337051392,
      "eval_runtime": 649.2905,
      "eval_samples_per_second": 4.745,
      "eval_steps_per_second": 2.373,
      "step": 5
    },
    {
      "epoch": 0.0008200362182663067,
      "grad_norm": 0.5448674559593201,
      "learning_rate": 6e-05,
      "loss": 1.0919,
      "step": 6
    },
    {
      "epoch": 0.0009567089213106913,
      "grad_norm": 0.24378623068332672,
      "learning_rate": 7e-05,
      "loss": 0.7775,
      "step": 7
    },
    {
      "epoch": 0.0010933816243550757,
      "grad_norm": 0.347241073846817,
      "learning_rate": 8e-05,
      "loss": 1.1632,
      "step": 8
    },
    {
      "epoch": 0.0012300543273994601,
      "grad_norm": 0.4972077012062073,
      "learning_rate": 9e-05,
      "loss": 1.0055,
      "step": 9
    },
    {
      "epoch": 0.0013667270304438446,
      "grad_norm": 0.6088771224021912,
      "learning_rate": 0.0001,
      "loss": 1.1217,
      "step": 10
    },
    {
      "epoch": 0.0013667270304438446,
      "eval_loss": 1.1784987449645996,
      "eval_runtime": 649.9639,
      "eval_samples_per_second": 4.74,
      "eval_steps_per_second": 2.371,
      "step": 10
    },
    {
      "epoch": 0.001503399733488229,
      "grad_norm": 0.6936522126197815,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.0377,
      "step": 11
    },
    {
      "epoch": 0.0016400724365326135,
      "grad_norm": 0.6066957116127014,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.1414,
      "step": 12
    },
    {
      "epoch": 0.001776745139576998,
      "grad_norm": 1.024498701095581,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.9257,
      "step": 13
    },
    {
      "epoch": 0.0019134178426213825,
      "grad_norm": 0.39579325914382935,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.7189,
      "step": 14
    },
    {
      "epoch": 0.002050090545665767,
      "grad_norm": 0.5156039595603943,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.0717,
      "step": 15
    },
    {
      "epoch": 0.002050090545665767,
      "eval_loss": 1.0189374685287476,
      "eval_runtime": 651.4577,
      "eval_samples_per_second": 4.729,
      "eval_steps_per_second": 2.365,
      "step": 15
    },
    {
      "epoch": 0.0021867632487101514,
      "grad_norm": 0.4737545847892761,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.9375,
      "step": 16
    },
    {
      "epoch": 0.002323435951754536,
      "grad_norm": 0.37397414445877075,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.794,
      "step": 17
    },
    {
      "epoch": 0.0024601086547989203,
      "grad_norm": 2.7903082370758057,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.8632,
      "step": 18
    },
    {
      "epoch": 0.0025967813578433047,
      "grad_norm": 0.5934101343154907,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.7099,
      "step": 19
    },
    {
      "epoch": 0.002733454060887689,
      "grad_norm": 0.43005478382110596,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.9802,
      "step": 20
    },
    {
      "epoch": 0.002733454060887689,
      "eval_loss": 0.9468892216682434,
      "eval_runtime": 652.2901,
      "eval_samples_per_second": 4.723,
      "eval_steps_per_second": 2.362,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6138140389539840.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}