{
  "best_metric": 0.9521203830369357,
  "best_model_checkpoint": "/content/dissertation/scripts/ner/output/checkpoint-2440",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 4880,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9987641945981572,
      "eval_f1": 0.9355123674911661,
      "eval_loss": 0.003916793502867222,
      "eval_precision": 0.9005102040816326,
      "eval_recall": 0.9733455882352942,
      "eval_runtime": 13.936,
      "eval_samples_per_second": 488.661,
      "eval_steps_per_second": 61.136,
      "step": 488
    },
    {
      "epoch": 1.0245901639344261,
      "grad_norm": 0.24995087087154388,
      "learning_rate": 4.487704918032787e-05,
      "loss": 0.0189,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9989495654084337,
      "eval_f1": 0.9413886384129847,
      "eval_loss": 0.0032420416828244925,
      "eval_precision": 0.9238938053097345,
      "eval_recall": 0.9595588235294118,
      "eval_runtime": 14.0058,
      "eval_samples_per_second": 486.228,
      "eval_steps_per_second": 60.832,
      "step": 976
    },
    {
      "epoch": 2.0491803278688523,
      "grad_norm": 0.04940846189856529,
      "learning_rate": 3.975409836065574e-05,
      "loss": 0.0027,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9989358342373021,
      "eval_f1": 0.9402784014369105,
      "eval_loss": 0.0043699671514332294,
      "eval_precision": 0.9192273924495171,
      "eval_recall": 0.9623161764705882,
      "eval_runtime": 14.0607,
      "eval_samples_per_second": 484.329,
      "eval_steps_per_second": 60.594,
      "step": 1464
    },
    {
      "epoch": 3.0737704918032787,
      "grad_norm": 0.01636342518031597,
      "learning_rate": 3.463114754098361e-05,
      "loss": 0.0015,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9990594147774864,
      "eval_f1": 0.9445208619899129,
      "eval_loss": 0.0036185304634273052,
      "eval_precision": 0.9423604757548033,
      "eval_recall": 0.9466911764705882,
      "eval_runtime": 14.26,
      "eval_samples_per_second": 477.56,
      "eval_steps_per_second": 59.748,
      "step": 1952
    },
    {
      "epoch": 4.098360655737705,
      "grad_norm": 0.006754329428076744,
      "learning_rate": 2.9508196721311478e-05,
      "loss": 0.0007,
      "step": 2000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9991418018042759,
      "eval_f1": 0.9521203830369357,
      "eval_loss": 0.004440780263394117,
      "eval_precision": 0.9447963800904977,
      "eval_recall": 0.9595588235294118,
      "eval_runtime": 14.0071,
      "eval_samples_per_second": 486.182,
      "eval_steps_per_second": 60.826,
      "step": 2440
    },
    {
      "epoch": 5.122950819672131,
      "grad_norm": 0.0005009469459764659,
      "learning_rate": 2.4385245901639343e-05,
      "loss": 0.0004,
      "step": 2500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9990456836063548,
      "eval_f1": 0.9464368886818817,
      "eval_loss": 0.005531433038413525,
      "eval_precision": 0.959395656279509,
      "eval_recall": 0.9338235294117647,
      "eval_runtime": 14.0724,
      "eval_samples_per_second": 483.926,
      "eval_steps_per_second": 60.544,
      "step": 2928
    },
    {
      "epoch": 6.147540983606557,
      "grad_norm": 0.7484572529792786,
      "learning_rate": 1.9262295081967212e-05,
      "loss": 0.0002,
      "step": 3000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9989907589218284,
      "eval_f1": 0.9427393495190106,
      "eval_loss": 0.004938796162605286,
      "eval_precision": 0.9397260273972603,
      "eval_recall": 0.9457720588235294,
      "eval_runtime": 13.9575,
      "eval_samples_per_second": 487.909,
      "eval_steps_per_second": 61.042,
      "step": 3416
    },
    {
      "epoch": 7.172131147540983,
      "grad_norm": 0.00041869020787999034,
      "learning_rate": 1.4139344262295081e-05,
      "loss": 0.0002,
      "step": 3500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.999073145948618,
      "eval_f1": 0.9468864468864469,
      "eval_loss": 0.005282656755298376,
      "eval_precision": 0.9434306569343066,
      "eval_recall": 0.9503676470588235,
      "eval_runtime": 14.1087,
      "eval_samples_per_second": 482.68,
      "eval_steps_per_second": 60.388,
      "step": 3904
    },
    {
      "epoch": 8.19672131147541,
      "grad_norm": 0.004756764974445105,
      "learning_rate": 9.016393442622952e-06,
      "loss": 0.0001,
      "step": 4000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9990525491919205,
      "eval_f1": 0.946404031149794,
      "eval_loss": 0.0050343358889222145,
      "eval_precision": 0.94337899543379,
      "eval_recall": 0.9494485294117647,
      "eval_runtime": 14.373,
      "eval_samples_per_second": 473.804,
      "eval_steps_per_second": 59.278,
      "step": 4392
    },
    {
      "epoch": 9.221311475409836,
      "grad_norm": 0.00033881032140925527,
      "learning_rate": 3.89344262295082e-06,
      "loss": 0.0001,
      "step": 4500
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9990525491919205,
      "eval_f1": 0.945537757437071,
      "eval_loss": 0.005196314305067062,
      "eval_precision": 0.9416590701914311,
      "eval_recall": 0.9494485294117647,
      "eval_runtime": 14.6459,
      "eval_samples_per_second": 464.976,
      "eval_steps_per_second": 58.173,
      "step": 4880
    },
    {
      "epoch": 10.0,
      "step": 4880,
      "total_flos": 1.5694885147146138e+16,
      "train_loss": 0.002533856062142209,
      "train_runtime": 1422.7831,
      "train_samples_per_second": 219.492,
      "train_steps_per_second": 3.43
    }
  ],
  "logging_steps": 500,
  "max_steps": 4880,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5694885147146138e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}