{
"best_metric": 0.7435250282287598,
"best_model_checkpoint": "./whisper-small-arabic-finetuned-on-halabi_daataset_with-diacritics-2/checkpoint-200",
"epoch": 17.548672566371682,
"eval_steps": 200,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4424778761061947,
"grad_norm": 49.0963249206543,
"learning_rate": 1.25e-06,
"loss": 0.9627,
"step": 25
},
{
"epoch": 0.8849557522123894,
"grad_norm": 8.113597869873047,
"learning_rate": 2.5e-06,
"loss": 0.44,
"step": 50
},
{
"epoch": 1.3185840707964602,
"grad_norm": 5.070877552032471,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.1815,
"step": 75
},
{
"epoch": 1.7610619469026547,
"grad_norm": 4.050365447998047,
"learning_rate": 5e-06,
"loss": 0.1147,
"step": 100
},
{
"epoch": 2.1946902654867255,
"grad_norm": 6.347820281982422,
"learning_rate": 6.25e-06,
"loss": 0.0813,
"step": 125
},
{
"epoch": 2.6371681415929205,
"grad_norm": 4.465636730194092,
"learning_rate": 7.500000000000001e-06,
"loss": 0.0812,
"step": 150
},
{
"epoch": 3.0707964601769913,
"grad_norm": 4.107247352600098,
"learning_rate": 8.750000000000001e-06,
"loss": 0.0708,
"step": 175
},
{
"epoch": 3.5132743362831858,
"grad_norm": 5.341292381286621,
"learning_rate": 1e-05,
"loss": 0.0504,
"step": 200
},
{
"epoch": 3.5132743362831858,
"eval_loss": 0.7435250282287598,
"eval_runtime": 94.7482,
"eval_samples_per_second": 1.045,
"eval_steps_per_second": 0.074,
"eval_wer": 0.7221022318214543,
"step": 200
},
{
"epoch": 3.9557522123893807,
"grad_norm": 2.8596858978271484,
"learning_rate": 9.6875e-06,
"loss": 0.0502,
"step": 225
},
{
"epoch": 4.389380530973451,
"grad_norm": 1.7310813665390015,
"learning_rate": 9.375000000000001e-06,
"loss": 0.0419,
"step": 250
},
{
"epoch": 4.831858407079646,
"grad_norm": 2.756486415863037,
"learning_rate": 9.0625e-06,
"loss": 0.0396,
"step": 275
},
{
"epoch": 5.265486725663717,
"grad_norm": 6.585121154785156,
"learning_rate": 8.750000000000001e-06,
"loss": 0.0258,
"step": 300
},
{
"epoch": 5.707964601769912,
"grad_norm": 0.31891903281211853,
"learning_rate": 8.4375e-06,
"loss": 0.0298,
"step": 325
},
{
"epoch": 6.1415929203539825,
"grad_norm": 1.1073646545410156,
"learning_rate": 8.125000000000001e-06,
"loss": 0.0226,
"step": 350
},
{
"epoch": 6.584070796460177,
"grad_norm": 1.3673633337020874,
"learning_rate": 7.8125e-06,
"loss": 0.02,
"step": 375
},
{
"epoch": 7.017699115044247,
"grad_norm": 2.9860990047454834,
"learning_rate": 7.500000000000001e-06,
"loss": 0.0138,
"step": 400
},
{
"epoch": 7.017699115044247,
"eval_loss": 0.9074220061302185,
"eval_runtime": 84.7371,
"eval_samples_per_second": 1.168,
"eval_steps_per_second": 0.083,
"eval_wer": 0.7134629229661628,
"step": 400
},
{
"epoch": 7.460176991150442,
"grad_norm": 0.045507967472076416,
"learning_rate": 7.1875e-06,
"loss": 0.0095,
"step": 425
},
{
"epoch": 7.902654867256637,
"grad_norm": 0.6703155636787415,
"learning_rate": 6.875e-06,
"loss": 0.0128,
"step": 450
},
{
"epoch": 8.336283185840708,
"grad_norm": 0.6974866390228271,
"learning_rate": 6.5625e-06,
"loss": 0.0097,
"step": 475
},
{
"epoch": 8.778761061946902,
"grad_norm": 2.5476107597351074,
"learning_rate": 6.25e-06,
"loss": 0.0083,
"step": 500
},
{
"epoch": 9.212389380530974,
"grad_norm": 0.13264885544776917,
"learning_rate": 5.9375e-06,
"loss": 0.0053,
"step": 525
},
{
"epoch": 9.654867256637168,
"grad_norm": 0.18639236688613892,
"learning_rate": 5.625e-06,
"loss": 0.0073,
"step": 550
},
{
"epoch": 10.08849557522124,
"grad_norm": 0.33259156346321106,
"learning_rate": 5.3125e-06,
"loss": 0.0067,
"step": 575
},
{
"epoch": 10.530973451327434,
"grad_norm": 2.248627185821533,
"learning_rate": 5e-06,
"loss": 0.0049,
"step": 600
},
{
"epoch": 10.530973451327434,
"eval_loss": 1.182573676109314,
"eval_runtime": 99.0092,
"eval_samples_per_second": 1.0,
"eval_steps_per_second": 0.071,
"eval_wer": 0.7156227501799856,
"step": 600
},
{
"epoch": 10.973451327433628,
"grad_norm": 0.3167344629764557,
"learning_rate": 4.6875000000000004e-06,
"loss": 0.0038,
"step": 625
},
{
"epoch": 11.4070796460177,
"grad_norm": 0.15390612185001373,
"learning_rate": 4.3750000000000005e-06,
"loss": 0.0041,
"step": 650
},
{
"epoch": 11.849557522123893,
"grad_norm": 0.10117767751216888,
"learning_rate": 4.0625000000000005e-06,
"loss": 0.0049,
"step": 675
},
{
"epoch": 12.283185840707965,
"grad_norm": 3.8471896648406982,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0057,
"step": 700
},
{
"epoch": 12.725663716814159,
"grad_norm": 0.028711393475532532,
"learning_rate": 3.4375e-06,
"loss": 0.0034,
"step": 725
},
{
"epoch": 13.15929203539823,
"grad_norm": 0.08290126919746399,
"learning_rate": 3.125e-06,
"loss": 0.0033,
"step": 750
},
{
"epoch": 13.601769911504425,
"grad_norm": 0.03170332685112953,
"learning_rate": 2.8125e-06,
"loss": 0.0022,
"step": 775
},
{
"epoch": 14.035398230088495,
"grad_norm": 0.2151724398136139,
"learning_rate": 2.5e-06,
"loss": 0.0013,
"step": 800
},
{
"epoch": 14.035398230088495,
"eval_loss": 1.1965606212615967,
"eval_runtime": 88.1337,
"eval_samples_per_second": 1.123,
"eval_steps_per_second": 0.079,
"eval_wer": 0.7156227501799856,
"step": 800
},
{
"epoch": 14.47787610619469,
"grad_norm": 0.03010549768805504,
"learning_rate": 2.1875000000000002e-06,
"loss": 0.0007,
"step": 825
},
{
"epoch": 14.920353982300885,
"grad_norm": 0.02499246783554554,
"learning_rate": 1.8750000000000003e-06,
"loss": 0.0015,
"step": 850
},
{
"epoch": 15.353982300884956,
"grad_norm": 0.017421068623661995,
"learning_rate": 1.5625e-06,
"loss": 0.0009,
"step": 875
},
{
"epoch": 15.79646017699115,
"grad_norm": 0.14170809090137482,
"learning_rate": 1.25e-06,
"loss": 0.001,
"step": 900
},
{
"epoch": 16.23008849557522,
"grad_norm": 0.013867147266864777,
"learning_rate": 9.375000000000001e-07,
"loss": 0.0006,
"step": 925
},
{
"epoch": 16.672566371681416,
"grad_norm": 0.020731482654809952,
"learning_rate": 6.25e-07,
"loss": 0.0008,
"step": 950
},
{
"epoch": 17.106194690265486,
"grad_norm": 0.03430557996034622,
"learning_rate": 3.125e-07,
"loss": 0.0009,
"step": 975
},
{
"epoch": 17.548672566371682,
"grad_norm": 0.10500048846006393,
"learning_rate": 0.0,
"loss": 0.0008,
"step": 1000
},
{
"epoch": 17.548672566371682,
"eval_loss": 1.1946815252304077,
"eval_runtime": 98.7155,
"eval_samples_per_second": 1.003,
"eval_steps_per_second": 0.071,
"eval_wer": 0.7163426925845933,
"step": 1000
},
{
"epoch": 17.548672566371682,
"step": 1000,
"total_flos": 9.15623758135296e+18,
"train_loss": 0.05816659236606211,
"train_runtime": 17237.6477,
"train_samples_per_second": 1.856,
"train_steps_per_second": 0.058
}
],
"logging_steps": 25,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 18,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.15623758135296e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}