{
  "best_metric": 0.4409728815314771,
  "best_model_checkpoint": "/storage/elsayedissa/whisper-large-v2-arabic-24h/checkpoint-1000",
  "epoch": 1.7825311942959001,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 4.4e-07,
      "loss": 0.8858,
      "step": 25
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.200000000000001e-07,
      "loss": 0.7395,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.3384,
      "step": 75
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.9000000000000002e-06,
      "loss": 0.3633,
      "step": 100
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.2796,
      "step": 125
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.9e-06,
      "loss": 0.3068,
      "step": 150
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.251,
      "step": 175
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.2845,
      "step": 200
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.4e-06,
      "loss": 0.244,
      "step": 225
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 0.297,
      "step": 250
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.400000000000001e-06,
      "loss": 0.2282,
      "step": 275
    },
    {
      "epoch": 0.53,
      "learning_rate": 5.9e-06,
      "loss": 0.2681,
      "step": 300
    },
    {
      "epoch": 0.58,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.2311,
      "step": 325
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.9e-06,
      "loss": 0.265,
      "step": 350
    },
    {
      "epoch": 0.67,
      "learning_rate": 7.4e-06,
      "loss": 0.2299,
      "step": 375
    },
    {
      "epoch": 0.71,
      "learning_rate": 7.9e-06,
      "loss": 0.2707,
      "step": 400
    },
    {
      "epoch": 0.76,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.2071,
      "step": 425
    },
    {
      "epoch": 0.8,
      "learning_rate": 8.900000000000001e-06,
      "loss": 0.2662,
      "step": 450
    },
    {
      "epoch": 0.85,
      "learning_rate": 9.4e-06,
      "loss": 0.2056,
      "step": 475
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.9e-06,
      "loss": 0.2588,
      "step": 500
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.955555555555556e-06,
      "loss": 0.2123,
      "step": 525
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.9e-06,
      "loss": 0.247,
      "step": 550
    },
    {
      "epoch": 1.02,
      "learning_rate": 9.844444444444446e-06,
      "loss": 0.1877,
      "step": 575
    },
    {
      "epoch": 1.07,
      "learning_rate": 9.78888888888889e-06,
      "loss": 0.1774,
      "step": 600
    },
    {
      "epoch": 1.11,
      "learning_rate": 9.733333333333334e-06,
      "loss": 0.1732,
      "step": 625
    },
    {
      "epoch": 1.16,
      "learning_rate": 9.677777777777778e-06,
      "loss": 0.1739,
      "step": 650
    },
    {
      "epoch": 1.2,
      "learning_rate": 9.622222222222222e-06,
      "loss": 0.1613,
      "step": 675
    },
    {
      "epoch": 1.25,
      "learning_rate": 9.566666666666668e-06,
      "loss": 0.1711,
      "step": 700
    },
    {
      "epoch": 1.29,
      "learning_rate": 9.511111111111112e-06,
      "loss": 0.1604,
      "step": 725
    },
    {
      "epoch": 1.34,
      "learning_rate": 9.455555555555557e-06,
      "loss": 0.1623,
      "step": 750
    },
    {
      "epoch": 1.38,
      "learning_rate": 9.4e-06,
      "loss": 0.1584,
      "step": 775
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.344444444444446e-06,
      "loss": 0.1664,
      "step": 800
    },
    {
      "epoch": 1.47,
      "learning_rate": 9.28888888888889e-06,
      "loss": 0.1708,
      "step": 825
    },
    {
      "epoch": 1.52,
      "learning_rate": 9.233333333333334e-06,
      "loss": 0.1647,
      "step": 850
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.17777777777778e-06,
      "loss": 0.1595,
      "step": 875
    },
    {
      "epoch": 1.6,
      "learning_rate": 9.122222222222223e-06,
      "loss": 0.1599,
      "step": 900
    },
    {
      "epoch": 1.65,
      "learning_rate": 9.066666666666667e-06,
      "loss": 0.1686,
      "step": 925
    },
    {
      "epoch": 1.69,
      "learning_rate": 9.011111111111111e-06,
      "loss": 0.1684,
      "step": 950
    },
    {
      "epoch": 1.74,
      "learning_rate": 8.955555555555555e-06,
      "loss": 0.1566,
      "step": 975
    },
    {
      "epoch": 1.78,
      "learning_rate": 8.900000000000001e-06,
      "loss": 0.1638,
      "step": 1000
    },
    {
      "epoch": 1.78,
      "eval_loss": 0.22948628664016724,
      "eval_runtime": 4406.6894,
      "eval_samples_per_second": 2.369,
      "eval_steps_per_second": 0.148,
      "eval_wer": 0.4409728815314771,
      "step": 1000
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 9,
  "total_flos": 1.061436888428544e+20,
  "trial_name": null,
  "trial_params": null
}