{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 20700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.42,
      "learning_rate": 1.74475e-05,
      "loss": 7.0856,
      "step": 1000
    },
    {
      "epoch": 4.83,
      "learning_rate": 3.49475e-05,
      "loss": 2.771,
      "step": 2000
    },
    {
      "epoch": 7.25,
      "learning_rate": 5.243e-05,
      "loss": 1.4051,
      "step": 3000
    },
    {
      "epoch": 9.66,
      "learning_rate": 6.99125e-05,
      "loss": 1.173,
      "step": 4000
    },
    {
      "epoch": 9.66,
      "eval_loss": 0.29204532504081726,
      "eval_runtime": 168.5565,
      "eval_samples_per_second": 27.801,
      "eval_steps_per_second": 1.738,
      "eval_wer": 0.36075506874854346,
      "step": 4000
    },
    {
      "epoch": 12.08,
      "learning_rate": 6.582934131736526e-05,
      "loss": 1.082,
      "step": 5000
    },
    {
      "epoch": 14.49,
      "learning_rate": 6.164191616766467e-05,
      "loss": 1.0256,
      "step": 6000
    },
    {
      "epoch": 16.91,
      "learning_rate": 5.74502994011976e-05,
      "loss": 0.9779,
      "step": 7000
    },
    {
      "epoch": 19.32,
      "learning_rate": 5.3262874251497e-05,
      "loss": 0.9433,
      "step": 8000
    },
    {
      "epoch": 19.32,
      "eval_loss": 0.23364943265914917,
      "eval_runtime": 167.8504,
      "eval_samples_per_second": 27.918,
      "eval_steps_per_second": 1.746,
      "eval_wer": 0.30261011419249595,
      "step": 8000
    },
    {
      "epoch": 21.74,
      "learning_rate": 4.907125748502993e-05,
      "loss": 0.9211,
      "step": 9000
    },
    {
      "epoch": 24.15,
      "learning_rate": 4.48880239520958e-05,
      "loss": 0.8969,
      "step": 10000
    },
    {
      "epoch": 26.57,
      "learning_rate": 4.069640718562874e-05,
      "loss": 0.8727,
      "step": 11000
    },
    {
      "epoch": 28.99,
      "learning_rate": 3.6504790419161677e-05,
      "loss": 0.8552,
      "step": 12000
    },
    {
      "epoch": 28.99,
      "eval_loss": 0.22206313908100128,
      "eval_runtime": 169.6004,
      "eval_samples_per_second": 27.63,
      "eval_steps_per_second": 1.728,
      "eval_wer": 0.27988813796317874,
      "step": 12000
    },
    {
      "epoch": 31.4,
      "learning_rate": 3.231317365269461e-05,
      "loss": 0.8401,
      "step": 13000
    },
    {
      "epoch": 33.82,
      "learning_rate": 2.8121556886227542e-05,
      "loss": 0.8178,
      "step": 14000
    },
    {
      "epoch": 36.23,
      "learning_rate": 2.3934131736526942e-05,
      "loss": 0.8002,
      "step": 15000
    },
    {
      "epoch": 38.65,
      "learning_rate": 1.974251497005988e-05,
      "loss": 0.7863,
      "step": 16000
    },
    {
      "epoch": 38.65,
      "eval_loss": 0.1952943205833435,
      "eval_runtime": 169.2585,
      "eval_samples_per_second": 27.685,
      "eval_steps_per_second": 1.731,
      "eval_wer": 0.24792200730210517,
      "step": 16000
    },
    {
      "epoch": 41.06,
      "learning_rate": 1.555508982035928e-05,
      "loss": 0.778,
      "step": 17000
    },
    {
      "epoch": 43.48,
      "learning_rate": 1.1367664670658683e-05,
      "loss": 0.7607,
      "step": 18000
    },
    {
      "epoch": 45.89,
      "learning_rate": 7.180239520958083e-06,
      "loss": 0.7481,
      "step": 19000
    },
    {
      "epoch": 48.31,
      "learning_rate": 2.988622754491018e-06,
      "loss": 0.7365,
      "step": 20000
    },
    {
      "epoch": 48.31,
      "eval_loss": 0.1967712938785553,
      "eval_runtime": 167.9953,
      "eval_samples_per_second": 27.894,
      "eval_steps_per_second": 1.744,
      "eval_wer": 0.24489241047152957,
      "step": 20000
    },
    {
      "epoch": 50.0,
      "step": 20700,
      "total_flos": 7.668153416081444e+19,
      "train_loss": 1.2943066022937424,
      "train_runtime": 28854.2781,
      "train_samples_per_second": 22.955,
      "train_steps_per_second": 0.717
    }
  ],
  "max_steps": 20700,
  "num_train_epochs": 50,
  "total_flos": 7.668153416081444e+19,
  "trial_name": null,
  "trial_params": null
}