deepspeed run_speech_recognition_seq2seq_streaming.py \
    --deepspeed="ds_config.json" \
    --model_name_or_path="openai/whisper-large-v2" \
    --dataset_name="mozilla-foundation/common_voice_11_0" \
    --dataset_config_name="ha" \
    --language="hausa" \
    --train_split_name="train+validation" \
    --eval_split_name="test" \
    --model_index_name="Whisper Large-v2 Hausa" \
    --max_steps="1000" \
    --output_dir="./" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="16" \
    --logging_steps="25" \
    --learning_rate="1e-5" \
    --warmup_steps="100" \
    --evaluation_strategy="steps" \
    --eval_steps="1000" \
    --save_strategy="steps" \
    --save_steps="1000" \
    --generation_max_length="225" \
    --length_column_name="input_length" \
    --max_duration_in_seconds="30" \
    --text_column_name="sentence" \
    --freeze_feature_encoder="False" \
    --report_to="tensorboard" \
    --gradient_checkpointing \
    --fp16 \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --predict_with_generate \
    --do_normalize_eval \
    --use_auth_token \
    --push_to_hub
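The `--deepspeed` flag points the Trainer at a DeepSpeed configuration file. The exact contents of `ds_config.json` are not reproduced here; the following is a minimal sketch of a Trainer-compatible config, assuming ZeRO stage 2 with CPU optimizer offload. The `"auto"` placeholders are resolved at runtime from the launch arguments above (learning rate, warmup steps, batch size, fp16, and so on), so the two sources of truth cannot drift apart.

```json
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "last_batch_iteration": -1,
            "total_num_steps": "auto",
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 2,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "allgather_partitions": true,
        "overlap_comm": true,
        "reduce_scatter": true,
        "contiguous_gradients": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto"
}
```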