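# Weights & Biases sweep configuration for distil-whisper distillation:
# a grid sweep over wer_threshold, launching run_distillation.py for each
# value and logging results to the distil-whisper-sweeps project.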
command:
- python3
- ${program}
- --do_train
- --do_eval
- --use_scan
- --gradient_checkpointing
- --overwrite_output_dir
- --predict_with_generate
- --freeze_encoder
- --streaming
- --use_auth_token
- ${args}
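# wandb substitutes ${program} with the script named under `program` below and
# ${args} with the parameter values for the current run, so every job launches
# with the fixed boolean flags above plus that run's swept/constant parameters.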
method: grid
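# Each run reports the word error rate logged on the GigaSpeech-L validation
# split; with `method: grid` every wer_threshold value below is tried rather
# than searching this metric directly.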
metric:
  goal: minimize
  name: gigaspeech-l/validation/wer
parameters:
  model_name_or_path:
    value: distil-whisper/large-32-2
  teacher_model_name_or_path:
    value: openai/whisper-large-v2
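  # The train_dataset_* fields below are position-aligned, `+`-separated lists:
  # the i-th name, config, split and sample count all describe the same dataset
  # (15 entries here, with LibriSpeech and TED-LIUM repeated for their
  # timestamped and prompted variants). train_dataset_samples presumably gives
  # the relative dataset sizes, used to weight sampling when the streamed
  # datasets are interleaved.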
  train_dataset_name:
    value: librispeech_asr-timestamped+librispeech_asr-timestamped+librispeech_asr-timestamped+common_voice_13_0-timestamped+voxpopuli-timestamped+ami-ihm-timestamped+ami-sdm-timestamped+peoples_speech-clean-timestamped+tedlium-timestamped+switchboard-data+gigaspeech-l-timestamped+librispeech_asr-prompted+librispeech_asr-prompted+librispeech_asr-prompted+tedlium-prompted
  train_dataset_config_name:
    value: all+all+all+en+en+ihm+sdm+clean+release3+all+l+all+all+all+release3
  train_split_name:
    value: train.clean.100+train.clean.360+train.other.500+train+train+train+train+train+train+train+train+train.clean.100+train.clean.360+train.other.500+train
  train_dataset_samples:
    value: 2.9+10.4+14.9+89+18.2+10.9+10.9+288+26.8+371.2+226.6+2.9+10.4+14.9+26.8
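  # Evaluation follows the same convention: 13 position-aligned eval sets, each
  # with its own config, validation split and transcript column (google/fleurs
  # stores transcripts under `transcription` rather than `text`).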
  eval_dataset_name:
    value: librispeech_asr+librispeech_asr+common_voice_13_0+voxpopuli+ami-ihm+ami-sdm+peoples_speech-clean+tedlium+switchboard-data+gigaspeech-l+spgispeech+chime4+google/fleurs
  eval_dataset_config_name:
    value: all+all+en+en+ihm+sdm+clean+release3+all+l+L+1-channel+en_us
  eval_split_name:
    value: validation.clean+validation.other+validation+validation+validation+validation+validation+validation+validation+validation+validation+validation+validation
  eval_text_column_name:
    value: text+text+text+text+text+text+text+text+text+text+text+text+transcription
  cache_dir:
    value: /home/sanchitgandhi/.cache
  dataset_cache_dir:
    value: /home/sanchitgandhi/.cache
  output_dir:
    value: ./
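  # Fixed training hyper-parameters shared by every grid point: 10k steps at a
  # constant 1e-4 learning rate after 50 warm-up steps, per-device batch size
  # 64, bfloat16 precision.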
  per_device_train_batch_size:
    value: 64
  per_device_eval_batch_size:
    value: 64
  dtype:
    value: bfloat16
  learning_rate:
    value: 1e-4
  lr_scheduler_type:
    value: constant_with_warmup
  warmup_steps:
    value: 50
  max_steps:
    value: 10000
  save_steps:
    value: 10001 # don't save checkpoints during sweep
  dataloader_num_workers:
    value: 48
  logging_steps:
    value: 25
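  # Swept parameter: training examples whose pseudo-label WER against the
  # ground-truth transcription exceeds this threshold are presumably filtered
  # out, so 100 keeps (almost) everything while 5 keeps only near-perfect
  # teacher transcriptions. Grid search launches one full run per value.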
  wer_threshold:
    values:
      - 100
      - 20
      - 15
      - 10
      - 5
program: run_distillation.py
project: distil-whisper-sweeps
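# Usage (assuming this file is saved as sweep.yaml):
#   wandb sweep sweep.yaml    # register the sweep; prints a sweep ID
#   wandb agent <sweep-id>    # launch an agent to run each grid point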