# ############################################################################
# Model: E2E ASR with Transformer + wav2vec2
# Encoder: wav2vec Encoder
# Decoder: Transformer Decoder + (CTC/ATT joint) beamsearch
# Tokens: BPE with unigram
# losses: CTC + KLdiv (Label Smoothing loss)
# Training: AISHELL-1
# Authors: Jianyuan Zhong, Titouan Parcollet, Mirco Ravanelli
# ############################################################################

# Feature parameters
sample_rate: 16000
n_fft: 400
n_mels: 80
wav2vec2_hub: facebook/wav2vec2-large-100k-voxpopuli

####################### Model parameters ###########################
# Transformer
d_model: 256
nhead: 4
num_encoder_layers: 2
num_decoder_layers: 6
d_ffn: 2048
transformer_dropout: 0.1
activation: !name:torch.nn.GELU
output_neurons: 5000
vocab_size: 5000

# Outputs
blank_index: 0
label_smoothing: 0.1
pad_index: 0
bos_index: 1
eos_index: 2
unk_index: 0

# Decoding parameters
min_decode_ratio: 0.0
max_decode_ratio: 1.0
valid_search_interval: 10
valid_beam_size: 10
test_beam_size: 10
ctc_weight_decode: 0.40

############################## models ################################

wav2vec2: !new:speechbrain.lobes.models.huggingface_transformers.wav2vec2.Wav2Vec2
    source: !ref <wav2vec2_hub>
    output_norm: True
    freeze: True
    save_path: model_checkpoints

Transformer: !new:speechbrain.lobes.models.transformer.TransformerASR.TransformerASR  # yamllint disable-line rule:line-length
    input_size: 1024
    tgt_vocab: !ref <output_neurons>
    d_model: !ref <d_model>
    nhead: !ref <nhead>
    num_encoder_layers: !ref <num_encoder_layers>
    num_decoder_layers: !ref <num_decoder_layers>
    d_ffn: !ref <d_ffn>
    dropout: !ref <transformer_dropout>
    activation: !ref <activation>
    normalize_before: True
    causal: False

ctc_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

seq_lin: !new:speechbrain.nnet.linear.Linear
    input_size: !ref <d_model>
    n_neurons: !ref <output_neurons>

tokenizer: !new:sentencepiece.SentencePieceProcessor

asr_model: !new:torch.nn.ModuleList
    - [!ref <Transformer>, !ref <seq_lin>, !ref <ctc_lin>]

# Here, we extract the encoder from the Transformer model
Tencoder: !new:speechbrain.lobes.models.transformer.TransformerASR.EncoderWrapper
    transformer: !ref <Transformer>

# We compose the inference (encoder) pipeline.
encoder: !new:speechbrain.nnet.containers.LengthsCapableSequential
    input_shape: [null, null, !ref <n_mels>]
    wav2vec2: !ref <wav2vec2>
    transformer_encoder: !ref <Tencoder>

ctc_scorer: !new:speechbrain.decoders.scorer.CTCScorer
    eos_index: !ref <eos_index>
    blank_index: !ref <blank_index>
    ctc_fc: !ref <ctc_lin>

scorer: !new:speechbrain.decoders.scorer.ScorerBuilder
    full_scorers: [!ref <ctc_scorer>]
    weights:
        ctc: !ref <ctc_weight_decode>

decoder: !new:speechbrain.decoders.S2STransformerBeamSearcher
    modules: [!ref <Transformer>, !ref <seq_lin>]
    bos_index: !ref <bos_index>
    eos_index: !ref <eos_index>
    min_decode_ratio: !ref <min_decode_ratio>
    max_decode_ratio: !ref <max_decode_ratio>
    beam_size: !ref <test_beam_size>
    using_eos_threshold: False
    length_normalization: True
    scorer: !ref <scorer>

modules:
    encoder: !ref <encoder>
    decoder: !ref <decoder>

log_softmax: !new:torch.nn.LogSoftmax
    dim: -1

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        wav2vec2: !ref <wav2vec2>
        model: !ref <asr_model>
        tokenizer: !ref <tokenizer>
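
# ############################################################################
# Example usage (a minimal sketch, not part of the recipe itself): this file
# follows the layout consumed by SpeechBrain's pretrained EncoderDecoderASR
# interface (encoder/decoder under `modules`, plus a `pretrainer`). Assuming
# the checkpoints referenced by the pretrainer sit next to this
# hyperparams.yaml, inference typically looks like the Python below; the
# directory and audio paths are placeholders. On SpeechBrain < 1.0 the import
# is `from speechbrain.pretrained import EncoderDecoderASR` instead.
#
#   from speechbrain.inference.ASR import EncoderDecoderASR
#
#   asr_model = EncoderDecoderASR.from_hparams(
#       source="path/to/model_dir",       # folder containing this yaml + checkpoints
#       savedir="pretrained_models/asr",  # local cache directory
#   )
#   print(asr_model.transcribe_file("path/to/audio.wav"))
# ############################################################################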
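
# ############################################################################
# The file can also be parsed directly with HyperPyYAML and the parameter
# transfer run by hand (again a sketch, assuming the checkpoint files are
# available locally and their location is passed to the pretrainer, e.g. via
# collect_files(default_source=...)):
#
#   from hyperpyyaml import load_hyperpyyaml
#
#   with open("hyperparams.yaml") as fin:
#       hparams = load_hyperpyyaml(fin)
#
#   # Fetch and load the pretrained wav2vec2, ASR model, and tokenizer weights
#   hparams["pretrainer"].collect_files(default_source="path/to/model_dir")
#   hparams["pretrainer"].load_collected()
#
#   encoder = hparams["encoder"]  # wav2vec2 -> Transformer encoder pipeline
#   decoder = hparams["decoder"]  # CTC/attention joint beam search
# ############################################################################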