import numpy as np
from typing import Dict

from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from pyctcdecode import Alphabet, BeamSearchDecoderCTC


class PreTrainedPipeline:
    def __init__(self, path):
        """
        Initialize the Wav2Vec2 processor, the CTC model and the CTC beam-search
        decoder from the pretrained checkpoint at `path`.
        """
        self.processor = Wav2Vec2Processor.from_pretrained(path)
        self.model = Wav2Vec2ForCTC.from_pretrained(path)

        vocab_list = list(self.processor.tokenizer.get_vocab().keys())

        # Index 0 is the CTC blank token; represent it as an empty string.
        vocab_list[0] = ""

        # Map the special tokens at indices 1-3 (e.g. <s>, </s>, <unk>) to a
        # placeholder character so the decoder ignores them.
        vocab_list[1] = "⁇"
        vocab_list[2] = "⁇"
        vocab_list[3] = "⁇"

        # Index 4 is the word delimiter token; convert it to a real space.
        vocab_list[4] = " "

        alphabet = Alphabet.build_alphabet(vocab_list, ctc_token_idx=0)

        self.decoder = BeamSearchDecoderCTC(alphabet)
        self.sampling_rate = 16000

    def __call__(self, inputs: np.ndarray) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.ndarray`):
                The raw audio waveform, sampled at 16kHz by default.
        Return:
            A :obj:`dict` like ``{"text": "XXX"}`` containing the text detected
            in the input audio.
        """
        input_values = self.processor(
            inputs, return_tensors="pt", sampling_rate=self.sampling_rate
        ).input_values

        # Forward pass; keep the logits of the single item in the batch.
        logits = self.model(input_values).logits.cpu().detach().numpy()[0]

        # Beam-search decode the CTC logits into text.
        return {
            "text": self.decoder.decode(logits)
        }
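

# Minimal usage sketch, not part of the original pipeline: the checkpoint id and
# the file name "sample.wav" are placeholder assumptions, and `soundfile` is
# assumed to be installed for loading the waveform.
if __name__ == "__main__":
    import soundfile as sf

    # Any Wav2Vec2 CTC checkpoint whose vocab has the blank token at index 0 and
    # the word delimiter at index 4 fits the fixups done in __init__.
    pipe = PreTrainedPipeline("facebook/wav2vec2-base-960h")

    # Hypothetical 16 kHz mono recording.
    waveform, _ = sf.read("sample.wav", dtype="float32")
    print(pipe(waveform))  # e.g. {"text": "HELLO WORLD"}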