import gradio as gr
from transformers import Wav2Vec2ForCTC, AutoProcessor
import torch
import librosa

model_id = "facebook/mms-1b-all"

# Load the processor and model once at startup rather than inside the
# request handler; re-instantiating a 1B-parameter checkpoint on every
# call would make each transcription needlessly slow.
processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
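
# Optional: "facebook/mms-1b-all" is multilingual, with one CTC adapter per
# language; English is the default. A minimal sketch of switching the target
# language to French before transcribing ("fra" is the ISO-639-3 code MMS
# uses), left commented out since this demo sticks with the default:
#
#     processor.tokenizer.set_target_lang("fra")
#     model.load_adapter("fra")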

def transcribe(audio_file_mic=None, audio_file_upload=None):
    # Prefer the microphone recording when both inputs are provided.
    if audio_file_mic:
        audio_file = audio_file_mic
    elif audio_file_upload:
        audio_file = audio_file_upload
    else:
        return "Please upload an audio file or record one"

    # sr=None preserves the file's native sample rate (librosa would
    # otherwise resample everything to 22050 Hz), so the audio is resampled
    # at most once, to the 16 kHz rate the model expects.
    speech, sample_rate = librosa.load(audio_file, sr=None)
    if sample_rate != 16000:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)

    inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

    # Greedy CTC decoding: pick the most probable token at each frame, then
    # let the processor collapse repeats and strip blank tokens.
    with torch.no_grad():
        outputs = model(**inputs).logits

    ids = torch.argmax(outputs, dim=-1)[0]
    transcription = processor.decode(ids)
    return transcription
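
# Quick sanity check without the UI; "sample.wav" is a hypothetical local
# recording, so substitute any audio file you have on disk:
#
#     print(transcribe(audio_file_upload="sample.wav"))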

iface = gr.Interface(fn=transcribe,
                     inputs=[
                         gr.Audio(source="microphone", type="filepath"),
                         gr.Audio(source="upload", type="filepath"),
                     ],
                     outputs=["textbox"],
                     )
iface.launch()
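
# launch() serves the demo on a local URL; launch(share=True) would also
# create a temporary public Gradio link for sharing the demo.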