# WhisperATC / app.py
from transformers import pipeline
from datasets import load_dataset
import gradio as gr
import numpy as np
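
# Load the validation splits of the ATCO2-ASR and ATCOSIM corpora from the Hugging Face Hub.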
atco2 = load_dataset('jlvdoorn/atco2-asr', split='validation')
atcosim = load_dataset('jlvdoorn/atcosim', split='validation')
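
# Build Gradio example inputs from the first few ATCO2 validation clips, passed as raw audio bytes.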
num_examples = 3
examples_atco2 = [[atco2[i]['audio']['array'].tobytes()] for i in range(num_examples)]
#examples_atcosim = [ [{'sampling_rate': atcosim[i]['audio']['sampling_rate'], 'raw': atcosim[i]['audio']['array']}, False, 'large-v3'] for i in range(num_examples)]
examples = examples_atco2 #+ examples_atcosim
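
# Override the examples above with a single clip: normalize the waveform to float32
# in [-1, 1] and convert it to raw bytes for the upload widget.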
sr, y = atco2[0]['audio']['sampling_rate'], atco2[0]['audio']['array']
y = y.astype(np.float32)
y /= np.max(np.abs(y))
y = y.tobytes()
examples = [[y]]
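
# Load the Whisper large-v3 model fine-tuned on ATCO2-ASR and ATCOSIM as an ASR pipeline.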
whisper = pipeline(model='jlvdoorn/whisper-large-v3-atco2-asr-atcosim')

def transcribe(audio):
    # Run the input through the Whisper ASR pipeline and return the transcription text.
    if audio is not None:
        return whisper(audio)['text']
    else:
        return 'There was no audio to transcribe...'
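
# Interface for transcribing uploaded audio files, seeded with the example clips.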
file_iface = gr.Interface(
    fn = transcribe,
    inputs = gr.Audio(source='upload', interactive=True),
    outputs = gr.Textbox(label='Transcription'),
    title = 'Whisper ATC - Large v3',
    description = 'Transcribe ATC speech',
    examples = examples,
)
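
# Interface for transcribing live microphone input (recorded to a temporary file path).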
mic_iface = gr.Interface(
    fn = transcribe,
    inputs = gr.Audio(source='microphone', type='filepath'),
    outputs = gr.Textbox(label='Transcription'),
    title = 'Whisper ATC - Large v3',
    description = 'Transcribe ATC speech',
)
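
# Combine both interfaces into a tabbed app and serve it on all network interfaces.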
demo = gr.TabbedInterface([file_iface, mic_iface], ["File", "Microphone"])
demo.launch(server_name='0.0.0.0')