from transformers import pipeline
from datasets import load_dataset
import gradio as gr

# Load validation splits used for the (optional) example inputs.
atco2 = load_dataset('jlvdoorn/atco2-asr', split='validation')
atcosim = load_dataset('jlvdoorn/atcosim', split='validation')

num_examples = 3
examples_atco2 = [
    [{'sampling_rate': atco2[i]['audio']['sampling_rate'], 'raw': atco2[i]['audio']['array']}, False, 'large-v3']
    for i in range(num_examples)
]
# examples_atcosim = [
#     [{'sampling_rate': atcosim[i]['audio']['sampling_rate'], 'raw': atcosim[i]['audio']['array']}, False, 'large-v3']
#     for i in range(num_examples)
# ]
examples = examples_atco2  # + examples_atcosim

# Fine-tuned Whisper checkpoint for ATC speech. The model-version dropdown below
# is informational only; this single checkpoint is always used.
whisper = pipeline(model='jlvdoorn/whisper-large-v3-atco2-asr-atcosim')


def transcribe(audio, transcribe_only, model_version):
    # The interfaces pass three inputs (audio, checkbox, dropdown) and expect two
    # outputs, so the function must accept and return matching values. Only the
    # audio is used; callsign/command extraction is not implemented here, so the
    # second output is left empty.
    if audio is None:
        return 'There was no audio to transcribe...', ''
    text = whisper(audio)['text']
    return text, ''


file_iface = gr.Interface(
    fn=transcribe,
    # 'filepath' hands the pipeline a path it can decode, matching the mic tab.
    inputs=[
        gr.Audio(source='upload', type='filepath', interactive=True),
        gr.Checkbox(label='Transcribe only', value=False),
        gr.Dropdown(choices=['large-v2', 'large-v3'], value='large-v3', label='Whisper model version'),
    ],
    outputs=[
        gr.Textbox(label='Transcription'),
        gr.Textbox(label='Callsigns, commands and values'),
    ],
    title='Whisper ATC - Large v3',
    description='Transcribe ATC speech',
    # examples=examples,
)

mic_iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source='microphone', type='filepath'),
        gr.Checkbox(label='Transcribe only', value=False),
        gr.Dropdown(choices=['large-v2', 'large-v3'], value='large-v3', label='Whisper model version'),
    ],
    outputs=[
        gr.Textbox(label='Transcription'),
        gr.Textbox(label='Callsigns, commands and values'),
    ],
    title='Whisper ATC - Large v3',
    description='Transcribe ATC speech',
)

demo = gr.TabbedInterface([file_iface, mic_iface], ["File", "Microphone"])
demo.launch(server_name='0.0.0.0')