jlvdoorn committed on
Commit 659ddf4
1 Parent(s): 98213bd

updated app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -4,7 +4,7 @@ import gradio as gr
 import os
 
 #%%
-whisper = pipeline(model='C:\\Users\\doornj\\Desktop\\WhisperANSP\\Models\\whisper-large-v2-atco2-asr-atcosim-ANSP-3h1m', task='automatic-speech-recognition')
+whisper = pipeline(model='/mnt/projects/whisper/WhisperANSP/Models/whisper-large-v2-atco2-asr-atcosim-ANSP-3h1m', task='automatic-speech-recognition')
 bert_atco_ner = pipeline(model='Jzuluaga/bert-base-ner-atc-en-atco2-1h')
 
 #%%
@@ -36,8 +36,8 @@ def extractCallSignCommand(transcription):
     return 'There was no transcription to extract a callsign or command from...'
 
 #%%
-def transcribeAndExtract(audio_mic, audio_file, transcribe_only):
-    transcription = transcribe(audio_mic, audio_file)
+def transcribeAndExtract(audio_file, audio_mic, transcribe_only):
+    transcription = transcribe(audio_file, audio_mic)
     if not transcribe_only:
         callSignCommandValues = extractCallSignCommand(transcription)
     else:
@@ -47,11 +47,11 @@ def transcribeAndExtract(audio_mic, audio_file, transcribe_only):
 #%%
 iface = gr.Interface(
     fn=transcribeAndExtract,
-    inputs=[gr.Audio(source='upload', type='filepath'), gr.Audio(source='microphone', type='filepath'), gr.Checkbox(label='Transcribe only', default=False)],
+    inputs=[gr.Audio(source='upload', type='filepath', interactive=True), gr.Audio(source='microphone', type='filepath'), gr.Checkbox(label='Transcribe only', default=False)],
     outputs=[gr.Text(label='Transcription'), gr.Text(label='Callsigns, commands and values')],
     title='Whisper Large v2 - ATCO2-ATCOSIM-ANSP',
     description='This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2, ATCOSIM and ANSP datasets. Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.',
 )
 
 #%%
-iface.launch()
+iface.launch(server_name='0.0.0.0', server_port=9000)
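
For context, a minimal sketch of how the updated call chain could be exercised outside Gradio. The transcribe() helper is not shown in this diff, so its body below is an assumption; only the (audio_file, audio_mic) argument order and the whisper pipeline come from the commit, and the sample filename is hypothetical.

# Sketch only: assumed behaviour of the transcribe() helper, which this diff does not show.
def transcribe(audio_file, audio_mic):
    # Prefer the uploaded file; fall back to the microphone recording.
    source = audio_file if audio_file is not None else audio_mic
    if source is None:
        return ''
    # The automatic-speech-recognition pipeline returns a dict with a 'text' key.
    return whisper(source)['text']

# Hypothetical invocation mirroring the Gradio inputs [upload, microphone, 'Transcribe only']:
# transcribeAndExtract('sample-atc-clip.wav', None, False)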