jlvdoorn committed
Commit 981560f
1 Parent(s): eb78e35

Added model version dropdown

Files changed (1)
app.py +27 -9
app.py CHANGED
@@ -13,14 +13,26 @@ login(token=os.environ['hf_token'])
 # dis = 'This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2, ATCOSIM and ANSP datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
 
 # except:
-whisper = pipeline(model='jlvdoorn/whisper-large-v2-atco2-asr-atcosim')
-ttl = 'Whisper Large v2 - ATCO2-ATCOSIM'
-dis = 'This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2 and ATCOSIM datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
+# whisper = pipeline(model='jlvdoorn/whisper-large-v2-atco2-asr-atcosim')
+# ttl = 'Whisper Large v2 - ATCO2-ATCOSIM'
+# dis = 'This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2 and ATCOSIM datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
 
 bert_atco_ner = pipeline(model='Jzuluaga/bert-base-ner-atc-en-atco2-1h')
 
 #%%
-def transcribe(audio_file, audio_mic):
+def transcribe(audio_file, audio_mic, model_version):
+    if model_version == 'local':
+        whisper = pipeline(model='/mnt/projects/whisper/WhisperANSP/Models/whisper-large-v2-atco2-asr-atcosim-ANSP-3h1m', task='automatic-speech-recognition')
+        ttl = 'Whisper Large v2 - ATCO2-ATCOSIM-ANSP'
+        dis = 'This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2, ATCOSIM and ANSP datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
+    elif model_version == 'large-v2':
+        whisper = pipeline(model='jlvdoorn/whisper-large-v2-atco2-asr-atcosim')
+        ttl = 'Whisper Large v2 - ATCO2-ATCOSIM'
+        dis = 'This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2 and ATCOSIM datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
+    elif model_version == 'large-v3':
+        whisper = pipeline(model='jlvdoorn/whisper-large-v3-atco2-asr-atcosim')
+        ttl = 'Whisper Large v3 - ATCO2-ATCOSIM'
+        dis = 'This demo will transcribe ATC audio files by using the Whisper Large v3 model fine-tuned on the ATCO2 and ATCOSIM datasets. \n \n Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. \n This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.'
     if audio_mic is not None:
         return whisper(audio_mic)['text']
     elif audio_file is not None:
@@ -48,8 +60,8 @@ def extractCallSignCommand(transcription):
         return 'There was no transcription to extract a callsign or command from...'
 
 #%%
-def transcribeAndExtract(audio_file, audio_mic, transcribe_only):
-    transcription = transcribe(audio_file, audio_mic)
+def transcribeAndExtract(audio_file, audio_mic, transcribe_only, model_version):
+    transcription = transcribe(audio_file, audio_mic, model_version)
     if not transcribe_only:
         callSignCommandValues = extractCallSignCommand(transcription)
     else:
@@ -59,10 +71,16 @@ def transcribeAndExtract(audio_file, audio_mic, transcribe_only):
 #%%
 iface = gr.Interface(
     fn=transcribeAndExtract,
-    inputs=[gr.Audio(source='upload', type='filepath', interactive=True), gr.Audio(source='microphone', type='filepath'), gr.Checkbox(label='Transcribe only', default=False)],
+    inputs=[
+        gr.Audio(source='upload', type='filepath', interactive=True),
+        gr.Audio(source='microphone', type='filepath'),
+
+        gr.Checkbox(label='Transcribe only', default=False),
+        gr.Dropdown(choices=['local', 'large-v2', 'large-v3'], value='large-v3', label='Whisper model version'),
+    ],
     outputs=[gr.Text(label='Transcription'), gr.Text(label='Callsigns, commands and values')],
-    title=ttl,
-    description=dis,
+    title='Whisper',
+    description='Transcribe and extract',
 )
 
 #%%
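
Note on the change: gr.Interface passes its inputs to transcribeAndExtract positionally, so the new gr.Dropdown must remain the fourth input to line up with the added model_version parameter, and because ttl/dis are now local to transcribe, the interface title and description are replaced with the fixed strings 'Whisper' and 'Transcribe and extract'. As committed, transcribe() also constructs a fresh pipeline(...) on every request. Below is a minimal sketch of a variant that loads each checkpoint once and reuses it across calls; the MODELS mapping and get_whisper helper are hypothetical and not part of this commit.

from functools import lru_cache
from transformers import pipeline

# Hypothetical dropdown-choice -> checkpoint mapping, mirroring the paths used in this commit.
MODELS = {
    'local': '/mnt/projects/whisper/WhisperANSP/Models/whisper-large-v2-atco2-asr-atcosim-ANSP-3h1m',
    'large-v2': 'jlvdoorn/whisper-large-v2-atco2-asr-atcosim',
    'large-v3': 'jlvdoorn/whisper-large-v3-atco2-asr-atcosim',
}

@lru_cache(maxsize=None)
def get_whisper(model_version):
    # Load each ASR pipeline once; later calls with the same version reuse the cached object.
    return pipeline(task='automatic-speech-recognition', model=MODELS[model_version])

def transcribe(audio_file, audio_mic, model_version):
    # Prefer the microphone recording, fall back to the uploaded file, as in the original function.
    whisper = get_whisper(model_version)
    audio = audio_mic if audio_mic is not None else audio_file
    if audio is None:
        return ''
    return whisper(audio)['text']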