changed to a single function
app.py
CHANGED
@@ -6,7 +6,7 @@ import os
 
 #%%
 whisper = pipeline(model='jlvdoorn/whisper-large-v2-atco2-asr-atcosim', use_auth_token=os.environ['HUGGINGFACE_TOKEN'])
-bert_atco_ner = pipeline(model='Jzuluaga/bert-base-ner-atc-en-atco2-1h')
+# bert_atco_ner = pipeline(model='Jzuluaga/bert-base-ner-atc-en-atco2-1h')
 
 #%%
 def transcribe(audio_mic, audio_file):
@@ -18,38 +18,38 @@ def transcribe(audio_mic, audio_file):
         return 'There was no audio to transcribe...'
 
 #%%
-def extractCallSignCommand(transcription):
-    if type(transcription) is str:
-        result = bert_atco_ner(transcription)
-        callsigns = []
-        commands = []
-        values = []
-        for item in result:
-            if 'callsign' in item['entity']:
-                callsigns.append(item['word'])
-            if 'command' in item['entity']:
-                commands.append(item['word'])
-            if 'value' in item['entity']:
-                values.append(item['word'])
+# def extractCallSignCommand(transcription):
+#     if type(transcription) is str:
+#         result = bert_atco_ner(transcription)
+#         callsigns = []
+#         commands = []
+#         values = []
+#         for item in result:
+#             if 'callsign' in item['entity']:
+#                 callsigns.append(item['word'])
+#             if 'command' in item['entity']:
+#                 commands.append(item['word'])
+#             if 'value' in item['entity']:
+#                 values.append(item['word'])
 
-        return 'Callsigns: ' + ', '.join(callsigns) + '\nCommands: ' + ', '.join(commands) + '\nValues: ' + ', '.join(values)
-    else:
-        return 'There was no transcription to extract a callsign or command from...'
+#         return 'Callsigns: ' + ', '.join(callsigns) + '\nCommands: ' + ', '.join(commands) + '\nValues: ' + ', '.join(values)
+#     else:
+#         return 'There was no transcription to extract a callsign or command from...'
 
 #%%
-def transcribeAndExtract(audio_mic, audio_file, transcribe_only):
-    transcription = transcribe(audio_mic, audio_file)
-    if not transcribe_only:
-        callSignCommandValues = extractCallSignCommand(transcription)
-    else:
-        callSignCommandValues = ''
-    return transcription, callSignCommandValues
+# def transcribeAndExtract(audio_mic, audio_file, transcribe_only):
+#     transcription = transcribe(audio_mic, audio_file)
+#     if not transcribe_only:
+#         callSignCommandValues = extractCallSignCommand(transcription)
+#     else:
+#         callSignCommandValues = ''
+#     return transcription, callSignCommandValues
 
 #%%
 iface = gr.Interface(
-    fn=
-    inputs=[gr.Audio(source='microphone', type='filepath'), gr.Audio(source='upload', type='filepath')
-    outputs=
+    fn=transcribe,
+    inputs=[gr.Audio(source='microphone', type='filepath'), gr.Audio(source='upload', type='filepath')],
+    outputs=gr.Text(label='Transcription'),
     title='Whisper Large v2 - ATCO2-ASR-ATCOSIM',
     description='This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2 and ATCOSIM datasets. Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.',
 )
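Note that the values on the three removed fn=/inputs=/outputs= lines were truncated in the capture and are left as-is above.

For reference, the commented-out extractCallSignCommand iterates over the standard transformers token-classification output: a list with one dict per recognized token, each carrying 'entity' and 'word' keys (alongside 'score', 'index', 'start', 'end'). A minimal sketch of that shape follows; the entity labels are illustrative assumptions, not taken from the Jzuluaga/bert-base-ner-atc-en-atco2-1h model card, and the loop above only requires that they contain 'callsign', 'command', or 'value'.

# Hypothetical output shape of bert_atco_ner('swissair one one two descend flight level eight zero');
# the label strings are illustrative, not confirmed from the model.
result = [
    {'entity': 'B-callsign', 'word': 'swissair', 'score': 0.99, 'index': 1, 'start': 0, 'end': 8},
    {'entity': 'I-callsign', 'word': 'one', 'score': 0.98, 'index': 2, 'start': 9, 'end': 12},
    {'entity': 'B-command', 'word': 'descend', 'score': 0.97, 'index': 5, 'start': 21, 'end': 28},
    {'entity': 'B-value', 'word': 'eight', 'score': 0.96, 'index': 8, 'start': 42, 'end': 47},
]

# The substring checks then bucket words exactly as the commented-out loop does:
callsigns = [item['word'] for item in result if 'callsign' in item['entity']]
assert callsigns == ['swissair', 'one']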
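Taken together, app.py after this commit plausibly reads as below. This is a hedged sketch rather than the confirmed file: the import block (the diff starts at line 6, after it) and the body of transcribe (lines 13-17 are elided by the diff) are assumptions inferred from the visible calls and the 'no audio' fallback; only the diffed lines are confirmed.

#%% Assumed imports; only 'import os' is visible in the hunk header.
import os

import gradio as gr
from transformers import pipeline

#%%
whisper = pipeline(model='jlvdoorn/whisper-large-v2-atco2-asr-atcosim', use_auth_token=os.environ['HUGGINGFACE_TOKEN'])
# bert_atco_ner = pipeline(model='Jzuluaga/bert-base-ner-atc-en-atco2-1h')

#%%
def transcribe(audio_mic, audio_file):
    # Assumed body: prefer the microphone take, fall back to the upload,
    # and return the text field of the ASR pipeline's output dict.
    audio = audio_mic if audio_mic is not None else audio_file
    if audio is not None:
        return whisper(audio)['text']
    else:
        return 'There was no audio to transcribe...'

#%%
iface = gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(source='microphone', type='filepath'), gr.Audio(source='upload', type='filepath')],
    outputs=gr.Text(label='Transcription'),
    title='Whisper Large v2 - ATCO2-ASR-ATCOSIM',
    description='This demo will transcribe ATC audio files by using the Whisper Large v2 model fine-tuned on the ATCO2 and ATCOSIM datasets. Further it uses a Named Entity Recognition model to extract callsigns, commands and values from the transcription. This model is based on Google\'s BERT model and fine-tuned on the ATCO2 dataset.',
)

iface.launch()  # assumed; not shown in the diff

With the NER path commented out, the interface maps the two optional audio inputs straight to a single text output; that lone transcribe entry point is what the commit title, 'changed to a single function', refers to. The description string still mentions the NER model even though that path is now disabled.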