Update app.py
app.py CHANGED
@@ -10,18 +10,18 @@ import re
 #import torchaudio
 
 # Initialize the speech recognition pipeline and transliterator
-
-
+odia_model1 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-odia_v1")
+odia_model2 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-odia_v2")
 # p2 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_v1")
 # punjaib_modle_30000=pipeline(task="automatic-speech-recognition", model="cdactvm/wav2vec-bert-punjabi-30000-model")
-punjaib_modle_155750=pipeline(task="automatic-speech-recognition", model="cdactvm/wav2vec-bert-punjabi-155750-model")
-punjaib_modle_70000_aug=pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-model-30000-augmented")
+# punjaib_modle_155750=pipeline(task="automatic-speech-recognition", model="cdactvm/wav2vec-bert-punjabi-155750-model")
+# punjaib_modle_70000_aug=pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-model-30000-augmented")
 #p3 = pipeline(task="automatic-speech-recognition", model="cdactvm/kannada_w2v-bert_model")
 #p4 = pipeline(task="automatic-speech-recognition", model="cdactvm/telugu_w2v-bert_model")
 #p5 = pipeline(task="automatic-speech-recognition", model="Sajjo/w2v-bert-2.0-bangala-gpu-CV16.0_v2")
 #p6 = pipeline(task="automatic-speech-recognition", model="cdactvm/hf-open-assames")
 # p7 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-assames")
-processor = AutoProcessor.from_pretrained("cdactvm/w2v-assames")
+# processor = AutoProcessor.from_pretrained("cdactvm/w2v-assames")
 vocab_dict = processor.tokenizer.get_vocab()
 sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}
 decoder = build_ctcdecoder(
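Note on this hunk: it comments out the `processor = AutoProcessor.from_pretrained("cdactvm/w2v-assames")` assignment but keeps the following lines, which still dereference `processor`, so the module would raise a NameError at import time. A minimal sketch of one way to keep the two in sync, assuming the intent is to switch the Assamese decoder off together with its processor (the `ENABLE_ASSAMESE` flag and the `labels=` argument are illustrative, not part of the committed code):

# Hypothetical guard: disable the whole Assamese decoder block with one flag,
# so commenting out the model cannot orphan the lines that use it.
ENABLE_ASSAMESE = False  # assumption: not in the committed app.py

if ENABLE_ASSAMESE:
    processor = AutoProcessor.from_pretrained("cdactvm/w2v-assames")
    vocab_dict = processor.tokenizer.get_vocab()
    sorted_vocab_dict = {k.lower(): v for k, v in
                         sorted(vocab_dict.items(), key=lambda item: item[1])}
    # build_ctcdecoder comes from pyctcdecode; the original call is truncated
    # in this hunk, so the labels argument below is an assumption.
    decoder = build_ctcdecoder(labels=list(sorted_vocab_dict.keys()))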
@@ -331,19 +331,30 @@ def transcribe_punjabi_eng_model_155750(speech):
     return sentence
 
 ###########################################
-def
-    text =
+def transcribe_odiya_model1(speech):
+    text = odia_model1(speech)["text"]
     if text is None:
         return "Error: ASR returned None"
     return text
 
-###################################
 def transcribe_odiya_model2(speech):
     text = odia_model2(speech)["text"]
     if text is None:
         return "Error: ASR returned None"
     return text
 
+def transcribe_odiya_eng_model1(speech):
+    trn = Transliterator(source='ori', target='eng', build_lookup=True)
+    text = odia_model1(speech)["text"]
+    if text is None:
+        return "Error: ASR returned None"
+    sentence = trn.transform(text)
+    if sentence is None:
+        return "Error: Transliteration returned None"
+    replaced_words = replace_words(sentence)
+    processed_sentence = process_doubles(replaced_words)
+    return process_transcription(processed_sentence)
+
 def transcribe_odiya_eng_model2(speech):
     trn = Transliterator(source='ori', target='eng', build_lookup=True)
     text = odia_model2(speech)["text"]
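The new `transcribe_odiya_eng_model1` duplicates `transcribe_odiya_eng_model2` line for line except for the pipeline it calls. A sketch of a shared helper that both wrappers could delegate to, using only names already defined in app.py (the helper itself is hypothetical):

def transcribe_odiya_eng(speech, model):
    # ASR -> Odia-to-English transliteration -> word clean-up, shared by both models.
    trn = Transliterator(source='ori', target='eng', build_lookup=True)
    text = model(speech)["text"]
    if text is None:
        return "Error: ASR returned None"
    sentence = trn.transform(text)
    if sentence is None:
        return "Error: Transliteration returned None"
    replaced_words = replace_words(sentence)
    processed_sentence = process_doubles(replaced_words)
    return process_transcription(processed_sentence)

# Usage: transcribe_odiya_eng(speech, odia_model1) or transcribe_odiya_eng(speech, odia_model2)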
@@ -552,6 +563,10 @@ def sel_lng(lng, mic=None, file=None):
         return transcribe_assamese_LM(audio)
     elif lng == "Assamese-Model2":
         return transcribe_assamese_model2(audio)
+    elif lng == "Odia_model1":
+        return transcribe_odiya_model1(audio)
+    elif lng == "Odia_trans_model1":
+        return transcribe_odiya_eng_model1(audio)
     elif lng == "Odia_model2":
         return transcribe_odiya_model2(audio)
     elif lng == "Odia_trans_model2":
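Each new model currently costs two more elif branches in sel_lng. A dispatch-table sketch that keeps option names and handlers in one place (hypothetical refactor; the handler functions are the ones defined in app.py):

# Hypothetical lookup table mapping dropdown values to handlers.
ODIA_HANDLERS = {
    "Odia_model1": transcribe_odiya_model1,
    "Odia_trans_model1": transcribe_odiya_eng_model1,
    "Odia_model2": transcribe_odiya_model2,
    "Odia_trans_model2": transcribe_odiya_eng_model2,
}

def dispatch(lng, audio):
    handler = ODIA_HANDLERS.get(lng)
    if handler is None:
        return f"Error: unknown language option {lng!r}"
    return handler(audio)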
@@ -603,9 +618,10 @@ demo=gr.Interface(
     #gr.Dropdown(["Hindi","Hindi-trans","Odiya","Odiya-trans","Kannada","Kannada-trans","Telugu","Telugu-trans","Bangala","Bangala-trans"],value="Hindi",label="Select Language"),
     gr.Dropdown([
         # "Hindi","Hindi-trans",
-
+        "Odia_model1","Odiya-trans_model1","Odia_model2","Odia_trans_model2",
         # "Assamese-LM","Assamese-Model2",
-        "Punjabi_Model1","Punjabi_Model1_Trans","Punjabi_Model_aug","Punjabi_Model_aug_Trans"],value="Hindi",label="Select Language"
+        # "Punjabi_Model1","Punjabi_Model1_Trans","Punjabi_Model_aug","Punjabi_Model_aug_Trans"],value="Hindi",label="Select Language"
+        ),
     gr.Audio(sources=["microphone","upload"], type="filepath"),
     #gr.Audio(sources="upload", type="filepath"),
     #"state"
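Two problems in this last hunk are worth flagging. First, the new dropdown option "Odiya-trans_model1" does not match the "Odia_trans_model1" branch added to sel_lng, so that choice can never dispatch. Second, the closing `]` of the choices list and the `value=`/`label=` arguments now live only inside a commented-out line, leaving `gr.Dropdown([` unterminated. A corrected sketch of this input (the `value="Odia_model1"` default is an assumption, since the previous default "Hindi" is no longer an active choice):

gr.Dropdown(
    ["Odia_model1", "Odia_trans_model1", "Odia_model2", "Odia_trans_model2"],
    value="Odia_model1",  # assumption: the default should be one of the active choices
    label="Select Language",
),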