Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import librosa
|
3 |
+
import numpy as np
|
4 |
+
import soundfile as sf
|
5 |
+
from transformers import pipeline
|
6 |
+
|
7 |
+
# ASR pipelines, instantiated once at module import time (model weights are
# downloaded on first run).
asr_ceb = pipeline(
    "automatic-speech-recognition",
    model="sil-ai/wav2vec2-bloom-speech-ceb",
)
asr_whisper_large = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-large-v3",
)
asr_whisper_ceb = pipeline(
    "automatic-speech-recognition",
    model="nlewins/whisper-small-translate-X-gen2-examples-quality-step4-1e-6",
)
11 |
+
|
12 |
+
|
13 |
+
def transcribe_speech(filepath):
    """Run one audio file through all loaded ASR pipelines.

    Parameters
    ----------
    filepath : str | None
        Path to the recorded/uploaded audio file from Gradio, or None when
        no audio was provided.

    Returns
    -------
    tuple[str, str, str, str]
        (Cebuano transcription from sil-ai, Whisper-large transcription,
        Whisper-large English translation, nlewins fine-tuned Whisper output).
        Returns "" when *filepath* is None (after showing a Gradio warning),
        matching the original behavior.
    """
    if filepath is None:
        gr.Warning("No audio found, please retry.")
        return ""

    # Read only the header's sample rate instead of decoding the whole file
    # just to inspect its rate; the resample helper re-reads the samples
    # when a conversion is actually needed.
    sample_rate = librosa.get_samplerate(filepath)
    model_rate = asr_ceb.feature_extractor.sampling_rate
    if sample_rate != model_rate:
        filepath = resample_audio_for_processing(filepath, model_rate, sample_rate)

    output_ceb = asr_ceb(filepath)
    # Whisper "translate" task: emit English regardless of source language.
    generate_kwargs = {
        # "language": "tagalog",  # source language
        "task": "translate"
    }
    output_whisper_large_translate = asr_whisper_large(filepath, generate_kwargs=generate_kwargs)
    output_whisper_large = asr_whisper_large(filepath)
    output_whisper_ceb = asr_whisper_ceb(filepath)
    return (output_ceb["text"], output_whisper_large["text"],
            output_whisper_large_translate["text"], output_whisper_ceb["text"])
|
32 |
+
|
33 |
+
|
34 |
+
def resample_audio_for_processing(filepath, model_rate, sample_rate):
    """Resample the audio at *filepath* from *sample_rate* to *model_rate* Hz.

    Writes the resampled signal to 'resampled_audio.wav' and returns that
    path. On any failure the original *filepath* is returned unchanged so
    processing can continue with the audio as-is (best-effort behavior).
    """
    print(f"Audio loaded with rate: {sample_rate} Hz while model requires rate: {model_rate} Hz")
    try:
        print("Resampling audio...")
        audio_data, _ = librosa.load(filepath, sr=None)  # NumPy array of samples
        audio_resampled = librosa.resample(
            np.asarray(audio_data), orig_sr=sample_rate, target_sr=model_rate
        )
        resampled_audio_path = 'resampled_audio.wav'
        # BUG FIX: write the file at the rate it was actually resampled to
        # (model_rate). The original hard-coded 16000, which produced a
        # wrong-rate file whenever the model's rate differed from 16 kHz.
        sf.write(resampled_audio_path, audio_resampled, model_rate)
        print("Audio resampled successfully.")
        return resampled_audio_path
    except Exception as e:
        print(f"Error resampling audio: {e}, processing with audio as is it !")
        return filepath
|
51 |
+
|
52 |
+
|
53 |
+
# Microphone tab: record audio and run it through every pipeline.
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs=[
        gr.Textbox(label="Transcription CEB (sil-ai)"),
        gr.Textbox(label="Transcription (openai)"),
        gr.Textbox(label="Translation (openai)"),
        # The nlewins checkpoint is a translate-task fine-tune, so label its
        # output as a translation — consistent with the file-upload tab.
        gr.Textbox(label="Translation (nlewins)"),
    ],
    allow_flagging="never",
)
|
60 |
+
|
61 |
+
# File-upload tab: same pipelines, audio supplied as an uploaded file.
file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),
    outputs=[
        gr.Textbox(label="Transcription CEB (sil-ai)"),
        gr.Textbox(label="Transcription (openai)"),
        gr.Textbox(label="Translation (openai)"),
        gr.Textbox(label="Translation (nlewins)"),
    ],
    allow_flagging="never",
)
|
69 |
+
|
70 |
+
# Combine both interfaces into a single two-tab app.
_tabs = [mic_transcribe, file_transcribe]
_tab_titles = ["Use your Microphone", "Upload Audio File"]
demo = gr.TabbedInterface(_tabs, _tab_titles)
|
74 |
+
|
75 |
+
if __name__ == "__main__":
    # Start the Gradio server only when executed as a script.
    demo.launch()
|