Update app.py
app.py CHANGED
@@ -1,65 +1,32 @@
+import librosa
+import gradio as gr
+import numpy as np
+from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 import soundfile as sf
 import torch
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2ProcessorWithLM
-import gradio as gr
-import sox
-import subprocess
-
-
-def read_file_and_process(wav_file):
-    filename = wav_file.split('.')[0]
-    filename_16k = filename + "16k.wav"
-    resampler(wav_file, filename_16k)
-    speech, _ = sf.read(filename_16k)
-    inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
-    return inputs
-
-
-def resampler(input_file_path, output_file_path):
-    command = (
-        f"ffmpeg -hide_banner -loglevel panic -i {input_file_path} -ar 16000 -ac 1 -bits_per_raw_sample 16 -vn "
-        f"{output_file_path}"
-    )
-    subprocess.call(command, shell=True)
-
-
-def parse_transcription_with_lm(logits):
-    result = processor_with_LM.batch_decode(logits.cpu().numpy())
-    text = result.text
-    transcription = text[0]
-    return transcription
-
-
-def parse_transcription(logits):
+
+# load model and tokenizer
+processor = Wav2Vec2Processor.from_pretrained("aditii09/facebook_english_asr")
+model = Wav2Vec2ForCTC.from_pretrained("aditii09/facebook_english_asr")
+
+
+def speech2text(audio):
+    sr, data = audio
+
+    # resample to 16 kHz
+    data_16hz = librosa.resample(data[:, 0].astype(np.float32), sr, 16000)
+
+    # tokenize
+    input_values = processor([data_16hz], return_tensors="pt", padding="longest").input_values  # batch size 1
+
+    # retrieve logits
+    logits = model(input_values).logits
+
+    # take argmax and decode
     predicted_ids = torch.argmax(logits, dim=-1)
-    transcription = processor.decode(predicted_ids[0])
-    return transcription
-
-
-def parse(wav_file, applyLM):
-    input_values = read_file_and_process(wav_file)
-    with torch.no_grad():
-        logits = model(**input_values).logits
-
-    if applyLM:
-        return parse_transcription_with_lm(logits)
-    else:
-        return parse_transcription(logits)
-
-
-model_id = "aditii09/facebook_english_asr"
-
-processor = Wav2Vec2Processor.from_pretrained(model_id)
-processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)
-model = Wav2Vec2ForCTC.from_pretrained(model_id)
-
-input_ = gr.Audio(source="microphone", type="filepath")
-txtbox = gr.Textbox(
-    label="Output from model will appear here:",
-    lines=5
-)
-chkbox = gr.Checkbox(label="Apply LM", value=False)
-
-gr.Interface(parse, inputs=[input_, chkbox], outputs=txtbox,
-             streaming=True, interactive=True,
-             analytics_enabled=False, show_tips=False, enable_queue=True).launch(inline=False);
+    transcription = processor.batch_decode(predicted_ids)
+
+    return transcription[0].lower()  # batch size 1
+
+
+iface = gr.Interface(speech2text, "microphone", "text")
+
+iface.launch()
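The update also deletes the `Wav2Vec2ProcessorWithLM` branch rather than fixing it. For reference, a sketch of what that path does, assuming the `aditii09/facebook_english_asr` repo actually ships the `pyctcdecode`/KenLM files that `Wav2Vec2ProcessorWithLM` needs (if the repo lacks them, `from_pretrained` raises at startup):

import torch
from transformers import Wav2Vec2ProcessorWithLM

# requires pyctcdecode and kenlm installed, plus a language model
# (KenLM n-gram) stored alongside the acoustic checkpoint in the repo
processor_with_lm = Wav2Vec2ProcessorWithLM.from_pretrained("aditii09/facebook_english_asr")


def decode_with_lm(logits: torch.Tensor) -> str:
    # beam-search decode over the full logit distribution; unlike the
    # greedy argmax path, batch_decode here takes raw numpy logits
    result = processor_with_lm.batch_decode(logits.cpu().numpy())
    return result.text[0]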