Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
 
 asr_model = "distil-whisper/distil-medium.en"
 
-
+asr_pipe = pipeline("automatic-speech-recognition", model=asr_model)
 
 def transcribe(stream, new_chunk):
     sr, y = new_chunk
@@ -15,13 +15,24 @@ def transcribe(stream, new_chunk):
         stream = np.concatenate([stream, y])
     else:
         stream = y
-    return stream,
+    return stream, asr_pipe({"sampling_rate": sr, "raw": stream})["text"]
 
+demo = gr.Blocks()
 
+
+mic = gr.Interface(
+    fn = transcribe,
+    inputs = [
+        "state", gr.Audio(sources=["microphone"], streaming=True)],
+    outputs = ["state", "text"],
+    layout="horizontal",
+    theme="huggingface",
+    title="Whisper & BERT demo - Intent Classification",
+    description=(
+        "Transcribe audio inputs with Whisper ASR model and detect intention from the text. Use BERT NLP model to classify the intention "
+        "as one of the commands to command a light."
+    ),
+    allow_flagging="never",
     live=True,
 )
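
For reference, below is a minimal sketch of what the full app.py might look like after this commit. It is a reconstruction under stated assumptions, not the actual file: the imports (gradio, numpy, transformers.pipeline), the `if stream is not None:` guard and float32 normalization hidden in the diff's collapsed lines 11-14, and the final launch() call are all inferred from the standard Gradio streaming-transcription pattern. The unused `demo = gr.Blocks()` and the legacy `layout=` and `theme=` arguments from the commit are dropped here, since `gr.Interface` in Gradio 4.x (implied by the commit's `sources=` argument) no longer accepts them.

import gradio as gr
import numpy as np
from transformers import pipeline

asr_model = "distil-whisper/distil-medium.en"

asr_pipe = pipeline("automatic-speech-recognition", model=asr_model)

def transcribe(stream, new_chunk):
    sr, y = new_chunk
    # Assumed preprocessing (hidden in the diff's collapsed lines 11-14):
    # the ASR pipeline expects raw float32 audio normalized to [-1, 1].
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak
    # Accumulate chunks in the session state so each update re-transcribes
    # the whole utterance, not just the newest chunk.
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y
    return stream, asr_pipe({"sampling_rate": sr, "raw": stream})["text"]

mic = gr.Interface(
    fn=transcribe,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", "text"],
    title="Whisper & BERT demo - Intent Classification",
    description=(
        "Transcribe audio inputs with Whisper ASR model and detect intention "
        "from the text. Use BERT NLP model to classify the intention as one "
        "of the commands to command a light."
    ),
    allow_flagging="never",
    live=True,
)

mic.launch()  # assumed entry point; the diff does not show how the app is launched

The paired "state" entries in inputs and outputs implement Gradio's session-state loop: the first value returned by transcribe is fed back as stream on the next streaming chunk, which is why the function concatenates into stream before handing the accumulated audio to the pipeline.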