import gradio as gr
from transformers import pipeline

model_id = "Teapack1/model_KWS"  # update with your model id
pipe = pipeline("automatic-speech-recognition", model=model_id)

title = "Keyword Spotting Wav2Vec2"
description = "Gradio demo for a Wav2Vec2 model fine-tuned on a custom dataset for keyword spotting. Classes are scene 1, scene 2, scene 3, yes, no and stop."
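# The description above frames this as a keyword-spotting (classification) demo, while
# the pipeline loaded above uses the "automatic-speech-recognition" task. A minimal
# sketch of the classification route, assuming the checkpoint exposes an
# audio-classification head (it may not; adapt to your own model):
#
#     clf = pipeline("audio-classification", model=model_id)
#
#     def classify_keyword(filepath):
#         preds = clf(filepath, top_k=1)  # returns [{"label": ..., "score": ...}]
#         return preds[0]["label"]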
def transcribe_speech(filepath):
    """Run the ASR pipeline on an audio file and return the transcribed text."""
    output = pipe(
        filepath,
        # max_new_tokens / generate_kwargs are only meaningful for seq2seq ASR
        # checkpoints (e.g. Whisper), not CTC models such as Wav2Vec2.
        max_new_tokens=256,
        generate_kwargs={
            "task": "transcribe",
            "language": "sinhalese",
        },  # update with the language you've fine-tuned on
        chunk_length_s=30,  # split long audio into 30-second chunks
        batch_size=8,
    )
    return output["text"]
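# Quick sanity check outside Gradio (hypothetical sample path):
#     print(transcribe_speech("samples/yes_01.wav"))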
demo = gr.Blocks()
mic_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources="microphone", type="filepath"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
)

file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources="upload", type="filepath"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
)
with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )
demo.launch(debug=True)
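# When running locally rather than on a Space, a temporary public link can be created
# with Gradio's share option, e.g.:
#     demo.launch(debug=True, share=True)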