from transformers import pipeline

import gradio as gr

# Hub id of the fine-tuned Wav2Vec2 keyword-spotting checkpoint.
model_id = "Teapack1/model_KWS"  # update with your model id

# Load the audio-classification pipeline once at startup (model download +
# weight load is expensive; do not do it per request).
pipe = pipeline("audio-classification", model=model_id)

title = "Keyword Spotting Wav2Vec2"
description = (
    "Gradio demo for finetuned Wav2Vec2 model on a custom dataset to perform "
    "keyword spotting task. Classes are scene 1, scene 2, scene 3, yes, no and stop."
)


def classify_audio(filepath):
    """Classify a recorded audio clip into one of the keyword classes.

    Args:
        filepath: Path to the audio file produced by the Gradio Audio
            component (``type="filepath"``).

    Returns:
        dict: Mapping of class label -> confidence score, the format the
        ``gr.Label`` output component expects.
    """
    # NOTE: the original called an undefined name `audio_classifier`,
    # which raised NameError on every request; the pipeline is `pipe`.
    predictions = pipe(filepath)
    # The pipeline returns a list of {"label": ..., "score": ...} dicts;
    # flatten it into {label: score} for the label output.
    return {p["label"]: p["score"] for p in predictions}


demo = gr.Interface(
    fn=classify_audio,
    # `gr.inputs.Audio(source=...)` was removed in modern Gradio; the
    # current API is gr.Audio(sources=[...]).
    inputs=gr.Audio(sources=["microphone"], type="filepath", label="Record your audio"),
    outputs="label",
    # Use the title/description defined above (they were previously unused).
    title=title,
    description=description,
)

if __name__ == "__main__":
    demo.launch(debug=True, share=True)