Teapack1 commited on
Commit
0cce770
1 Parent(s): b370699

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -9,6 +9,12 @@ pipe = pipeline("audio-classification", model=model_id)
9
  title = "Keyword Spotting Wav2Vec2"
10
  description = "Gradio demo for finetuned Wav2Vec2 model on a custom dataset to perform keyword spotting task. Classes are scene 1, scene 2, scene 3, yes, no and stop."
11
 
 
 
 
 
 
 
12
 
13
  def classify_audio(audio):
14
  preds = pipe(audio)
@@ -17,14 +23,14 @@ def classify_audio(audio):
17
  outputs[p["label"]] = p["score"]
18
  return outputs
19
 
20
-
21
 
22
  iface = gr.Interface(
23
  fn=classify_audio,
24
  inputs=gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio"),
25
  outputs=gr.outputs.Label(),
26
- title="Audio Classification Demo",
27
- description="A simple demo to classify audio using a Hugging Face model."
28
  )
 
29
 
30
  iface.launch(debug=True, share=True)
 
9
# UI copy for the Gradio demo page.
title = "Keyword Spotting Wav2Vec2"
description = "Gradio demo for finetuned Wav2Vec2 model on a custom dataset to perform keyword spotting task. Classes are scene 1, scene 2, scene 3, yes, no and stop."
11
 
12
# Pre-recorded clips offered as one-click examples in the demo UI.
# NOTE(review): paths are placeholders — TODO replace with real audio files.
example_samples = [
    ("path_to_audio_file_1.wav",),
    ("path_to_audio_file_2.wav",),
    # Add more example samples as needed
]
18
 
19
  def classify_audio(audio):
20
  preds = pipe(audio)
 
23
  outputs[p["label"]] = p["score"]
24
  return outputs
25
 
 
26
 
27
# Assemble the Gradio demo: microphone recording -> classify_audio -> label scores.
iface = gr.Interface(
    fn=classify_audio,
    inputs=gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio"),
    outputs=gr.outputs.Label(),
    # BUG FIX: gr.Interface has no `test_examples()` method — the original
    # `iface.test_examples(example_samples)` call raised AttributeError at startup.
    # Example clips are supplied through the `examples` constructor argument instead.
    examples=example_samples,
    title=title,
    description=description,
)
35
 
36
# Start the app: debug=True surfaces tracebacks in the UI, share=True
# requests a public tunnel URL from Gradio.
iface.launch(debug=True, share=True)