Teapack1 committed
Commit 62a6cf8
1 parent: ea2e592

Update app.py

Files changed (1)
  1. app.py +36 -1
app.py CHANGED
@@ -3,4 +3,39 @@ from transformers.pipelines.audio_utils import ffmpeg_microphone_live
  import torch

  asr_model = "openai/whisper-tiny.en"
- nlp_model = "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
+ nlp_model = "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
+
+ pipe = pipeline("automatic-speech-recognition", model=model_id, device=device)
+ sampling_rate = pipe.feature_extractor.sampling_rate
+
+ chunk_length_s = 10 # how often returns the text
+ stream_chunk_s = 1 # how often the microphone is checked for new audio
+ mic = ffmpeg_microphone_live(
+     sampling_rate=sampling_rate,
+     chunk_length_s=chunk_length_s,
+     stream_chunk_s=stream_chunk_s,
+ )
+
+ def listen_print_loop(responses):
+     for response in responses:
+         if response["text"]:
+             print(response["text"], end="\r")
+             return response["text"]
+         if not response["partial"]:
+             print("")
+
+
+ classifier = pipeline("zero-shot-classification", model=nlp_model)
+ candidate_labels = ["dim the light", "turn on light fully", "turn off light fully", "raise the light", "nothing about light"]
+
+
+ while True:
+     context = listen_print_loop(pipe(mic))
+     print(context)
+     output = classifier(context, candidate_labels, multi_label=False)
+     top_label = output['labels'][0]
+     top_score = output['scores'][0]
+     print(f"Top Prediction: {top_label} with a score of {top_score:.2f}")
+
+
+
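Note on the committed code: this hunk is not runnable as shown. `pipeline` must be imported from `transformers` (not visible in this hunk), `model_id` and `device` are used but never defined (the ASR checkpoint defined above is named `asr_model`), and `listen_print_loop` returns as soon as the first non-empty, still-partial transcription arrives, so the classifier sees a fragment rather than the finished chunk. Below is a minimal sketch of a corrected app.py; the `from transformers import pipeline` import, the explicit `device` selection, and the restructured `listen_print_loop` (which returns only once a non-partial chunk arrives, following the streaming pattern from the Hugging Face Audio course) are assumptions, not part of this commit.

# Minimal sketch of a corrected app.py based on this diff.
# Assumptions (not in the commit): the `pipeline` import, the explicit
# `device` selection, and the restructured listen_print_loop below.
import torch
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_microphone_live

asr_model = "openai/whisper-tiny.en"
nlp_model = "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"

# `device` is used but never defined in the diff; pick GPU when available.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# The diff passes `model=model_id`; the checkpoint defined above is `asr_model`.
pipe = pipeline("automatic-speech-recognition", model=asr_model, device=device)
sampling_rate = pipe.feature_extractor.sampling_rate

chunk_length_s = 10  # length of each finished transcription chunk, in seconds
stream_chunk_s = 1   # how often the microphone is polled for new audio
mic = ffmpeg_microphone_live(
    sampling_rate=sampling_rate,
    chunk_length_s=chunk_length_s,
    stream_chunk_s=stream_chunk_s,
)


def listen_print_loop(responses):
    # Print the rolling partial transcription and return the text once the
    # pipeline marks the chunk as complete. The commit instead returns on the
    # first non-empty partial result. The "partial" field is treated here as
    # a one-element list, as in the Hugging Face Audio course example; the
    # commit checks it as a plain bool, so adjust to whatever your
    # transformers version actually yields.
    for response in responses:
        if response["text"]:
            print(response["text"], end="\r")
        if not response["partial"][0]:
            print("")
            return response["text"]


classifier = pipeline("zero-shot-classification", model=nlp_model)
candidate_labels = [
    "dim the light",
    "turn on light fully",
    "turn off light fully",
    "raise the light",
    "nothing about light",
]

while True:
    # Transcribe one chunk of speech, then zero-shot classify it against the
    # light-control intents.
    context = listen_print_loop(pipe(mic))
    print(context)
    output = classifier(context, candidate_labels, multi_label=False)
    top_label = output["labels"][0]
    top_score = output["scores"][0]
    print(f"Top Prediction: {top_label} with a score of {top_score:.2f}")

With chunk_length_s = 10 and stream_chunk_s = 1, the microphone is polled roughly once per second, partial transcriptions are printed as they arrive, and only the text of each completed chunk is passed to the zero-shot classifier to pick one of the five light-control intents.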