yellowcandle committed
Commit 8607936 · unverified · 1 parent: e828a9f

feat: add whisper speech-to-text model


Add OpenAI's Whisper large-v3 model for automatic speech recognition.
Key changes:
- Import required libraries (transformers, torch, datasets)
- Load Whisper model and processor
- Create pipeline for speech-to-text transcription
- Update Gradio interface to accept audio input and return text output
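For reference, a transformers ASR pipeline called with a filepath returns a dict whose "text" key holds the transcript. A minimal sketch of the same setup outside Gradio (the sample path is hypothetical):

    import torch
    from transformers import pipeline

    # Minimal sketch, assuming a transformers install with Whisper support;
    # mirrors the high-level pipeline the app builds in the diff below.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    pipe = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device=device,
    )

    result = pipe("sample.wav")   # hypothetical local audio file
    print(result["text"])         # transcript string shown in the Gradio UI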

Files changed (1): app.py (+34 -5)
app.py CHANGED
@@ -1,14 +1,43 @@
 import spaces
 import gradio as gr
 # Use a pipeline as a high-level helper
-from transformers import pipeline
+import torch
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
+from datasets import load_dataset
 
-pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
 
-@spaces.GPU
 def transcribe_audio(audio):
-    return pipe(audio)
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
-demo = gr.Interface(fn=transcribe_audio, inputs=gr.Audio(sources="upload", type="filepath"), outputs="text")
+    model_id = "openai/whisper-large-v3"
+
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+    )
+    model.to(device)
+
+    processor = AutoProcessor.from_pretrained(model_id)
+
+    pipe = pipeline(
+        "automatic-speech-recognition",
+        model=model,
+        tokenizer=processor.tokenizer,
+        feature_extractor=processor.feature_extractor,
+        max_new_tokens=128,
+        chunk_length_s=25,
+        batch_size=16,
+        torch_dtype=torch_dtype,
+        device=device,
+    )
+
+    result = pipe(audio)
+    return result["text"]
+
+
+demo = gr.Interface(fn=transcribe_audio,
+                    inputs=gr.Audio(sources="upload", type="filepath"),
+                    outputs="text")
 demo.launch()
 
+
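Since the Interface takes an uploaded audio filepath and returns plain text, the deployed Space can also be exercised programmatically. A sketch assuming a recent gradio_client; the Space id and audio path are hypothetical, and "/predict" is gr.Interface's default endpoint name:

    from gradio_client import Client, handle_file

    # Hypothetical Space id; substitute the actual user/space name.
    client = Client("yellowcandle/whisper-demo")
    transcript = client.predict(
        handle_file("sample.wav"),  # hypothetical local audio file
        api_name="/predict",
    )
    print(transcript)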