add zero spaces decorator
- app.py +2 -2
- packages.txt +1 -0
- requirements.txt +1 -0
app.py
CHANGED
@@ -4,7 +4,7 @@ from transformers import AutoFeatureExtractor, WhisperModel, AutoModelForSpeechS
 import numpy as np
 import torchaudio
 import librosa
-
+import spaces
 
 import gradio as gr
 from modules import load_audio, MosPredictor, denorm
@@ -25,7 +25,7 @@ feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large-v
 model_asli = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-large-v3", torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa")
 model_asli = model_asli.to(device)
 
-
+@spaces.GPU
 def predict_mos(wavefile:str):
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     if device != model.device:
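The app.py change is the ZeroGPU hookup: import the spaces package and decorate the inference entry point with @spaces.GPU, so the Space only holds a GPU while that function runs. A minimal standalone sketch of the same pattern (illustrative only; the function name and body here are hypothetical, not the repo's predict_mos):

import spaces
import torch
import gradio as gr

@spaces.GPU  # a GPU is attached only for the duration of each call
def predict(wavefile: str) -> str:
    # On ZeroGPU, CUDA becomes available inside the decorated function.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    return f"processed {wavefile} on {device}"

demo = gr.Interface(fn=predict, inputs=gr.Audio(type="filepath"), outputs="text")

if __name__ == "__main__":
    demo.launch()

If a single call needs more than the default GPU slot, the decorator also accepts a duration argument in seconds, e.g. @spaces.GPU(duration=120).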
packages.txt
ADDED
@@ -0,0 +1 @@
+ffmpeg
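packages.txt lists apt packages installed into the Space container; adding ffmpeg gives the audio stack a decoder for compressed formats (e.g. mp3/webm browser recordings) that soundfile alone cannot read. A short sketch of where that matters, assuming librosa is used to load the uploaded file (the repo's actual loader is modules.load_audio):

import librosa

def load_audio_16k(path: str):
    # librosa tries soundfile first and typically falls back to audioread,
    # whose decoder backend relies on a system ffmpeg; without the ffmpeg
    # package, compressed uploads would fail to load.
    wav, sr = librosa.load(path, sr=16000, mono=True)
    return wav, sr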
requirements.txt
CHANGED
@@ -3,3 +3,4 @@ speechbrain
 librosa
 gradio
 accelerate
+spaces