Update app.py
app.py CHANGED

@@ -71,7 +71,6 @@ model = CNN1DLSTMAudioClassifier(num_class)
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.load_state_dict(torch.load("speech-emotion-recognition-best-model.bin", weights_only=False))
-model = model.to(device)
 model.eval()
 
 def preprocess_single_audio(file_path, sample_rate=16000, n_mels=128, n_fft=2048, hop_length=512):
@@ -143,6 +142,7 @@ def predict(wave):
     le = LabelEncoder()
     le.classes_ = np.array(['Angry', 'Disgusting', 'Fear', 'Happy', 'Neutral', 'Sad'])
     wave = wave.unsqueeze(0)
+    wave = wave.to(device)
     with torch.no_grad():
         prediction = model(wave)
     predicted_emotion, confidence = decode_emotion_prediction(prediction, le)
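For context, a self-contained sketch of the inference path these two hunks leave behind. The model class name, checkpoint filename, label list, and the predict body come from the diff; the tiny stand-in network, the body of decode_emotion_prediction, and the tensor shapes are assumptions added only so the sketch runs on its own.

import numpy as np
import torch
import torch.nn as nn
from sklearn.preprocessing import LabelEncoder

class TinyClassifier(nn.Module):
    # Hypothetical stand-in for CNN1DLSTMAudioClassifier so the sketch runs;
    # the real app instead loads trained weights with
    # model.load_state_dict(torch.load("speech-emotion-recognition-best-model.bin", weights_only=False))
    def __init__(self, num_classes=6, n_mels=128):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool1d(1)          # collapse the time axis
        self.fc = nn.Linear(n_mels, num_classes)

    def forward(self, x):                            # x: (batch, n_mels, time)
        return self.fc(self.pool(x).squeeze(-1))

def decode_emotion_prediction(prediction, le):
    # Hypothetical decoder: softmax over the classes, then map the argmax
    # index back to its label through the fitted LabelEncoder.
    probs = torch.softmax(prediction, dim=1)
    confidence, idx = probs.max(dim=1)
    return le.inverse_transform(idx.cpu().numpy())[0], confidence.item()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = TinyClassifier()
# The commit drops "model = model.to(device)", so the model stays on the CPU
# it was built on; only the input tensor is moved below.
model.eval()

def predict(wave):
    le = LabelEncoder()
    le.classes_ = np.array(['Angry', 'Disgusting', 'Fear', 'Happy', 'Neutral', 'Sad'])
    wave = wave.unsqueeze(0)   # add a batch dimension: (n_mels, time) -> (1, n_mels, time)
    wave = wave.to(device)     # the line this commit adds
    with torch.no_grad():      # inference only, no autograd bookkeeping
        prediction = model(wave)
    return decode_emotion_prediction(prediction, le)

print(predict(torch.randn(128, 50)))   # e.g. ('Neutral', 0.18)

Moving the input rather than the model only stays consistent when device resolves to cpu (as on a CPU-only runtime); on a GPU host, model.to(device) would still be needed so the weights and the input end up on the same device.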