## Libraries for data preprocessing
import numpy as np
import pandas as pd
## Libraries for loading and serving the deep learning model
import tensorflow as tf
from tensorflow import keras
## Libraries for reading audio files and building the web UI
import librosa as lib
import gradio as gr

## Load the trained heart sound classification model
model = keras.models.load_model('best_heartbeatsound_classification.h5')
def loading_sound_file(sound_file, sr=22050, duration=10):
    """Load an audio file and convert it to the MFCC feature tensor the model expects."""
    input_length = sr * duration
    X, sr = lib.load(sound_file, sr=sr, duration=duration)
    dur = lib.get_duration(y=X, sr=sr)
    ## Pad or truncate the audio so every clip has the same length
    if round(dur) < duration:
        pad_amount = input_length - len(X)
        X = np.pad(X, (0, pad_amount), mode='constant')
    elif round(dur) > duration:
        X = X[:input_length]
    ## Extract 25 MFCC coefficients and average them over time
    mfccs = np.mean(lib.feature.mfcc(y=X, sr=sr, n_mfcc=25).T, axis=0)
    ## Reshape to match the model's input shape: (batch, features, channels) -> (1, 25, 1)
    data = np.array(mfccs).reshape(1, -1, 1)
    return data
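
## Quick sanity check of the feature shape (a minimal sketch, not part of the app;
## the file name 'synthetic.wav' and the soundfile dependency are assumptions for illustration):
## import soundfile as sf
## sf.write('synthetic.wav', np.random.randn(22050 * 3).astype(np.float32), 22050)
## print(loading_sound_file('synthetic.wav').shape)  # expected output: (1, 25, 1)
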
def heart_signal_classification(data):
    """Predict the heart sound class for an uploaded audio file path."""
    X = loading_sound_file(data)
    pred = model.predict(X)
    ## Confidence threshold below which the prediction is reported as 'unknown'
    threshold = 0.6
    max_prob = np.max(pred)
    ## Map class indices to labels
    labels = {
        0: 'artifact',
        1: 'unlabel',
        2: 'extrastole',
        3: 'extrahls',
        4: 'normal',
        5: 'murmur'
    }
    if max_prob < threshold:
        label = 'unknown'
    else:
        result = pred[0].argmax()
        label = labels[result]
    return label
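
## Direct call outside the web UI (a minimal sketch; 'sample_heartbeat.wav' is a
## hypothetical local file path):
## print(heart_signal_classification('sample_heartbeat.wav'))
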
################### Gradio Web APP ################################
title = "Heart Signal Classification App"
Input = gr.Audio(sources=["upload"], type="filepath")
Output1 = gr.Textbox(label="Type Of Heart Signal")
description = "Type Of Signal: Artifact, Murmur, Normal, Extrastole, Extrahls"
iface = gr.Interface(fn=heart_signal_classification, inputs=Input, outputs=Output1, title=title, description=description)
iface.launch(inline=False)
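## When running locally and a shareable link is needed, launch() also accepts
## share=True (an alternative to the call above, not required on Spaces):
## iface.launch(share=True)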