import streamlit as st
from transformers import pipeline

from config import MODEL_ID

# Load the model and pipeline using the model_id variable
pipe = pipeline("audio-classification", model=MODEL_ID)
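# Note: as written, the pipeline above is rebuilt on every Streamlit rerun
# (each user interaction re-executes the script). A sketch of one way to avoid
# that, assuming a Streamlit version that provides st.cache_resource, would be
# to wrap the load in a cached helper:
#
#     @st.cache_resource
#     def load_pipeline():
#         return pipeline("audio-classification", model=MODEL_ID)
#
#     pipe = load_pipeline()
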
def classify_audio(filepath):
    preds = pipe(filepath)
    outputs = {}
    for p in preds:
        outputs[p["label"]] = p["score"]
    return outputs
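# For reference, `preds` from the audio-classification pipeline is a list of
# {"label": ..., "score": ...} dicts, highest score first; the label names
# below are illustrative, the real ones come from the MODEL_ID checkpoint:
#     [{"label": "murmur", "score": 0.91}, {"label": "normal", "score": 0.04}, ...]
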
# Streamlit app layout
st.title("Heartbeat Sound Classification")

# File uploader for audio files
uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3"])

if uploaded_file is not None:
    # Load and display the audio file
    audio_bytes = uploaded_file.read()
    st.audio(audio_bytes, format='audio/wav')

    # Save the uploaded file to a temporary location
    with open("temp_audio_file.wav", "wb") as f:
        f.write(audio_bytes)
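    # Note: a single fixed filename means concurrent visitors to the app can
    # overwrite each other's uploads. One possible alternative (a sketch, not
    # part of the original app) is a per-request temporary file:
    #
    #     import tempfile
    #     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
    #         tmp.write(audio_bytes)
    #         temp_path = tmp.name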
    # Classify the audio file
    st.write("Classifying the audio...")
    results = classify_audio("temp_audio_file.wav")

    # Display the classification results
    st.write("Classification results:")
    for label, score in results.items():
        st.write(f"{label}: {score:.4f}")
# Examples of audio files for classification
st.write("Examples:")
examples = ['normal.wav', 'murmur.wav', 'extra_systole.wav', 'extra_hystole.wav', 'artifact.wav']

for example in examples:
    st.write(example)
    with open(example, 'rb') as f:
        audio_bytes = f.read()
    st.audio(audio_bytes, format='audio/wav')
    results = classify_audio(example)
    for label, score in results.items():
        st.write(f"{label}: {score:.4f}")