# ASRfr / app.py
# Kr08's picture
# Basic App.py
# f427fe9 verified
# raw
# history blame
# 1.74 kB
import torchaudio as ta
import streamlit as st
from io import BytesIO
from transformers import AutoProcessor, SeamlessM4TModel


@st.cache_resource
def _load_asr_model():
    """Load the SeamlessM4T processor and model exactly once per server process.

    Streamlit re-runs the entire script on every widget interaction; without
    caching, the multi-GB model would be re-instantiated on each rerun.
    ``st.cache_resource`` memoizes the pair across reruns and sessions.

    Returns:
        tuple: (processor, model) for "facebook/hf-seamless-m4t-medium".
    """
    processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium", use_fast=False)
    model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
    return processor, model


# Module-level names preserved for any later code that references them.
processor, model = _load_asr_model()
# Page title
st.title("Audio Player with Live Transcription")

# Sidebar: uploader for one or more audio files plus a submit trigger.
with st.sidebar:
    st.header("Upload Audio Files")
    uploaded_files = st.file_uploader("Choose audio files", type=["mp3", "wav"], accept_multiple_files=True)
    submit_button = st.button("Submit")
# def transcribe_audio(audio_data):
# recognizer = sr.Recognizer()
# with sr.AudioFile(audio_data) as source:
# audio = recognizer.record(source)
# try:
# # Transcribe the audio using Google Web Speech API
# transcription = recognizer.recognize_google(audio)
# return transcription
# except sr.UnknownValueError:
# return "Unable to transcribe the audio."
# except sr.RequestError as e:
# return f"Could not request results; {e}"
# Main flow: once the user presses Submit with at least one file selected,
# show a player for each upload and decode it for transcription.
if submit_button and uploaded_files:
    st.write("Files uploaded successfully!")
    for uploaded_file in uploaded_files:
        # Display file name and audio player
        st.write(f"**File name**: {uploaded_file.name}")
        st.audio(uploaded_file, format=uploaded_file.type)

        # Transcription section
        st.write("**Transcription**:")
        # BUG FIX: torchaudio.load expects a filesystem path or a file-like
        # object, not a raw ``bytes`` blob — wrap the upload's bytes in
        # BytesIO (imported above for exactly this purpose) before decoding.
        waveform, sampling_rate = ta.load(BytesIO(uploaded_file.getvalue()))
        # NOTE(review): the waveform is decoded but never fed to the model
        # yet — the actual SeamlessM4T inference step is still TODO.