import torchaudio as ta
import streamlit as st
from io import BytesIO
from transformers import AutoProcessor, SeamlessM4TModel
# Load the SeamlessM4T processor and model (medium checkpoint) used for speech-to-text
processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium", use_fast=False)
model = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
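# Note: Streamlit reruns this script on every interaction, so the processor and model
# above are reloaded each time. A common pattern (sketch only, not wired in here) is to
# cache them with st.cache_resource:
# @st.cache_resource
# def load_seamless_m4t():
#     proc = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium", use_fast=False)
#     mdl = SeamlessM4TModel.from_pretrained("facebook/hf-seamless-m4t-medium")
#     return proc, mdl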
# Title of the app
st.title("Audio Player with Live Transcription")
# Sidebar for file uploader and submit button
st.sidebar.header("Upload Audio Files")
uploaded_files = st.sidebar.file_uploader("Choose audio files", type=["mp3", "wav"], accept_multiple_files=True)
submit_button = st.sidebar.button("Submit")
# Commented-out alternative transcription helper using the SpeechRecognition library:
# def transcribe_audio(audio_data):
#     recognizer = sr.Recognizer()
#     with sr.AudioFile(audio_data) as source:
#         audio = recognizer.record(source)
#     try:
#         # Transcribe the audio using Google Web Speech API
#         transcription = recognizer.recognize_google(audio)
#         return transcription
#     except sr.UnknownValueError:
#         return "Unable to transcribe the audio."
#     except sr.RequestError as e:
#         return f"Could not request results; {e}"
if submit_button and uploaded_files:
    st.write("Files uploaded successfully!")
    for uploaded_file in uploaded_files:
        # Display file name and audio player
        st.write(f"**File name**: {uploaded_file.name}")
        st.audio(uploaded_file, format=uploaded_file.type)
        # Transcription section
        st.write("**Transcription**:")
        # Wrap the uploaded bytes in BytesIO so torchaudio can decode them in memory
        waveform, sampling_rate = ta.load(BytesIO(uploaded_file.getvalue()))
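        # The original file stops after loading the waveform; the lines below are an
        # assumed completion (a minimal sketch) using SeamlessM4T's speech-to-text
        # generation with tgt_lang="eng"; adjust the target language as needed.
        # Downmix to mono and resample to 16 kHz, the rate the processor expects.
        waveform = waveform.mean(dim=0, keepdim=True)
        waveform = ta.functional.resample(waveform, orig_freq=sampling_rate, new_freq=16_000)
        audio_inputs = processor(audios=waveform, sampling_rate=16_000, return_tensors="pt")
        # generate_speech=False makes generate() return text tokens instead of audio
        output_tokens = model.generate(**audio_inputs, tgt_lang="eng", generate_speech=False)
        transcription = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
        st.write(transcription)

# Run with: streamlit run <this_file>.py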