# Author: Amith Adiraju
# Commit e8f31c7: Added exception handling, fixed models and tokenizer being
# on different devices, fixed boilerplate code.
import streamlit as st
from streamlit import session_state as sst

from model_inference import rate_video_frames, get_text_from_audio, summarize_from_text
from utils import navigate_to, read_important_frames, extract_audio
# Upload size limits (adjust based on your system).
SMALL_VIDEO_LIMIT_MB = 35  # Files <= 35MB are considered small
LARGE_VIDEO_LIMIT_MB = 50  # Max large file upload allowed

# Same limits in bytes, for direct comparison against UploadedFile.size.
SMALL_VIDEO_LIMIT_BYTES = SMALL_VIDEO_LIMIT_MB * 1024 * 1024
LARGE_VIDEO_LIMIT_BYTES = LARGE_VIDEO_LIMIT_MB * 1024 * 1024
async def landing_page():
    """Render the upload page.

    Accepts a video upload, enforces the size limit, then best-effort
    extracts key frames and the audio track into Streamlit session state
    for the inference page to consume.

    Side effects:
        - Sets sst["important_frames"] when frame extraction succeeds.
        - Sets sst["audio_transcript"] when audio extraction succeeds.
        - On either failure the corresponding key is simply not set, which
          signals the inference page to skip that analysis.
    """
    uploaded_file = st.file_uploader("Upload a video",
                                     type=["mp4", "avi", "mov"])
    if uploaded_file is None:
        return

    # Restrict max file upload size before reading the whole payload.
    if uploaded_file.size > LARGE_VIDEO_LIMIT_BYTES:
        st.error(f"File is too large! Max allowed size is {LARGE_VIDEO_LIMIT_MB}MB. Please upload a smaller version of it.")
        return

    # Raw bytes of the upload; fed to both frame and audio extraction.
    video_bytes = uploaded_file.read()

    # Try to get important frames from this video; if it fails, don't add
    # the session-state key, so rating is skipped downstream.
    with st.spinner("Getting most important moments from your video."):
        try:
            important_frames = read_important_frames(video_bytes, 100)
            st.success("Got important moments.")
            # Stash frames for the model-inference page.
            sst["important_frames"] = important_frames
        except Exception as e:  # broad on purpose: best-effort UI boundary
            st.write(f"Sorry couldn't extract important frames from this video & can't rate this on movie scale, because of error: {e}")

    # Try to get audio from this video; if it fails, don't add the
    # session-state key, so summarization is skipped downstream.
    with st.spinner("Getting audio transcript from your video for summary"):
        try:
            audio_transcript_bytes = extract_audio(video_bytes)
            st.success("Got audio transcript.")
            # Stash audio for the model-inference page.
            sst["audio_transcript"] = audio_transcript_bytes
        except Exception as e:  # broad on purpose: best-effort UI boundary
            st.write(f"Sorry couldn't extract audio from this video & can't rate summarize it, because of error: {e}")

    st.button("Summarize & Analyze Video",
              on_click=navigate_to,
              args=("model_inference_page",))
async def model_inference_page():
    """Render inference results for the uploaded video.

    Shows a movie-scale rating (if the landing page stored frames) and an
    audio-transcript summary (if it stored audio). Each model call is
    wrapped so a failure degrades to an inline "Sorry..." message instead
    of crashing the page.
    """
    # Rating path: only runs if the landing page stored extracted frames.
    if "important_frames" in sst:
        important_frames = sst["important_frames"]
        with st.spinner("Generating Movie Scale rating for your video"):
            try:
                video_rating_scale = rate_video_frames(important_frames)
            except Exception as e:  # surface the error as page content
                video_rating_scale = f"Sorry, we couldn't generate rating of your video because of this error: {e} "
            st.toast("Done")

        st.header("Movie Scale Rating of Your Video: ", divider=True)
        st.write(video_rating_scale)
        st.markdown("************************")

    # Summary path: only runs if the landing page stored an audio track.
    if "audio_transcript" in sst:
        with st.spinner("Extracting text from audio file"):
            try:
                video_summary_text = get_text_from_audio(sst["audio_transcript"])
            except Exception as e:  # surface the error as page content
                video_summary_text = f"Sorry, we couldn't extract text from audio of this file because of this error: {e} "
            st.toast("Done")

        # Error messages above all begin with "Sorry" — used as a sentinel
        # to skip summarization when transcription already failed.
        if not video_summary_text.startswith("Sorry"):
            with st.spinner("Summarizing text from entire transcript"):
                try:
                    video_summary_text = summarize_from_text(video_summary_text)
                except Exception as e:  # surface the error as page content
                    video_summary_text = f"Sorry, we couldn't summarize text from audio of this file because of this error: {e} "
                st.toast("Done")

        st.header("Audio Transcript summary of your video: ", divider=True)
        st.write(video_summary_text)

    st.button("Go Home",
              on_click=navigate_to,
              args=("landing_page",))