import gradio as gr
import cv2
import numpy as np
from framevis import FrameVis


def process_video(video_path, nframes, height, width, direction, trim, average, blur_amount):
    """Process a video with FrameVis and return the visualization image."""
    try:
        fv = FrameVis()

        # Process the video (Gradio numeric inputs may arrive as floats, so cast to int)
        output_image = fv.visualize(
            video_path,
            nframes=int(nframes),
            height=int(height) if height > 0 else None,
            width=int(width) if width > 0 else None,
            direction=direction,
            trim=trim,
            quiet=False
        )

        # Apply post-processing if requested
        if average:
            output_image = fv.average_image(output_image, direction)
        elif blur_amount > 0:
            output_image = fv.motion_blur(output_image, direction, blur_amount)

        # Convert from BGR to RGB for Gradio
        output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)
        return output_image
    except Exception as e:
        raise gr.Error(str(e))
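
# A minimal sketch of calling process_video directly, handy for testing without the UI.
# "sample.mp4" and "framevis_strip.png" are hypothetical paths, not files in this Space:
#
#     img = process_video("sample.mp4", nframes=50, height=0, width=0,
#                         direction="horizontal", trim=False, average=False,
#                         blur_amount=0)
#     cv2.imwrite("framevis_strip.png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))  # back to BGR for OpenCV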

# Create the Gradio interface
with gr.Blocks(title="FrameVis - Video Frame Visualizer") as demo:
    gr.Markdown("""
    # 🎬 FrameVis - Video Frame Visualizer

    Upload a video to create a beautiful visualization of its frames. The tool will extract
    frames at regular intervals and combine them into a single image.
    """)

    with gr.Row():
        with gr.Column(scale=1):
            # Input components
            video_input = gr.Video(label="Upload Video")

            with gr.Row():
                nframes = gr.Slider(minimum=1, maximum=500, value=100, step=1,
                                    label="Number of Frames")
                direction = gr.Radio(["horizontal", "vertical"], value="horizontal",
                                     label="Direction")

            with gr.Row():
                height = gr.Number(value=0, label="Frame Height (0 for auto)")
                width = gr.Number(value=0, label="Frame Width (0 for auto)")

            with gr.Row():
                trim = gr.Checkbox(label="Auto-trim black bars")
                average = gr.Checkbox(label="Average colors")

            blur_amount = gr.Slider(minimum=0, maximum=200, value=0, step=1,
                                    label="Motion Blur Amount")

            process_btn = gr.Button("Generate Visualization", variant="primary")

        with gr.Column(scale=2):
            # Output component
            output_image = gr.Image(label="Visualization Result", height=300)

    # Handle processing
    process_btn.click(
        fn=process_video,
        inputs=[
            video_input,
            nframes,
            height,
            width,
            direction,
            trim,
            average,
            blur_amount
        ],
        outputs=output_image
    )

if __name__ == "__main__":
    demo.launch()
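
# For long videos, Gradio's request queue can serialize slow process_video calls.
# A minimal sketch of that optional variation (not part of the launch logic above):
#
#     if __name__ == "__main__":
#         demo.queue()
#         demo.launch()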