import json
import os
import tempfile

import cv2
import gradio as gr

from framevis import FrameVis


class InteractiveFrameVis(FrameVis):
    """Extended FrameVis class that tracks frame positions."""

    def visualize(self, source, nframes, height=None, width=None, direction="horizontal", trim=False, quiet=True):
        """Extended visualize method that returns both the visualization and frame data."""
        video = cv2.VideoCapture(source)
        if not video.isOpened():
            raise FileNotFoundError("Source Video Not Found")

        total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = video.get(cv2.CAP_PROP_FPS)
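        # Spacing between sampled frames; this is intended to mirror how the
        # parent FrameVis class distributes its keyframes across the video.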
        keyframe_interval = total_frames / nframes

        output_image = super().visualize(source, nframes, height, width, direction, trim, quiet)

        frame_data = []
        img_height, img_width = output_image.shape[:2]

        for i in range(nframes):
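            # Each strip corresponds to the frame at the middle of its sampling interval.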
            frame_pos = int(keyframe_interval * (i + 0.5))
            timestamp = frame_pos / fps if fps else 0.0  # some files report fps == 0; avoid a ZeroDivisionError

            if direction == "horizontal":
                x_start = (i * img_width) // nframes
                x_end = ((i + 1) * img_width) // nframes
                frame_info = {
                    "frame": frame_pos,
                    "time": timestamp,
                    "x_start": int(x_start),
                    "x_end": int(x_end),
                    "y_start": 0,
                    "y_end": img_height,
                }
            else:
                y_start = (i * img_height) // nframes
                y_end = ((i + 1) * img_height) // nframes
                frame_info = {
                    "frame": frame_pos,
                    "time": timestamp,
                    "x_start": 0,
                    "x_end": img_width,
                    "y_start": int(y_start),
                    "y_end": int(y_end),
                }
            frame_data.append(frame_info)

        video.release()
        return output_image, frame_data


def extract_frame(video_path, frame_number):
    """Extract a specific frame from the video."""
    if not video_path:
        return None

    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
    ret, frame = cap.read()
    cap.release()

    if ret:
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return None


def process_video(video_path, nframes, height, width, direction, trim, average, blur_amount):
    """Process video using FrameVis and return the visualization with frame data."""
    try:
        fv = InteractiveFrameVis()

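        # Build the visualization and the strip-to-frame lookup table.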
        output_image, frame_data = fv.visualize(
            video_path,
            nframes=nframes,
            height=int(height) if height > 0 else None,  # gr.Number yields floats; FrameVis expects pixel counts
            width=int(width) if width > 0 else None,
            direction=direction,
            trim=trim,
            quiet=False,
        )

        if average:
            output_image = fv.average_image(output_image, direction)
        elif blur_amount > 0:
            output_image = fv.motion_blur(output_image, direction, blur_amount)

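        # OpenCV (and therefore FrameVis) works in BGR; Gradio displays RGB.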
        output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)

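        # Persist the lookup table so the image-select handler can read it back.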
        temp_dir = tempfile.gettempdir()
        data_path = os.path.join(temp_dir, "frame_data.json")
        with open(data_path, "w") as f:
            json.dump({"video_path": video_path, "frames": frame_data}, f)

        return output_image, data_path

    except Exception as e:
        raise gr.Error(str(e))


def on_image_select(evt: gr.SelectData, frame_data_path):
    """Show the source frame for the point the user clicked on the visualization."""
    if not frame_data_path:
        return None, ""

    try:
        with open(frame_data_path) as f:
            data = json.load(f)

        video_path = data["video_path"]
        frames = data["frames"]

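        # SelectData.index holds the (x, y) pixel coordinates of the click.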
        x, y = evt.index[0], evt.index[1]

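        # Find the strip that contains the clicked point.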
        for frame in frames:
            if (frame["x_start"] <= x <= frame["x_end"] and
                    frame["y_start"] <= y <= frame["y_end"]):
                preview = extract_frame(video_path, frame["frame"])
                if preview is not None:
                    return preview, f"Frame {frame['frame']} (Time: {frame['time']:.2f}s)"

    except Exception as e:
        print(f"Error handling frame selection: {e}")
    return None, ""


with gr.Blocks(title="FrameVis - Video Frame Visualizer") as demo:
    gr.Markdown("""
    # 🎬 FrameVis - Video Frame Visualizer
    Upload a video to create a beautiful visualization of its frames. The tool will extract frames at regular intervals
    and combine them into a single image. **Click on the visualization to see the original frames!**
    """)

    with gr.Row():
        with gr.Column(scale=1):
            video_input = gr.Video(label="Upload Video")
            with gr.Row():
                nframes = gr.Slider(minimum=1, maximum=500, value=100, step=1,
                                    label="Number of Frames")
                direction = gr.Radio(["horizontal", "vertical"], value="horizontal",
                                     label="Direction")

            with gr.Row():
                height = gr.Number(value=0, label="Frame Height (0 for auto)")
                width = gr.Number(value=0, label="Frame Width (0 for auto)")

            with gr.Row():
                trim = gr.Checkbox(label="Auto-trim black bars")
                average = gr.Checkbox(label="Average colors")
                blur_amount = gr.Slider(minimum=0, maximum=200, value=0, step=1,
                                        label="Motion Blur Amount")

            process_btn = gr.Button("Generate Visualization", variant="primary")

        with gr.Column(scale=2):
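            # Path of the JSON file that maps visualization strips to source frames.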
            frame_data = gr.State()
            output_image = gr.Image(label="Visualization Result", interactive=False, height=300)  # display-only; select events still fire
            frame_info = gr.Markdown("Click on the visualization to see frame details")
            preview_frame = gr.Image(label="Frame Preview", interactive=False, height=300)

    process_btn.click(
        fn=process_video,
        inputs=[
            video_input,
            nframes,
            height,
            width,
            direction,
            trim,
            average,
            blur_amount,
        ],
        outputs=[output_image, frame_data],
    )

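    # gr.Image has no mouseover event; `select` is the closest supported
    # interaction: it fires on click and reports the pixel via gr.SelectData.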
    output_image.select(
        fn=on_image_select,
        inputs=[frame_data],
        outputs=[preview_frame, frame_info],
    )


if __name__ == "__main__":
    demo.launch()