import json
import os
import tempfile

import cv2
import gradio as gr
import numpy as np

from framevis import FrameVis
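
# NOTE: FrameVis here is the frame-visualization class provided by framevis.py,
# which is assumed to be importable (e.g. placed alongside this script).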


class InteractiveFrameVis(FrameVis):
    """Extended FrameVis class that tracks frame positions"""

    def visualize(self, source, nframes, height=None, width=None, direction="horizontal", trim=False, quiet=True):
        """Extended visualize method that returns both the visualization and frame data"""
        video = cv2.VideoCapture(source)
        if not video.isOpened():
            raise FileNotFoundError("Source Video Not Found")

        # Read frame count and frame rate so strip positions can be mapped back to timestamps
        total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = video.get(cv2.CAP_PROP_FPS)
        keyframe_interval = total_frames / nframes

        # Build the visualization using the parent implementation
        output_image = super().visualize(source, nframes, height, width, direction, trim, quiet)

        # Calculate frame positions and timestamps for each slice of the output image
        frame_data = []
        img_height, img_width = output_image.shape[:2]

        for i in range(nframes):
            frame_pos = int(keyframe_interval * (i + 0.5))  # same calculation as in FrameVis.visualize
            timestamp = frame_pos / fps if fps else 0.0
            if direction == "horizontal":
                x_start = (i * img_width) // nframes
                x_end = ((i + 1) * img_width) // nframes
                frame_info = {
                    "frame": frame_pos,
                    "time": timestamp,
                    "x_start": int(x_start),
                    "x_end": int(x_end),
                    "y_start": 0,
                    "y_end": img_height
                }
            else:  # vertical
                y_start = (i * img_height) // nframes
                y_end = ((i + 1) * img_height) // nframes
                frame_info = {
                    "frame": frame_pos,
                    "time": timestamp,
                    "x_start": 0,
                    "x_end": img_width,
                    "y_start": int(y_start),
                    "y_end": int(y_end)
                }
            frame_data.append(frame_info)

        video.release()
        return output_image, frame_data


def extract_frame(video_path, frame_number):
    """Extract a specific frame from the video"""
    if not video_path:
        return None
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
    ret, frame = cap.read()
    cap.release()
    if ret:
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return None


def process_video(video_path, nframes, height, width, direction, trim, average, blur_amount):
    """Process video using FrameVis and return the visualization with frame data"""
    try:
        fv = InteractiveFrameVis()

        # Process the video (Gradio numeric inputs may arrive as floats, so cast to int)
        output_image, frame_data = fv.visualize(
            video_path,
            nframes=int(nframes),
            height=int(height) if height > 0 else None,
            width=int(width) if width > 0 else None,
            direction=direction,
            trim=trim,
            quiet=False
        )

        # Apply post-processing if requested
        if average:
            output_image = fv.average_image(output_image, direction)
        elif blur_amount > 0:
            output_image = fv.motion_blur(output_image, direction, int(blur_amount))

        # Convert from BGR to RGB for Gradio
        output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)

        # Store frame data in a temporary file so the selection handler can look frames up later
        temp_dir = tempfile.gettempdir()
        data_path = os.path.join(temp_dir, "frame_data.json")
        with open(data_path, "w") as f:
            json.dump({"video_path": video_path, "frames": frame_data}, f)

        return output_image, data_path
    except Exception as e:
        raise gr.Error(str(e))


def on_image_select(evt: gr.SelectData, frame_data_path):
    """Show the source frame that corresponds to the selected position in the visualization"""
    if not frame_data_path:
        return None, ""
    try:
        # Load frame data
        with open(frame_data_path) as f:
            data = json.load(f)

        video_path = data["video_path"]
        frames = data["frames"]

        # Pixel coordinates of the selection within the visualization image
        x, y = evt.index[0], evt.index[1]

        # Find which frame slice was selected
        for frame in frames:
            if (frame["x_start"] <= x <= frame["x_end"] and
                    frame["y_start"] <= y <= frame["y_end"]):
                # Extract and return the frame
                preview = extract_frame(video_path, frame["frame"])
                if preview is not None:
                    return preview, f"Frame {frame['frame']} (Time: {frame['time']:.2f}s)"
    except Exception as e:
        print(f"Error handling selection: {e}")
    return None, ""


# Create the Gradio interface
with gr.Blocks(title="FrameVis - Video Frame Visualizer") as demo:
    gr.Markdown("""
    # 🎬 FrameVis - Video Frame Visualizer
    Upload a video to create a beautiful visualization of its frames. The tool will extract frames at regular intervals
    and combine them into a single image. **Click on the visualization to see the original frame at that position!**
    """)

    with gr.Row():
        with gr.Column(scale=1):
            # Input components
            video_input = gr.Video(label="Upload Video")
            with gr.Row():
                nframes = gr.Slider(minimum=1, maximum=500, value=100, step=1,
                                    label="Number of Frames")
                direction = gr.Radio(["horizontal", "vertical"], value="horizontal",
                                     label="Direction")
            with gr.Row():
                height = gr.Number(value=0, label="Frame Height (0 for auto)")
                width = gr.Number(value=0, label="Frame Width (0 for auto)")
            with gr.Row():
                trim = gr.Checkbox(label="Auto-trim black bars")
                average = gr.Checkbox(label="Average colors")
            blur_amount = gr.Slider(minimum=0, maximum=200, value=0, step=1,
                                    label="Motion Blur Amount")
            process_btn = gr.Button("Generate Visualization", variant="primary")

        with gr.Column(scale=2):
            # Output components
            frame_data = gr.State()  # Hidden component storing the path of the frame-data JSON
            output_image = gr.Image(label="Visualization Result", interactive=False, height=300)
            frame_info = gr.Markdown("Click on the visualization to see frame details")
            preview_frame = gr.Image(label="Frame Preview", interactive=False, height=300)

    # Handle processing
    process_btn.click(
        fn=process_video,
        inputs=[
            video_input,
            nframes,
            height,
            width,
            direction,
            trim,
            average,
            blur_amount
        ],
        outputs=[output_image, frame_data]
    )

    # Show the matching source frame when the visualization is clicked
    output_image.select(
        fn=on_image_select,
        inputs=[frame_data],
        outputs=[preview_frame, frame_info]
    )


if __name__ == "__main__":
    demo.launch()
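
# Assumed runtime dependencies: gradio, opencv-python and numpy, plus framevis.py
# alongside this file; pass share=True to demo.launch() above if a public Gradio
# link is needed.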