"""
=====================================================
Optical Flow: Predicting movement with the RAFT model
=====================================================
Optical flow is the task of predicting movement between two images, usually two
consecutive frames of a video. Optical flow models take two images as input, and
predict a flow: the flow indicates the displacement of every single pixel in the
first image, and maps it to its corresponding pixel in the second image. Flows
are (2, H, W)-dimensional tensors, where the first axis corresponds to the
predicted horizontal and vertical displacements.
The following example illustrates how torchvision can be used to predict flows
using our implementation of the RAFT model. We will also see how to convert the
predicted flows to RGB images for visualization.
"""
import gradio as gr
import cv2
import numpy as np
import torch
import torchvision.transforms.functional as F
import torchvision.transforms as T
from torchvision.io import read_video, write_jpeg
from torchvision.models.optical_flow import Raft_Large_Weights, raft_large
import tempfile
from pathlib import Path
from urllib.request import urlretrieve
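####################################
# A note on the flow convention used below (this helper is our own addition,
# not part of torchvision): for a flow of shape (2, H, W), channel 0 holds the
# horizontal displacement and channel 1 the vertical displacement, in pixels.
def flow_at(flow, y, x):
    # Returns the (dx, dy) displacement of pixel (y, x) in the first image;
    # its predicted position in the second image is roughly (x + dx, y + dy).
    dx = float(flow[0, y, x])  # horizontal displacement, in pixels
    dy = float(flow[1, y, x])  # vertical displacement, in pixels
    return dx, dy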
def write_flo(flow, filename):
    """
    Write optical flow in Middlebury .flo format
    :param flow: optical flow map, as a (2, H, W) tensor
    :param filename: optical flow file path to be saved
    :return: None
    adapted from https://github.com/liruoteng/OpticalFlowToolkit/
    """
    # Convert the (2, H, W) tensor to the (H, W, 2) float32 layout that the
    # .flo format stores.
    flow = flow.detach().cpu().numpy()
    flow = np.transpose(flow, (1, 2, 0)).astype(np.float32)
    with open(filename, 'wb') as f:
        magic = np.array([202021.25], dtype=np.float32)
        height, width = flow.shape[0:2]
        w = np.array([width], dtype=np.int32)
        h = np.array([height], dtype=np.int32)
        magic.tofile(f)
        w.tofile(f)
        h.tofile(f)
        flow.tofile(f)
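####################################
# For completeness, a minimal sketch of the inverse operation: reading a .flo
# file back into an (H, W, 2) array. This reader is our own helper, not part
# of torchvision, and assumes the Middlebury layout written above.
def read_flo(filename):
    with open(filename, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if magic[0] != 202021.25:
            raise ValueError(f"{filename} is not a valid .flo file")
        width = int(np.fromfile(f, np.int32, count=1)[0])
        height = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * width * height)
    return data.reshape(height, width, 2)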
def warp_flow(img, flow, mul=1.):
    # Warp the image at path `img` with an (H, W, 2) flow array. cv2.remap
    # expects absolute sampling coordinates, so the pixel grid is added to the
    # (relative, optionally scaled) displacements first.
    img = cv2.imread(img)
    flow = flow.astype(np.float32) * mul
    h, w = flow.shape[:2]
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LANCZOS4)
    return res
def get_warp_res(fname_image, fname_flow, fname_output='warped.png'):
    # Warp `fname_image` with the (H, W, 2) flow array `fname_flow` and save
    # the result.
    print(f"FNAME IMAGE: {fname_image}")
    res = warp_flow(fname_image, fname_flow, 1.)
    cv2.imwrite(fname_output, res)
    return res
def infer():
    video_url = "https://download.pytorch.org/tutorial/pexelscom_pavel_danilyuk_basketball_hd.mp4"
    video_path = Path(tempfile.mkdtemp()) / "basketball.mp4"
    _ = urlretrieve(video_url, video_path)
    frames, _, _ = read_video(str(video_path), output_format="TCHW")
    print(f"frame shape before stack: {frames[100].shape}")
    img1_batch = torch.stack([frames[100]])
    img2_batch = torch.stack([frames[101]])
    print(f"batch shape after stack: {img1_batch.shape}")
    weights = Raft_Large_Weights.DEFAULT
    transforms = weights.transforms()
    def preprocess(img1_batch, img2_batch):
        # Resize to a fixed size, then apply the transforms bundled with the
        # pretrained weights (dtype conversion and normalization).
        img1_batch = F.resize(img1_batch, size=[520, 960])
        img2_batch = F.resize(img2_batch, size=[520, 960])
        return transforms(img1_batch, img2_batch)
    img1_batch, img2_batch = preprocess(img1_batch, img2_batch)
    print(f"shape = {img1_batch.shape}, dtype = {img1_batch.dtype}")
    ####################################
    # Estimating Optical flow using RAFT
    # ----------------------------------
    # We will use our RAFT implementation from
    # :func:`~torchvision.models.optical_flow.raft_large`, which follows the same
    # architecture as the one described in the `original paper <https://arxiv.org/abs/2003.12039>`_.
    # We also provide the :func:`~torchvision.models.optical_flow.raft_small` model
    # builder, which is smaller and faster to run, sacrificing a bit of accuracy.
    # If you can, run this example on a GPU; it will be a lot faster.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = raft_large(weights=Raft_Large_Weights.DEFAULT, progress=False).to(device)
    model = model.eval()
    # Run inference without tracking gradients to save memory.
    with torch.no_grad():
        list_of_flows = model(img1_batch.to(device), img2_batch.to(device))
    print(f"type = {type(list_of_flows)}")
    print(f"length = {len(list_of_flows)} = number of iterations of the model")
    ####################################
    # The RAFT model outputs lists of predicted flows where each entry is a
    # (N, 2, H, W) batch of predicted flows that corresponds to a given "iteration"
    # in the model. For more details on the iterative nature of the model, please
    # refer to the `original paper <https://arxiv.org/abs/2003.12039>`_. Here, we
    # are only interested in the final predicted flows (they are the most accurate
    # ones), so we will just retrieve the last item in the list.
    #
    # As described above, a flow is a tensor with dimensions (2, H, W) (or (N, 2, H,
    # W) for batches of flows) where each entry corresponds to the horizontal and
    # vertical displacement of each pixel from the first image to the second image.
    # Note that the predicted flows are in "pixel" units; they are not normalized
    # w.r.t. the dimensions of the images.
    predicted_flows = list_of_flows[-1]
    print(f"dtype = {predicted_flows.dtype}")
    print(f"shape = {predicted_flows.shape} = (N, 2, H, W)")
    print(f"min = {predicted_flows.min()}, max = {predicted_flows.max()}")
    ####################################
    # Visualizing predicted flows
    # ---------------------------
    # Torchvision provides the :func:`~torchvision.utils.flow_to_image` utility to
    # convert a flow into an RGB image. It also supports batches of flows.
    # Each "direction" in the flow will be mapped to a given RGB color. In the
    # images below, pixels with similar colors are assumed by the model to be moving
    # in similar directions. The model is properly able to predict the movement of
    # the ball and the player. Note in particular the different predicted direction
    # of the ball in the first image (going to the left) and in the second image
    # (going up).
    from torchvision.utils import flow_to_image
    #flow_imgs = flow_to_image(predicted_flows)
    #print(flow_imgs)
    predicted_flow = list_of_flows[-1][0]
    print(f"predicted flow dtype = {predicted_flow.dtype}")
    print(f"predicted flow shape = {predicted_flow.shape}")
    flow_img = flow_to_image(predicted_flow).to("cpu")
    # output_folder = "/tmp/"  # Update this to the folder of your choice
    write_jpeg(flow_img, "predicted_flow.jpg")
#input_image = flow_to_image(frames[100]).to("cpu") | |
#write_jpeg(input_image, f"frame_input.jpg") | |
flo_file = write_flo(predicted_flow, "flofile.flo") | |
#write_jpeg(frames[100], f"input_image.jpg") | |
#res = warp_image(img1_batch, predicted_flow) | |
# define a transform to convert a tensor to PIL image | |
transform = T.ToPILImage() | |
# convert the tensor to PIL image using above transform | |
img = transform(frames[100]) | |
img = img.resize((960, 520)) | |
# display the PIL image | |
#img.show() | |
img.save('frame_input.jpg') | |
#res = get_warp_res('frame_input.jpg', "predicted_flow.jpg", 'warped.png') | |
#print(res) | |
return "done", "predicted_flow.jpg", ["flofile.flo"], 'frame_input.jpg' | |
####################################
# Bonus: Creating GIFs of predicted flows
# ---------------------------------------
# In the example above we have only shown the predicted flows of 2 pairs of
# frames. A fun way to apply the Optical Flow models is to run the model on an
# entire video, and create a new video from all the predicted flows. Below is a
# snippet that can get you started with this. We comment out the code, because
# this example is being rendered on a machine without a GPU, and it would take
# too long to run it.
# from torchvision.io import write_jpeg
# for i, (img1, img2) in enumerate(zip(frames, frames[1:])):
#     # Note: it would be faster to predict batches of flows instead of individual flows
#     img1, img2 = preprocess(img1[None], img2[None])  # add a batch dimension
#     list_of_flows = model(img1.to(device), img2.to(device))
#     predicted_flow = list_of_flows[-1][0]
#     flow_img = flow_to_image(predicted_flow).to("cpu")
#     output_folder = "/tmp/"  # Update this to the folder of your choice
#     write_jpeg(flow_img, output_folder + f"predicted_flow_{i}.jpg")
####################################
# Once the .jpg flow images are saved, you can convert them into a video or a
# GIF using ffmpeg with e.g.:
#
# ffmpeg -f image2 -framerate 30 -i predicted_flow_%d.jpg -loop -1 flow.gif
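####################################
# Or, from Python via the standard library (a sketch of our own; it assumes
# the ffmpeg binary is available on PATH). Commented out like the snippet
# above, since this example is rendered without the generated frames present.
# import subprocess
# subprocess.run(
#     ["ffmpeg", "-f", "image2", "-framerate", "30",
#      "-i", "predicted_flow_%d.jpg", "-loop", "-1", "flow.gif"],
#     check=True,
# )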
gr.Interface(fn=infer, inputs=[], outputs=[gr.Textbox(), gr.Image(), gr.Files(), gr.Image()]).launch()