fffiloni committed on
Commit
bbe8153
1 Parent(s): 4aad5b4

Create app.py

Files changed (1)
  app.py +193 -0
app.py ADDED
@@ -0,0 +1,193 @@
"""
=====================================================
Optical Flow: Predicting movement with the RAFT model
=====================================================

Optical flow is the task of predicting movement between two images, usually two
consecutive frames of a video. Optical flow models take two images as input, and
predict a flow: the flow indicates the displacement of every single pixel in the
first image, and maps it to its corresponding pixel in the second image. Flows
are (2, H, W)-dimensional tensors, where the first axis corresponds to the
predicted horizontal and vertical displacements.

The following example illustrates how torchvision can be used to predict flows
using our implementation of the RAFT model. We will also see how to convert the
predicted flows to RGB images for visualization.
"""

import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F


plt.rcParams["savefig.bbox"] = "tight"
# sphinx_gallery_thumbnail_number = 2
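
###################################
# As a quick illustration of the flow convention described above (a toy
# example of our own, not part of the original tutorial): ``flow[0, y, x]``
# is the horizontal and ``flow[1, y, x]`` the vertical displacement of pixel
# ``(y, x)``, so that pixel is predicted to land near
# ``(x + flow[0, y, x], y + flow[1, y, x])`` in the second image.

toy_flow = torch.zeros(2, 4, 4)  # a 4x4 "image" where nothing moves...
toy_flow[0] += 3.0  # ...except every pixel shifts 3 pixels to the right
toy_flow[1] += 1.0  # ...and 1 pixel down
y, x = 2, 1
new_x = x + toy_flow[0, y, x].item()
new_y = y + toy_flow[1, y, x].item()
print(f"pixel ({y}, {x}) maps to ({new_x:.0f}, {new_y:.0f})")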


def plot(imgs, **imshow_kwargs):
    if not isinstance(imgs[0], list):
        # Make a 2d grid even if there's just 1 row
        imgs = [imgs]

    num_rows = len(imgs)
    num_cols = len(imgs[0])
    _, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
    for row_idx, row in enumerate(imgs):
        for col_idx, img in enumerate(row):
            ax = axs[row_idx, col_idx]
            img = F.to_pil_image(img.to("cpu"))
            ax.imshow(np.asarray(img), **imshow_kwargs)
            ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])

    plt.tight_layout()

###################################
# Reading Videos Using Torchvision
# --------------------------------
# We will first read a video using :func:`~torchvision.io.read_video`.
# Alternatively one can use the new :class:`~torchvision.io.VideoReader` API
# (if torchvision is built from source); a commented sketch of it follows the
# download below.
# The video we will use here is free to use from `pexels.com
# <https://www.pexels.com/video/a-man-playing-a-game-of-basketball-5192157/>`_,
# credits go to `Pavel Danilyuk <https://www.pexels.com/@pavel-danilyuk>`_.


import tempfile
from pathlib import Path
from urllib.request import urlretrieve


video_url = "https://download.pytorch.org/tutorial/pexelscom_pavel_danilyuk_basketball_hd.mp4"
video_path = Path(tempfile.mkdtemp()) / "basketball.mp4"
_ = urlretrieve(video_url, video_path)
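
#########################
# A commented-out sketch of the :class:`~torchvision.io.VideoReader`
# alternative mentioned above (our own addition; it is not run here because
# it requires torchvision built with video support):
#
# from torchvision.io import VideoReader
# reader = VideoReader(str(video_path), "video")  # iterate over the video stream
# frames = torch.stack([frame["data"] for frame in reader])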

#########################
# :func:`~torchvision.io.read_video` returns the video frames, audio frames and
# the metadata associated with the video. In our case, we only need the video
# frames.
#
# Here we will just make 2 predictions between 2 pre-selected pairs of frames,
# namely frames (100, 101) and (150, 151). Each of these pairs corresponds to a
# single model input.

from torchvision.io import read_video
frames, _, _ = read_video(str(video_path), output_format="TCHW")
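
#########################
# A small check of our own: ``frames`` is a (T, C, H, W) uint8 tensor with one
# entry per video frame.
print(f"frames: shape = {frames.shape}, dtype = {frames.dtype}")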

img1_batch = torch.stack([frames[100], frames[150]])
img2_batch = torch.stack([frames[101], frames[151]])

plot(img1_batch)

#########################
# The RAFT model accepts RGB images. We first get the frames from
# :func:`~torchvision.io.read_video` and resize them to ensure their
# dimensions are divisible by 8. Then we use the transforms bundled into the
# weights in order to preprocess the input and rescale its values to the
# required ``[-1, 1]`` interval.

from torchvision.models.optical_flow import Raft_Large_Weights

weights = Raft_Large_Weights.DEFAULT
transforms = weights.transforms()


def preprocess(img1_batch, img2_batch):
    img1_batch = F.resize(img1_batch, size=[520, 960])
    img2_batch = F.resize(img2_batch, size=[520, 960])
    return transforms(img1_batch, img2_batch)


img1_batch, img2_batch = preprocess(img1_batch, img2_batch)

print(f"shape = {img1_batch.shape}, dtype = {img1_batch.dtype}")
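
#########################
# A quick sanity check of our own: RAFT downsamples internally by a factor of
# 8, which is why both spatial dimensions must be divisible by 8.
assert img1_batch.shape[-2] % 8 == 0 and img1_batch.shape[-1] % 8 == 0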


####################################
# Estimating Optical flow using RAFT
# ----------------------------------
# We will use our RAFT implementation from
# :func:`~torchvision.models.optical_flow.raft_large`, which follows the same
# architecture as the one described in the `original paper <https://arxiv.org/abs/2003.12039>`_.
# We also provide the :func:`~torchvision.models.optical_flow.raft_small` model
# builder, which is smaller and faster to run, sacrificing a bit of accuracy.

from torchvision.models.optical_flow import raft_large

# If you can, run this example on a GPU, it will be a lot faster.
device = "cuda" if torch.cuda.is_available() else "cpu"

model = raft_large(weights=Raft_Large_Weights.DEFAULT, progress=False).to(device)
model = model.eval()

list_of_flows = model(img1_batch.to(device), img2_batch.to(device))
print(f"type = {type(list_of_flows)}")
print(f"length = {len(list_of_flows)} = number of iterations of the model")
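
####################################
# The smaller :func:`~torchvision.models.optical_flow.raft_small` variant
# mentioned above can be swapped in the same way (a commented-out sketch of
# our own; we keep using the large model in this example):
#
# from torchvision.models.optical_flow import raft_small, Raft_Small_Weights
# small_model = raft_small(weights=Raft_Small_Weights.DEFAULT, progress=False).to(device)
# small_model = small_model.eval()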

####################################
# The RAFT model outputs lists of predicted flows where each entry is a
# (N, 2, H, W) batch of predicted flows that corresponds to a given "iteration"
# in the model. For more details on the iterative nature of the model, please
# refer to the `original paper <https://arxiv.org/abs/2003.12039>`_. Here, we
# are only interested in the final predicted flows (they are the most accurate
# ones), so we will just retrieve the last item in the list.
#
# As described above, a flow is a tensor with dimensions (2, H, W) (or (N, 2, H,
# W) for batches of flows) where each entry corresponds to the horizontal and
# vertical displacement of each pixel from the first image to the second image.
# Note that the predicted flows are in "pixel" units; they are not normalized
# w.r.t. the dimensions of the images.
predicted_flows = list_of_flows[-1]
print(f"dtype = {predicted_flows.dtype}")
print(f"shape = {predicted_flows.shape} = (N, 2, H, W)")
print(f"min = {predicted_flows.min()}, max = {predicted_flows.max()}")
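
####################################
# Since the predicted flows are in pixel units, dividing by the image
# dimensions yields displacements expressed as fractions of the image size.
# A minimal sketch of our own (not part of the original example):

N, _, H, W = predicted_flows.shape
scale = torch.tensor([W, H], device=predicted_flows.device).view(1, 2, 1, 1)
relative_flows = predicted_flows / scale  # unitless fractions of width/height
print(f"relative min = {relative_flows.min():.4f}, max = {relative_flows.max():.4f}")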


####################################
# Visualizing predicted flows
# ---------------------------
# Torchvision provides the :func:`~torchvision.utils.flow_to_image` utility to
# convert a flow into an RGB image. It also supports batches of flows.
# Each "direction" in the flow will be mapped to a given RGB color. In the
# images below, pixels with similar colors are assumed by the model to be moving
# in similar directions. The model is properly able to predict the movement of
# the ball and the player. Note in particular the different predicted direction
# of the ball in the first image (going to the left) and in the second image
# (going up).

from torchvision.utils import flow_to_image

flow_imgs = flow_to_image(predicted_flows)

# The images have been mapped into [-1, 1] but for plotting we want them in [0, 1]
img1_batch = [(img1 + 1) / 2 for img1 in img1_batch]

grid = [[img1, flow_img] for (img1, flow_img) in zip(img1_batch, flow_imgs)]
plot(grid)

####################################
# Bonus: Creating GIFs of predicted flows
# ---------------------------------------
# In the example above we have only shown the predicted flows of 2 pairs of
# frames. A fun way to apply the Optical Flow models is to run the model on an
# entire video, and create a new video from all the predicted flows. Below is a
# snippet that can get you started with this. We comment out the code, because
# this example is being rendered on a machine without a GPU, and it would take
# too long to run it.

# from torchvision.io import write_jpeg
# for i, (img1, img2) in enumerate(zip(frames, frames[1:])):
#     # Note: it would be faster to predict batches of flows instead of individual flows
#     img1, img2 = preprocess(img1[None], img2[None])  # add a batch dim: the model expects (N, 3, H, W)
#
#     list_of_flows = model(img1.to(device), img2.to(device))
#     predicted_flow = list_of_flows[-1][0]
#     flow_img = flow_to_image(predicted_flow).to("cpu")
#     output_folder = "/tmp/"  # Update this to the folder of your choice
#     write_jpeg(flow_img, output_folder + f"predicted_flow_{i}.jpg")

####################################
# Once the .jpg flow images are saved, you can convert them into a video or a
# GIF using ffmpeg with e.g.:
#
#     ffmpeg -f image2 -framerate 30 -i predicted_flow_%d.jpg -loop -1 flow.gif