tcm03 committed · fc48d96
1 Parent(s): cc6eb9f
Analyze running time of process_images() and process_video_frames()
preprocessing/mm_datautils.py CHANGED
@@ -4,6 +4,7 @@ import torch
 from decord import cpu, VideoReader
 from transformers import BaseImageProcessor
 from typing import List, Union, Tuple
+import time
 
 def expand2square(pil_img, background_color):
     width, height = pil_img.size
@@ -30,7 +31,7 @@ def process_images(
     new_images_aux_list = []
     for i, image in enumerate(images):
         # image.shape: (height, width, channels)
-        print(f'@tcm: In process_images(): frame {i}')
+        # print(f'@tcm: In process_images(): frame {i}')
         if isinstance(image, np.ndarray):
             image = Image.fromarray(image)
         image_aux_list = []
@@ -44,11 +45,9 @@ def process_images(
             image_aux = expand2square(
                 image_aux, tuple(int(x * 255) for x in processor_aux.image_mean)
             ).resize((target_resolution, target_resolution))
-            print(f'@tcm: In process_images(): begin processor_aux.preprocess()')
             image_aux = processor_aux.preprocess(image_aux, return_tensors="pt")[
                 "pixel_values"
             ][0]
-            print(f'@tcm: In process_images(): end processor_aux.preprocess()')
            # image_aux.shape: torch.Size([3, 384, 384])
             image_aux_list.append(image_aux)
         new_images_aux_list.append(image_aux_list)  # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]
@@ -83,17 +82,21 @@ def process_video_frames(
     image_processor: List[BaseImageProcessor],
     device: str
 ) -> Tuple[List[torch.Tensor], List[Tuple[int, int]]]:
-
+    init_time = time.time()
     vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
-    print(f'@tcm: In process_video_frames():
+    print(f'@tcm: In process_video_frames(): init_time={time.time()-init_time:4f}')
     fps = float(vr.get_avg_fps())
     frame_indices = np.array([i for i in range(0, len(vr), round(fps),)])
     video = []
+    vectorize_time = time.time()
     for frame_index in frame_indices:
         img = vr[frame_index].asnumpy()
         video.append(img)
     video = np.stack(video)  # shape: (num_frames, height, width, channels)
+    print(f'@tcm: In process_video_frames(): vectorize_time={time.time()-vectorize_time:4f}')
     image_sizes = [video[0].shape[:2]]
+    process_time = time.time()
     video = process_images(video, image_processor, device)
+    print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
     video = [item.unsqueeze(0) for item in video]
     return video, image_sizes
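
The instrumentation added here follows one pattern in all three places: store time.time() before a stage, then print the elapsed delta after it. A minimal sketch of that same pattern factored into a context manager — the stage_timer name and label format are assumptions for illustration, not part of this commit:

import time
from contextlib import contextmanager

@contextmanager
def stage_timer(label: str):
    # Hypothetical helper: measure wall-clock time around a block
    # and report it on exit, like the commit's inline prints.
    start = time.time()
    try:
        yield
    finally:
        # The commit formats deltas with `:4f` (a minimum field width
        # of 4 characters); `:.4f`, used here, prints four decimal
        # places, which is likely the intended output.
        print(f'@tcm: {label}={time.time() - start:.4f}')

With such a helper, each timed stage in process_video_frames() would collapse to a with-block, e.g. `with stage_timer('vectorize_time'):` wrapped around the frame-decoding loop.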
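For intuition about frame_indices: stepping from 0 to len(vr) by round(fps) keeps roughly one frame per second of video, so the decode loop measured by vectorize_time scales with clip duration rather than with total frame count. A self-contained check with made-up numbers, assuming a 10-second clip at 30 fps (decord is not needed for this):

import numpy as np

num_frames, fps = 300, 30.0  # hypothetical: 10 s of video at 30 fps
frame_indices = np.array([i for i in range(0, num_frames, round(fps))])
print(frame_indices)  # [  0  30  60  90 120 150 180 210 240 270] -> ~1 frame/s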