tcm03 committed
Commit 51273ab · 1 Parent(s): fc48d96

Segment long videos and multithreading in EnTubeDataset
- annotation/__init__.py +0 -0
- annotation/__pycache__/__init__.cpython-310.pyc +0 -0
- annotation/__pycache__/utils.cpython-310.pyc +0 -0
- annotation/annotate.py +5 -0
- annotation/train_test.py +5 -0
- annotation/utils.py +6 -0
- preprocessing/constants.py +1 -0
- preprocessing/entube_dataset.py +27 -11
- preprocessing/mm_datautils.py +39 -15
annotation/__init__.py
ADDED
File without changes
annotation/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (137 Bytes).
annotation/__pycache__/utils.cpython-310.pyc
CHANGED
Binary files a/annotation/__pycache__/utils.cpython-310.pyc and b/annotation/__pycache__/utils.cpython-310.pyc differ
annotation/annotate.py
CHANGED
@@ -1,3 +1,8 @@
+# In case this module is invoked from other modules, e.g., preprocessing
+from pathlib import Path
+import sys
+sys.path.append(str(Path.cwd() / "annotation"))
+
 import json
 import os
 from typing import List, Union, Dict, Any, Callable, Optional
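The same `sys.path` bootstrap is added to train_test.py and utils.py below. A minimal sketch of why it is needed, assuming a driver script run from the repository root (the script name is illustrative; `get_optimal_workers` is the helper referenced by entube_dataset.py further down):

# Hypothetical driver run from the repository root, e.g. `python preprocessing/run.py`.
# entube_dataset.py below does the same thing: it puts the repo root on sys.path so that
# `annotation` is importable as a package; the bootstrap added above then lets the
# annotation modules' own flat imports (e.g. `from datatypes import Metadata` in
# annotation/utils.py) resolve as well.
import sys
from pathlib import Path

sys.path.append(str(Path.cwd()))  # repo root, so `import annotation.utils` works

from annotation.utils import get_optimal_workers

print(get_optimal_workers())      # e.g. the number of worker threads to use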
annotation/train_test.py
CHANGED
@@ -1,3 +1,8 @@
+# In case this module is invoked from other modules, e.g., preprocessing
+from pathlib import Path
+import sys
+sys.path.append(str(Path.cwd() / "annotation"))
+
 import json
 import os
 import argparse
annotation/utils.py
CHANGED
@@ -1,3 +1,8 @@
+# In case this module is invoked from other modules, e.g., preprocessing
+from pathlib import Path
+import sys
+sys.path.append(str(Path.cwd() / "annotation"))
+
 import decord as de
 from datatypes import Metadata
 from typing import List
@@ -6,6 +11,7 @@ from multiprocessing import cpu_count
 import traceback
 from pathlib import Path

+
 def convert_to_linux_path(path: str) -> str:
     return Path(path).as_posix()

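entube_dataset.py below calls `get_optimal_workers()` from this module to size its thread pool. Its definition lies outside the changed hunks, but given the `from multiprocessing import cpu_count` context line above, a plausible sketch (an assumption, not the file's actual code) is:

from multiprocessing import cpu_count

def get_optimal_workers(reserve: int = 1) -> int:
    # Assumed helper: use most of the available CPU cores, keeping one free.
    return max(1, cpu_count() - reserve)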
preprocessing/constants.py
ADDED
@@ -0,0 +1 @@
+NUM_PROCESSED_FRAMES = 600
preprocessing/entube_dataset.py
CHANGED
@@ -1,9 +1,15 @@
+import sys
+from pathlib import Path
+sys.path.append(str(Path.cwd()))
+from annotation.utils import get_optimal_workers
+
 import torch
 from torch.utils.data import Dataset
 from typing import List
 import os
 from mm_datautils import process_video_frames
 from transformers import BaseImageProcessor
+from concurrent.futures import ThreadPoolExecutor, as_completed

 class EnTubeDataset(Dataset):

@@ -16,20 +22,30 @@ class EnTubeDataset(Dataset):
         self.videos = []
         self.image_sizes = []
         self.device = device
-
-
-
-        for
-
-
-
-
-
+
+        with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
+            futures = []
+            for folder_path in folder_paths:
+                print(f'@tcm: In EnTubeDataset.__init__(): folder_path={folder_path}')
+                file_names = os.listdir(folder_path)
+                for file_name in file_names:
+                    file_path = os.path.join(folder_path, file_name)
+                    print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
+                    future = executor.submit(process_video_frames, file_path, image_processor, device)
+                    futures.append(future)
+
+            for future in as_completed(futures):
+                result = future.result()
+                if result is not None:
+                    video, image_size = result
+                    self.videos.append(video)
+                    self.image_sizes.append(image_size)
+
+

     def __len__(self):
         return len(self.image_sizes)

     def __getitem__(self, idx):
-        print(f'@tcm: In EnTubeDataset.__getitem__(): idx={idx}')
-        print(f'@tcm: In EnTubeDataset.__getitem__(): video shape: {self.videos[idx][0].shape}')
+        print(f'@tcm: In EnTubeDataset.__getitem__(): idx={idx}, video shape: {self.videos[idx][0].shape}')
         return self.videos[idx], self.image_sizes[idx]
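A hypothetical usage sketch of the multithreaded dataset above: the constructor argument names (folder_paths, image_processor, device) appear in the diff, while the folder layout, processor choice, and argument order are assumptions.

import torch
from transformers import AutoImageProcessor

from entube_dataset import EnTubeDataset

# Assumed: one or more HF image processors (the code accepts a list) and
# label folders containing the raw video files.
image_processor = [AutoImageProcessor.from_pretrained("google/siglip-so400m-patch14-384")]
folder_paths = ["EnTube/0", "EnTube/1", "EnTube/2"]

dataset = EnTubeDataset(folder_paths, image_processor,
                        device="cuda" if torch.cuda.is_available() else "cpu")

video, image_size = dataset[0]
# `video` is a list with one tensor per processor, shaped roughly (1, num_sampled_frames, C, H, W)
print(len(dataset), image_size, video[0].shape)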
preprocessing/mm_datautils.py
CHANGED
@@ -5,6 +5,7 @@ from decord import cpu, VideoReader
 from transformers import BaseImageProcessor
 from typing import List, Union, Tuple
 import time
+from constants import *

 def expand2square(pil_img, background_color):
     width, height = pil_img.size
@@ -25,7 +26,7 @@ def process_images(
     device: str
 ) -> Union[torch.Tensor, List[torch.Tensor]]:
     # images.shape: (4294, 360, 640, 3)
-    print(f'@tcm: In process_images(): images.shape={images.shape}')
+    # print(f'@tcm: In process_images(): images.shape={images.shape}')
     if isinstance(image_processor, list):
         processor_aux_list = image_processor
         new_images_aux_list = []
@@ -51,7 +52,7 @@ def process_images(
             # image_aux.shape: torch.Size([3, 384, 384])
             image_aux_list.append(image_aux)
         new_images_aux_list.append(image_aux_list) # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]
-
+
     new_images_aux_list = [
         list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
     ] # torch.Tensor(C, H, W) new_images_aux_list[num_processor][num_frames]
@@ -82,21 +83,44 @@ def process_video_frames(
     image_processor: List[BaseImageProcessor],
     device: str
 ) -> Tuple[List[torch.Tensor], List[Tuple[int, int]]]:
-    init_time = time.time()
     vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
-    print(f'@tcm: In process_video_frames(): init_time={time.time()-init_time:4f}')
     fps = float(vr.get_avg_fps())
     frame_indices = np.array([i for i in range(0, len(vr), round(fps),)])
-
-
-
-
-
-
-
-
-
-
-
+    print(f'@tcm: In process_video_frames(): # frames = {len(frame_indices)}')
+    image_sizes = [vr[0].shape[:2]]
+
+    video = [[] for _ in range(len(image_processor))]
+    for i in range(0, len(frame_indices), NUM_PROCESSED_FRAMES):
+        print(f'@tcm: In process_video_frames(): segment {i/NUM_PROCESSED_FRAMES}')
+        sub_frame_indices = frame_indices[i:min(i+NUM_PROCESSED_FRAMES, len(frame_indices))]
+        sub_videos = []
+        process_time = time.time()
+        for frame_index in sub_frame_indices:
+            img = vr[frame_index].asnumpy()
+            sub_videos.append(img)
+        sub_videos = np.stack(sub_videos) # shape: (num_frames, height, width, channels)
+        sub_videos = process_images(sub_videos, image_processor, device)
+        print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
+        assert len(sub_videos) == len(video)
+        for j, sub_video in enumerate(sub_videos):
+            video[j].append(sub_video)
+
+        del sub_videos
+        if 'cuda' in device:
+            torch.cuda.empty_cache()
+
+    for i in range(len(video)):
+        video[i] = torch.cat(video[i], dim=0)
+
+    # vectorize_time = time.time()
+    # for frame_index in frame_indices:
+    #     img = vr[frame_index].asnumpy()
+    #     video.append(img)
+    # video = np.stack(video) # shape: (num_frames, height, width, channels)
+    # print(f'@tcm: In process_video_frames(): vectorize_time={time.time()-vectorize_time:4f}')
+    # image_sizes = [video[0].shape[:2]]
+    # process_time = time.time()
+    # video = process_images(video, image_processor, device)
+    # print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
     video = [item.unsqueeze(0) for item in video]
     return video, image_sizes
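For reference, the segmentation scheme introduced in process_video_frames, shown in isolation with made-up numbers: frames are sampled at roughly one per second and then processed in chunks of NUM_PROCESSED_FRAMES, so a long video never has all of its decoded frames in memory at once.

import numpy as np

NUM_PROCESSED_FRAMES = 600          # value from preprocessing/constants.py
num_frames, fps = 108_000, 30.0     # assumed: a one-hour video at 30 fps

# One sampled frame per second, mirroring `range(0, len(vr), round(fps))` above.
frame_indices = np.arange(0, num_frames, round(fps))

for i in range(0, len(frame_indices), NUM_PROCESSED_FRAMES):
    segment = frame_indices[i:i + NUM_PROCESSED_FRAMES]
    print(f"segment {i // NUM_PROCESSED_FRAMES}: {len(segment)} frames")
# -> 6 segments of 600 sampled frames each for this example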