tcm03 committed
Commit 484e90b · 1 Parent(s): 51273ab

collate_fn for dataloader and extract vision features

preprocessing/constants.py CHANGED
@@ -1 +1 @@
- NUM_PROCESSED_FRAMES = 600
+ CHUNK_SIZE = 64  # adapted from LongVU: number of frames in each chunk
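
For reference, a minimal sketch (not part of the commit) of what the new constant controls: a video's frame indices are sliced into fixed-size chunks before being handed to the image processors. The 600-frame count below is only an example, echoing the old NUM_PROCESSED_FRAMES value.

CHUNK_SIZE = 64
frame_indices = list(range(600))            # e.g. a 600-frame video
chunks = [frame_indices[i:i + CHUNK_SIZE]
          for i in range(0, len(frame_indices), CHUNK_SIZE)]
print(len(chunks), len(chunks[0]), len(chunks[-1]))   # 10 64 24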
preprocessing/entube_dataset.py CHANGED
@@ -1,8 +1,3 @@
- import sys
- from pathlib import Path
- sys.path.append(str(Path.cwd()))
- from annotation.utils import get_optimal_workers
-
  import torch
  from torch.utils.data import Dataset
  from typing import List
@@ -16,36 +11,55 @@ class EnTubeDataset(Dataset):
      def __init__(
          self,
          folder_paths: List[str],
-         image_processor: List[BaseImageProcessor],
+         image_processors: List[BaseImageProcessor],
          device: str
      ) -> None:
-         self.videos = []
-         self.image_sizes = []
+         self.file_paths = []
+         self.image_processors = image_processors
          self.device = device

-         with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
-             futures = []
-             for folder_path in folder_paths:
-                 print(f'@tcm: In EnTubeDataset.__init__(): folder_path={folder_path}')
-                 file_names = os.listdir(folder_path)
-                 for file_name in file_names:
-                     file_path = os.path.join(folder_path, file_name)
-                     print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
-                     future = executor.submit(process_video_frames, file_path, image_processor, device)
-                     futures.append(future)
-
-             for future in as_completed(futures):
-                 result = future.result()
-                 if result is not None:
-                     video, image_size = result
-                     self.videos.append(video)
-                     self.image_sizes.append(image_size)
+         for folder_path in folder_paths:
+             file_names = os.listdir(folder_path)
+             for file_name in file_names:
+                 file_path = os.path.join(folder_path, file_name)
+                 self.file_paths.append(file_path)
+
+         # with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
+         #     futures = []
+         #     for folder_path in folder_paths:
+         #         print(f'@tcm: In EnTubeDataset.__init__(): folder_path={folder_path}')
+         #         file_names = os.listdir(folder_path)
+         #         for file_name in file_names:
+         #             file_path = os.path.join(folder_path, file_name)
+         #             print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
+         #             future = executor.submit(process_video_frames, file_path, image_processor, device)
+         #             futures.append(future)
+
+         #     for future in as_completed(futures):
+         #         result = future.result()
+         #         if result is not None:
+         #             video, image_size = result
+         #             self.videos.append(video)
+         #             self.image_sizes.append(image_size)

      def __len__(self):
-         return len(self.image_sizes)
+         return len(self.file_paths)

      def __getitem__(self, idx):
-         print(f'@tcm: In EnTubeDataset.__getitem__(): idx={idx}, video shape: {self.videos[idx][0].shape}')
-         return self.videos[idx], self.image_sizes[idx]
+         print(f'@tcm: In EnTubeDataset.__getitem__(): idx={idx}')
+         video, image_size = process_video_frames(self.file_paths[idx], self.image_processors, self.device)
+         return video, image_size
+
+ def collate_fn(batch):
+     """
+     batch: list of samples from EnTubeDataset.__getitem__()
+     """
+     assert isinstance(batch, list)
+     assert isinstance(batch[0], tuple)
+
+     image_sizes = batch[0][1]
+     batch_videos = [video for video, _ in batch]
+     batch_videos = [list(videos) for videos in zip(*batch_videos)]
+     return batch_videos, image_sizes
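
A toy demonstration (not part of the commit) of what collate_fn does: each sample is a (video, image_size) pair where video holds one tensor per image processor, and the zip(*...) transposes the batch into one list per processor. Note that only the first sample's image_sizes is kept for the whole batch. The two-processor setup and tensor shapes below are assumptions for illustration.

import torch

def collate_fn(batch):
    assert isinstance(batch, list)
    assert isinstance(batch[0], tuple)
    image_sizes = batch[0][1]                 # image_sizes of the first sample only
    batch_videos = [video for video, _ in batch]
    # transpose: per-sample lists of per-processor tensors ->
    # per-processor lists of per-sample tensors
    batch_videos = [list(videos) for videos in zip(*batch_videos)]
    return batch_videos, image_sizes

batch = [
    ([torch.zeros(1, 64, 3, 384, 384),        # output of processor 0
      torch.zeros(1, 64, 3, 378, 378)],       # output of processor 1
     [(360, 640)])
    for _ in range(3)                         # batch of three videos
]
videos, image_sizes = collate_fn(batch)
print(len(videos), len(videos[0]))            # 2 3 -> per processor, per video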
preprocessing/main.py CHANGED
@@ -1,3 +1,8 @@
+ import sys
+ from pathlib import Path
+ sys.path.append(str(Path.cwd()))
+ from annotation.utils import get_optimal_workers
+
  import os
  import argparse
  from typing import List, Dict
@@ -8,7 +13,7 @@ from safetensors.torch import save_file
  from collections import defaultdict
  import logging
  from multiprocessing import cpu_count
- from entube_dataset import EnTubeDataset
+ from entube_dataset import EnTubeDataset, collate_fn
  from torch.utils.data import Dataset, DataLoader
  from transformers import BaseImageProcessor

@@ -74,13 +79,22 @@ if __name__ == "__main__":

      device = 'cuda' if torch.cuda.is_available() else 'cpu'
      entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
-     dataloader = DataLoader(entube_dataset, batch_size=1)
+     dataloader = DataLoader(
+         entube_dataset,
+         batch_size=4,
+         collate_fn=collate_fn,
+         # num_workers=get_optimal_workers()
+         num_workers=1
+     )

      for batch_idx, (videos, image_sizes) in enumerate(dataloader):
          print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
-         print(type(videos))
-         print(type(image_sizes))
+         assert isinstance(videos, list), "List of videos features for each processor (vision encoder)"
+         assert isinstance(videos[0], list) or isinstance(videos[0], torch.Tensor), "List of videos in the batch"
+         image_aux_features_list = processor.prepare_mm_features(videos, image_sizes)
+         for i, image_aux_features in enumerate(image_aux_features_list):
+             print(f"@tcm: In main(): image_aux_features[{i}].shape={image_aux_features.shape}")
          break

-     save_file(dict(data_tensor), args.output_file)
+     # save_file(dict(data_tensor), args.output_file)
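
A hypothetical sketch of how the extracted features could eventually be persisted with safetensors' save_file, in the spirit of the commented-out call above; the key naming scheme and tensor shapes are assumptions, not part of this commit.

import torch
from safetensors.torch import save_file

data_tensor = {
    # one entry per (video, vision encoder) pair; shapes are illustrative only
    "video0_aux0": torch.zeros(64, 576, 1024),
    "video0_aux1": torch.zeros(64, 729, 1152),
}
# safetensors requires string keys and contiguous CPU tensors
data_tensor = {k: v.detach().cpu().contiguous() for k, v in data_tensor.items()}
save_file(data_tensor, "entube_features.safetensors")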
preprocessing/mm_datautils.py CHANGED
@@ -22,7 +22,7 @@ def expand2square(pil_img, background_color):

  def process_images(
      images: torch.Tensor,
-     image_processor: BaseImageProcessor,
+     image_processor: List[BaseImageProcessor],
      device: str
  ) -> Union[torch.Tensor, List[torch.Tensor]]:
      # images.shape: (4294, 360, 640, 3)
@@ -80,7 +80,7 @@ def process_images(

  def process_video_frames(
      video_path: str,
-     image_processor: List[BaseImageProcessor],
+     image_processors: List[BaseImageProcessor],
      device: str
  ) -> Tuple[List[torch.Tensor], List[Tuple[int, int]]]:
      vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
@@ -89,17 +89,17 @@ def process_video_frames(
      print(f'@tcm: In process_video_frames(): # frames = {len(frame_indices)}')
      image_sizes = [vr[0].shape[:2]]

-     video = [[] for _ in range(len(image_processor))]
-     for i in range(0, len(frame_indices), NUM_PROCESSED_FRAMES):
-         print(f'@tcm: In process_video_frames(): segment {i/NUM_PROCESSED_FRAMES}')
-         sub_frame_indices = frame_indices[i:min(i+NUM_PROCESSED_FRAMES, len(frame_indices))]
+     video = [[] for _ in range(len(image_processors))]
+     for i in range(0, len(frame_indices), CHUNK_SIZE):
+         print(f'@tcm: In process_video_frames(): segment {int(i/CHUNK_SIZE)}')
+         sub_frame_indices = frame_indices[i:min(i+CHUNK_SIZE, len(frame_indices))]
          sub_videos = []
          process_time = time.time()
          for frame_index in sub_frame_indices:
              img = vr[frame_index].asnumpy()
              sub_videos.append(img)
          sub_videos = np.stack(sub_videos)  # shape: (num_frames, height, width, channels)
-         sub_videos = process_images(sub_videos, image_processor, device)
+         sub_videos = process_images(sub_videos, image_processors, device)
          print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
          assert len(sub_videos) == len(video)
          for j, sub_video in enumerate(sub_videos):
@@ -120,7 +120,7 @@ def process_video_frames(
      # print(f'@tcm: In process_video_frames(): vectorize_time={time.time()-vectorize_time:4f}')
      # image_sizes = [video[0].shape[:2]]
      # process_time = time.time()
-     # video = process_images(video, image_processor, device)
+     # video = process_images(video, image_processors, device)
      # print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
      video = [item.unsqueeze(0) for item in video]
      return video, image_sizes
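
For readers skimming the diff, a self-contained sketch of the chunked, per-processor accumulation pattern that process_video_frames now follows. The stand-in fake_process_images and the torch.cat merge at the end are assumptions (the real code uses the transformers image processors, and the merge step falls outside the visible hunk).

import numpy as np
import torch

CHUNK_SIZE = 64

def fake_process_images(frames, processors, device):
    # mimic process_images: one output per processor
    return [torch.from_numpy(frames).float() for _ in processors]

def chunked_features(frames, processors, device="cpu"):
    video = [[] for _ in range(len(processors))]        # one bucket per processor
    for i in range(0, len(frames), CHUNK_SIZE):
        sub = frames[i:i + CHUNK_SIZE]                   # one chunk of frames
        outputs = fake_process_images(sub, processors, device)
        for j, out in enumerate(outputs):
            video[j].append(out)
    # merge chunks per processor and add a leading batch dimension
    return [torch.cat(parts, dim=0).unsqueeze(0) for parts in video]

frames = np.zeros((150, 360, 640, 3), dtype=np.uint8)   # 150 dummy frames
feats = chunked_features(frames, processors=[object(), object()])
print([tuple(f.shape) for f in feats])  # [(1, 150, 360, 640, 3), (1, 150, 360, 640, 3)]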