tcm03 committed
Commit adb427c · 2 Parent(s): f16aea9 484e90b

Merge new repo on WSL Ubuntu and remote HF

Every hunk in this merge rewrites its file wholesale, and the removed and added sides render identically, so the change appears to be whitespace/line-ending normalization between the WSL Ubuntu checkout and the remote Hugging Face repo; the listings below therefore show each file's post-merge contents once.
.gitattributes CHANGED
@@ -1,59 +1,59 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mds filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
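These are the stock Hugging Face Git LFS rules plus the repo's media additions: every matching binary (archives, checkpoints, tensors, and the audio/image/video payloads) is stored through Git LFS instead of plain Git history. A new pattern would presumably follow the same form, e.g. `*.avi filter=lfs diff=lfs merge=lfs -text` (that line is an illustration, not part of the file).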
.gitignore CHANGED
@@ -1,4 +1,4 @@
preprocessing/__pycache__/
preprocessing/vision_encoders/__pycache__/
annotations/__pycache__/
.vscode/
2.4.0 ADDED
@@ -0,0 +1,5 @@
Collecting pygments
Using cached pygments-2.18.0-py3-none-any.whl.metadata (2.5 kB)
Using cached pygments-2.18.0-py3-none-any.whl (1.2 MB)
Installing collected packages: pygments
Successfully installed pygments-2.18.0
EnTube_filtered.csv CHANGED
The diff for this file is too large to render. See raw diff
 
annotation/annotate.py CHANGED
@@ -1,59 +1,59 @@
# In case this module is invoked from other modules, e.g., preprocessing
from pathlib import Path
import sys
sys.path.append(str(Path.cwd() / "annotation"))

import json
import os
from typing import List, Union, Dict, Any, Callable, Optional
from concurrent.futures import ThreadPoolExecutor, as_completed
from datatypes import VideoAnnotation, Metadata
from utils import get_optimal_workers, extract_label, convert_to_linux_path


def annotate_video(
    file_path: str,
    label: str,
    video_filter: Callable[[str, Any], bool] = lambda path: True,
    **kwargs
) -> VideoAnnotation:
    if not video_filter(file_path, **kwargs):
        return None
    # print(f'Begin annotating {file_path}...')
    json_content: VideoAnnotation = {
        'video': convert_to_linux_path(file_path),
        'label': label,
        'conversations': [
            {
                'from': 'human',
                'value': '<image>\nThis video is a Youtube video on one of many categories such as Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs, etc. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The higher number of likes and lower number of dislikes, the more engaged the video is. The final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged). Please predict one of the three labels for this video, based on its contents only.'
            },
            {
                'from': 'gpt',
                'value': f'The engagement label of the video is {label}.'
            }
        ]
    }
    return json_content



def dump_json(
    metadata: Metadata,
    video_filter: Callable[[str, Any], bool] = lambda path: True,
    **kwargs
) -> List[VideoAnnotation]:
    print(f'Annotating {len(metadata)} videos...')
    json_contents: List[VideoAnnotation] = []

    with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
        futures = []
        for (file_path, label) in metadata:
            futures.append(executor.submit(annotate_video, file_path, label, video_filter=video_filter, **kwargs))

        for future in as_completed(futures):
            result = future.result()
            if result:
                json_contents.append(result)

    return json_contents
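For orientation, here is a minimal sketch of driving `dump_json` directly. The folder layout, file names, and output path are assumptions for illustration; the module is normally imported with `annotation/` on `sys.path`, as the header of the file arranges.

```python
# Minimal usage sketch; paths are hypothetical.
import json

from annotate import dump_json   # assumes annotation/ is on sys.path
from utils import filter_video

# Metadata is a list of (file_path, label) pairs, as produced by utils.get_metadata().
metadata = [
    ("EnTube/0/some_video.mp4", "0"),
    ("EnTube/2/another_video.mp4", "2"),
]

# Keep only videos up to 10 minutes, mirroring the --max_duration flag in train_test.py.
annotations = dump_json(metadata, filter_video, max_duration=600)

with open("EnTube_sample.json", "w") as f:
    json.dump(annotations, f, indent=4)
```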
annotation/datatypes.py CHANGED
@@ -1,4 +1,4 @@
from typing import List, Union, Dict, Tuple, Callable, Optional

VideoAnnotation = Optional[Dict[str, Union[str, List[Dict[str, str]]]]]
Metadata = List[Tuple[str, str]]
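For concreteness, a value matching each alias might look like the sketch below; the video id is taken from `data/EnTube_short_train.json`, while the folder path is hypothetical.

```python
from datatypes import VideoAnnotation, Metadata  # assumes annotation/ is on sys.path

annotation: VideoAnnotation = {
    "video": "0/jh2wXnuckEs.mp4",
    "label": "0",
    "conversations": [
        {"from": "human", "value": "<image>\n..."},
        {"from": "gpt", "value": "The engagement label of the video is 0."},
    ],
}
metadata: Metadata = [("EnTube/0/jh2wXnuckEs.mp4", "0")]
```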
annotation/train_test.py CHANGED
@@ -1,87 +1,87 @@
# In case this module is invoked from other modules, e.g., preprocessing
from pathlib import Path
import sys
sys.path.append(str(Path.cwd() / "annotation"))

import json
import os
import argparse
from sklearn.model_selection import train_test_split
from datatypes import VideoAnnotation, Metadata
from annotate import dump_json
from utils import get_metadata, filter_video
from typing import List



if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        prog = 'train_test.py',
        description='Annotate video dataset with JSON format'
    )
    parser.add_argument(
        '--folders',
        type = str,
        nargs = '+',
        required = True,
        help = "List of folder paths to video data"
    )
    parser.add_argument(
        '--train_size',
        type=float,
        default=0.8,
        help='Proportion of the dataset for training'
    )
    parser.add_argument(
        '--output_train_file',
        type=str,
        default='data/EnTube_train.json',
        help='Output JSON file for training'
    )
    parser.add_argument(
        '--output_test_file',
        type=str,
        default='data/EnTube_test.json',
        help='Output JSON file for testing'
    )
    parser.add_argument(
        '--max_duration',
        type=int,
        help='Maximum duration of video in seconds'
    )
    parser.add_argument(
        '--random_state',
        type=int,
        default=42,
        help='Random seed for train-test split'
    )
    args = parser.parse_args()

    folder_paths: List[str] = args.folders
    metadata: Metadata = get_metadata(folder_paths)
    # split metadata into 3 submetadata corresponding to 3 labels
    metadata_label = {0: [], 1: [], 2: []}
    for video, label in metadata:
        metadata_label[int(label)].append((video, label))
    train = []
    test = []
    for label, videos in metadata_label.items():
        train_l, test_l = train_test_split(
            videos,
            train_size=args.train_size,
            random_state=args.random_state
        )
        print(f'Label {label}: {len(train_l)} training videos, {len(test_l)} testing videos')
        train.extend(train_l)
        test.extend(test_l)

    json_train: List[VideoAnnotation] = dump_json(train, filter_video, **vars(args))
    json_test: List[VideoAnnotation] = dump_json(test, filter_video, **vars(args))

    with open(args.output_train_file, 'w') as f:
        json.dump(json_train, f, indent=4)
    print(f"Training data saved to {args.output_train_file}")
    with open(args.output_test_file, 'w') as f:
        json.dump(json_test, f, indent=4)
    print(f"Testing data saved to {args.output_test_file}")
annotation/utils.py CHANGED
@@ -1,68 +1,68 @@
# In case this module is invoked from other modules, e.g., preprocessing
from pathlib import Path
import sys
sys.path.append(str(Path.cwd() / "annotation"))

import decord as de
from datatypes import Metadata
from typing import List
import os
from multiprocessing import cpu_count
import traceback
from pathlib import Path


def convert_to_linux_path(path: str) -> str:
    return Path(path).as_posix()

def extract_label(path: str) -> str:
    idx = len(path) - 1
    while idx >= 0:
        if path[idx].isnumeric():
            return path[idx]
        idx -= 1
    return '-1'

def get_duration(path: str) -> int:
    try:
        vr = de.VideoReader(path, ctx=de.cpu(0), num_threads=1)
        return int(len(vr) / vr.get_avg_fps())
    except Exception as e:
        print(f"Error reading video {path}: {e}")
        print(traceback.format_exc())  # Include the full traceback for debugging
        return -1  # Use -1 to indicate an invalid duration

def filter_video(path: str, **kwargs) -> bool:
    try:
        max_duration = kwargs.get('max_duration', None)
        if max_duration is not None:
            duration = get_duration(path)
            if duration == -1:  # Handle invalid duration
                print(f"Skipping invalid video: {path}")
                return False
            return duration <= max_duration
        return True
    except Exception as e:
        print(f"Error in filter_video for {path}: {e}")
        return False

def get_optimal_workers() -> int:
    """Determine the optimal number of workers based on available CPU cores."""
    try:
        return max(1, cpu_count() - 1)  # Leave one core free
    except (NotImplementedError, ValueError):
        return 1  # Fallback to a single worker in case of an error

def get_metadata(
    folder_paths: List[str]
) -> Metadata:
    metadata: Metadata = []
    for folder_path in folder_paths:
        label: str = extract_label(folder_path)
        assert label != '-1', f"Invalid folder path: {folder_path}"
        for file_name in os.listdir(folder_path):
            file_path: str = os.path.join(folder_path.rstrip('/'), file_name)
            if os.path.exists(file_path) and os.path.isfile(file_path):
                metadata.append((file_path, label))
    print(f'Found {len(metadata)} videos')
    return metadata
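A small sketch of how the pure helpers behave; the folder names are hypothetical, and `get_metadata` would additionally require the folders to exist on disk.

```python
from utils import extract_label, get_optimal_workers  # assumes annotation/ is on sys.path

print(extract_label("EnTube/2"))       # -> '2'  (last numeric character in the path)
print(extract_label("EnTube/videos"))  # -> '-1' (no digit; get_metadata would hit its assert)
print(get_optimal_workers())           # typically cpu_count() - 1, with a floor of 1

# get_metadata(["EnTube/0", "EnTube/1", "EnTube/2"]) would then pair every file in
# those folders with its folder's label, yielding the Metadata list used above.
```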
data/EnTube_old.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/EnTube_short_train.json CHANGED
@@ -1,44 +1,44 @@
[
    {
        "video": "0/jh2wXnuckEs.mp4",
        "label": "0",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 0."
            }
        ]
    },
    {
        "video": "1/5YbX5V7O8Oc.mp4",
        "label": "1",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 1."
            }
        ]
    },
    {
        "video": "2/kiVkwc1MR8U.mp4",
        "label": "2",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 2."
            }
        ]
    }
]
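A quick sanity check of these annotation files; this is a sketch, and the relative path assumes the repo layout shown in this commit.

```python
import json

with open("data/EnTube_short_train.json") as f:
    annotations = json.load(f)

# Each entry pairs a relative video path with its engagement label and the
# two-turn human/gpt conversation used for training.
for entry in annotations:
    print(entry["video"], entry["label"], len(entry["conversations"]))
```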
data/EnTube_short_val.json CHANGED
@@ -1,44 +1,44 @@
[
    {
        "video": "0/o89G2LyStsI.mp4",
        "label": "0",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 0."
            }
        ]
    },
    {
        "video": "1/oPC9C21Tt1o.mp4",
        "label": "1",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 1."
            }
        ]
    },
    {
        "video": "2/wbZk7Mkgf9Q.mp4",
        "label": "2",
        "conversations": [
            {
                "from": "human",
                "value": "<image>\nThis video is a Youtube video on one of the following categories: Education, Film & Animation, Comedy, Entertainment, Music, Howto & Style, and People & Blogs. The engagement rate defined for each such video is based on the number of potential likes and dislikes only when published on Youtube. The exact formula for the score is (likes-dislikes) / (likes+dislikes) and the final prediction label is either 0 (not engaged), 1 (neutral), or 2 (engaged) based on thresholding this score. Please predict one of the three labels for this video, based on its contents only."
            },
            {
                "from": "gpt",
                "value": "The engagement label of the video is 2."
            }
        ]
    }
]
data/EnTube_test.json CHANGED
The diff for this file is too large to render. See raw diff
 
data/EnTube_train.json CHANGED
The diff for this file is too large to render. See raw diff
 
preprocessing/entube_dataset.py CHANGED
@@ -1,65 +1,65 @@
import torch
from torch.utils.data import Dataset
from typing import List
import os
from mm_datautils import process_video_frames
from transformers import BaseImageProcessor
from concurrent.futures import ThreadPoolExecutor, as_completed

class EnTubeDataset(Dataset):

    def __init__(
        self,
        folder_paths: List[str],
        image_processors: List[BaseImageProcessor],
        device: str
    ) -> None:
        self.file_paths = []
        self.image_processors = image_processors
        self.device = device

        for folder_path in folder_paths:
            file_names = os.listdir(folder_path)
            for file_name in file_names:
                file_path = os.path.join(folder_path, file_name)
                self.file_paths.append(file_path)

        # with ThreadPoolExecutor(max_workers=get_optimal_workers()) as executor:
        #     futures = []
        #     for folder_path in folder_paths:
        #         print(f'@tcm: In EnTubeDataset.__init__(): folder_path={folder_path}')
        #         file_names = os.listdir(folder_path)
        #         for file_name in file_names:
        #             file_path = os.path.join(folder_path, file_name)
        #             print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
        #             future = executor.submit(process_video_frames, file_path, image_processor, device)
        #             futures.append(future)

        #     for future in as_completed(futures):
        #         result = future.result()
        #         if result is not None:
        #             video, image_size = result
        #             self.videos.append(video)
        #             self.image_sizes.append(image_size)



    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        print(f'@tcm: In EnTubeDataset.__getitem__(): idx={idx}')
        video, image_size = process_video_frames(self.file_paths[idx], self.image_processors, self.device)
        return video, image_size

def collate_fn(batch):
    """
    batch: list of samples from EnTubeDataset.__getitem__()
    """
    assert isinstance(batch, list)
    assert isinstance(batch[0], tuple)

    image_sizes = batch[0][1]
    batch_videos = [video for video, _ in batch]
    batch_videos = [list(videos) for videos in zip(*batch_videos)]
    return batch_videos, image_sizes
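The `zip(*batch_videos)` step in `collate_fn` transposes a per-sample, per-encoder nesting into a per-encoder, per-sample one. A minimal sketch with placeholder values (plain Python, no video decoding):

```python
# Each __getitem__ sample is (per-encoder tensors, image_sizes); strings stand in for tensors.
sample_a = (["a_siglip", "a_dino"], [(360, 640)])
sample_b = (["b_siglip", "b_dino"], [(360, 640)])
batch = [sample_a, sample_b]

image_sizes = batch[0][1]
batch_videos = [video for video, _ in batch]                    # [[a_siglip, a_dino], [b_siglip, b_dino]]
batch_videos = [list(videos) for videos in zip(*batch_videos)]  # [[a_siglip, b_siglip], [a_dino, b_dino]]
print(batch_videos, image_sizes)
```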
preprocessing/main.py CHANGED
@@ -1,100 +1,100 @@
import sys
from pathlib import Path
sys.path.append(str(Path.cwd()))
from annotation.utils import get_optimal_workers

import os
import argparse
from typing import List, Dict
from mm_datautils import process_video_frames
from preprocessor import CambrianConfig, CambrianEncoders
import torch
from safetensors.torch import save_file
from collections import defaultdict
import logging
from multiprocessing import cpu_count
from entube_dataset import EnTubeDataset, collate_fn
from torch.utils.data import Dataset, DataLoader
from transformers import BaseImageProcessor


# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def get_optimal_workers() -> int:
    """Determine the optimal number of workers based on available CPU cores."""
    try:
        return max(1, cpu_count() - 1)  # Leave one core free
    except (NotImplementedError, ValueError):
        return 1  # Fallback to a single worker in case of an error

def extract_features(processor: CambrianEncoders, file_path: str, file_name: str) -> Dict[str, torch.Tensor]:
    try:
        video, image_sizes = process_video_frames(file_path)
        image_aux_features_list = processor.prepare_mm_features(images=video, image_sizes=image_sizes)
        return {
            file_name + '-siglip': image_aux_features_list[0],
            file_name + '-dino': image_aux_features_list[1]
        }
    except Exception as e:
        logging.error(f"Error processing {file_path}: {e}")
        return {}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--folders',
        type=str,
        nargs='+',
        required=True,
        help="List of folder paths to video data"
    )
    parser.add_argument(
        '--output_file',
        type = str,
        default = 'entube_tensors.safetensors',
        help = 'Safetensor file to store embeddings of EnTube dataset by vision encoders'
    )
    parser.add_argument(
        '--config_file',
        type = str,
        default = 'config.json',
        help = 'Path to configuration file of encoders parameters'
    )
    args = parser.parse_args()

    cambrianConfig = CambrianConfig.from_json_file(args.config_file)
    processor = CambrianEncoders(cambrianConfig)
    image_processors = []
    if not processor.vision_tower_aux_list[0].is_loaded:
        processor.vision_tower_aux_list[0].load_model()
    image_processors.append(processor.vision_tower_aux_list[0].image_processor)
    # for vision_tower_aux in processor.vision_tower_aux_list:
    #     if not vision_tower_aux.is_loaded:
    #         vision_tower_aux.load_model()
    #     image_processors.append(vision_tower_aux.image_processor)

    folder_paths: List[str] = args.folders
    data_tensor = dict()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
    dataloader = DataLoader(
        entube_dataset,
        batch_size=4,
        collate_fn=collate_fn,
        # num_workers=get_optimal_workers()
        num_workers=1
    )

    for batch_idx, (videos, image_sizes) in enumerate(dataloader):
        print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
        assert isinstance(videos, list), "List of videos features for each processor (vision encoder)"
        assert isinstance(videos[0], list) or isinstance(videos[0], torch.Tensor), "List of videos in the batch"
        image_aux_features_list = processor.prepare_mm_features(videos, image_sizes)
        for i, image_aux_features in enumerate(image_aux_features_list):
            print(f"@tcm: In main(): image_aux_features[{i}].shape={image_aux_features.shape}")
        break


    # save_file(dict(data_tensor), args.output_file)
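The final `save_file` call is left commented out above. If the per-video features were accumulated into `data_tensor`, the save step would presumably look like the sketch below, with dummy tensors and placeholder shapes and with keys following the `<file>-siglip` / `<file>-dino` convention from `extract_features`:

```python
import torch
from safetensors.torch import save_file

# Placeholder feature tensors; real shapes depend on the vision towers and frame count.
data_tensor = {
    "video123.mp4-siglip": torch.zeros(8, 576, 1152, dtype=torch.float16),
    "video123.mp4-dino": torch.zeros(8, 576, 1536, dtype=torch.float16),
}
save_file(data_tensor, "entube_tensors.safetensors")
```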
preprocessing/mm_datautils.py CHANGED
@@ -1,126 +1,126 @@
import numpy as np
from PIL import Image
import torch
from decord import cpu, VideoReader
from transformers import BaseImageProcessor
from typing import List, Union, Tuple
import time
from constants import *

def expand2square(pil_img, background_color):
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result

def process_images(
    images: torch.Tensor,
    image_processor: List[BaseImageProcessor],
    device: str
) -> Union[torch.Tensor, List[torch.Tensor]]:
    # images.shape: (4294, 360, 640, 3)
    # print(f'@tcm: In process_images(): images.shape={images.shape}')
    if isinstance(image_processor, list):
        processor_aux_list = image_processor
        new_images_aux_list = []
        for i, image in enumerate(images):
            # image.shape: (height, width, channels)
            # print(f'@tcm: In process_images(): frame {i}')
            if isinstance(image, np.ndarray):
                image = Image.fromarray(image)
            image_aux_list = []
            for processor_aux in processor_aux_list:
                image_aux = image  # PIL.Image
                if hasattr(processor_aux, "image_mean"):
                    try:
                        target_resolution = processor_aux.crop_size["height"]
                    except:
                        target_resolution = processor_aux.size["height"]
                    image_aux = expand2square(
                        image_aux, tuple(int(x * 255) for x in processor_aux.image_mean)
                    ).resize((target_resolution, target_resolution))
                image_aux = processor_aux.preprocess(image_aux, return_tensors="pt")[
                    "pixel_values"
                ][0]
                # image_aux.shape: torch.Size([3, 384, 384])
                image_aux_list.append(image_aux)
            new_images_aux_list.append(image_aux_list)  # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]

        new_images_aux_list = [
            list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
        ]  # torch.Tensor(C, H, W) new_images_aux_list[num_processor][num_frames]
        new_images_aux_list = [
            torch.stack(image_aux).half().to(device) for image_aux in new_images_aux_list
        ]  # torch.Tensor(num_frames, C, H, W) new_images_aux_list[num_processor]
        return new_images_aux_list
    else:
        image_aspect_ratio = "pad"
        new_images = []
        if image_aspect_ratio == "pad":
            for image in images:
                image = expand2square(
                    image, tuple(int(x * 255) for x in image_processor.image_mean)
                )
                image = image_processor.preprocess(image, return_tensors="pt")[
                    "pixel_values"
                ][0]
                new_images.append(image)
        else:
            return image_processor(images, return_tensors="pt")["pixel_values"]
        if all(x.shape == new_images[0].shape for x in new_images):
            new_images = torch.stack(new_images, dim=0)
        return new_images

def process_video_frames(
    video_path: str,
    image_processors: List[BaseImageProcessor],
    device: str
) -> Tuple[List[torch.Tensor], List[Tuple[int, int]]]:
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    fps = float(vr.get_avg_fps())
    frame_indices = np.array([i for i in range(0, len(vr), round(fps),)])
    print(f'@tcm: In process_video_frames(): # frames = {len(frame_indices)}')
    image_sizes = [vr[0].shape[:2]]

    video = [[] for _ in range(len(image_processors))]
    for i in range(0, len(frame_indices), CHUNK_SIZE):
        print(f'@tcm: In process_video_frames(): segment {int(i/CHUNK_SIZE)}')
        sub_frame_indices = frame_indices[i:min(i+CHUNK_SIZE, len(frame_indices))]
        sub_videos = []
        process_time = time.time()
        for frame_index in sub_frame_indices:
            img = vr[frame_index].asnumpy()
            sub_videos.append(img)
        sub_videos = np.stack(sub_videos)  # shape: (num_frames, height, width, channels)
        sub_videos = process_images(sub_videos, image_processors, device)
        print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
        assert len(sub_videos) == len(video)
        for j, sub_video in enumerate(sub_videos):
            video[j].append(sub_video)

        del sub_videos
        if 'cuda' in device:
            torch.cuda.empty_cache()

    for i in range(len(video)):
        video[i] = torch.cat(video[i], dim=0)

    # vectorize_time = time.time()
    # for frame_index in frame_indices:
    #     img = vr[frame_index].asnumpy()
    #     video.append(img)
    # video = np.stack(video)  # shape: (num_frames, height, width, channels)
    # print(f'@tcm: In process_video_frames(): vectorize_time={time.time()-vectorize_time:4f}')
    # image_sizes = [video[0].shape[:2]]
    # process_time = time.time()
    # video = process_images(video, image_processors, device)
    # print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:4f}')
    video = [item.unsqueeze(0) for item in video]
    return video, image_sizes
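The `frame_indices` construction in `process_video_frames` steps through the clip by the rounded average FPS, i.e. roughly one sampled frame per second, and `CHUNK_SIZE` (imported from `constants`) bounds how many sampled frames are decoded and preprocessed per pass. A small sketch of the arithmetic with made-up numbers; `CHUNK_SIZE = 64` is assumed here for illustration, not read from the repo:

```python
import numpy as np

num_frames, fps = 4294, 30.0  # hypothetical clip: ~143 s at 30 fps
frame_indices = np.array([i for i in range(0, num_frames, round(fps))])
print(len(frame_indices))     # 144 sampled frames, about one per second

CHUNK_SIZE = 64               # assumed value for illustration
for start in range(0, len(frame_indices), CHUNK_SIZE):
    chunk = frame_indices[start:min(start + CHUNK_SIZE, len(frame_indices))]
    print(start, len(chunk))  # chunks of 64, 64, then the remaining 16 frames
```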
preprocessing/preprocessor.py CHANGED
@@ -1,240 +1,240 @@
from vision_encoders.builder import build_vision_tower_aux_list
from transformers import Qwen2Config
from typing import Optional, List, Tuple
import torch
import json
from transformers import BaseImageProcessor

class CambrianConfig(Qwen2Config):
    model_type = "cambrian_qwen"
    debug = "debug"

    def __init__(
        self,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)

        for key, value in kwargs.items():
            setattr(self, key, value)

    @classmethod
    def from_json_file(cls, json_file_path):
        """Load a config from a json file."""
        with open(json_file_path, "r") as f:
            config_dict = json.load(f)
        return cls(**config_dict)


class CambrianEncoders:

    def __init__(
        self,
        config: CambrianConfig
    ) -> None:
        self.config: CambrianConfig = config
        self.vision_tower_aux_list = build_vision_tower_aux_list(config, delay_load=True)

    def encode_images(self, image_aux_list, encode_type=None):
        vision_tower_aux_list = self.vision_tower_aux_list
        image_aux_features_list = []
        chunk_size = 64
        if encode_type == "dino":
            image_aux = image_aux_list[-1]
            vision_tower_aux = vision_tower_aux_list[-1]
            if image_aux.shape[0] > chunk_size:
                image_aux_features_chunks = []
                for start_idx in range(0, image_aux.shape[0], chunk_size):
                    end_idx = min(start_idx + chunk_size, image_aux.shape[0])
                    chunk = image_aux[start_idx:end_idx]
                    image_aux_features_chunk = vision_tower_aux(chunk)
                    image_aux_features_chunks.append(image_aux_features_chunk)
                image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
            else:
                image_aux_features = vision_tower_aux(image_aux)
            return image_aux_features
        elif encode_type == "siglip":
            image_aux = image_aux_list[0]
            vision_tower_aux = vision_tower_aux_list[0]
            if image_aux.shape[0] > chunk_size:
                image_aux_features_chunks = []
                for start_idx in range(0, image_aux.shape[0], chunk_size):
                    end_idx = min(start_idx + chunk_size, image_aux.shape[0])
                    chunk = image_aux[start_idx:end_idx]
                    image_aux_features_chunk = vision_tower_aux(chunk)
                    image_aux_features_chunks.append(image_aux_features_chunk)
                image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
            else:
                image_aux_features = vision_tower_aux(image_aux)
            return image_aux_features
        else:
            for image_aux, vision_tower_aux in zip(
                image_aux_list, vision_tower_aux_list
            ):
                if image_aux.shape[0] > chunk_size:
                    image_aux_features_chunks = []
                    for start_idx in range(0, image_aux.shape[0], chunk_size):
                        end_idx = min(start_idx + chunk_size, image_aux.shape[0])
                        chunk = image_aux[start_idx:end_idx]
                        image_aux_features_chunk = vision_tower_aux(chunk)
                        image_aux_features_chunks.append(image_aux_features_chunk)
                    image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
                else:
                    image_aux_features = vision_tower_aux(image_aux)
                image_aux_features_list.append(image_aux_features)
            return image_aux_features_list

    def select_frame(
        self,
        feature_list,
        split_sizes,
        new_image_aux_list,
        image_sizes,
        window_size=16,
        threshold=0.83,
    ):
        dino_features_batch = torch.split(feature_list, split_sizes, dim=0)
        new_image_aux_batch_0 = torch.split(new_image_aux_list[0], split_sizes, dim=0)
        new_image_aux_batch_1 = torch.split(new_image_aux_list[1], split_sizes, dim=0)
        new_split_sizes = []
        selected_frames_all_0 = []
        selected_frames_all_1 = []
        selected_frames_feature_all = []
        selected_frame_indices_all = []
        for i_batch, frame_features in enumerate(dino_features_batch):

            original_width, original_height = image_sizes[i_batch]
            if getattr(self.get_model().config, "highres", False):
                token_per_frame = self.config.lowres_token ** 2
            else:
                token_per_frame = self.config.image_token_len

            max_num_frames = max(
                1,
                (
                    self.config.tokenizer_model_max_length
                    - getattr(self.config, "inference_max_length", 16)
                )
                // token_per_frame,
            )
            if len(frame_features) < max_num_frames:
                selected_frames_all_0.append(new_image_aux_batch_0[i_batch])
                selected_frames_all_1.append(new_image_aux_batch_1[i_batch])
                selected_frames_feature_all.append(frame_features)
                new_split_sizes.append(len(frame_features))
                selected_frame_indices_all.append(torch.arange(len(frame_features)))
                continue

            num_segments = len(frame_features) // window_size
            if num_segments == 0:
                query_feature = frame_features.flatten(1, 2)
                query_feature = query_feature / torch.norm(
                    (query_feature), dim=1, keepdim=True
                )
                similarities = torch.mean(query_feature @ query_feature.T, dim=1)
                similarities[len(frame_features) // 2] = 0
                indices = torch.where(similarities < threshold)[0]
                selected_frame_indices_all.append(indices)
                selected_frames_all_0.append(new_image_aux_batch_0[i_batch][indices])
                selected_frames_all_1.append(new_image_aux_batch_1[i_batch][indices])
                selected_frames_feature_all.append(frame_features[indices])
                new_split_sizes.append(len(indices))
                continue
            segments_frames_0 = []
            segments_frames_1 = []
            segments_features = []
            for start_idx in range(0, len(frame_features), window_size):
                end_idx = min(start_idx + window_size, len(frame_features))
                segments_frames_0.append(
                    new_image_aux_batch_0[i_batch][start_idx:end_idx]
                )
                segments_frames_1.append(
                    new_image_aux_batch_1[i_batch][start_idx:end_idx]
                )
                segments_features.append(frame_features[start_idx:end_idx])
            selected_frames_0 = []
            selected_frames_1 = []
            selected_features = []
            selected_frame_indices = []
            for i, segment in enumerate(segments_features):
160
- query_feature = segment.flatten(1, 2)
161
- query_feature = query_feature / torch.norm(
162
- (query_feature), dim=1, keepdim=True
163
- )
164
- similarities = torch.mean(query_feature @ query_feature.T, dim=1)
165
- similarities[len(segment) // 2] = 0
166
- indices = torch.where(similarities < threshold)[0]
167
- selected_frames_0.append(segments_frames_0[i][indices])
168
- selected_frames_1.append(segments_frames_1[i][indices])
169
- selected_features.append(segment[indices])
170
- selected_frame_indices.extend(indices + i * window_size)
171
- selected_frames_0 = torch.cat(selected_frames_0, dim=0)
172
- selected_frames_1 = torch.cat(selected_frames_1, dim=0)
173
- selected_features = torch.cat(selected_features, dim=0)
174
- selected_frame_indices = torch.tensor(selected_frame_indices)
175
- # ablation
176
- max_num_frames = 400 # in case of OOM
177
- if len(selected_frames_0) > max_num_frames:
178
- interval = len(selected_frames_0) / float(max_num_frames)
179
- indices = [int(interval * i) for i in range(max_num_frames)]
180
- new_split_sizes.append(len(indices))
181
- selected_frames_all_0.append(selected_frames_0[indices])
182
- selected_frames_all_1.append(selected_frames_1[indices])
183
- selected_frames_feature_all.append(selected_features[indices])
184
- selected_frame_indices = selected_frame_indices[indices]
185
- else:
186
- new_split_sizes.append(len(selected_frames_0))
187
- selected_frames_all_0.append(selected_frames_0)
188
- selected_frames_all_1.append(selected_frames_1)
189
- selected_frames_feature_all.append(selected_features)
190
- selected_frame_indices_all.append(selected_frame_indices)
191
- selected_frames_all_0 = torch.cat(selected_frames_all_0, dim=0)
192
- selected_frames_all_1 = torch.cat(selected_frames_all_1, dim=0)
193
- selected_frames_feature_all = torch.cat(selected_frames_feature_all, dim=0)
194
- return (
195
- selected_frames_feature_all,
196
- new_split_sizes,
197
- [selected_frames_all_0, selected_frames_all_1],
198
- selected_frame_indices_all,
199
- )
200
-
201
- def prepare_mm_features(
202
- self,
203
- images: List[torch.Tensor],
204
- image_sizes: List[Tuple[int, int]],
205
- ):
206
- image_aux_list = images
207
- split_sizes_ori = [
208
- 1 if image.ndim == 3 else image.shape[0] for image in image_aux_list[0]
209
- ]
210
- new_image_aux_list = []
211
- for image_aux in image_aux_list:
212
- if type(image_aux) is list:
213
- image_aux = [
214
- x.unsqueeze(0) if x.ndim == 3 else x for x in image_aux
215
- ]
216
- concat_image_aux = torch.cat([image for image in image_aux], dim=0)
217
- new_image_aux_list.append(concat_image_aux)
218
- image_aux_features_dino = self.encode_images(
219
- new_image_aux_list, encode_type="dino"
220
- )
221
- (
222
- image_aux_features_dino,
223
- split_sizes,
224
- new_image_aux_list,
225
- selected_frame_indices_all,
226
- ) = self.select_frame(
227
- image_aux_features_dino,
228
- split_sizes_ori,
229
- new_image_aux_list,
230
- image_sizes,
231
- threshold=getattr(self.config, "dino_threshold", 0.83),
232
- )
233
- image_aux_features_siglip = self.encode_images(
234
- new_image_aux_list, encode_type="siglip"
235
- )
236
- image_aux_features_list = [
237
- image_aux_features_siglip,
238
- image_aux_features_dino,
239
- ]
240
  return image_aux_features_list
 
1
+ from vision_encoders.builder import build_vision_tower_aux_list
2
+ from transformers import Qwen2Config
3
+ from typing import Optional, List, Tuple
4
+ import torch
5
+ import json
6
+ from transformers import BaseImageProcessor
7
+
8
+ class CambrianConfig(Qwen2Config):
9
+ model_type = "cambrian_qwen"
10
+ debug = "debug"
11
+
12
+ def __init__(
13
+ self,
14
+ **kwargs
15
+ ) -> None:
16
+ super().__init__(**kwargs)
17
+
18
+ for key, value in kwargs.items():
19
+ setattr(self, key, value)
20
+
21
+ @classmethod
22
+ def from_json_file(cls, json_file_path):
23
+ """Load a config from a json file."""
24
+ with open(json_file_path, "r") as f:
25
+ config_dict = json.load(f)
26
+ return cls(**config_dict)
27
+
28
+
29
+ class CambrianEncoders:
30
+
31
+ def __init__(
32
+ self,
33
+ config: CambrianConfig
34
+ ) -> None:
35
+ self.config: CambrianConfig = config
36
+ self.vision_tower_aux_list = build_vision_tower_aux_list(config, delay_load=True)
37
+
38
+ def encode_images(self, image_aux_list, encode_type=None):
39
+ vision_tower_aux_list = self.vision_tower_aux_list
40
+ image_aux_features_list = []
41
+ chunk_size = 64
42
+ if encode_type == "dino":
43
+ image_aux = image_aux_list[-1]
44
+ vision_tower_aux = vision_tower_aux_list[-1]
45
+ if image_aux.shape[0] > chunk_size:
46
+ image_aux_features_chunks = []
47
+ for start_idx in range(0, image_aux.shape[0], chunk_size):
48
+ end_idx = min(start_idx + chunk_size, image_aux.shape[0])
49
+ chunk = image_aux[start_idx:end_idx]
50
+ image_aux_features_chunk = vision_tower_aux(chunk)
51
+ image_aux_features_chunks.append(image_aux_features_chunk)
52
+ image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
53
+ else:
54
+ image_aux_features = vision_tower_aux(image_aux)
55
+ return image_aux_features
56
+ elif encode_type == "siglip":
57
+ image_aux = image_aux_list[0]
58
+ vision_tower_aux = vision_tower_aux_list[0]
59
+ if image_aux.shape[0] > chunk_size:
60
+ image_aux_features_chunks = []
61
+ for start_idx in range(0, image_aux.shape[0], chunk_size):
62
+ end_idx = min(start_idx + chunk_size, image_aux.shape[0])
63
+ chunk = image_aux[start_idx:end_idx]
64
+ image_aux_features_chunk = vision_tower_aux(chunk)
65
+ image_aux_features_chunks.append(image_aux_features_chunk)
66
+ image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
67
+ else:
68
+ image_aux_features = vision_tower_aux(image_aux)
69
+ return image_aux_features
70
+ else:
71
+ for image_aux, vision_tower_aux in zip(
72
+ image_aux_list, vision_tower_aux_list
73
+ ):
74
+ if image_aux.shape[0] > chunk_size:
75
+ image_aux_features_chunks = []
76
+ for start_idx in range(0, image_aux.shape[0], chunk_size):
77
+ end_idx = min(start_idx + chunk_size, image_aux.shape[0])
78
+ chunk = image_aux[start_idx:end_idx]
79
+ image_aux_features_chunk = vision_tower_aux(chunk)
80
+ image_aux_features_chunks.append(image_aux_features_chunk)
81
+ image_aux_features = torch.cat(image_aux_features_chunks, dim=0)
82
+ else:
83
+ image_aux_features = vision_tower_aux(image_aux)
84
+ image_aux_features_list.append(image_aux_features)
85
+ return image_aux_features_list
86
+
87
+ def select_frame(
88
+ self,
89
+ feature_list,
90
+ split_sizes,
91
+ new_image_aux_list,
92
+ image_sizes,
93
+ window_size=16,
94
+ threshold=0.83,
95
+ ):
96
+ dino_features_batch = torch.split(feature_list, split_sizes, dim=0)
97
+ new_image_aux_batch_0 = torch.split(new_image_aux_list[0], split_sizes, dim=0)
98
+ new_image_aux_batch_1 = torch.split(new_image_aux_list[1], split_sizes, dim=0)
99
+ new_split_sizes = []
100
+ selected_frames_all_0 = []
101
+ selected_frames_all_1 = []
102
+ selected_frames_feature_all = []
103
+ selected_frame_indices_all = []
104
+ for i_batch, frame_features in enumerate(dino_features_batch):
105
+
106
+ original_width, original_height = image_sizes[i_batch]
107
+ if getattr(self.config, "highres", False):  # "highres" flag is read directly from the config
108
+ token_per_frame = self.config.lowres_token ** 2
109
+ else:
110
+ token_per_frame = self.config.image_token_len
111
+
112
+ max_num_frames = max(
113
+ 1,
114
+ (
115
+ self.config.tokenizer_model_max_length
116
+ - getattr(self.config, "inference_max_length", 16)
117
+ )
118
+ // token_per_frame,
119
+ )
120
+ if len(frame_features) < max_num_frames:
121
+ selected_frames_all_0.append(new_image_aux_batch_0[i_batch])
122
+ selected_frames_all_1.append(new_image_aux_batch_1[i_batch])
123
+ selected_frames_feature_all.append(frame_features)
124
+ new_split_sizes.append(len(frame_features))
125
+ selected_frame_indices_all.append(torch.arange(len(frame_features)))
126
+ continue
127
+
128
+ num_segments = len(frame_features) // window_size
129
+ if num_segments == 0:
130
+ query_feature = frame_features.flatten(1, 2)
131
+ query_feature = query_feature / torch.norm(
132
+ (query_feature), dim=1, keepdim=True
133
+ )
134
+ similarities = torch.mean(query_feature @ query_feature.T, dim=1)
135
+ similarities[len(frame_features) // 2] = 0
136
+ indices = torch.where(similarities < threshold)[0]
137
+ selected_frame_indices_all.append(indices)
138
+ selected_frames_all_0.append(new_image_aux_batch_0[i_batch][indices])
139
+ selected_frames_all_1.append(new_image_aux_batch_1[i_batch][indices])
140
+ selected_frames_feature_all.append(frame_features[indices])
141
+ new_split_sizes.append(len(indices))
142
+ continue
143
+ segments_frames_0 = []
144
+ segments_frames_1 = []
145
+ segments_features = []
146
+ for start_idx in range(0, len(frame_features), window_size):
147
+ end_idx = min(start_idx + window_size, len(frame_features))
148
+ segments_frames_0.append(
149
+ new_image_aux_batch_0[i_batch][start_idx:end_idx]
150
+ )
151
+ segments_frames_1.append(
152
+ new_image_aux_batch_1[i_batch][start_idx:end_idx]
153
+ )
154
+ segments_features.append(frame_features[start_idx:end_idx])
155
+ selected_frames_0 = []
156
+ selected_frames_1 = []
157
+ selected_features = []
158
+ selected_frame_indices = []
159
+ for i, segment in enumerate(segments_features):
160
+ query_feature = segment.flatten(1, 2)
161
+ query_feature = query_feature / torch.norm(
162
+ (query_feature), dim=1, keepdim=True
163
+ )
164
+ similarities = torch.mean(query_feature @ query_feature.T, dim=1)
165
+ similarities[len(segment) // 2] = 0
166
+ indices = torch.where(similarities < threshold)[0]
167
+ selected_frames_0.append(segments_frames_0[i][indices])
168
+ selected_frames_1.append(segments_frames_1[i][indices])
169
+ selected_features.append(segment[indices])
170
+ selected_frame_indices.extend(indices + i * window_size)
171
+ selected_frames_0 = torch.cat(selected_frames_0, dim=0)
172
+ selected_frames_1 = torch.cat(selected_frames_1, dim=0)
173
+ selected_features = torch.cat(selected_features, dim=0)
174
+ selected_frame_indices = torch.tensor(selected_frame_indices)
175
+ # ablation
176
+ max_num_frames = 400 # in case of OOM
177
+ if len(selected_frames_0) > max_num_frames:
178
+ interval = len(selected_frames_0) / float(max_num_frames)
179
+ indices = [int(interval * i) for i in range(max_num_frames)]
180
+ new_split_sizes.append(len(indices))
181
+ selected_frames_all_0.append(selected_frames_0[indices])
182
+ selected_frames_all_1.append(selected_frames_1[indices])
183
+ selected_frames_feature_all.append(selected_features[indices])
184
+ selected_frame_indices = selected_frame_indices[indices]
185
+ else:
186
+ new_split_sizes.append(len(selected_frames_0))
187
+ selected_frames_all_0.append(selected_frames_0)
188
+ selected_frames_all_1.append(selected_frames_1)
189
+ selected_frames_feature_all.append(selected_features)
190
+ selected_frame_indices_all.append(selected_frame_indices)
191
+ selected_frames_all_0 = torch.cat(selected_frames_all_0, dim=0)
192
+ selected_frames_all_1 = torch.cat(selected_frames_all_1, dim=0)
193
+ selected_frames_feature_all = torch.cat(selected_frames_feature_all, dim=0)
194
+ return (
195
+ selected_frames_feature_all,
196
+ new_split_sizes,
197
+ [selected_frames_all_0, selected_frames_all_1],
198
+ selected_frame_indices_all,
199
+ )
200
+
201
+ def prepare_mm_features(
202
+ self,
203
+ images: List[torch.Tensor],
204
+ image_sizes: List[Tuple[int, int]],
205
+ ):
206
+ image_aux_list = images
207
+ split_sizes_ori = [
208
+ 1 if image.ndim == 3 else image.shape[0] for image in image_aux_list[0]
209
+ ]
210
+ new_image_aux_list = []
211
+ for image_aux in image_aux_list:
212
+ if type(image_aux) is list:
213
+ image_aux = [
214
+ x.unsqueeze(0) if x.ndim == 3 else x for x in image_aux
215
+ ]
216
+ concat_image_aux = torch.cat([image for image in image_aux], dim=0)
217
+ new_image_aux_list.append(concat_image_aux)
218
+ image_aux_features_dino = self.encode_images(
219
+ new_image_aux_list, encode_type="dino"
220
+ )
221
+ (
222
+ image_aux_features_dino,
223
+ split_sizes,
224
+ new_image_aux_list,
225
+ selected_frame_indices_all,
226
+ ) = self.select_frame(
227
+ image_aux_features_dino,
228
+ split_sizes_ori,
229
+ new_image_aux_list,
230
+ image_sizes,
231
+ threshold=getattr(self.config, "dino_threshold", 0.83),
232
+ )
233
+ image_aux_features_siglip = self.encode_images(
234
+ new_image_aux_list, encode_type="siglip"
235
+ )
236
+ image_aux_features_list = [
237
+ image_aux_features_siglip,
238
+ image_aux_features_dino,
239
+ ]
240
  return image_aux_features_list
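select_frame() above scores each frame by its mean cosine similarity to the other frames in a 16-frame window and keeps only frames that fall below dino_threshold, while forcing the middle frame to survive. A condensed, standalone sketch of that per-window rule (shapes are illustrative):

import torch

def select_distinct_frames(frame_features, threshold=0.83):
    # frame_features: (num_frames, tokens, dim) DINO patch features for one window
    q = frame_features.flatten(1, 2)                 # (num_frames, tokens * dim)
    q = q / torch.norm(q, dim=1, keepdim=True)       # L2-normalise each frame vector
    sims = torch.mean(q @ q.T, dim=1)                # mean cosine similarity per frame
    sims[len(frame_features) // 2] = 0               # zero out the middle frame so it is kept
    return torch.where(sims < threshold)[0]          # indices of frames judged distinct enough

print(select_distinct_frames(torch.randn(16, 576, 1536)))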
preprocessing/vision_encoders/base_encoder.py CHANGED
@@ -1,135 +1,135 @@
1
- from abc import ABC, abstractmethod
2
-
3
- import torch
4
- import torch.nn as nn
5
-
6
-
7
- class ProcessorWrapper:
8
- def __init__(
9
- self,
10
- transform,
11
- height=378,
12
- width=378,
13
- image_mean=[0.48145466, 0.4578275, 0.40821073],
14
- ):
15
- self._crop_size = {
16
- "height": height,
17
- "width": width,
18
- }
19
- self._transforms = transform
20
- # print(transform)
21
- self.image_mean = image_mean
22
-
23
- @property
24
- def crop_size(self):
25
- return self._crop_size
26
-
27
- def preprocess(self, image, return_tensors="pt"):
28
- # Ensure image is a PIL Image
29
- output = {}
30
- output["pixel_values"] = [self._transforms(image)]
31
- return output
32
-
33
-
34
- class BaseVisionTower(nn.Module):
35
- def __init__(self, vision_tower_name, args, delay_load=False):
36
- super().__init__()
37
-
38
- self.is_loaded = False
39
- self.args = args
40
-
41
- self.vision_tower_name = vision_tower_name
42
- self.select_layer = args.mm_vision_select_layer
43
- self.select_feature = getattr(args, "mm_vision_select_feature", "patch")
44
- self.unfreeze_mm_vision_tower = getattr(args, "unfreeze_mm_vision_tower", False)
45
- self.delay_load = delay_load
46
-
47
- @abstractmethod
48
- def load_model(self, device_map=None):
49
- raise NotImplementedError("Subclasses must implement load_model")
50
-
51
- @abstractmethod
52
- def _forward(self, images):
53
- raise NotImplementedError("Subclasses must implement forward")
54
-
55
- def forward(self, images):
56
- if type(images) is list:
57
- image_features = [self._forward(image.unsqueeze(0)) for image in images]
58
- else:
59
- image_features = self._forward(images)
60
-
61
- return image_features
62
-
63
- @property
64
- def dummy_feature(self):
65
- return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
66
-
67
- @property
68
- def dtype(self):
69
- # Dynamically infer the dtype from the first parameter, if not explicitly specified
70
- if hasattr(self.vision_tower, "dtype"):
71
- return self.vision_tower.dtype
72
- else:
73
- params = list(self.vision_tower.parameters())
74
- return (
75
- params[0].dtype if len(params) > 0 else torch.float32
76
- ) # Default to torch.float32 if no parameters
77
-
78
- @property
79
- def device(self):
80
- # Dynamically infer the device from the first parameter, if not explicitly specified
81
- if hasattr(self.vision_tower, "device"):
82
- return self.vision_tower.device
83
- else:
84
- params = list(self.vision_tower.parameters())
85
- return (
86
- params[0].device if len(params) > 0 else torch.device("cpu")
87
- ) # Default to CPU if no parameters
88
-
89
- @property
90
- def config(self):
91
- if self.is_loaded:
92
- return self.vision_tower.config
93
- else:
94
- return self.cfg_only
95
-
96
- @property
97
- def hidden_size(self):
98
- try:
99
- return self.config.hidden_size
100
- except:
101
- return self._hidden_size
102
-
103
- @property
104
- def image_size(self): # resolution
105
- # return self.config.image_size
106
- try:
107
- return self.config.image_size
108
- except:
109
- return self._image_size
110
-
111
- @property
112
- def patch_size(self):
113
- # return self.config.patch_size
114
- try:
115
- return self.config.patch_size
116
- except:
117
- return self._patch_size
118
-
119
- @property
120
- def num_patches_per_side(self):
121
- if self._interp_size is not None:
122
- return int(self._interp_size**0.5)
123
- try:
124
- return self.image_size // self.patch_size
125
- except:
126
- return self._num_patches_per_side
127
-
128
- @property
129
- def num_patches(self):
130
- if self._interp_size is not None:
131
- return self._interp_size
132
- try:
133
- return self.num_patches_per_side**2
134
- except:
135
- return self._num_patches
 
1
+ from abc import ABC, abstractmethod
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ class ProcessorWrapper:
8
+ def __init__(
9
+ self,
10
+ transform,
11
+ height=378,
12
+ width=378,
13
+ image_mean=[0.48145466, 0.4578275, 0.40821073],
14
+ ):
15
+ self._crop_size = {
16
+ "height": height,
17
+ "width": width,
18
+ }
19
+ self._transforms = transform
20
+ # print(transform)
21
+ self.image_mean = image_mean
22
+
23
+ @property
24
+ def crop_size(self):
25
+ return self._crop_size
26
+
27
+ def preprocess(self, image, return_tensors="pt"):
28
+ # Ensure image is a PIL Image
29
+ output = {}
30
+ output["pixel_values"] = [self._transforms(image)]
31
+ return output
32
+
33
+
34
+ class BaseVisionTower(nn.Module):
35
+ def __init__(self, vision_tower_name, args, delay_load=False):
36
+ super().__init__()
37
+
38
+ self.is_loaded = False
39
+ self.args = args
40
+
41
+ self.vision_tower_name = vision_tower_name
42
+ self.select_layer = args.mm_vision_select_layer
43
+ self.select_feature = getattr(args, "mm_vision_select_feature", "patch")
44
+ self.unfreeze_mm_vision_tower = getattr(args, "unfreeze_mm_vision_tower", False)
45
+ self.delay_load = delay_load
46
+
47
+ @abstractmethod
48
+ def load_model(self, device_map=None):
49
+ raise NotImplementedError("Subclasses must implement load_model")
50
+
51
+ @abstractmethod
52
+ def _forward(self, images):
53
+ raise NotImplementedError("Subclasses must implement forward")
54
+
55
+ def forward(self, images):
56
+ if type(images) is list:
57
+ image_features = [self._forward(image.unsqueeze(0)) for image in images]
58
+ else:
59
+ image_features = self._forward(images)
60
+
61
+ return image_features
62
+
63
+ @property
64
+ def dummy_feature(self):
65
+ return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
66
+
67
+ @property
68
+ def dtype(self):
69
+ # Dynamically infer the dtype from the first parameter, if not explicitly specified
70
+ if hasattr(self.vision_tower, "dtype"):
71
+ return self.vision_tower.dtype
72
+ else:
73
+ params = list(self.vision_tower.parameters())
74
+ return (
75
+ params[0].dtype if len(params) > 0 else torch.float32
76
+ ) # Default to torch.float32 if no parameters
77
+
78
+ @property
79
+ def device(self):
80
+ # Dynamically infer the device from the first parameter, if not explicitly specified
81
+ if hasattr(self.vision_tower, "device"):
82
+ return self.vision_tower.device
83
+ else:
84
+ params = list(self.vision_tower.parameters())
85
+ return (
86
+ params[0].device if len(params) > 0 else torch.device("cpu")
87
+ ) # Default to CPU if no parameters
88
+
89
+ @property
90
+ def config(self):
91
+ if self.is_loaded:
92
+ return self.vision_tower.config
93
+ else:
94
+ return self.cfg_only
95
+
96
+ @property
97
+ def hidden_size(self):
98
+ try:
99
+ return self.config.hidden_size
100
+ except AttributeError:  # fall back to the value cached before the tower is loaded
101
+ return self._hidden_size
102
+
103
+ @property
104
+ def image_size(self): # resolution
105
+ # return self.config.image_size
106
+ try:
107
+ return self.config.image_size
108
+ except AttributeError:
109
+ return self._image_size
110
+
111
+ @property
112
+ def patch_size(self):
113
+ # return self.config.patch_size
114
+ try:
115
+ return self.config.patch_size
116
+ except AttributeError:
117
+ return self._patch_size
118
+
119
+ @property
120
+ def num_patches_per_side(self):
121
+ if self._interp_size is not None:
122
+ return int(self._interp_size**0.5)
123
+ try:
124
+ return self.image_size // self.patch_size
125
+ except AttributeError:
126
+ return self._num_patches_per_side
127
+
128
+ @property
129
+ def num_patches(self):
130
+ if self._interp_size is not None:
131
+ return self._interp_size
132
+ try:
133
+ return self.num_patches_per_side**2
134
+ except AttributeError:
135
+ return self._num_patches
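BaseVisionTower above is an abstract shell: subclasses supply load_model() and _forward(), while forward() accepts either a single batch tensor or a list of per-image tensors, and dtype/device/config fall back gracefully before weights are loaded. A toy stand-in that follows the same contract (not a real encoder, just to show the call order and token layout):

import torch
import torch.nn as nn

class ToyVisionTower(nn.Module):
    def __init__(self):
        super().__init__()
        self.is_loaded = False
    def load_model(self, device_map=None):
        self.proj = nn.Identity()                    # a real tower would load pretrained weights here
        self.is_loaded = True
    def _forward(self, images):
        # (B, C, H, W) -> (B, H*W, C), the token layout the encoders above produce
        return self.proj(images.flatten(2).transpose(1, 2))
    def forward(self, images):
        if isinstance(images, list):                 # same list handling as BaseVisionTower.forward
            return [self._forward(img.unsqueeze(0)) for img in images]
        return self._forward(images)

tower = ToyVisionTower()
tower.load_model()
print(tower(torch.randn(2, 3, 8, 8)).shape)          # torch.Size([2, 64, 3])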
preprocessing/vision_encoders/builder.py CHANGED
@@ -1,37 +1,37 @@
1
- # pyre-unsafe
2
- import copy
3
-
4
- from .dino_encoder import DinoVisionTower
5
- from .siglip_encoder import SiglipVisionTower
6
-
7
-
8
- def build_vision_tower_aux_list(vision_tower_cfg, **kwargs):
9
- vision_tower_aux_name_list = getattr(
10
- vision_tower_cfg,
11
- "mm_vision_tower_aux_list",
12
- getattr(vision_tower_cfg, "vision_tower_aux_list", None),
13
- )
14
- vision_tower_aux_token_len_list = getattr(
15
- vision_tower_cfg,
16
- "mm_vision_tower_aux_token_len_list",
17
- getattr(vision_tower_cfg, "vision_tower_aux_token_len_list", None),
18
- )
19
- vision_tower_aux_list = []
20
- for vision_tower_aux_name, vision_tower_aux_token_len in zip(
21
- vision_tower_aux_name_list, vision_tower_aux_token_len_list
22
- ):
23
- config = copy.deepcopy(vision_tower_cfg)
24
- vision_tower_aux_name += "-interp{}".format(vision_tower_aux_token_len)
25
- if "siglip" in vision_tower_aux_name.lower():
26
- vision_tower_aux_list.append(
27
- SiglipVisionTower(vision_tower_aux_name, args=config, **kwargs)
28
- )
29
-
30
- # SSL-based Vision Towers
31
- elif "dinov2" in vision_tower_aux_name.lower():
32
- vision_tower_aux_list.append(
33
- DinoVisionTower(vision_tower_aux_name, args=config, **kwargs)
34
- )
35
- else:
36
- raise ValueError(f"Unknown vision tower: {vision_tower_aux_name}")
37
- return vision_tower_aux_list
 
1
+ # pyre-unsafe
2
+ import copy
3
+
4
+ from .dino_encoder import DinoVisionTower
5
+ from .siglip_encoder import SiglipVisionTower
6
+
7
+
8
+ def build_vision_tower_aux_list(vision_tower_cfg, **kwargs):
9
+ vision_tower_aux_name_list = getattr(
10
+ vision_tower_cfg,
11
+ "mm_vision_tower_aux_list",
12
+ getattr(vision_tower_cfg, "vision_tower_aux_list", None),
13
+ )
14
+ vision_tower_aux_token_len_list = getattr(
15
+ vision_tower_cfg,
16
+ "mm_vision_tower_aux_token_len_list",
17
+ getattr(vision_tower_cfg, "vision_tower_aux_token_len_list", None),
18
+ )
19
+ vision_tower_aux_list = []
20
+ for vision_tower_aux_name, vision_tower_aux_token_len in zip(
21
+ vision_tower_aux_name_list, vision_tower_aux_token_len_list
22
+ ):
23
+ config = copy.deepcopy(vision_tower_cfg)
24
+ vision_tower_aux_name += "-interp{}".format(vision_tower_aux_token_len)
25
+ if "siglip" in vision_tower_aux_name.lower():
26
+ vision_tower_aux_list.append(
27
+ SiglipVisionTower(vision_tower_aux_name, args=config, **kwargs)
28
+ )
29
+
30
+ # SSL-based Vision Towers
31
+ elif "dinov2" in vision_tower_aux_name.lower():
32
+ vision_tower_aux_list.append(
33
+ DinoVisionTower(vision_tower_aux_name, args=config, **kwargs)
34
+ )
35
+ else:
36
+ raise ValueError(f"Unknown vision tower: {vision_tower_aux_name}")
37
+ return vision_tower_aux_list
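build_vision_tower_aux_list() above reads two parallel config fields, appends an "-interp{token_len}" suffix to each tower name, and dispatches on the name ("siglip" vs "dinov2"). A small sketch of just the dispatch logic with a hypothetical config; the checkpoint names are placeholders, since instantiating the real towers would download weights:

from types import SimpleNamespace

cfg = SimpleNamespace(
    mm_vision_tower_aux_list=["siglip/placeholder-checkpoint", "facebook/dinov2-giant"],
    mm_vision_tower_aux_token_len_list=[576, 576],
)

for name, token_len in zip(cfg.mm_vision_tower_aux_list, cfg.mm_vision_tower_aux_token_len_list):
    name += f"-interp{token_len}"                    # same suffixing as the builder above
    if "siglip" in name.lower():
        kind = "SiglipVisionTower"
    elif "dinov2" in name.lower():
        kind = "DinoVisionTower"
    else:
        raise ValueError(f"Unknown vision tower: {name}")
    print(kind, "<-", name)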
preprocessing/vision_encoders/dino_encoder.py CHANGED
@@ -1,131 +1,131 @@
1
- import torch
2
- import torch.nn.functional as F
3
-
4
- from transformers import AutoImageProcessor, Dinov2Config, Dinov2Model
5
-
6
- from .base_encoder import BaseVisionTower, ProcessorWrapper
7
-
8
-
9
- class DinoVisionTower(BaseVisionTower):
10
- def __init__(self, vision_tower, args, delay_load=False):
11
- super(DinoVisionTower, self).__init__(vision_tower, args, delay_load)
12
-
13
- model_path = "facebook/dinov2-giant"
14
- base_model_name, res, interp = model_path, 378, 576
15
- self._vision_tower_name = vision_tower
16
- self.vision_tower_name = base_model_name
17
- self._image_size = res
18
- self._interp_size = interp
19
- self._patch_size = 14 # default patch size
20
-
21
- if not self.delay_load:
22
- self.load_model()
23
- else:
24
- self.cfg_only = Dinov2Config.from_pretrained(self.vision_tower_name)
25
-
26
- def load_model(self, device_map=None):
27
-
28
- self.vision_tower = Dinov2Model.from_pretrained(self.vision_tower_name)
29
- """ValueError: Dinov2Model does not support `device_map='auto'`. To implement support, the model class needs to implement the `_no_split_modules` attribute."""
30
- self.vision_tower._no_split_modules = ["Dinov2SwiGLUFFN"]
31
-
32
- _image_size = self.vision_tower.config.image_size
33
- if self._image_size is None:
34
- self._image_size = _image_size
35
-
36
- # increase shortest edge to prevent edge case crops
37
- default_shortest_ratio = 8 / 7 # 224/256
38
- # shortest_edge = int(default_shortest_ratio * self._image_size)
39
- shortest_edge = self._image_size
40
-
41
- processor = AutoImageProcessor.from_pretrained(
42
- self.vision_tower_name,
43
- crop_size=dict(height=self._image_size, width=self._image_size),
44
- size=dict(shortest_edge=shortest_edge),
45
- )
46
- self.image_processor = processor
47
-
48
- # Assign the output channels of the projection convolution as the hidden size
49
- self._hidden_size = (
50
- self.vision_tower.embeddings.patch_embeddings.projection.out_channels
51
- )
52
- # Assign the first value of the stride of the projection convolution as the patch size
53
- self._patch_size = (
54
- self.vision_tower.embeddings.patch_embeddings.projection.stride[0]
55
- )
56
-
57
- # print(self._hidden_size, self._patch_size)
58
-
59
- self.vision_tower.requires_grad_(self.unfreeze_mm_vision_tower)
60
- self.is_loaded = True
61
-
62
- @property
63
- def image_size(self):
64
- return self._image_size
65
-
66
- def feature_select(self, outputs):
67
- sequence_output = outputs[
68
- "last_hidden_state"
69
- ] # batch_size, sequence_length, hidden_size
70
-
71
- if self.select_feature == "cls_patch":
72
- image_features = sequence_output
73
- elif self.select_feature == "patch":
74
- image_features = sequence_output[:, 1:]
75
- elif self.select_feature == "cls":
76
- image_features = sequence_output[:, 0]
77
- else:
78
- raise ValueError(f"Unexpected select feature: {self.select_feature}")
79
- return image_features
80
-
81
- def interpolate(self, image_features):
82
- if self._interp_size is None:
83
- return image_features
84
-
85
- b, num_tokens, dim = image_features.shape
86
-
87
- if num_tokens != self.num_patches:
88
- target_h = target_w = int(self._interp_size**0.5)
89
- h = w = int(num_tokens**0.5)
90
-
91
- image_features = image_features.view(b, h, w, dim)
92
- image_features = image_features.permute(0, 3, 1, 2).contiguous()
93
-
94
- image_features = F.interpolate(
95
- image_features.to(torch.float32),
96
- size=(target_h, target_w),
97
- mode="bilinear",
98
- align_corners=False,
99
- ).to(image_features.dtype)
100
-
101
- # Permute the dimensions back to (b, target_h, target_w, dim)
102
- image_features = image_features.permute(0, 2, 3, 1).contiguous()
103
-
104
- # Flatten the spatial dimensions (target_h, target_w) into a single dimension
105
- image_features = image_features.flatten(1, 2)
106
-
107
- return image_features
108
-
109
- def _forward(self, images):
110
- # logger.warning(f"images shape: {images.shape}")
111
- with torch.set_grad_enabled(self.unfreeze_mm_vision_tower):
112
- image_forward_outs = self.vision_tower.forward(
113
- images.to(device=self.device, dtype=self.dtype)
114
- )
115
- # logger.warning(f"image_forward_outs shape: {image_forward_outs['last_hidden_state'].shape}")
116
- image_features = self.feature_select(image_forward_outs).to(images.dtype)
117
- # logger.warning(f"image_features shape: {image_features.shape}")
118
- interp_features = self.interpolate(image_features)
119
- # logger.warning(f"interp_features shape: {interp_features.shape}")
120
- return interp_features
121
-
122
- @property
123
- def num_patches_per_side(self):
124
- return int(self.num_patches**0.5)
125
-
126
- @property
127
- def num_patches(self):
128
- if self._interp_size is None:
129
- return (self._image_size // self._patch_size) ** 2
130
- else:
131
- return self._interp_size
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ from transformers import AutoImageProcessor, Dinov2Config, Dinov2Model
5
+
6
+ from .base_encoder import BaseVisionTower, ProcessorWrapper
7
+
8
+
9
+ class DinoVisionTower(BaseVisionTower):
10
+ def __init__(self, vision_tower, args, delay_load=False):
11
+ super(DinoVisionTower, self).__init__(vision_tower, args, delay_load)
12
+
13
+ model_path = "facebook/dinov2-giant"
14
+ base_model_name, res, interp = model_path, 378, 576
15
+ self._vision_tower_name = vision_tower
16
+ self.vision_tower_name = base_model_name
17
+ self._image_size = res
18
+ self._interp_size = interp
19
+ self._patch_size = 14 # default patch size
20
+
21
+ if not self.delay_load:
22
+ self.load_model()
23
+ else:
24
+ self.cfg_only = Dinov2Config.from_pretrained(self.vision_tower_name)
25
+
26
+ def load_model(self, device_map=None):
27
+
28
+ self.vision_tower = Dinov2Model.from_pretrained(self.vision_tower_name)
29
+ """ValueError: Dinov2Model does not support `device_map='auto'`. To implement support, the model class needs to implement the `_no_split_modules` attribute."""
30
+ self.vision_tower._no_split_modules = ["Dinov2SwiGLUFFN"]
31
+
32
+ _image_size = self.vision_tower.config.image_size
33
+ if self._image_size is None:
34
+ self._image_size = _image_size
35
+
36
+ # increase shortest edge to prevent edge case crops
37
+ default_shortest_ratio = 8 / 7 # 224/256
38
+ # shortest_edge = int(default_shortest_ratio * self._image_size)
39
+ shortest_edge = self._image_size
40
+
41
+ processor = AutoImageProcessor.from_pretrained(
42
+ self.vision_tower_name,
43
+ crop_size=dict(height=self._image_size, width=self._image_size),
44
+ size=dict(shortest_edge=shortest_edge),
45
+ )
46
+ self.image_processor = processor
47
+
48
+ # Assign the output channels of the projection convolution as the hidden size
49
+ self._hidden_size = (
50
+ self.vision_tower.embeddings.patch_embeddings.projection.out_channels
51
+ )
52
+ # Assign the first value of the stride of the projection convolution as the patch size
53
+ self._patch_size = (
54
+ self.vision_tower.embeddings.patch_embeddings.projection.stride[0]
55
+ )
56
+
57
+ # print(self._hidden_size, self._patch_size)
58
+
59
+ self.vision_tower.requires_grad_(self.unfreeze_mm_vision_tower)
60
+ self.is_loaded = True
61
+
62
+ @property
63
+ def image_size(self):
64
+ return self._image_size
65
+
66
+ def feature_select(self, outputs):
67
+ sequence_output = outputs[
68
+ "last_hidden_state"
69
+ ] # batch_size, sequence_length, hidden_size
70
+
71
+ if self.select_feature == "cls_patch":
72
+ image_features = sequence_output
73
+ elif self.select_feature == "patch":
74
+ image_features = sequence_output[:, 1:]
75
+ elif self.select_feature == "cls":
76
+ image_features = sequence_output[:, 0]
77
+ else:
78
+ raise ValueError(f"Unexpected select feature: {self.select_feature}")
79
+ return image_features
80
+
81
+ def interpolate(self, image_features):
82
+ if self._interp_size is None:
83
+ return image_features
84
+
85
+ b, num_tokens, dim = image_features.shape
86
+
87
+ if num_tokens != self.num_patches:
88
+ target_h = target_w = int(self._interp_size**0.5)
89
+ h = w = int(num_tokens**0.5)
90
+
91
+ image_features = image_features.view(b, h, w, dim)
92
+ image_features = image_features.permute(0, 3, 1, 2).contiguous()
93
+
94
+ image_features = F.interpolate(
95
+ image_features.to(torch.float32),
96
+ size=(target_h, target_w),
97
+ mode="bilinear",
98
+ align_corners=False,
99
+ ).to(image_features.dtype)
100
+
101
+ # Permute the dimensions back to (b, target_h, target_w, dim)
102
+ image_features = image_features.permute(0, 2, 3, 1).contiguous()
103
+
104
+ # Flatten the spatial dimensions (target_h, target_w) into a single dimension
105
+ image_features = image_features.flatten(1, 2)
106
+
107
+ return image_features
108
+
109
+ def _forward(self, images):
110
+ # logger.warning(f"images shape: {images.shape}")
111
+ with torch.set_grad_enabled(self.unfreeze_mm_vision_tower):
112
+ image_forward_outs = self.vision_tower.forward(
113
+ images.to(device=self.device, dtype=self.dtype)
114
+ )
115
+ # logger.warning(f"image_forward_outs shape: {image_forward_outs['last_hidden_state'].shape}")
116
+ image_features = self.feature_select(image_forward_outs).to(images.dtype)
117
+ # logger.warning(f"image_features shape: {image_features.shape}")
118
+ interp_features = self.interpolate(image_features)
119
+ # logger.warning(f"interp_features shape: {interp_features.shape}")
120
+ return interp_features
121
+
122
+ @property
123
+ def num_patches_per_side(self):
124
+ return int(self.num_patches**0.5)
125
+
126
+ @property
127
+ def num_patches(self):
128
+ if self._interp_size is None:
129
+ return (self._image_size // self._patch_size) ** 2
130
+ else:
131
+ return self._interp_size
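interpolate() above resizes the square grid of patch tokens to a fixed budget (576 tokens for this tower) with bilinear interpolation, so a 378-px DINOv2-giant input (27x27 patches of size 14) is reduced to a 24x24 grid. The same resizing step as a standalone function:

import torch
import torch.nn.functional as F

def resize_tokens(tokens, target_tokens=576):
    # tokens: (B, N, C) patch features laid out on a square grid; returns (B, target_tokens, C)
    b, n, c = tokens.shape
    side, target_side = int(n ** 0.5), int(target_tokens ** 0.5)
    grid = tokens.view(b, side, side, c).permute(0, 3, 1, 2)        # (B, C, side, side)
    grid = F.interpolate(grid.float(), size=(target_side, target_side),
                         mode="bilinear", align_corners=False).to(tokens.dtype)
    return grid.permute(0, 2, 3, 1).flatten(1, 2)                   # back to (B, T, C)

x = torch.randn(1, 27 * 27, 1536)         # dinov2-giant: 27x27 tokens, hidden dim 1536
print(resize_tokens(x).shape)             # torch.Size([1, 576, 1536])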
preprocessing/vision_encoders/siglip_encoder.py CHANGED
@@ -1,78 +1,78 @@
1
- import torch
2
- import torch.nn.functional as F
3
-
4
- from transformers import SiglipImageProcessor, SiglipVisionConfig, SiglipVisionModel
5
-
6
- from .base_encoder import BaseVisionTower, ProcessorWrapper
7
-
8
-
9
- class SiglipVisionTower(BaseVisionTower):
10
- def __init__(self, vision_tower_name, args, delay_load=False):
11
- super(SiglipVisionTower, self).__init__(vision_tower_name, args, delay_load)
12
-
13
- model_path = "google/siglip-so400m-patch14-384"
14
- base_model_name, res, interp = model_path, 384, 576
15
- self.vision_tower_name = base_model_name
16
- self._image_size = res if res is not None else 512
17
- self._interp_size = interp
18
- if not self.delay_load:
19
- self.load_model()
20
- elif self.unfreeze_mm_vision_tower:
21
- self.load_model()
22
- else:
23
- self._hidden_size = 1152
24
-
25
- def load_model(self, device_map=None):
26
- self.vision_model = "siglip"
27
- # clip_model, processor = create_model_from_pretrained(self.vision_tower_name)
28
- self.vision_tower = SiglipVisionModel.from_pretrained(self.vision_tower_name)
29
-
30
- # self.vision_tower = clip_model.visual.trunk
31
- self.vision_tower.output_tokens = True
32
-
33
- self._hidden_size = self.vision_tower.config.hidden_size
34
- self._image_size = self.vision_tower.config.image_size
35
- self._patch_size = self.vision_tower.config.patch_size
36
- self.image_processor = SiglipImageProcessor.from_pretrained(
37
- self.vision_tower_name
38
- )
39
-
40
- self.vision_tower.requires_grad_(self.unfreeze_mm_vision_tower)
41
- self.is_loaded = True
42
-
43
- def interpolate(self, image_features):
44
- if self._interp_size is None:
45
- return image_features
46
-
47
- b, num_tokens, dim = image_features.shape
48
-
49
- if num_tokens != self.num_patches:
50
- target_h = target_w = int(self._interp_size**0.5)
51
- h = w = int(num_tokens**0.5)
52
-
53
- image_features = image_features.view(b, h, w, dim)
54
- image_features = image_features.permute(0, 3, 1, 2).contiguous()
55
-
56
- image_features = F.interpolate(
57
- image_features.to(torch.float32),
58
- size=(target_h, target_w),
59
- mode="bilinear",
60
- align_corners=False,
61
- ).to(image_features.dtype)
62
-
63
- # Permute the dimensions back to (b, target_h, target_w, dim)
64
- image_features = image_features.permute(0, 2, 3, 1).contiguous()
65
-
66
- # Flatten the spatial dimensions (target_h, target_w) into a single dimension
67
- image_features = image_features.flatten(1, 2)
68
-
69
- return image_features
70
-
71
- def _forward(self, images, interpolate_token=576):
72
- with torch.set_grad_enabled(self.unfreeze_mm_vision_tower):
73
- image_features = self.vision_tower.forward(
74
- images.to(device=self.device, dtype=self.dtype),
75
- output_hidden_states=True,
76
- ).hidden_states[-1]
77
- interp_features = self.interpolate(image_features)
78
- return interp_features
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ from transformers import SiglipImageProcessor, SiglipVisionConfig, SiglipVisionModel
5
+
6
+ from .base_encoder import BaseVisionTower, ProcessorWrapper
7
+
8
+
9
+ class SiglipVisionTower(BaseVisionTower):
10
+ def __init__(self, vision_tower_name, args, delay_load=False):
11
+ super(SiglipVisionTower, self).__init__(vision_tower_name, args, delay_load)
12
+
13
+ model_path = "google/siglip-so400m-patch14-384"
14
+ base_model_name, res, interp = model_path, 384, 576
15
+ self.vision_tower_name = base_model_name
16
+ self._image_size = res if res is not None else 512
17
+ self._interp_size = interp
18
+ if not self.delay_load:
19
+ self.load_model()
20
+ elif self.unfreeze_mm_vision_tower:
21
+ self.load_model()
22
+ else:
23
+ self._hidden_size = 1152
24
+
25
+ def load_model(self, device_map=None):
26
+ self.vision_model = "siglip"
27
+ # clip_model, processor = create_model_from_pretrained(self.vision_tower_name)
28
+ self.vision_tower = SiglipVisionModel.from_pretrained(self.vision_tower_name)
29
+
30
+ # self.vision_tower = clip_model.visual.trunk
31
+ self.vision_tower.output_tokens = True
32
+
33
+ self._hidden_size = self.vision_tower.config.hidden_size
34
+ self._image_size = self.vision_tower.config.image_size
35
+ self._patch_size = self.vision_tower.config.patch_size
36
+ self.image_processor = SiglipImageProcessor.from_pretrained(
37
+ self.vision_tower_name
38
+ )
39
+
40
+ self.vision_tower.requires_grad_(self.unfreeze_mm_vision_tower)
41
+ self.is_loaded = True
42
+
43
+ def interpolate(self, image_features):
44
+ if self._interp_size is None:
45
+ return image_features
46
+
47
+ b, num_tokens, dim = image_features.shape
48
+
49
+ if num_tokens != self.num_patches:
50
+ target_h = target_w = int(self._interp_size**0.5)
51
+ h = w = int(num_tokens**0.5)
52
+
53
+ image_features = image_features.view(b, h, w, dim)
54
+ image_features = image_features.permute(0, 3, 1, 2).contiguous()
55
+
56
+ image_features = F.interpolate(
57
+ image_features.to(torch.float32),
58
+ size=(target_h, target_w),
59
+ mode="bilinear",
60
+ align_corners=False,
61
+ ).to(image_features.dtype)
62
+
63
+ # Permute the dimensions back to (b, target_h, target_w, dim)
64
+ image_features = image_features.permute(0, 2, 3, 1).contiguous()
65
+
66
+ # Flatten the spatial dimensions (target_h, target_w) into a single dimension
67
+ image_features = image_features.flatten(1, 2)
68
+
69
+ return image_features
70
+
71
+ def _forward(self, images, interpolate_token=576):
72
+ with torch.set_grad_enabled(self.unfreeze_mm_vision_tower):
73
+ image_features = self.vision_tower.forward(
74
+ images.to(device=self.device, dtype=self.dtype),
75
+ output_hidden_states=True,
76
+ ).hidden_states[-1]
77
+ interp_features = self.interpolate(image_features)
78
+ return interp_features
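Both towers freeze their backbone by default: requires_grad_(unfreeze_mm_vision_tower) on the parameters, plus torch.set_grad_enabled(...) around the forward pass, so no autograd graph is built unless the tower is explicitly unfrozen. A minimal illustration of that pattern with a stand-in module:

import torch
import torch.nn as nn

unfreeze = False                      # mirrors unfreeze_mm_vision_tower
encoder = nn.Linear(16, 8)            # stand-in for the vision backbone
encoder.requires_grad_(unfreeze)

with torch.set_grad_enabled(unfreeze):
    y = encoder(torch.randn(4, 16))
print(y.requires_grad)                # False while the tower stays frozen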
requirements.txt CHANGED
@@ -1,10 +1,10 @@
1
- numpy==1.26.4
2
- datasets
3
- decord
4
- torch==2.1.2
5
- torchvision
6
- transformers==4.42.4
7
- safetensors
8
- pillow
9
- huggingface_hub
10
  scikit-learn
 
1
+ numpy==1.26.4
2
+ datasets
3
+ decord
4
+ torch==2.1.2
5
+ torchvision
6
+ transformers==4.42.4
7
+ safetensors
8
+ pillow
9
+ huggingface_hub
10
  scikit-learn
test.py CHANGED
@@ -1,25 +1,25 @@
1
- from datasets import load_dataset
2
- import decord
3
-
4
- # Load the dataset in streaming mode
5
- dataset = load_dataset("tcm03/EnTube", split="train", streaming=True)
6
-
7
- # Initialize counter and specify the max videos to process
8
- max_videos = 5
9
- video_count = 0
10
-
11
- # Stream and process videos
12
- for item in dataset:
13
- video_reader = item['video'] # Decord VideoReader object
14
- label = item['label'] # Label for the video
15
-
16
- # Extract frames from the video
17
- frames = []
18
- for frame in video_reader:
19
- frames.append(frame.asnumpy()) # Convert Decord frames to NumPy arrays
20
-
21
- print(f"Processed video {video_count} with label {label}, extracted {len(frames)} frames")
22
-
23
- video_count += 1
24
- if video_count >= max_videos:
25
- break
 
1
+ from datasets import load_dataset
2
+ import decord
3
+
4
+ # Load the dataset in streaming mode
5
+ dataset = load_dataset("tcm03/EnTube", split="train", streaming=True)
6
+
7
+ # Initialize counter and specify the max videos to process
8
+ max_videos = 5
9
+ video_count = 0
10
+
11
+ # Stream and process videos
12
+ for item in dataset:
13
+ video_reader = item['video'] # Decord VideoReader object
14
+ label = item['label'] # Label for the video
15
+
16
+ # Extract frames from the video
17
+ frames = []
18
+ for frame in video_reader:
19
+ frames.append(frame.asnumpy()) # Convert Decord frames to NumPy arrays
20
+
21
+ print(f"Processed video {video_count} with label {label}, extracted {len(frames)} frames")
22
+
23
+ video_count += 1
24
+ if video_count >= max_videos:
25
+ break
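The loop above decodes every frame of each streamed video before counting them. If the video field really is a decord.VideoReader, as the comments assume, a lighter variant is to decode only a fixed number of evenly spaced frames with get_batch (a sketch; it needs a real VideoReader to run):

import numpy as np

def sample_frames(video_reader, num_frames=32):
    total = len(video_reader)
    indices = np.linspace(0, total - 1, num=min(num_frames, total)).astype(int)
    batch = video_reader.get_batch(list(indices))    # decode only the sampled frames
    return batch.asnumpy()                           # (num_frames, H, W, 3) uint8 array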