Gormery Kombo Wanjiru
committed
Commit · 57a7965
1 Parent(s): 17edc76
attempt at making loadscript
load_script.py +145 -0
load_script.py
ADDED
@@ -0,0 +1,145 @@
import os
import json
import cv2
from datasets import Dataset, DatasetDict, Features, Value, Array4D, Sequence
import numpy as np

# Define constants
VIDEO_EXTENSIONS = ['.avi']
JSON_EXTENSIONS = ['.json']
KEYPOINTS = [
    "nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder", "right_shoulder",
    "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip", "right_hip",
    "left_knee", "right_knee", "left_ankle", "right_ankle"
]

def load_video(video_path):
    """Reads a video file and returns a list of frames as NumPy arrays."""
    cap = cv2.VideoCapture(video_path)
    frames = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)

    cap.release()
    return np.array(frames)

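# Note: OpenCV decodes frames as uint8 BGR images, so the array returned by
# load_video has shape (num_frames, height, width, 3); the 'video' feature
# declared in main() below is meant to describe exactly this layout.
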
def load_json(json_path):
    """Loads the JSON keypoint data for each frame."""
    with open(json_path, 'r') as f:
        data = json.load(f)
    return data

def process_frame_data(frame_data):
    """Converts the frame's keypoints into a structured format."""
    detections = []

    # Check if 'detections' exists in the frame_data
    if 'detections' in frame_data:
        for person_id, detection in enumerate(frame_data['detections']):
            if detection:  # Check if there's any valid detection data
                person = {
                    "person_id": person_id,  # per-frame index; fills the 'person_id' feature declared in main()
                    "confidence": detection.get("confidence", 0),
                    "box": detection.get("box", {}),
                    "keypoints": {
                        keypoint['label']: keypoint['coordinates']
                        for keypoint in detection.get('keypoints', [])
                    }
                }
                detections.append(person)
            else:
                print(f"Warning: Empty detection in frame {frame_data.get('frame_index')}")
    else:
        # Handle the case where 'detections' is missing in the frame data
        print(f"Warning: 'detections' key missing in frame data: {frame_data}")

    return detections

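# For reference, process_frame_data assumes each entry of the loaded JSON list
# looks roughly like the sketch below (field names inferred from the code
# above; the exact schema depends on the pose-estimation tool that wrote it):
#
#     {
#         "frame_index": 0,
#         "detections": [
#             {
#                 "confidence": 0.97,
#                 "box": {"x1": 12.0, "y1": 30.5, "x2": 180.0, "y2": 320.0},
#                 "keypoints": [
#                     {"label": "nose", "coordinates": [96.2, 54.8]},
#                     {"label": "left_eye", "coordinates": [90.1, 50.3]}
#                 ]
#             }
#         ]
#     }
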
def get_file_paths(base_path, split="train"):
    """Returns video and JSON file paths."""
    video_paths = []
    json_paths = []
    split_path = os.path.join(base_path, split)

    for label in ['Fight', 'NonFight']:
        label_path = os.path.join(split_path, label)
        for video_folder in os.listdir(label_path):
            video_folder_path = os.path.join(label_path, video_folder)
            if not os.path.isdir(video_folder_path):  # skip stray files next to the video folders
                continue
            video_file = next((f for f in os.listdir(video_folder_path) if any(f.endswith(ext) for ext in VIDEO_EXTENSIONS)), None)
            json_file = next((f for f in os.listdir(video_folder_path) if any(f.endswith(ext) for ext in JSON_EXTENSIONS)), None)

            if video_file and json_file:
                video_paths.append(os.path.join(video_folder_path, video_file))
                json_paths.append(os.path.join(video_folder_path, json_file))

    return video_paths, json_paths

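# For reference, get_file_paths assumes the directory layout sketched below
# (inferred from the loops above; the <video_folder> names are arbitrary):
#
#     <base_path>/
#         train/
#             Fight/
#                 <video_folder>/
#                     clip.avi        (any .avi file)
#                     keypoints.json  (any .json file)
#             NonFight/
#                 ...
#         val/
#             ...
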
def load_data(base_path, split="train"):
    """Loads and processes the data for a given split (train or val)."""
    video_paths, json_paths = get_file_paths(base_path, split)
    dataset = []

    for video_path, json_path in zip(video_paths, json_paths):
        # Load video frames
        frames = load_video(video_path)

        # Load JSON keypoints
        keypoints_data = load_json(json_path)

        # Process the data
        frame_data = [process_frame_data(frame) for frame in keypoints_data]

        # Construct the data record
        dataset.append({
            'video': frames,
            'keypoints': frame_data,
            'video_path': video_path,
            'json_path': json_path
        })

    return dataset

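# Note: load_data decodes every video into memory before returning, so RAM
# usage grows with the number and length of clips. For large datasets, a
# streaming approach such as datasets.Dataset.from_generator (yielding one
# record dict at a time) may be a better fit than building this list; the
# record shape would stay the same as the dicts appended above.
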
def main():
    # Path to the dataset directory
    dataset_dir = '.'  # Replace with your actual dataset path

    # Load training and validation data
    train_data = load_data(dataset_dir, split="train")
    val_data = load_data(dataset_dir, split="val")

    # Convert to Hugging Face Dataset. The stacked frames are 4-D
    # (num_frames, height, width, 3), and `datasets` only allows the first
    # dimension of an ArrayXD feature to be dynamic, so the 224x224 below is
    # a placeholder; replace it with your actual frame size.
    train_features = Features({
        'video': Array4D(dtype='uint8', shape=(None, 224, 224, 3)),
        # One inner list per frame, one entry per detected person
        'keypoints': Sequence(Sequence(Features({
            'person_id': Value('int32'),
            'confidence': Value('float32'),
            'box': {
                'x1': Value('float32'),
                'y1': Value('float32'),
                'x2': Value('float32'),
                'y2': Value('float32')
            },
            # each keypoint is an (x, y) coordinate pair
            'keypoints': {key: Sequence(Value('float32'), length=2) for key in KEYPOINTS}
        }))),
        'video_path': Value('string'),
        'json_path': Value('string')
    })

    # Create DatasetDict; from_list expects the list of records built by load_data
    dataset_dict = DatasetDict({
        'train': Dataset.from_list(train_data, features=train_features),
        'val': Dataset.from_list(val_data, features=train_features)
    })

    # Save or push dataset to Hugging Face
    dataset_dict.save_to_disk("keypoints_keyger")
    # Or to upload: dataset_dict.push_to_hub("your_dataset_name")

if __name__ == "__main__":
    main()
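
# To sanity-check the saved dataset later (a minimal sketch, assuming the
# save_to_disk call above succeeded and the "keypoints_keyger" folder is in
# the working directory):
#
#     from datasets import load_from_disk
#     ds = load_from_disk("keypoints_keyger")
#     print(ds["train"][0]["video_path"])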