import os
from xml.etree import ElementTree as ET
import datasets
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {dogs-video-object-tracking-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""
_DESCRIPTION = """\
The dataset contains frames extracted from videos with dogs on the streets.
Each frame is accompanied by a **bounding box** that specifically **tracks the dog**
in the image.
The dataset provides a valuable resource for advancing computer vision tasks,
enabling the development of more accurate and effective solutions for monitoring and
understanding dog behavior in urban settings.
"""
_NAME = "dogs-video-object-tracking-dataset"
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
_LABELS = ["dog"]
class DogsVideoObjectTrackingDataset(datasets.GeneratorBasedBuilder):
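    """Loading script for the TrainingDataPro dogs video object tracking dataset.

    Each builder config corresponds to one video archive containing extracted
    frames, masks and a CVAT-style ``annotations.xml`` file.
    """
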
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="video_01", data_dir=f"{_DATA}video_01.zip"),
datasets.BuilderConfig(name="video_02", data_dir=f"{_DATA}video_02.zip"),
datasets.BuilderConfig(name="video_03", data_dir=f"{_DATA}video_03.zip"),
]
DEFAULT_CONFIG_NAME = "video_01"
def _info(self):
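        """Declare the feature schema: image, mask and the tracked shapes per frame."""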
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("int32"),
"name": datasets.Value("string"),
"image": datasets.Image(),
"mask": datasets.Image(),
"shapes": datasets.Sequence(
{
"track_id": datasets.Value("uint32"),
"label": datasets.ClassLabel(
num_classes=len(_LABELS),
names=_LABELS,
),
"type": datasets.Value("string"),
"points": datasets.Sequence(
datasets.Sequence(
datasets.Value("float"),
),
),
"rotation": datasets.Value("float"),
"occluded": datasets.Value("uint8"),
"attributes": datasets.Sequence(
{
"name": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
}
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
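        """Download and extract the selected video archive; everything is exposed as a single train split."""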
data = dl_manager.download_and_extract(self.config.data_dir)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data": data,
},
),
]
@staticmethod
def extract_shapes_from_tracks(
root: ET.Element, file: str, index: int
) -> ET.Element:
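        """Build a synthetic ``<image>`` element for one frame.

        For every ``<track>`` in the annotation tree, the shape whose ``frame``
        attribute matches ``index`` is copied under a new ``<image>`` element,
        carrying over the track's ``label`` and ``id``.
        """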
img = ET.Element("image")
img.set("name", file)
img.set("id", str(index))
for track in root.iter("track"):
shape = track.find(f".//*[@frame='{index}']")
            if shape is not None:
shape.set("label", track.get("label"))
shape.set("track_id", track.get("id"))
img.append(shape)
return img
@staticmethod
def parse_shape(shape: ET.Element) -> dict:
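        """Convert a single CVAT shape element into a plain dict.

        Box shapes are encoded by their top-left and bottom-right corners
        (``xtl``/``ytl``/``xbr``/``ybr``); point and polygon shapes store
        their vertices in a ``points`` attribute as ``"x1,y1;x2,y2;..."``.
        """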
label = shape.get("label")
track_id = shape.get("track_id")
shape_type = shape.tag
rotation = shape.get("rotation", 0.0)
occluded = shape.get("occluded", 0)
points = None
if shape_type == "points":
points = tuple(map(float, shape.get("points").split(",")))
elif shape_type == "box":
points = [
(float(shape.get("xtl")), float(shape.get("ytl"))),
(float(shape.get("xbr")), float(shape.get("ybr"))),
]
elif shape_type == "polygon":
points = [
tuple(map(float, point.split(",")))
for point in shape.get("points").split(";")
]
attributes = []
for attr in shape:
attr_name = attr.get("name")
attr_text = attr.text
attributes.append({"name": attr_name, "text": attr_text})
shape_data = {
"label": label,
"track_id": track_id,
"type": shape_type,
"points": points,
"rotation": rotation,
"occluded": occluded,
"attributes": attributes,
}
return shape_data
def _generate_examples(self, data):
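        """Yield one example per frame, pairing each image with its mask and tracked shapes."""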
tree = ET.parse(os.path.join(data, "annotations.xml"))
root = tree.getroot()
for idx, file in enumerate(sorted(os.listdir(os.path.join(data, "images")))):
img = self.extract_shapes_from_tracks(root, file, idx)
image_id = img.get("id")
name = img.get("name")
shapes = [self.parse_shape(shape) for shape in img]
yield idx, {
"id": image_id,
"name": name,
"image": os.path.join(data, "images", file),
"mask": os.path.join(data, "masks", file),
"shapes": shapes,
}
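

# --- Usage sketch (not part of the original loading script) ---
# A minimal example of how this builder is typically consumed through the
# `datasets` library; the repository id and config name below are assumptions
# based on the `_NAME` and `BUILDER_CONFIGS` constants defined above.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        f"TrainingDataPro/{_NAME}",
        name="video_01",
        split="train",
    )
    example = ds[0]
    # "shapes" is a Sequence of dicts, so it decodes to a dict of lists.
    print(example["name"], example["shapes"]["label"])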