"""NYU Depth Dataset V2"""

import hashlib

import h5py
import numpy as np

import datasets
from datasets import Array2D, BuilderConfig, Features, Image, Sequence, SplitGenerator, Value

_CITATION = """\
@inproceedings{Silberman:ECCV12,
  author    = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
  title     = {Indoor Segmentation and Support Inference from RGBD Images},
  booktitle = {Proceedings of the European Conference on Computer Vision},
  year      = {2012}
}
"""

_DESCRIPTION = """\
The NYU-Depth V2 dataset comprises video sequences from a variety of indoor scenes, recorded by both the RGB and depth cameras of the Microsoft Kinect. It features:

* 1449 densely labeled pairs of aligned RGB and depth images
* 464 new scenes taken from 3 cities
* 407,024 new unlabeled frames
* Each object is labeled with a class and an instance number (cup1, cup2, cup3, etc.)

This dataset is useful for various computer vision tasks, including depth estimation, semantic segmentation, and instance segmentation.

This Hugging Face version of the dataset is unofficial. It downloads the data from the original source and converts it to the Hugging Face format.
"""

_HOMEPAGE = "https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html"

_LICENSE = "Unknown"

_URL = "http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat"
_FILE_HASH = "520609c519fba3ba5ac58c8fefcc3530"
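
# Sketch of what one loaded record looks like (the field layout follows the Features
# declared in NYUv2._info below; the scene name is a made-up example and the exact
# Python container types depend on the consumer's output format):
#
#   {
#       "image": <PIL.Image.Image, 640x480 RGB>,
#       "depth": [[...], ...],       # 480x640 float32 depth map
#       "label": [[...], ...],       # 480x640 int32 semantic label map
#       "instance": [[...], ...],    # 480x640 int32 instance id map
#       "scene": "kitchen_0004",     # hypothetical scene name
#       "scene_type": "kitchen",
#       "accelData": [..., ..., ..., ...],  # 4 float32 accelerometer readings
#   }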

class NYUv2(datasets.GeneratorBasedBuilder):
    """NYU Depth Dataset V2"""

    VERSION = datasets.Version("1.2.1")

    BUILDER_CONFIGS = [
        BuilderConfig(name="default", version=VERSION, description="Default configuration for NYUv2 dataset"),
    ]

    DEFAULT_CONFIG_NAME = "default"
    def _info(self):
        features = Features({
            "image": Image(decode=True),
            "depth": Array2D(dtype="float32", shape=(480, 640)),
            "label": Array2D(dtype="int32", shape=(480, 640)),
            # Instance maps are yielded by _generate_examples, so they must be
            # declared here as well.
            "instance": Array2D(dtype="int32", shape=(480, 640)),
            "scene": Value("string"),
            "scene_type": Value("string"),
            "accelData": Sequence(Value("float32"), length=4),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_path = dl_manager.download(_URL)

        # Verify the download against the known MD5 checksum, reading in chunks
        # to avoid loading the multi-gigabyte .mat file into memory at once.
        md5 = hashlib.md5()
        with open(data_path, "rb") as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b""):
                md5.update(chunk)
        file_hash = md5.hexdigest()
        if file_hash != _FILE_HASH:
            raise ValueError(
                f"Downloaded file hash '{file_hash}' does not match expected hash '{_FILE_HASH}'. "
                "The downloaded dataset file might be corrupted or modified."
            )

        return [
            SplitGenerator(
                name="train",
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with h5py.File(filepath, "r") as f:
            # MATLAB stores arrays column-major, so h5py reads every array with its
            # dimensions reversed: e.g. the 480x640x3x1449 image stack appears as
            # (1449, 3, 640, 480). Transposing the spatial axes restores the
            # original 480x640 (height, width) orientation.
            images = np.array(f["images"])
            depths = np.array(f["depths"])
            instances = np.array(f["instances"])
            labels = np.array(f["labels"])
            # Scene names and types are stored as MATLAB cell arrays of uint16
            # character codes, addressed via object references; decode them to strings.
            scenes = ["".join(chr(int(x)) for x in f[y]) for y in f["scenes"][0]]
            scene_types = ["".join(chr(int(x)) for x in f[y]) for y in f["sceneTypes"][0]]
            accelData = np.array(f["accelData"]).T

            for idx in range(images.shape[0]):
                yield idx, {
                    "image": images[idx].transpose(2, 1, 0),
                    "depth": depths[idx].T,
                    "instance": instances[idx].T,
                    "label": labels[idx].T,
                    "scene": scenes[idx],
                    "scene_type": scene_types[idx],
                    "accelData": accelData[idx],
                }
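
if __name__ == "__main__":
    # Minimal local smoke test -- a sketch, not part of the loader itself. It
    # assumes this file is saved as a standalone script and that the installed
    # `datasets` version still supports loading dataset scripts from a local
    # path; running it triggers the full multi-gigabyte download and conversion.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    print(ds)
    example = ds[0]
    print(example["scene"], example["scene_type"])
    print(np.asarray(example["depth"]).shape)  # expected: (480, 640)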