"""NYU Depth Dataset V2"""

import numpy as np
import h5py
import datasets
from datasets import BuilderConfig, Features, Value, SplitGenerator, Array2D, Image, Sequence
import hashlib


_CITATION = """\
@inproceedings{Silberman:ECCV12,
  author    = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
  title     = {Indoor Segmentation and Support Inference from RGBD Images},
  booktitle = {Proceedings of the European Conference on Computer Vision},
  year      = {2012}
}
"""

_DESCRIPTION = """\
The NYU-Depth V2 dataset comprises video sequences from a variety of indoor scenes, recorded by both the RGB and depth cameras of the Microsoft Kinect. It features:

* 1449 densely labeled pairs of aligned RGB and depth images
* 464 new scenes taken from 3 cities
* 407,024 new unlabeled frames
* Each object is labeled with a class and an instance number (cup1, cup2, cup3, etc.)

This dataset is useful for various computer vision tasks, including depth estimation, semantic segmentation, and instance segmentation.

This Hugging Face version of the dataset is unofficial. It downloads the data from the original source and converts it to the Hugging Face format.
"""

_HOMEPAGE = "https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html"

_LICENSE = "Unknown"

_URL = "http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat"
_FILE_HASH = "520609c519fba3ba5ac58c8fefcc3530"

class NYUv2(datasets.GeneratorBasedBuilder):
    """NYU Depth Dataset V2"""

    VERSION = datasets.Version("1.2.1")

    BUILDER_CONFIGS = [
        BuilderConfig(name="default", version=VERSION, description="Default configuration for NYUv2 dataset"),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        features = Features({
            "image": Image(decode=True),
            "depth": Array2D(dtype="float32", shape=(480, 640)),
            "label": Array2D(dtype="int32", shape=(480, 640)),
            # Declared here because the generator yields an "instance" map;
            # without it the examples would not match the schema.
            "instance": Array2D(dtype="int32", shape=(480, 640)),
            "scene": Value("string"),
            "scene_type": Value("string"),
            "accelData": Sequence(Value("float32"), length=4),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_path = dl_manager.download(_URL)

        # Verify the file hash, streaming in chunks so the large .mat file
        # (~2.8 GB) is never read into memory all at once.
        md5 = hashlib.md5()
        with open(data_path, "rb") as f:
            for chunk in iter(lambda: f.read(8 * 1024 * 1024), b""):
                md5.update(chunk)
        file_hash = md5.hexdigest()
        if file_hash != _FILE_HASH:
            raise ValueError(
                f"Downloaded file hash '{file_hash}' does not match expected hash '{_FILE_HASH}'. "
                "The downloaded dataset file might be corrupted or modified."
            )

        return [
            SplitGenerator(
                name="train",
                gen_kwargs={
                    "filepath": data_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with h5py.File(filepath, "r") as f:
            # Other keys available in the .mat file: names, namesToIds,
            # rawDepthFilenames, rawDepths, rawRgbFilenames.
            # Everything below is materialized in memory so the file can be
            # closed before iteration begins.
            images = np.array(f["images"])
            depths = np.array(f["depths"])
            instances = np.array(f["instances"])
            labels = np.array(f["labels"])
            # `scenes` and `sceneTypes` hold HDF5 object references to uint16
            # character arrays (MATLAB strings); dereference and decode them.
            scenes = ["".join(chr(int(c)) for c in f[ref]) for ref in f["scenes"][0]]
            scene_types = ["".join(chr(int(c)) for c in f[ref]) for ref in f["sceneTypes"][0]]
            accelData = np.array(f["accelData"]).T  # stored as (4, N); transpose to (N, 4)

        for idx in range(images.shape[0]):
            # MATLAB stores arrays column-major, so each frame comes out
            # transposed; rotating by -90 degrees restores the 480x640 layout.
            # The instance map is rotated too, so it stays aligned with the
            # label map.
            yield idx, {
                "image": np.rot90(images[idx].transpose(1, 2, 0), -1),
                "depth": np.rot90(depths[idx], -1),
                "instance": np.rot90(instances[idx], -1),
                "label": np.rot90(labels[idx], -1),
                "scene": scenes[idx],
                "scene_type": scene_types[idx],
                "accelData": accelData[idx],
            }