v1.1: add accelData
Browse files
NYUv2.py
CHANGED
@@ -3,7 +3,7 @@
|
|
3 |
import numpy as np
|
4 |
import h5py
|
5 |
import datasets
|
6 |
-
from datasets import BuilderConfig, Features, Value, SplitGenerator, Array2D, Image
|
7 |
import hashlib
|
8 |
|
9 |
|
@@ -39,7 +39,7 @@ _FILE_HASH = "520609c519fba3ba5ac58c8fefcc3530"
|
|
39 |
class NYUv2(datasets.GeneratorBasedBuilder):
|
40 |
"""NYU Depth Dataset V2"""
|
41 |
|
42 |
-
VERSION = datasets.Version("1.0.0")
|
43 |
|
44 |
BUILDER_CONFIGS = [
|
45 |
BuilderConfig(name="default", version=VERSION, description="Default configuration for NYUv2 dataset"),
|
@@ -54,6 +54,7 @@ class NYUv2(datasets.GeneratorBasedBuilder):
|
|
54 |
"label": Array2D(dtype="int32", shape=(640, 480)),
|
55 |
"scene": Value("string"),
|
56 |
"scene_type": Value("string"),
|
|
|
57 |
})
|
58 |
|
59 |
return datasets.DatasetInfo(
|
@@ -88,12 +89,14 @@ class NYUv2(datasets.GeneratorBasedBuilder):
|
|
88 |
|
89 |
def _generate_examples(self, filepath, split):
|
90 |
with h5py.File(filepath, 'r') as f:
|
|
|
91 |
images = np.array(f['images'])
|
92 |
depths = np.array(f['depths'])
|
93 |
instances = np.array(f['instances'])
|
94 |
labels = np.array(f['labels'])
|
95 |
scenes = [''.join(chr(int(x)) for x in f[y]) for y in f.get('scenes')[0]]
|
96 |
scene_types = [''.join(chr(int(x)) for x in f[y]) for y in f.get('sceneTypes')[0]]
|
|
|
97 |
|
98 |
for idx in range(images.shape[0]):
|
99 |
yield idx, {
|
@@ -103,4 +106,5 @@ class NYUv2(datasets.GeneratorBasedBuilder):
|
|
103 |
"label": labels[idx],
|
104 |
"scene": scenes[idx],
|
105 |
"scene_type": scene_types[idx],
|
|
|
106 |
}
|
|
|
3 |
import numpy as np
|
4 |
import h5py
|
5 |
import datasets
|
6 |
+
from datasets import BuilderConfig, Features, Value, SplitGenerator, Array2D, Image, Sequence
|
7 |
import hashlib
|
8 |
|
9 |
|
|
|
39 |
class NYUv2(datasets.GeneratorBasedBuilder):
|
40 |
"""NYU Depth Dataset V2"""
|
41 |
|
42 |
+
VERSION = datasets.Version("1.1.0")
|
43 |
|
44 |
BUILDER_CONFIGS = [
|
45 |
BuilderConfig(name="default", version=VERSION, description="Default configuration for NYUv2 dataset"),
|
|
|
54 |
"label": Array2D(dtype="int32", shape=(640, 480)),
|
55 |
"scene": Value("string"),
|
56 |
"scene_type": Value("string"),
|
57 |
+
'accelData': Sequence(Value("float32"), length=4),
|
58 |
})
|
59 |
|
60 |
return datasets.DatasetInfo(
|
|
|
89 |
|
90 |
def _generate_examples(self, filepath, split):
|
91 |
with h5py.File(filepath, 'r') as f:
|
92 |
+
# available keys: ['accelData','depths','images','instances','labels','names','namesToIds','rawDepthFilenames','rawDepths','rawRgbFilenames','sceneTypes','scenes',]
|
93 |
images = np.array(f['images'])
|
94 |
depths = np.array(f['depths'])
|
95 |
instances = np.array(f['instances'])
|
96 |
labels = np.array(f['labels'])
|
97 |
scenes = [''.join(chr(int(x)) for x in f[y]) for y in f.get('scenes')[0]]
|
98 |
scene_types = [''.join(chr(int(x)) for x in f[y]) for y in f.get('sceneTypes')[0]]
|
99 |
+
accelData = np.array(f['accelData']).T
|
100 |
|
101 |
for idx in range(images.shape[0]):
|
102 |
yield idx, {
|
|
|
106 |
"label": labels[idx],
|
107 |
"scene": scenes[idx],
|
108 |
"scene_type": scene_types[idx],
|
109 |
+
"accelData": accelData[idx],
|
110 |
}
|
README.md
CHANGED
@@ -1,6 +1,9 @@
|
|
1 |
---
|
2 |
size_categories:
|
3 |
- 1K<n<10K
|
|
|
|
|
|
|
4 |
paperswithcode_id: nyuv2
|
5 |
tags:
|
6 |
- depth-estimation
|
@@ -27,15 +30,15 @@ dataset_info:
|
|
27 |
dtype: string
|
28 |
- name: scene_type
|
29 |
dtype: string
|
|
|
|
|
|
|
30 |
splits:
|
31 |
- name: train
|
32 |
-
num_bytes:
|
33 |
num_examples: 1449
|
34 |
download_size: 2972037809
|
35 |
-
dataset_size:
|
36 |
-
task_categories:
|
37 |
-
- depth-estimation
|
38 |
-
- image-segmentation
|
39 |
---
|
40 |
|
41 |
# NYU Depth Dataset V2
|
@@ -94,10 +97,11 @@ dataset = load_dataset("0jl/NYUv2", trust_remote_code=True, split="train")
|
|
94 |
The dataset contains only one training split with the following features:
|
95 |
|
96 |
- `image`: RGB image (PIL.Image.Image, shape: (640, 480, 3))
|
97 |
-
- `depth`: Depth map (
|
98 |
-
- `label`: Semantic segmentation labels (
|
99 |
- `scene`: Scene name (string)
|
100 |
- `scene_type`: Scene type (string)
|
|
|
101 |
|
102 |
|
103 |
## Citation Information
|
|
|
1 |
---
|
2 |
size_categories:
|
3 |
- 1K<n<10K
|
4 |
+
task_categories:
|
5 |
+
- depth-estimation
|
6 |
+
- image-segmentation
|
7 |
paperswithcode_id: nyuv2
|
8 |
tags:
|
9 |
- depth-estimation
|
|
|
30 |
dtype: string
|
31 |
- name: scene_type
|
32 |
dtype: string
|
33 |
+
- name: accelData
|
34 |
+
sequence: float32
|
35 |
+
length: 4
|
36 |
splits:
|
37 |
- name: train
|
38 |
+
num_bytes: 4096489803
|
39 |
num_examples: 1449
|
40 |
download_size: 2972037809
|
41 |
+
dataset_size: 4096489803
|
|
|
|
|
|
|
42 |
---
|
43 |
|
44 |
# NYU Depth Dataset V2
|
|
|
97 |
The dataset contains only one training split with the following features:
|
98 |
|
99 |
- `image`: RGB image (PIL.Image.Image, shape: (640, 480, 3))
|
100 |
+
- `depth`: Depth map (2D array, shape: (640, 480), dtype: float32)
|
101 |
+
- `label`: Semantic segmentation labels (2D array, shape: (640, 480), dtype: int32)
|
102 |
- `scene`: Scene name (string)
|
103 |
- `scene_type`: Scene type (string)
|
104 |
+
- `accelData`: Acceleration data (list, shape: (4,), dtype: float32)
|
105 |
|
106 |
|
107 |
## Citation Information
|