Size: 10K<n<100K

import io

import datasets
import h5py
import numpy as np
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator
from PIL import Image


class CustomConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The config name ("all", "depth", or "seg") doubles as the modality selector.
        self.dataset_type = kwargs.get("name", "all")


class RGBSemanticDepthDataset(GeneratorBasedBuilder):
    # One config per modality combination; the config name is read back as dataset_type.
    BUILDER_CONFIGS = [
        CustomConfig(name="all", version="1.0.0", description="load both segmentation and depth"),
        CustomConfig(name="depth", version="1.0.0", description="only load depth"),
        CustomConfig(name="seg", version="1.0.0", description="only load segmentation"),
    ]
    BUILDER_CONFIG_CLASS = CustomConfig

    def _info(self):
        return DatasetInfo(
            features=Features({
                "left_rgb": datasets.Image(),
                "right_rgb": datasets.Image(),
                "left_seg": datasets.Image(),
                "left_depth": datasets.Image(),
                "right_depth": datasets.Image(),
            })
        )

    def _h5_loader(self, bytes_stream, type_dataset):
        # Reference: https://github.com/dwofk/fast-depth/blob/master/dataloaders/dataloader.py#L8-L13
        f = io.BytesIO(bytes_stream)
        h5f = h5py.File(f, "r")
        # uint8 placeholder image for modalities that are not loaded under this config.
        placeholder = np.zeros((1, 1), dtype=np.uint8)
        left_rgb = self._read_jpg(h5f["rgb_left"][:])
        if type_dataset == "depth":
            right_rgb = self._read_jpg(h5f["rgb_right"][:])
            left_depth = h5f["depth_left"][:].astype(np.float32)
            right_depth = h5f["depth_right"][:].astype(np.float32)
            return left_rgb, right_rgb, placeholder, left_depth, right_depth
        elif type_dataset == "seg":
            left_seg = h5f["seg_left"][:]
            return left_rgb, placeholder, left_seg, placeholder, placeholder
        else:  # "all": load every modality
            right_rgb = self._read_jpg(h5f["rgb_right"][:])
            left_seg = h5f["seg_left"][:]
            left_depth = h5f["depth_left"][:].astype(np.float32)
            right_depth = h5f["depth_right"][:].astype(np.float32)
            return left_rgb, right_rgb, left_seg, left_depth, right_depth

    def _read_jpg(self, bytes_stream):
        # RGB images are stored as JPEG-encoded byte arrays inside each HDF5 sample.
        return Image.open(io.BytesIO(bytes_stream))

    def _split_generators(self, dl_manager):
        archives = dl_manager.download({"train": ["data/images_1730238419.175364.tar"]})
        # Download the split lists as well instead of opening a bare relative path
        # (assumes train.txt and val.txt sit next to the script in the dataset repo).
        split_files = dl_manager.download({"train": "train.txt", "validation": "val.txt"})
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    # `download` returned a dict, so index it rather than iterating its keys.
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
                    "split_txt": split_files["train"],
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    # Same tar archives; the split list decides which samples belong to validation.
                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
                    "split_txt": split_files["validation"],
                },
            ),
        ]

    def _generate_examples(self, archives, split_txt):
        # Keep only the archive members listed in the split file.
        with open(split_txt, encoding="utf-8") as split_f:
            all_splits = set(split_f.read().splitlines())
        for archive in archives:
            for path, file in archive:
                if path not in all_splits:
                    continue
                left_rgb, right_rgb, left_seg, left_depth, right_depth = self._h5_loader(
                    file.read(), self.config.dataset_type
                )
                yield path, {
                    "left_rgb": left_rgb,
                    "right_rgb": right_rgb,
                    "left_seg": left_seg,
                    "left_depth": left_depth,
                    "right_depth": right_depth,
                }
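
For reference, a minimal usage sketch, assuming the script above is saved as the loading script of its dataset repository; the repo id `username/rgb-semantic-depth` is a placeholder, not the real dataset name:

from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset repository.
# The config name ("all", "depth", or "seg") selects which modalities _h5_loader returns;
# trust_remote_code is required for script-based datasets in recent versions of `datasets`.
ds = load_dataset("username/rgb-semantic-depth", "depth", split="train", trust_remote_code=True)

sample = ds[0]
print(sample["left_rgb"])    # PIL image decoded from the JPEG bytes in the HDF5 file
print(sample["left_depth"])  # depth map re-encoded through datasets.Image()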