"""MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader.""" |
|
from __future__ import annotations |
|
from pathlib import Path |
|
from argparse import Namespace, ArgumentParser |
|
from pprint import pprint |
|
from natsort import natsorted |
|
from loguru import logger |
|
import torch as tr |
|
import numpy as np |
|
from torch.utils.data import Dataset, DataLoader |
|
from lovely_tensors import monkey_patch |
|
|
|
monkey_patch() |
|
BuildDatasetTuple = tuple[dict[str, list[Path]], list[str]] |
|
MultiTaskItem = tuple[dict[str, tr.Tensor], str, list[str]] |
|
|
|
class NpzRepresentation:
    """Generic task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well."""
    def __init__(self, name: str):
        self.name = name

    def load_from_disk(self, path: Path) -> tr.Tensor:
        """Reads the npz data from the disk and transforms it properly."""
        data = np.load(path, allow_pickle=False)
        data = data if isinstance(data, np.ndarray) else data["arr_0"]  # npz archives store the array under 'arr_0'
        return tr.from_numpy(data)

    def save_to_disk(self, data: tr.Tensor, path: Path):
        """Stores this item to the disk so it can be loaded back via `load_from_disk`."""
        np.save(path, data.cpu().numpy(), allow_pickle=False)

    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        """Very basic implementation of converting this representation to a viewable image. You should overwrite this."""
        assert isinstance(x, tr.Tensor), type(x)
        if len(x.shape) == 2: x = x.unsqueeze(-1)
        assert len(x.shape) == 3, x.shape
        if x.shape[-1] != 3: x = x[..., 0:1]  # keep only the first channel if it's not an RGB-like image
        if x.shape[-1] == 1: x = x.repeat(1, 1, 3)
        x = x.nan_to_num(0).cpu().numpy()
        _min, _max = x.min((0, 1), keepdims=True), x.max((0, 1), keepdims=True)
        if x.dtype != np.uint8: x = np.nan_to_num((x - _min) / (_max - _min) * 255, nan=0).astype(np.uint8)
        return x

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f"{str(type(self)).split('.')[-1][0:-2]}({self.name})"


class MultiTaskDataset(Dataset):
    """
    MultiTaskDataset implementation. Reads data from npz files and returns them as a dict.

    Parameters:
    - path: Path to the directory containing the npz files.
    - task_names: List of tasks that are present in the dataset. If set to None, they will be inferred from the files
      on disk.
    - handle_missing_data: Mode for handling missing data. Valid options are:
        - drop: Drop the data point if any of the representations is missing.
        - fill_none: Fill the missing data with Nones.

    Expected directory structure:
    path/
    - task_1/0.npz, ..., N.npz
    - ...
    - task_n/0.npz, ..., N.npz

    Names can be in a different format (e.g. 2022-01-01.npz), but they must be consistent and equal across all tasks.
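
    Example (illustrative; the directory layout, task names, and paths below are made up, not part of this module):
        Given path/rgb/{0.npz, 1.npz} and path/depth/{1.npz, 2.npz}:
        - handle_missing_data="drop" keeps only the common names, i.e. {1.npz};
        - handle_missing_data="fill_none" keeps the union {0.npz, 1.npz, 2.npz} and yields None for missing entries.

        reader = MultiTaskDataset(Path("path"), task_names=["rgb", "depth"], handle_missing_data="fill_none")
        data, item_name, task_names = reader[0]  # data is {"rgb": tr.Tensor | None, "depth": tr.Tensor | None}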
""" |
|
|
|
def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none", |
|
files_suffix: str = "npz", task_types: dict[str, type] = None): |
|
assert Path(path).exists(), f"Provided path '{path}' doesn't exist!" |
|
assert handle_missing_data in ("drop", "fill_none"), f"Invalid handle_missing_data mode: {handle_missing_data}" |
|
assert files_suffix == "npz", "Only npz supported right now (though trivial to update)" |
|
self.path = Path(path).absolute() |
|
self.handle_missing_data = handle_missing_data |
|
self.suffix = files_suffix |
|
self.files_per_repr, self.file_names = self._build_dataset() |
|
if task_types is None: |
|
logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.") |
|
task_types = {} |
|
|
|
if task_names is None: |
|
task_names = list(self.files_per_repr.keys()) |
|
logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).") |
|
self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names} |
|
assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names))) |
|
self.task_names = sorted(task_names) |
|
self._data_shape: tuple[int, ...] | None = None |
|
self._tasks: list[NpzRepresentation] | None = None |
|
self.name_to_task = {task.name: task for task in self.tasks} |
|
logger.info(f"Tasks used in this dataset: {self.task_names}") |
|
|
|
|
|
|
|
    @property
    def data_shape(self) -> dict[str, tuple[int, ...]]:
        """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
        first_npz = {task: [_v for _v in files if _v is not None][0] for task, files in self.files_per_repr.items()}
        data_shape = {task: self.name_to_task[task].load_from_disk(first_npz[task]).shape for task in self.task_names}
        return data_shape

    @property
    def tasks(self) -> list[NpzRepresentation]:
        """
        Returns a list of instantiated tasks in the same order as self.task_names. Overwrite this to add
        new tasks and semantics (e.g. plot_fn or doing some preprocessing after loading from disk for some tasks).
        """
        if self._tasks is not None:
            return self._tasks
        self._tasks = []
        for task_name in self.task_names:
            t = self.task_types[task_name]
            if not isinstance(t, NpzRepresentation):  # task_types may hold either classes or already-built instances
                t = t(task_name)
            self._tasks.append(t)
        assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks)
        return self._tasks

    def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
        """
        Given a list of items (i.e. from a reader[n:n+k] call), return the item batched on the 1st dimension.
        Nones (missing data points) are turned into NaN entries matching the data shape of that task.
        """
        assert all(item[2] == self.task_names for item in items), ([item[2] for item in items], self.task_names)
        items_name = [item[1] for item in items]
        res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names}
        for i in range(len(items)):
            for k in self.task_names:
                res[k][i] = items[i][0][k] if items[i][0][k] is not None else float("nan")
        return res, items_name, self.task_names

    def _get_all_npz_files(self) -> dict[str, list[Path]]:
        """Returns a dict of the form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}"""
        in_files = {}
        all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()]
        for repr_dir_name in all_repr_dirs:
            dir_name = self.path / repr_dir_name
            if all(f.is_dir() for f in dir_name.iterdir()):  # data is split in parts: task/part_x/0.npz, ..., N.npz
                all_files = []
                for part in dir_name.iterdir():
                    all_files.extend(part.glob(f"*.{self.suffix}"))
            else:  # flat layout: task/0.npz, ..., N.npz
                all_files = dir_name.glob(f"*.{self.suffix}")
            in_files[repr_dir_name] = natsorted(all_files, key=lambda x: x.name)
        assert not any(len(x) == 0 for x in in_files.values()), f"{[k for k, v in in_files.items() if len(v) == 0]}"
        return in_files

    def _build_dataset_drop(self) -> BuildDatasetTuple:
        in_files = self._get_all_npz_files()
        name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
        common = set(x.name for x in next(iter(in_files.values())))
        nodes = in_files.keys()
        for node in nodes:
            common = common.intersection([f.name for f in in_files[node]])
            assert len(common) > 0, f"Node '{node}' made the intersection null"
        common = natsorted(list(common))
        logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
        files_per_repr = {node: [name_to_node_path[node][x] for x in common] for node in nodes}
        assert len(files_per_repr) > 0
        return files_per_repr, common

    def _build_dataset_fill_none(self) -> BuildDatasetTuple:
        in_files = self._get_all_npz_files()
        name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
        all_files = set(x.name for x in next(iter(in_files.values())))
        nodes = in_files.keys()
        for node in nodes:
            all_files = all_files.union([f.name for f in in_files[node]])
        all_files = natsorted(list(all_files))
        logger.info(f"Found {len(all_files)} data points as the union of all nodes' data ({len(nodes)} nodes).")

        files_per_repr = {node: [] for node in nodes}
        for node in nodes:
            for file_name in all_files:
                file_path = name_to_node_path[node].get(file_name, None)
                files_per_repr[node].append(file_path)
        assert len(files_per_repr) > 0
        return files_per_repr, all_files

    def _build_dataset(self) -> BuildDatasetTuple:
        logger.debug(f"Building dataset from: '{self.path}'")
        if self.handle_missing_data == "drop":
            return self._build_dataset_drop()
        else:
            return self._build_dataset_fill_none()

    def __getitem__(self, index: int | slice | list[int] | tuple) -> MultiTaskItem:
        """Read the data for all the desired nodes"""
        assert isinstance(index, (int, slice, list, tuple)), type(index)
        if isinstance(index, slice):
            assert index.start is not None and index.stop is not None and index.step is None, "Only reader[l:r] allowed"
            index = list(range(index.stop)[index])
        if isinstance(index, (list, tuple)):
            return self.collate_fn([self.__getitem__(ix) for ix in index])
        res = {}
        item_name = self.file_names[index]

        for _repr in self.tasks:
            file_path = self.files_per_repr[_repr.name][index]
            file_path = file_path.resolve() if file_path is not None else None
            assert self.handle_missing_data == "fill_none" or (file_path is not None and file_path.exists()), item_name
            item = _repr.load_from_disk(file_path) if file_path is not None and file_path.exists() else None
            res[_repr.name] = item
        return (res, item_name, self.task_names)

    def __len__(self) -> int:
        return len(self.files_per_repr[self.task_names[0]])

    def __str__(self):
        f_str = f"[{str(type(self)).rsplit('.', maxsplit=1)[-1][0:-2]}]"
        f_str += f"\n - Path: '{self.path}'"
        f_str += f"\n - Only full data: {self.handle_missing_data == 'drop'}"
        f_str += f"\n - Representations ({len(self.tasks)}): {self.tasks}"
        f_str += f"\n - Length: {len(self)}"
        return f_str

    def __repr__(self):
        return str(self)
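

# Illustrative only (kept as comments, not executed): wiring a custom representation, such as the
# DepthRepresentation sketch above, into the dataset via `task_types`. The path and task name are placeholders.
#   reader = MultiTaskDataset(Path("/path/to/data"), task_names=["depth"],
#                             handle_missing_data="drop", task_types={"depth": DepthRepresentation})
#   data, item_name, task_names = reader[0]
#   image = reader.name_to_task["depth"].plot_fn(data["depth"])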


def main():
    """Main function: a small CLI that prints a dataset summary and a few sampled items."""
    parser = ArgumentParser()
    parser.add_argument("dataset_path", type=Path)
    parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none")
    args = parser.parse_args()

    reader = MultiTaskDataset(args.dataset_path, task_names=None, handle_missing_data=args.handle_missing_data)
    print(reader)
    print(f"Shape: {reader.data_shape}")

    # a single random item
    rand_ix = np.random.randint(len(reader))
    data, name, repr_names = reader[rand_ix]
    print(f"Name: {name}. Nodes: {repr_names}")
    pprint({k: v for k, v in data.items()})

    # a small batch obtained by slicing the reader directly
    data, name, repr_names = reader[rand_ix: min(len(reader), rand_ix + 5)]
    print(f"Name: {name}. Nodes: {repr_names}")
    pprint({k: v for k, v in data.items()})

    # a batch obtained via a standard torch DataLoader using the dataset's collate_fn
    loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True)
    data, name, repr_names = next(iter(loader))
    print(f"Name: {name}. Nodes: {repr_names}")
    pprint({k: v for k, v in data.items()})


if __name__ == "__main__":
    main()