files_per_repr_overwrites and hsv support. Other backports too (opticalflow/ndims)
dronescapes_reader/__init__.py
CHANGED
@@ -1,20 +1,22 @@
 """init file"""
 from .multitask_dataset import MultiTaskDataset, NpzRepresentation
 from .dronescapes_representations import DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation, \
-    ColorRepresentation, EdgesRepresentation
+    ColorRepresentation, EdgesRepresentation, NormalsRepresentation, HSVRepresentation
 
 _color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
               [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
 _m2f_name = "semantic_mask2former_swin_mapillary_converted"
 dronescapes_task_types = { # some pre-baked representations
-    "rgb": ColorRepresentation("rgb"),
-    "edges_dexined": ColorRepresentation("edges_dexined"),
+    "rgb": ColorRepresentation("rgb", 3),
+    "hsv": HSVRepresentation("hsv", 3),
+    "edges_dexined": ColorRepresentation("edges_dexined", 1),
     "depth_dpt": DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
     "depth_sfm_manual202204": DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
+    "normals_sfm_manual202204": NormalsRepresentation("normals_sfm_manual202204"),
     "depth_ufo": DepthRepresentation("depth_ufo", min_depth=0, max_depth=1),
    "opticalflow_rife": OpticalFlowRepresentation,
     "semantic_segprop8": SemanticRepresentation("semantic_segprop8", classes=8, color_map=_color_map),
     _m2f_name: SemanticRepresentation(_m2f_name, classes=8, color_map=_color_map),
-    "softseg_gb": ColorRepresentation("softseg_gb"),
+    "softseg_gb": ColorRepresentation("softseg_gb", 3),
     "edges_gb": EdgesRepresentation("edges_gb"),
 }
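For orientation, the entries above are plain instances keyed by task name ("opticalflow_rife" still maps to the OpticalFlowRepresentation class itself rather than an instance). A minimal sketch of inspecting them, assuming the package is importable as dronescapes_reader; this snippet is not part of the commit:

# Sketch only: look up pre-baked representations and their new channel counts.
from dronescapes_reader import dronescapes_task_types

rgb = dronescapes_task_types["rgb"]    # ColorRepresentation("rgb", 3)
hsv = dronescapes_task_types["hsv"]    # HSVRepresentation("hsv", 3); pairs with files_per_repr_overwrites below
print(rgb.n_channels, hsv.n_channels)  # -> 3 3, via the n_channels property added in this commit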
dronescapes_reader/dronescapes_representations.py
CHANGED
@@ -4,28 +4,58 @@ from pathlib import Path
 import numpy as np
 import torch as tr
 import flow_vis
+from skimage.color import rgb2hsv, hsv2rgb
 from overrides import overrides
 from matplotlib.cm import hot # pylint: disable=no-name-in-module
 from .multitask_dataset import NpzRepresentation
 from torch.nn import functional as F
 
 class ColorRepresentation(NpzRepresentation):
+    def __init__(self, name: str, n_channels: int):
+        super().__init__(name)
+        self._n_channels = n_channels
+
+    @overrides
     def load_from_disk(self, path: Path) -> tr.Tensor:
         res = super().load_from_disk(path)
         return res.float() / 255
 
+    @overrides
     def save_to_disk(self, data: tr.Tensor, path: Path):
         return super().save_to_disk((data * 255).byte(), path)
 
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return self._n_channels
+
+class HSVRepresentation(ColorRepresentation):
+    @overrides
+    def load_from_disk(self, path: Path) -> tr.Tensor:
+        rgb = NpzRepresentation.load_from_disk(self, path)
+        return tr.from_numpy(rgb2hsv(rgb)).float()
+
+    @overrides
+    def save_to_disk(self, data: tr.Tensor, path: Path):
+        rgb = tr.from_numpy(hsv2rgb(data) * 255).byte()
+        NpzRepresentation.save_to_disk(rgb, path)
+
 class EdgesRepresentation(NpzRepresentation):
+    @overrides
     def load_from_disk(self, path: Path) -> tr.Tensor:
         res = super().load_from_disk(path).float()
         assert len(res.shape) == 3 and res.shape[-1] == 1
         return res
 
+    @overrides
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
         return (x.detach().repeat(1, 1, 3) * 255).cpu().numpy().astype(np.uint8)
 
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return 1
+
 class DepthRepresentation(NpzRepresentation):
     """DepthRepresentation. Implements depth task-specific stuff, like hotmap."""
     def __init__(self, *args, min_depth: float, max_depth: float, **kwargs):
@@ -40,21 +70,38 @@ class DepthRepresentation(NpzRepresentation):
         y: np.ndarray = hot(x)[..., 0:3] * 255
         return y.astype(np.uint8)
 
+    @overrides
     def load_from_disk(self, path: Path) -> tr.Tensor:
         res = super().load_from_disk(path).squeeze().unsqueeze(-1)
         res = res.float().clip(self.min_depth, self.max_depth)
         res = (res - self.min_depth) / (self.max_depth - self.min_depth)
         return res
 
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return 1
+
+class NormalsRepresentation(NpzRepresentation):
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return 3
+
 class OpticalFlowRepresentation(NpzRepresentation):
     """OpticalFlowRepresentation. Implements depth task-specific stuff, like using flow_vis."""
     @overrides
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
         return flow_vis.flow_to_color(x.squeeze().nan_to_num(0).detach().cpu().numpy())
 
+    @overrides
     def load_from_disk(self, path: Path) -> tr.Tensor:
-        ...
-        ...
+        return super().load_from_disk(path).float()
+
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return 2
 
 class SemanticRepresentation(NpzRepresentation):
     """SemanticRepresentation. Implements depth task-specific stuff, like using flow_vis."""
@@ -82,3 +129,8 @@ class SemanticRepresentation(NpzRepresentation):
         for i in range(self.n_classes):
             new_images[x == i] = self.color_map[i]
         return new_images
+
+    @property
+    @overrides
+    def n_channels(self) -> int:
+        return self.n_classes
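The common thread in this file is that every representation now reports its channel count through an n_channels property; the base-class version (see the multitask_dataset.py diff below) raises NotImplementedError. A sketch of a hypothetical subclass following the same contract; the GrayscaleRepresentation name is invented for illustration and is not part of this commit:

from pathlib import Path
import torch as tr
from overrides import overrides
from dronescapes_reader import NpzRepresentation

class GrayscaleRepresentation(NpzRepresentation):
    """Hypothetical example: a single-channel uint8 map normalized to [0, 1]."""
    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        return super().load_from_disk(path).float() / 255

    @property
    @overrides
    def n_channels(self) -> int:
        return 1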
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -16,6 +16,7 @@ monkey_patch()
 BuildDatasetTuple = Tuple[Dict[str, List[Path]], List[str]]
 MultiTaskItem = Tuple[Dict[str, tr.Tensor], str, List[str]] # [{task: data}, stem(name) | list[stem(name)], [tasks]]
 
+
 class NpzRepresentation:
     """Generic Task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well"""
     def __init__(self, name: str):
@@ -34,15 +35,24 @@ class NpzRepresentation:
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
         """very basic implementation of converting this representation to a viewable image. You should overwrite this"""
         assert isinstance(x, tr.Tensor), type(x)
-        if len(x.shape) == 2: ...
+        if len(x.shape) == 2:
+            x = x.unsqueeze(-1)
         assert len(x.shape) == 3, x.shape # guaranteed to be (H, W, C) at this point
-        if x.shape[-1] != 3: ...
-        ...
+        if x.shape[-1] != 3:
+            x = x[..., 0:1]
+        if x.shape[-1] == 1:
+            x = x.repeat(1, 1, 3)
         x = x.nan_to_num(0).cpu().detach().numpy() # guaranteed to be (H, W, 3) at this point hopefully
         _min, _max = x.min((0, 1), keepdims=True), x.max((0, 1), keepdims=True)
-        if x.dtype != np.uint8: ...
+        if x.dtype != np.uint8:
+            x = np.nan_to_num((x - _min) / (_max - _min) * 255, 0).astype(np.uint8)
         return x
 
+    @property
+    def n_channels(self) -> int:
+        """return the number of channels for this representation. Must be updated by each downstream representation"""
+        raise NotImplementedError(f"n_channels is not implemented for {self}")
+
     def __repr__(self):
         return str(self)
 
@@ -70,7 +80,8 @@ class MultiTaskDataset(Dataset):
     """
 
     def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
-                 files_suffix: str = "npz", task_types: dict[str, type] | None = None):
+                 files_suffix: str = "npz", task_types: dict[str, type] | None = None,
+                 files_per_repr_overwrites: dict[str, str] | None = None):
         assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
         assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
             f"Invalid handle_missing_data mode: {handle_missing_data}"
@@ -78,6 +89,7 @@ class MultiTaskDataset(Dataset):
         self.path = Path(path).absolute()
         self.handle_missing_data = handle_missing_data
         self.suffix = files_suffix
+        self.files_per_repr_overwrites = files_per_repr_overwrites
         self.all_files_per_repr = self._get_all_npz_files()
         self.files_per_repr, self.file_names = self._build_dataset() # these are filtered by 'drop' or 'fill_none' logic
         if task_types is None:
@@ -87,6 +99,7 @@ class MultiTaskDataset(Dataset):
         if task_names is None:
             task_names = list(self.files_per_repr.keys())
             logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).")
+        assert all(task in self.files_per_repr for task in task_names), (task_names, self.files_per_repr.keys())
         self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names}
         assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
         self.task_names = sorted(task_names)
@@ -164,6 +177,21 @@ class MultiTaskDataset(Dataset):
         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
         return in_files
 
+    def _build_dataset(self) -> BuildDatasetTuple:
+        logger.debug(f"Building dataset from: '{self.path}'")
+        if self.handle_missing_data == "drop":
+            files_per_repr, common = self._build_dataset_drop_missing()
+        else:
+            files_per_repr, common = self._build_dataset_fill_missing()
+        if self.files_per_repr_overwrites is not None: # here we match for example 'hsv' to read also from 'rgb' dir
+            for left, right in self.files_per_repr_overwrites.items():
+                if right not in (fpr := files_per_repr):
+                    logger.info(f"Overwrite: {left} => {right} provided, but {right} is not in {fpr.keys()}")
+                    continue
+                assert left not in fpr.keys(), f"Overwrite: {left} => {right}. {left} already exists in {fpr.keys()}"
+                files_per_repr[left] = files_per_repr[right]
+        return files_per_repr, common
+
     def _build_dataset_drop_missing(self) -> BuildDatasetTuple:
         in_files = self.all_files_per_repr
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} # {node: {name: path}}
@@ -195,13 +223,6 @@ class MultiTaskDataset(Dataset):
         assert len(files_per_repr) > 0
         return files_per_repr, all_files
 
-    def _build_dataset(self) -> BuildDatasetTuple:
-        logger.debug(f"Building dataset from: '{self.path}'")
-        if self.handle_missing_data == "drop":
-            return self._build_dataset_drop_missing()
-        else:
-            return self._build_dataset_fill_missing()
-
     # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
 
     def __getitem__(self, index: int | slice | list[int] | tuple) -> MultiTaskItem:
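The new files_per_repr_overwrites argument lets one task reuse another task's files on disk, e.g. letting "hsv" load from the "rgb" directory while still decoding through HSVRepresentation. A standalone sketch of the remapping done in _build_dataset above, with invented example paths:

from pathlib import Path

# Invented example data; mirrors the overwrite loop in _build_dataset above.
files_per_repr = {"rgb": [Path("scene1/0.npz"), Path("scene1/1.npz")]}
overwrites = {"hsv": "rgb"}  # 'hsv' items are read from the 'rgb' files

for left, right in overwrites.items():
    if right not in files_per_repr:
        continue  # the reader only logs a message and skips in this case
    assert left not in files_per_repr, f"{left} already exists"
    files_per_repr[left] = files_per_repr[right]

print(sorted(files_per_repr))  # ['hsv', 'rgb'] -- both keys point at the same file list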
scripts/dronescapes_viewer.ipynb
CHANGED
The diff for this file is too large to render.
scripts/dronescapes_viewer.py
CHANGED
@@ -8,7 +8,8 @@ from torch.utils.data import DataLoader
 import random
 
 def main():
-    reader = MultiTaskDataset(sys.argv[1], handle_missing_data="fill_none", task_types=dronescapes_task_types)
+    reader = MultiTaskDataset(sys.argv[1], handle_missing_data="fill_none", task_types=dronescapes_task_types,
+                              files_per_repr_overwrites={"hsv": "rgb"})
     print(reader)
 
     print("== Shapes ==")