"""Dronescapes representations -- adds various loading/writing/image showing capabilities to dronescapes tasks"""
from __future__ import annotations
from pathlib import Path
from typing import Callable
import numpy as np
import torch as tr
import flow_vis
from skimage.color import rgb2hsv
from overrides import overrides
from matplotlib.cm import Spectral # pylint: disable=no-name-in-module
from torch.nn import functional as F

try:  # support both standalone and package-relative imports
    from npz_representation import NpzRepresentation
except ImportError:
    from .npz_representation import NpzRepresentation

class RGBRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=3, **kwargs)

class HSVRepresentation(RGBRepresentation):
    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        rgb = super().load_from_disk(path)
        return tr.from_numpy(rgb2hsv(rgb)).float()

class EdgesRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=1, **kwargs)

class DepthRepresentation(NpzRepresentation):
    """DepthRepresentation. Implements depth task-specific stuff, like spectral map for plots."""
    def __init__(self, name: str, min_depth: float, max_depth: float, *args, **kwargs):
        super().__init__(name, *args, n_channels=1, **kwargs)
        self.min_depth = min_depth
        self.max_depth = max_depth

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        """Reads the npz data from the disk and transforms it properly"""
        res = super().load_from_disk(path)
        res_clip = res.clip(self.min_depth, self.max_depth)
        return res_clip

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x = x.detach().clip(0, 1).squeeze().cpu().numpy()
        _min, _max = np.percentile(x, [1, 95])
        x = np.nan_to_num((x - _min) / (_max - _min), False, 0, 0, 0).clip(0, 1)
        y: np.ndarray = Spectral(x)[..., 0:3] * 255
        return y.astype(np.uint8)

class NormalsRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=3, **kwargs)

class OpticalFlowRepresentation(NpzRepresentation):
    """OpticalFlowRepresentation. Implements flow task-specific stuff, like using flow_vis."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=2, **kwargs)

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        _min, _max = x.min(0)[0].min(0)[0], x.max(0)[0].max(0)[0]
        x = ((x - _min) / (_max - _min)).nan_to_num(0, 0, 0).detach().cpu().numpy()
        return flow_vis.flow_to_color(x)

class SemanticRepresentation(NpzRepresentation):
    """SemanticRepresentation. Implements semantic task-specific stuff, like argmaxing if needed"""
    def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
        self.n_classes = len(list(range(classes)) if isinstance(classes, int) else classes)
        super().__init__(*args, **kwargs, n_channels=self.n_classes)
        self.classes = list(range(classes)) if isinstance(classes, int) else classes
        self.color_map = color_map
        assert len(color_map) == self.n_classes and self.n_classes > 1, (color_map, self.n_classes)

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        res = super().load_from_disk(path)
        if len(res.shape) == 3:
            assert res.shape[-1] == self.n_classes, f"Expected {self.n_classes} (HxWxC), got {res.shape[-1]}"
            res = res.argmax(-1)
        assert len(res.shape) == 2, f"Only argmaxed data supported, got: {res.shape}"
        res = F.one_hot(res.long(), num_classes=self.n_classes).float()
        return res

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
        for i in range(self.n_classes):
            new_images[x_argmax == i] = self.color_map[i]
        return new_images

def semantic_mapper(semantic_original: np.ndarray, mapping: dict[str, list[str]],
                    original_classes: list[str]) -> np.ndarray:
    """maps a bigger semantic segmentation to a smaller one"""
    assert len(semantic_original.shape) == 2, f"Only argmaxed data supported, got: {semantic_original.shape}"
    assert np.issubdtype(semantic_original.dtype, np.integer), semantic_original.dtype
    mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v) for _v in v] for k, v in mapping.items()}
    flat_mapping = {}
    for k, v in mapping_ix.items():
        for _v in v:
            flat_mapping[_v] = k
    mapped_data = np.vectorize(flat_mapping.get)(semantic_original).astype(np.uint8)
    return mapped_data
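
# Illustrative sketch of how semantic_mapper behaves (class names and values below are
# hypothetical, not part of the dronescapes label sets):
#   mapping = {"vegetation": ["Tree", "Grass"], "sky": ["Sky"]}
#   original_classes = ["Tree", "Grass", "Sky"]
#   semantic_mapper(np.array([[0, 2], [1, 1]]), mapping, original_classes)
#   # -> array([[0, 1], [0, 0]], dtype=uint8)   ("vegetation" -> 0, "sky" -> 1)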

class TaskMapper(NpzRepresentation):
    def __init__(self, *args, merge_fn: Callable[[list[tr.Tensor]], tr.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        assert len(self.dependencies) > 0 and self.dep_names[0] != self.name, "Need at least one dependency (not itself)"
        self.merge_fn = merge_fn

    def load_from_disk(self, path: Path | list[Path]) -> tr.Tensor:
        paths = [path] if isinstance(path, Path) else path
        dep_data = [dep.load_from_disk(dep_path) for dep, dep_path in zip(self.dependencies, paths)]
        return self.merge_fn(dep_data)

    def plot_fn(self, x):
        raise NotImplementedError("Must be overriden by the user")

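# RGB colors for the 8-class dronescapes label set, in the class order of the converted
# mapillary mapping below: land, forest, residential, road, little-objects, water, sky, hill.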
color_map_8classes = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                      [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
                "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
                "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
                "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
                "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
                "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
                "scissors", "teddy bear", "hair drier", "toothbrush", "banner", "blanket", "bridge", "cardboard",
                "counter", "curtain", "door-stuff", "floor-wood", "flower", "fruit", "gravel", "house", "light",
                "mirror-stuff", "net", "pillow", "platform", "playingfield", "railroad", "river", "road", "roof",
                "sand", "sea", "shelf", "snow", "stairs", "tent", "towel", "wall-brick", "wall-stone", "wall-tile",
                "wall-wood", "water-other", "window-blind", "window-other", "tree-merged", "fence-merged",
                "ceiling-merged", "sky-other-merged", "cabinet-merged", "table-merged", "floor-other-merged",
                "pavement-merged", "mountain-merged", "grass-merged", "dirt-merged", "paper-merged",
                "food-other-merged", "building-other-merged", "rock-merged", "wall-other-merged", "rug-merged"]
coco_color_map = [[220, 20, 60], [119, 11, 32], [0, 0, 142], [0, 0, 230], [106, 0, 228], [0, 60, 100], [0, 80, 100],
                  [0, 0, 70], [0, 0, 192], [250, 170, 30], [100, 170, 30], [220, 220, 0], [175, 116, 175], [250, 0, 30],
                  [165, 42, 42], [255, 77, 255], [0, 226, 252], [182, 182, 255], [0, 82, 0], [120, 166, 157],
                  [110, 76, 0], [174, 57, 255], [199, 100, 0], [72, 0, 118], [255, 179, 240], [0, 125, 92],
                  [209, 0, 151], [188, 208, 182], [0, 220, 176], [255, 99, 164], [92, 0, 73], [133, 129, 255],
                  [78, 180, 255], [0, 228, 0], [174, 255, 243], [45, 89, 255], [134, 134, 103], [145, 148, 174],
                  [255, 208, 186], [197, 226, 255], [171, 134, 1], [109, 63, 54], [207, 138, 255], [151, 0, 95],
                  [9, 80, 61], [84, 105, 51], [74, 65, 105], [166, 196, 102], [208, 195, 210], [255, 109, 65],
                  [0, 143, 149], [179, 0, 194], [209, 99, 106], [5, 121, 0], [227, 255, 205], [147, 186, 208],
                  [153, 69, 1], [3, 95, 161], [163, 255, 0], [119, 0, 170], [0, 182, 199], [0, 165, 120],
                  [183, 130, 88], [95, 32, 0], [130, 114, 135], [110, 129, 133], [166, 74, 118], [219, 142, 185],
                  [79, 210, 114], [178, 90, 62], [65, 70, 15], [127, 167, 115], [59, 105, 106], [142, 108, 45],
                  [196, 172, 0], [95, 54, 80], [128, 76, 255], [201, 57, 1], [246, 0, 122], [191, 162, 208],
                  [255, 255, 128], [147, 211, 203], [150, 100, 100], [168, 171, 172], [146, 112, 198],
                  [210, 170, 100], [92, 136, 89], [218, 88, 184], [241, 129, 0], [217, 17, 255], [124, 74, 181],
                  [70, 70, 70], [255, 228, 255], [154, 208, 0], [193, 0, 92], [76, 91, 113], [255, 180, 195],
                  [106, 154, 176], [230, 150, 140], [60, 143, 255], [128, 64, 128], [92, 82, 55], [254, 212, 124],
                  [73, 77, 174], [255, 160, 98], [255, 255, 255], [104, 84, 109], [169, 164, 131], [225, 199, 255],
                  [137, 54, 74], [135, 158, 223], [7, 246, 231], [107, 255, 200], [58, 41, 149], [183, 121, 142],
                  [255, 73, 97], [107, 142, 35], [190, 153, 153], [146, 139, 141], [70, 130, 180], [134, 199, 156],
                  [209, 226, 140], [96, 36, 108], [96, 96, 96], [64, 170, 64], [152, 251, 152], [208, 229, 228],
                  [206, 186, 171], [152, 161, 64], [116, 112, 0], [0, 114, 143], [102, 102, 156], [250, 141, 255]]
mapillary_classes = ["Bird", "Ground Animal", "Curb", "Fence", "Guard Rail", "Barrier", "Wall", "Bike Lane",
                     "Crosswalk - Plain", "Curb Cut", "Parking", "Pedestrian Area", "Rail Track", "Road",
                     "Service Lane", "Sidewalk", "Bridge", "Building", "Tunnel", "Person", "Bicyclist",
                     "Motorcyclist", "Other Rider", "Lane Marking - Crosswalk", "Lane Marking - General",
                     "Mountain", "Sand", "Sky", "Snow", "Terrain", "Vegetation", "Water", "Banner", "Bench",
                     "Bike Rack", "Billboard", "Catch Basin", "CCTV Camera", "Fire Hydrant", "Junction Box",
                     "Mailbox", "Manhole", "Phone Booth", "Pothole", "Street Light", "Pole", "Traffic Sign Frame",
                     "Utility Pole", "Traffic Light", "Traffic Sign (Back)", "Traffic Sign (Front)", "Trash Can",
                     "Bicycle", "Boat", "Bus", "Car", "Caravan", "Motorcycle", "On Rails", "Other Vehicle", "Trailer",
                     "Truck", "Wheeled Slow", "Car Mount", "Ego Vehicle"]
mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153], [180, 165, 180], [90, 120, 150],
                       [102, 102, 156], [128, 64, 255], [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96],
                       [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232], [150, 100, 100], [70, 70, 70],
                       [150, 120, 90], [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
                       [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180], [190, 255, 255], [152, 251, 152],
                       [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220],
                       [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33], [100, 128, 160],
                       [142, 0, 0], [70, 100, 150], [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
                       [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32], [150, 0, 255],
                       [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                       [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]]

class SemanticMask2FormerMapillaryConvertedPaper(TaskMapper):
    def __init__(self, dep: NpzRepresentation):
        super().__init__("semantic_mask2former_swin_mapillary_converted",
                         dependencies=[dep], merge_fn=self._merge_fn, n_channels=8)
        self.mapping = {
            "land": ["Terrain", "Sand", "Snow"],
            "forest": ["Vegetation"],
            "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb",
                            "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner",
                            "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera",
                            "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame",
                            "Bike Rack"],
            "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole",
                    "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk",
                    "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
            "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle",
                            "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
            "water": ["Water"],
            "sky": ["Sky"],
            "hill": ["Mountain"]
        }
        self.color_map = color_map_8classes
        self.original_classes = mapillary_classes
        self.classes = list(self.mapping.keys())
        self.n_classes = len(self.classes)

    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
        for i in range(self.n_classes):
            new_images[x_argmax == i] = self.color_map[i]
        return new_images

    def _merge_fn(self, dep_data: list[tr.Tensor]) -> tr.Tensor:
        m2f_mapillary = dep_data[0].argmax(-1).numpy()
        m2f_mapillary_converted = semantic_mapper(m2f_mapillary, self.mapping, self.original_classes)
        converted_oh = F.one_hot(tr.from_numpy(m2f_mapillary_converted).long(), num_classes=self.n_classes).float()
        return converted_oh

_tasks: list[NpzRepresentation] = [ # some pre-baked representations
    rgb := RGBRepresentation("rgb"),
    HSVRepresentation("hsv", dependencies=[rgb]),
    EdgesRepresentation("edges_dexined"),
    EdgesRepresentation("edges_gb"),
    DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
    DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
    DepthRepresentation("depth_ufo", min_depth=0, max_depth=1),
    DepthRepresentation("depth_marigold", min_depth=0, max_depth=1),
    NormalsRepresentation("normals_sfm_manual202204"),
    OpticalFlowRepresentation("opticalflow_rife"),
    SemanticRepresentation("semantic_segprop8", classes=8, color_map=color_map_8classes),
    SemanticRepresentation("semantic_mask2former_coco_47429163_0", classes=coco_classes, color_map=coco_color_map),
    m2f_mapillary := SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes,
                                            color_map=mapillary_color_map),
    SemanticMask2FormerMapillaryConvertedPaper(m2f_mapillary),
    NpzRepresentation("softseg_gb", 3),
]
dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in _tasks}
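
# Minimal usage sketch (the path below is hypothetical and only for illustration):
#   task = dronescapes_task_types["depth_dpt"]
#   y = task.load_from_disk(Path("scene/depth_dpt/000000.npz"))   # tr.Tensor, clipped to [0, 0.999]
#   img = task.plot_fn(y)                                         # HxWx3 uint8 array, ready for plotting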