"""Dronescapes representations -- adds loading, writing and plotting capabilities to the dronescapes tasks"""
from __future__ import annotations
from pathlib import Path
from typing import Callable
import numpy as np
import torch as tr
import flow_vis
from skimage.color import rgb2hsv
from overrides import overrides
from matplotlib.cm import Spectral
from torch.nn import functional as F

try:
    from npz_representation import NpzRepresentation
except ImportError:
    from .npz_representation import NpzRepresentation

class RGBRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=3, **kwargs)

class HSVRepresentation(RGBRepresentation):
    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        rgb = super().load_from_disk(path)
        return tr.from_numpy(rgb2hsv(rgb)).float()

class EdgesRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=1, **kwargs)

class DepthRepresentation(NpzRepresentation):
    """DepthRepresentation. Implements depth task-specific stuff, like the Spectral colormap for plots."""
    def __init__(self, name: str, min_depth: float, max_depth: float, *args, **kwargs):
        super().__init__(name, n_channels=1, *args, **kwargs)
        self.min_depth = min_depth
        self.max_depth = max_depth

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        """Reads the npz data from disk and clips it to the [min_depth, max_depth] range"""
        res = super().load_from_disk(path)
        res_clip = res.clip(self.min_depth, self.max_depth)
        return res_clip

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x = x.detach().clip(0, 1).squeeze().cpu().numpy()
        _min, _max = np.percentile(x, [1, 95])
        x = np.nan_to_num((x - _min) / (_max - _min), False, 0, 0, 0).clip(0, 1)
        y: np.ndarray = Spectral(x)[..., 0:3] * 255
        return y.astype(np.uint8)

class NormalsRepresentation(NpzRepresentation):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=3, **kwargs)

class OpticalFlowRepresentation(NpzRepresentation):
    """OpticalFlowRepresentation. Implements flow task-specific stuff, like using flow_vis."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, n_channels=2, **kwargs)

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        _min, _max = x.min(0)[0].min(0)[0], x.max(0)[0].max(0)[0]
        x = ((x - _min) / (_max - _min)).nan_to_num(0, 0, 0).detach().cpu().numpy()
        return flow_vis.flow_to_color(x)

class SemanticRepresentation(NpzRepresentation):
    """SemanticRepresentation. Implements semantic task-specific stuff, like argmaxing if needed."""
    def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
        self.n_classes = len(list(range(classes)) if isinstance(classes, int) else classes)
        super().__init__(*args, **kwargs, n_channels=self.n_classes)
        self.classes = list(range(classes)) if isinstance(classes, int) else classes
        self.color_map = color_map
        assert len(color_map) == self.n_classes and self.n_classes > 1, (color_map, self.n_classes)

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        res = super().load_from_disk(path)
        if len(res.shape) == 3:
            assert res.shape[-1] == self.n_classes, f"Expected {self.n_classes} (HxWxC), got {res.shape[-1]}"
            res = res.argmax(-1)
        assert len(res.shape) == 2, f"Only argmaxed data supported, got: {res.shape}"
        res = F.one_hot(res.long(), num_classes=self.n_classes).float()
        return res

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
        for i in range(self.n_classes):
            new_images[x_argmax == i] = self.color_map[i]
        return new_images
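
# Note on SemanticRepresentation.load_from_disk above: argmaxed HxW maps stored on disk are converted to HxWxC
# one-hot float tensors. Illustrative example (hypothetical 2x2 map with 3 classes, not from the dataset):
#   F.one_hot(tr.tensor([[0, 2], [1, 1]]), num_classes=3).float()  # -> shape (2, 2, 3)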

def semantic_mapper(semantic_original: np.ndarray, mapping: dict[str, list[str]],
                    original_classes: list[str]) -> np.ndarray:
    """Maps an argmaxed semantic segmentation with a larger class set (original_classes) to a smaller one (mapping keys)"""
    assert len(semantic_original.shape) == 2, f"Only argmaxed data supported, got: {semantic_original.shape}"
    assert np.issubdtype(semantic_original.dtype, np.integer), semantic_original.dtype
    mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v) for _v in v] for k, v in mapping.items()}
    flat_mapping = {}
    for k, v in mapping_ix.items():
        for _v in v:
            flat_mapping[_v] = k
    mapped_data = np.vectorize(flat_mapping.get)(semantic_original).astype(np.uint8)
    return mapped_data
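
# Illustrative example of semantic_mapper with hypothetical classes (not part of the dronescapes data):
#   original_classes = ["grass", "tree", "sky"]
#   mapping = {"ground": ["grass", "tree"], "sky": ["sky"]}
#   semantic_mapper(np.array([[0, 1], [2, 0]]), mapping, original_classes)  # -> [[0, 0], [1, 0]] (uint8)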

class TaskMapper(NpzRepresentation):
    def __init__(self, *args, merge_fn: Callable[[list[tr.Tensor]], tr.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        assert len(self.dependencies) > 0 and self.dep_names[0] != self.name, "Need at least one dependency"
        self.merge_fn = merge_fn

    def load_from_disk(self, path: Path | list[Path]) -> tr.Tensor:
        paths = [path] if isinstance(path, Path) else path
        dep_data = [dep.load_from_disk(path) for dep, path in zip(self.dependencies, paths)]
        return self.merge_fn(dep_data)

    def plot_fn(self, x):
        raise NotImplementedError("Must be overridden by the user")
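
# TaskMapper derives a task from other tasks: load_from_disk reads each dependency and passes the resulting list of
# tensors to merge_fn. SemanticMask2FormerMapillaryConvertedPaper below is the concrete example in this module.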

color_map_8classes = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                      [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
                "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
                "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
                "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
                "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
                "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
                "scissors", "teddy bear", "hair drier", "toothbrush", "banner", "blanket", "bridge", "cardboard",
                "counter", "curtain", "door-stuff", "floor-wood", "flower", "fruit", "gravel", "house", "light",
                "mirror-stuff", "net", "pillow", "platform", "playingfield", "railroad", "river", "road", "roof",
                "sand", "sea", "shelf", "snow", "stairs", "tent", "towel", "wall-brick", "wall-stone", "wall-tile",
                "wall-wood", "water-other", "window-blind", "window-other", "tree-merged", "fence-merged",
                "ceiling-merged", "sky-other-merged", "cabinet-merged", "table-merged", "floor-other-merged",
                "pavement-merged", "mountain-merged", "grass-merged", "dirt-merged", "paper-merged",
                "food-other-merged", "building-other-merged", "rock-merged", "wall-other-merged", "rug-merged"]
coco_color_map = [[220, 20, 60], [119, 11, 32], [0, 0, 142], [0, 0, 230], [106, 0, 228], [0, 60, 100], [0, 80, 100],
                  [0, 0, 70], [0, 0, 192], [250, 170, 30], [100, 170, 30], [220, 220, 0], [175, 116, 175], [250, 0, 30],
                  [165, 42, 42], [255, 77, 255], [0, 226, 252], [182, 182, 255], [0, 82, 0], [120, 166, 157],
                  [110, 76, 0], [174, 57, 255], [199, 100, 0], [72, 0, 118], [255, 179, 240], [0, 125, 92],
                  [209, 0, 151], [188, 208, 182], [0, 220, 176], [255, 99, 164], [92, 0, 73], [133, 129, 255],
                  [78, 180, 255], [0, 228, 0], [174, 255, 243], [45, 89, 255], [134, 134, 103], [145, 148, 174],
                  [255, 208, 186], [197, 226, 255], [171, 134, 1], [109, 63, 54], [207, 138, 255], [151, 0, 95],
                  [9, 80, 61], [84, 105, 51], [74, 65, 105], [166, 196, 102], [208, 195, 210], [255, 109, 65],
                  [0, 143, 149], [179, 0, 194], [209, 99, 106], [5, 121, 0], [227, 255, 205], [147, 186, 208],
                  [153, 69, 1], [3, 95, 161], [163, 255, 0], [119, 0, 170], [0, 182, 199], [0, 165, 120],
                  [183, 130, 88], [95, 32, 0], [130, 114, 135], [110, 129, 133], [166, 74, 118], [219, 142, 185],
                  [79, 210, 114], [178, 90, 62], [65, 70, 15], [127, 167, 115], [59, 105, 106], [142, 108, 45],
                  [196, 172, 0], [95, 54, 80], [128, 76, 255], [201, 57, 1], [246, 0, 122], [191, 162, 208],
                  [255, 255, 128], [147, 211, 203], [150, 100, 100], [168, 171, 172], [146, 112, 198],
                  [210, 170, 100], [92, 136, 89], [218, 88, 184], [241, 129, 0], [217, 17, 255], [124, 74, 181],
                  [70, 70, 70], [255, 228, 255], [154, 208, 0], [193, 0, 92], [76, 91, 113], [255, 180, 195],
                  [106, 154, 176], [230, 150, 140], [60, 143, 255], [128, 64, 128], [92, 82, 55], [254, 212, 124],
                  [73, 77, 174], [255, 160, 98], [255, 255, 255], [104, 84, 109], [169, 164, 131], [225, 199, 255],
                  [137, 54, 74], [135, 158, 223], [7, 246, 231], [107, 255, 200], [58, 41, 149], [183, 121, 142],
                  [255, 73, 97], [107, 142, 35], [190, 153, 153], [146, 139, 141], [70, 130, 180], [134, 199, 156],
                  [209, 226, 140], [96, 36, 108], [96, 96, 96], [64, 170, 64], [152, 251, 152], [208, 229, 228],
                  [206, 186, 171], [152, 161, 64], [116, 112, 0], [0, 114, 143], [102, 102, 156], [250, 141, 255]]
mapillary_classes = ["Bird", "Ground Animal", "Curb", "Fence", "Guard Rail", "Barrier", "Wall", "Bike Lane",
                     "Crosswalk - Plain", "Curb Cut", "Parking", "Pedestrian Area", "Rail Track", "Road",
                     "Service Lane", "Sidewalk", "Bridge", "Building", "Tunnel", "Person", "Bicyclist",
                     "Motorcyclist", "Other Rider", "Lane Marking - Crosswalk", "Lane Marking - General",
                     "Mountain", "Sand", "Sky", "Snow", "Terrain", "Vegetation", "Water", "Banner", "Bench",
                     "Bike Rack", "Billboard", "Catch Basin", "CCTV Camera", "Fire Hydrant", "Junction Box",
                     "Mailbox", "Manhole", "Phone Booth", "Pothole", "Street Light", "Pole", "Traffic Sign Frame",
                     "Utility Pole", "Traffic Light", "Traffic Sign (Back)", "Traffic Sign (Front)", "Trash Can",
                     "Bicycle", "Boat", "Bus", "Car", "Caravan", "Motorcycle", "On Rails", "Other Vehicle", "Trailer",
                     "Truck", "Wheeled Slow", "Car Mount", "Ego Vehicle"]
mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153], [180, 165, 180], [90, 120, 150],
                       [102, 102, 156], [128, 64, 255], [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96],
                       [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232], [150, 100, 100], [70, 70, 70],
                       [150, 120, 90], [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128],
                       [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180], [190, 255, 255], [152, 251, 152],
                       [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220],
                       [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33], [100, 128, 160],
                       [142, 0, 0], [70, 100, 150], [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80],
                       [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32], [150, 0, 255],
                       [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                       [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]]

class SemanticMask2FormerMapillaryConvertedPaper(TaskMapper):
    def __init__(self, dep: NpzRepresentation):
        super().__init__("semantic_mask2former_swin_mapillary_converted",
                         dependencies=[dep], merge_fn=self._merge_fn, n_channels=8)
        self.mapping = {
            "land": ["Terrain", "Sand", "Snow"],
            "forest": ["Vegetation"],
            "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb",
                            "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner",
                            "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera",
                            "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame",
                            "Bike Rack"],
            "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole",
                     "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk",
                     "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
            "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle",
                               "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
            "water": ["Water"],
            "sky": ["Sky"],
            "hill": ["Mountain"]
        }
        self.color_map = color_map_8classes
        self.original_classes = mapillary_classes
        self.classes = list(self.mapping.keys())
        self.n_classes = len(self.classes)

    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
        for i in range(self.n_classes):
            new_images[x_argmax == i] = self.color_map[i]
        return new_images

    def _merge_fn(self, dep_data: list[tr.Tensor]) -> tr.Tensor:
        m2f_mapillary = dep_data[0].argmax(-1).numpy()
        m2f_mapillary_converted = semantic_mapper(m2f_mapillary, self.mapping, self.original_classes)
        converted_oh = F.one_hot(tr.from_numpy(m2f_mapillary_converted).long(), num_classes=self.n_classes).float()
        return converted_oh

_tasks: list[NpzRepresentation] = [
    rgb := RGBRepresentation("rgb"),
    HSVRepresentation("hsv", dependencies=[rgb]),
    EdgesRepresentation("edges_dexined"),
    EdgesRepresentation("edges_gb"),
    DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
    DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
    DepthRepresentation("depth_ufo", min_depth=0, max_depth=1),
    DepthRepresentation("depth_marigold", min_depth=0, max_depth=1),
    NormalsRepresentation("normals_sfm_manual202204"),
    OpticalFlowRepresentation("opticalflow_rife"),
    SemanticRepresentation("semantic_segprop8", classes=8, color_map=color_map_8classes),
    SemanticRepresentation("semantic_mask2former_coco_47429163_0", classes=coco_classes, color_map=coco_color_map),
    m2f_mapillary := SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes,
                                            color_map=mapillary_color_map),
    SemanticMask2FormerMapillaryConvertedPaper(m2f_mapillary),
    NpzRepresentation("softseg_gb", 3),
]
dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in _tasks}
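
# Minimal usage sketch (hypothetical paths, assuming .npz files produced by the dronescapes export scripts):
#   rgb_task = dronescapes_task_types["rgb"]
#   rgb_data = rgb_task.load_from_disk(Path("scene_1/rgb/000001.npz"))
#   rgb_plot = rgb_task.plot_fn(rgb_data)  # np.ndarray image ready for plotting/saving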
|