Meehai committed on
Commit
f5a8641
1 Parent(s): c86db9a

little updates -- added some pre-baked representations in init file

Browse files
.gitignore CHANGED
@@ -14,4 +14,5 @@ sanity_check.py
14
  commands.txt
15
  raw_data/npz_540p_2/
16
  here.csv
 
17
 
 
14
  commands.txt
15
  raw_data/npz_540p_2/
16
  here.csv
17
+ *.ttf
18
 
dronescapes_reader/__init__.py CHANGED
@@ -1,3 +1,13 @@
1
  """init file"""
2
  from .multitask_dataset import MultiTaskDataset
3
  from .dronescapes_representations import DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation
 
 
 
 
 
 
 
 
 
 
 
1
  """init file"""
2
  from .multitask_dataset import MultiTaskDataset
3
  from .dronescapes_representations import DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation
4
+
5
+ _color_map=[[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
6
+ [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
7
+ dronescapes_task_types = { # some pre-baked representations
8
+ "depth_dpt": DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
9
+ "depth_sfm_manual202204": DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
10
+ "opticalflow_rife": OpticalFlowRepresentation,
11
+ "semantic_segprop8": SemanticRepresentation("semantic_segprop8", classes=8, color_map=_color_map),
12
+ "semantic_mask2former_swin_mapillary_converted":
13
+ SemanticRepresentation("semantic_mask2former_swin_mapillary_converted", classes=8, color_map=_color_map)}
dronescapes_reader/multitask_dataset.py CHANGED
@@ -2,7 +2,7 @@
2
  """MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader."""
3
  from __future__ import annotations
4
  from pathlib import Path
5
- from argparse import Namespace, ArgumentParser
6
  from pprint import pprint
7
  from natsort import natsorted
8
  from loguru import logger
@@ -113,8 +113,10 @@ class MultiTaskDataset(Dataset):
113
  self._tasks = []
114
  for task_name in self.task_names:
115
  t = self.task_types[task_name]
116
- if not isinstance(t, NpzRepresentation):
117
  t = t(task_name)
 
 
118
  self._tasks.append(t)
119
  assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self._task_names, self._tasks)
120
  return self._tasks
@@ -175,7 +177,6 @@ class MultiTaskDataset(Dataset):
175
  logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
176
 
177
  files_per_repr = {node: [] for node in nodes}
178
- in_file_names = {node: [f.name for f in in_files[node]] for node in nodes}
179
  for node in nodes:
180
  for file_name in all_files:
181
  file_path = name_to_node_path[node].get(file_name, None)
 
2
  """MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader."""
3
  from __future__ import annotations
4
  from pathlib import Path
5
+ from argparse import ArgumentParser
6
  from pprint import pprint
7
  from natsort import natsorted
8
  from loguru import logger
 
113
  self._tasks = []
114
  for task_name in self.task_names:
115
  t = self.task_types[task_name]
116
+ try:
117
  t = t(task_name)
118
+ except Exception:
119
+ pass
120
  self._tasks.append(t)
121
  assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self._task_names, self._tasks)
122
  return self._tasks
 
177
  logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
178
 
179
  files_per_repr = {node: [] for node in nodes}
 
180
  for node in nodes:
181
  for file_name in all_files:
182
  file_path = name_to_node_path[node].get(file_name, None)
scripts/dronescapes_viewer.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
scripts/dronescapes_viewer.py CHANGED
@@ -2,23 +2,13 @@
2
  import sys
3
  from pathlib import Path
4
  sys.path.append(Path(__file__).parents[1].__str__())
5
- from functools import partial
6
- from dronescapes_reader import MultiTaskDataset, DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation
7
  from pprint import pprint
8
  from torch.utils.data import DataLoader
9
  import random
10
 
11
  def main():
12
- sema_repr = partial(SemanticRepresentation, classes=8, color_map=[[0, 255, 0], [0, 127, 0], [255, 255, 0],
13
- [255, 255, 255], [255, 0, 0], [0, 0, 255],
14
- [0, 255, 255], [127, 127, 63]])
15
- reader = MultiTaskDataset(sys.argv[1], handle_missing_data="fill_none",
16
- task_types={"depth_dpt": DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
17
- "depth_sfm_manual202204": DepthRepresentation("depth_sfm_manual202204",
18
- min_depth=0, max_depth=300),
19
- "opticalflow_rife": OpticalFlowRepresentation,
20
- "semantic_segprop8": sema_repr,
21
- "semantic_mask2former_swin_mapillary_converted": sema_repr})
22
  print(reader)
23
 
24
  print("== Shapes ==")
 
2
  import sys
3
  from pathlib import Path
4
  sys.path.append(Path(__file__).parents[1].__str__())
5
+ from dronescapes_reader import MultiTaskDataset, dronescapes_task_types
 
6
  from pprint import pprint
7
  from torch.utils.data import DataLoader
8
  import random
9
 
10
  def main():
11
+ reader = MultiTaskDataset(sys.argv[1], handle_missing_data="fill_none", task_types=dronescapes_task_types)
 
 
 
 
 
 
 
 
 
12
  print(reader)
13
 
14
  print("== Shapes ==")