Meehai committed on
Commit
2351499
1 Parent(s): df90c1b

evaluate semantic segmentation script

.gitignore CHANGED
@@ -13,4 +13,5 @@ error.txt
 sanity_check.py
 commands.txt
 raw_data/npz_540p_2/
+ here.csv
README.md CHANGED
@@ -211,9 +211,65 @@ python scripts/dronescapes_viewer.py data/test_set_annotated_only/ # or any of t
 'semantic_mask2former_swin_mapillary_converted': torch.Size([5, 540, 960]),
 'semantic_segprop8': torch.Size([5, 540, 960])}
 ```
+ </details>
+
+ ## 3. Evaluation for semantic segmentation
+
+ In the paper we evaluate on the 3 test scenes (unseen at train time), as well as on the semi-supervised scenes (seen,
+ but a different split), against the human-annotated frames. The general evaluation script is
+ `scripts/evaluate_semantic_segmentation.py`.
+
+ General usage is:
+ ```
+ python scripts/evaluate_semantic_segmentation.py y_dir gt_dir -o results.csv --classes C1 C2 .. Cn
+   [--class_weights W1 W2 ... Wn] [--scenes s1 s2 ... sm]
+ ```
+
+ <details>
+ <summary> Script explanation </summary>
+
+ The script is a bit convoluted, so let's break it down into parts:
+
+ - `y_dir` and `gt_dir`: two directories of .npz files in the same format as the dataset (y_dir/1.npz, gt_dir/55.npz etc.)
+ - `classes`: a list of classes in the order in which they appear in the prediction and GT files
+ - `class_weights` (optional, but used in the paper): how much to weigh each class. In the paper we compute these
+   weights as the number of pixels of each of the 8 classes over the whole dataset (train/val/semisup/test), which
+   yields the numbers below; a small sketch of this computation follows this block.
+ - `scenes`: if `y_dir` and `gt_dir` contain multiple scenes that you want to evaluate separately, the script allows
+   you to pass the prefixes of those scenes. For example, `data/test_set_annotated_only/semantic_segprop8/` actually
+   contains 3 scenes in its npz files, and in the paper we evaluate each scene independently. Even though the script
+   outputs a single csv file with one entry per npz file, the scenes are used for proper aggregation at scene level.
+ </details>
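+
+ For reference, below is a minimal sketch (not part of the repository) of how such pixel-frequency class weights could
+ be derived from the GT npz files. The directory paths and the `arr_0` key inside the npz archives are assumptions
+ about the storage format, so adapt them to your local copy:
+
+ ```
+ from pathlib import Path
+ import numpy as np
+
+ def pixel_frequency_weights(gt_dirs: list[Path], num_classes: int = 8) -> np.ndarray:
+     """Count the pixels of each class over all GT npz files and normalize so the weights sum to 1."""
+     counts = np.zeros(num_classes, dtype=np.int64)
+     for gt_dir in gt_dirs:
+         for npz_path in sorted(gt_dir.glob("*.npz")):
+             sema = np.load(npz_path)["arr_0"]  # assumed: HxW array of class indices in [0, num_classes)
+             counts += np.bincount(sema.ravel().astype(np.int64), minlength=num_classes)[:num_classes]
+     return counts / counts.sum()
+
+ # hypothetical usage over the GT dirs of all splits (train/val/semisup/test):
+ # weights = pixel_frequency_weights([Path("data/train_set/semantic_segprop8"), ...])
+ ```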
+
+ <details>
+ <summary> Reproducing paper results for Mask2Former </summary>
+
+ ```
+ # the first positional argument is the predictions dir: change it to point to your own predictions if needed
+ python scripts/evaluate_semantic_segmentation.py \
+   data/test_set_annotated_only/semantic_mask2former_swin_mapillary_converted/ \
+   data/test_set_annotated_only/semantic_segprop8/ \
+   -o results.csv \
+   --classes land forest residential road little-objects water sky hill \
+   --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 \
+   --scenes barsana_DJI_0500_0501_combined_sliced_2700_14700 comana_DJI_0881_full norway_210821_DJI_0015_full
+ ```
+
+ Should output:
+ ```
+ scene                                              iou      f1
+ barsana_DJI_0500_0501_combined_sliced_2700_14700   63.367   75.327
+ comana_DJI_0881_full                               60.554   73.757
+ norway_210821_DJI_0015_full                        37.998   45.928
+ overall avg                                        53.973   65.004
+ ```
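+
+ The `overall avg` row is the unweighted mean of the three per-scene rows, e.g. iou = (63.367 + 60.554 + 37.998) / 3 =
+ 53.973 and f1 = (75.327 + 73.757 + 45.928) / 3 = 65.004.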
+
+ Not providing `--scenes` aggregates the statistics across all 3 scenes before computing the metrics (instead of
+ computing the metrics per scene and then averaging them):
+
+ ```
+         iou     f1
+ scene
+ all    60.456  73.261
+ ```
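+
+ Conceptually, the two modes differ in where the aggregation happens. A toy pandas sketch (hypothetical numbers, not
+ part of the script) for a single class:
+
+ ```
+ import pandas as pd
+
+ # per-scene confusion-matrix counts for one class
+ stats = pd.DataFrame({"tp": [80, 20], "fp": [20, 30], "fn": [10, 40]}, index=["scene_a", "scene_b"])
+
+ iou_per_scene = stats["tp"] / (stats["tp"] + stats["fp"] + stats["fn"])  # per-scene metrics, then mean (--scenes ...)
+ mean_of_scene_ious = iou_per_scene.mean()                                # ~0.475
+
+ pooled = stats.sum()                                                     # pool the counts first (no --scenes)
+ pooled_iou = pooled["tp"] / (pooled["tp"] + pooled["fp"] + pooled["fn"]) # 0.5
+ # the two values differ in general, which is why per-scene evaluation is reported separately in the paper
+ ```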
 </details>

 ## TODOs
 - convert camera normals to world normals
- - evaluation script for sseg
 
scripts/convert_m2f_to_dronescapes.py CHANGED
File without changes
scripts/evaluate_semantic_segmentation.py ADDED
@@ -0,0 +1,105 @@
+ """
+ Evaluation script for semantic segmentation for dronescapes. Outputs F1Score and mIoU for the 8 classes and each frame.
+ Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir -o results.csv
+ """
+ import sys
+ import os
+ from loguru import logger
+ from pathlib import Path
+ from argparse import ArgumentParser, Namespace
+ from tempfile import TemporaryDirectory
+ from functools import partial
+ from torchmetrics.functional.classification import multiclass_stat_scores
+ from tqdm import trange
+ import torch as tr
+ import numpy as np
+ import pandas as pd
+
+ sys.path.append(Path(__file__).parents[1].__str__())
+ from dronescapes_reader import MultiTaskDataset, SemanticRepresentation
+
+ def compute_metrics(tp: np.ndarray, fp: np.ndarray, tn: np.ndarray, fn: np.ndarray) -> pd.DataFrame:
+     precision = tp / (tp + fp)
+     recall = tp / (tp + fn)
+     f1 = 2 * precision * recall / (precision + recall)
+     iou = tp / (tp + fp + fn)
+     return pd.DataFrame([precision, recall, f1, iou], index=["precision", "recall", "f1", "iou"]).T
+
+ def do_one_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
+     df = df.query("class_name == @class_name").drop(columns="class_name")
+     df.loc["all"] = df.sum()  # aggregate row over all frames for this class
+     df[["precision", "recall", "f1", "iou"]] = compute_metrics(df["tp"], df["fp"], df["tn"], df["fn"])
+     df.insert(0, "class_name", class_name)
+     df = df.fillna(0).round(3)
+     return df
+
+ def compute_raw_stats_per_class(reader: MultiTaskDataset, classes: list[str]) -> pd.DataFrame:
+     res = tr.zeros((len(reader), len(classes), 4)).long()  # (N, n_classes, 4)
+     index = []
+     for i in trange(len(reader)):
+         x = reader[i]
+         y, gt = x[0]["pred"], x[0]["gt"]
+         res[i] = multiclass_stat_scores(y, gt, num_classes=len(classes), average=None)[:, 0:4]  # (tp, fp, tn, fn)
+         index.append(x[1])
+     res = res.reshape(len(reader) * len(classes), 4)  # one row per (frame, class)
+     df = pd.DataFrame(res, index=np.repeat(index, len(classes)), columns=["tp", "fp", "tn", "fn"])
+     df.insert(0, "class_name", np.array(classes)[:, None].repeat(len(index), 1).T.flatten())
+     return df
+
+ def compute_final_per_scene(res: pd.DataFrame, scene: str, classes: list[str],
+                             class_weights: list[float]) -> tuple[str, float, float]:
+     df = res.iloc[[x.startswith(scene) for x in res.index]]
+     # aggregate the individual per-frame predictions of this scene, per class
+     df_scene = df[["class_name", "tp", "fp", "tn", "fn"]].groupby("class_name") \
+         .apply(lambda x: x.sum(), include_groups=False).loc[classes]
+     df_metrics = compute_metrics(df_scene["tp"], df_scene["fp"], df_scene["tn"], df_scene["fn"])
+     iou_weighted = (df_metrics["iou"] * class_weights).sum()
+     f1_weighted = (df_metrics["f1"] * class_weights).sum()
+     return scene, iou_weighted, f1_weighted
+
+ def get_args() -> Namespace:
+     parser = ArgumentParser()
+     parser.add_argument("y_dir", type=lambda p: Path(p).absolute())
+     parser.add_argument("gt_dir", type=lambda p: Path(p).absolute())
+     parser.add_argument("--output_path", "-o", type=Path, required=True)
+     parser.add_argument("--classes", required=True, nargs="+")
+     parser.add_argument("--class_weights", nargs="+", type=float)
+     parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
+     args = parser.parse_args()
+     if args.class_weights is None:
+         args.class_weights = [1 / len(args.classes)] * len(args.classes)
+     assert (a := len(args.class_weights)) == (b := len(args.classes)), (a, b)
+     assert np.fabs(sum(args.class_weights) - 1) < 1e-3, (args.class_weights, sum(args.class_weights))
+     assert args.output_path.suffix == ".csv", f"Prediction file must end in .csv, got: '{args.output_path.suffix}'"
+     if len(args.scenes) > 0:
+         logger.info(f"Scenes: {args.scenes}")
+     return args
+
+ def main(args: Namespace):
+     temp_dir = Path(TemporaryDirectory().name)  # temp root with "pred"/"gt" symlinks so MultiTaskDataset reads both dirs
+     temp_dir.mkdir(exist_ok=False)
+     os.symlink(args.y_dir, temp_dir / "pred")
+     os.symlink(args.gt_dir, temp_dir / "gt")
+
+     if not args.output_path.exists():
+         sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
+         reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
+         df = compute_raw_stats_per_class(reader, args.classes)
+         res = pd.concat([do_one_class(df, class_name) for class_name in args.classes])
+         res.to_csv(args.output_path)
+         logger.info(f"Stored raw metrics file to: '{args.output_path}'")
+     else:
+         logger.info(f"Loading raw metrics from: '{args.output_path}'. Delete this file if you want to recompute.")
+         res = pd.read_csv(args.output_path, index_col=0)
+
+     final_agg = []
+     for scene in args.scenes:
+         final_agg.append(compute_final_per_scene(res, scene, classes=args.classes, class_weights=args.class_weights))
+     final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
+     if len(args.scenes) > 1:
+         final_agg.loc["mean"] = final_agg.mean()
+     final_agg = (final_agg * 100).round(3)
+     print(final_agg)
+
+ if __name__ == "__main__":
+     main(get_args())