Meehai committed
Commit 304b1ab
1 Parent(s): f8d91d8

fix eval script

README.md CHANGED
@@ -288,11 +288,12 @@ python scripts/evaluate_semantic_segmentation.py \
 
 Should output:
 ```
-scene iou f1
-barsana_DJI_0500_0501_combined_sliced_2700_14700 63.36 75.32
-comana_DJI_0881_full 60.55 73.75
-norway_210821_DJI_0015_full 37.99 45.92
-overall avg 53.97 65.00
+scene iou f1
+barsana_DJI_0500_0501_combined_sliced_2700_14700 63.371 75.338
+comana_DJI_0881_full 60.559 73.779
+norway_210821_DJI_0015_full 37.986 45.939
+mean 53.972 65.019
+
 ```
 
 Not providing `--scenes` will average across all 3 scenes jointly, rather than computing each metric per scene and then averaging:
@@ -308,18 +309,18 @@ all 60.456 73.261
 
 #### IoU
 
-| method | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
-|:-|:-|:-|:-|
-| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 63.36 | 60.55 | 37.99 |
-| [NGC(LR)](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf) | 46.51 | 45.59 | 30.17 |
-| [CShift](https://www.bmvc2021-virtualconference.com/assets/papers/0455.pdf)[^1] | 46.27 | 43.67 | 29.09 |
-| [NGC](https://cdn.aaai.org/ojs/16283/16283-13-19777-1-2-20210518.pdf)[^1] | 44.34 | 38.99 | 22.63 |
+| method | mean | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
+|:-|:-|:-|:-|:-|
+| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 53.97 | 63.37 | 60.55 | 37.98 |
+| [NGC(LR)](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf) | 40.75 | 46.51 | 45.59 | 30.17 |
+| [CShift](https://www.bmvc2021-virtualconference.com/assets/papers/0455.pdf)[^1] | 39.67 | 46.27 | 43.67 | 29.09 |
+| [NGC](https://cdn.aaai.org/ojs/16283/16283-13-19777-1-2-20210518.pdf)[^1] | 35.32 | 44.34 | 38.99 | 22.63 |
 
 [^1]: reported in the [Dronescapes paper](https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf).
 
 #### F1 Score
 
-| method | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
-|:-|:-|:-|:-|
-| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 75.32 | 73.75 | 45.92 |
+| method | mean | barsana_DJI_0500_0501_combined_sliced_2700_14700 | comana_DJI_0881_full | norway_210821_DJI_0015_full |
+|:-|:-|:-|:-|:-|
+| [Mask2Former](https://openaccess.thecvf.com/content/CVPR2022/papers/Cheng_Masked-Attention_Mask_Transformer_for_Universal_Image_Segmentation_CVPR_2022_paper.pdf) | 65.01 | 75.33 | 73.77 | 45.93 |
 
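The two aggregation modes in the README above differ only in when the averaging happens. A minimal sketch of the per-scene mode in plain Python (no repo code; the `scores` dict just restates the Mask2Former numbers from the "Should output" block):

```
# Per-scene mode (--scenes ...): compute iou/f1 per scene, then average the scenes.
scores = {  # (iou, f1) per scene, from the "Should output" block above
    "barsana_DJI_0500_0501_combined_sliced_2700_14700": (63.371, 75.338),
    "comana_DJI_0881_full": (60.559, 73.779),
    "norway_210821_DJI_0015_full": (37.986, 45.939),
}
mean_iou = sum(iou for iou, _ in scores.values()) / len(scores)  # 53.972
mean_f1 = sum(f1 for _, f1 in scores.values()) / len(scores)     # 65.019
print(f"mean {mean_iou:.3f} {mean_f1:.3f}")

# Without --scenes, the metrics are instead computed over the 3 scenes as one
# set (the "all 60.456 73.261" row), which generally differs from the mean of
# per-scene metrics because the scenes contribute different frame counts.
```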
scripts/eval_script_old.py CHANGED
@@ -15,16 +15,15 @@ done
 
 Then run this:
 cd /dronescapes/scripts
-python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/comana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/comana/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/comana --overwrite
-python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/barsana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/barsana/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/barsana --overwrite
-python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/norway/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/norway/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/norway --overwrite
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/comana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/comana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/comana --overwrite
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/barsana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/barsana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/barsana --overwrite
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/norway/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/norway/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/norway --overwrite
 """
 
 from __future__ import annotations
 import os
-import cv2
 import numpy as np
-import multiprocessing as mp
+import pandas as pd
 from natsort import natsorted
 from pathlib import Path
 import shutil
@@ -42,72 +41,48 @@ def convert_label2multi(label, class_id):
     return np.array(out, dtype=bool)
 
 def process_all_video_frames(gt_files: list[Path], pred_files: list[Path], class_id: int):
-    global_true_positives = 0
-    global_true_negatives = 0
-    global_false_positives = 0
-    global_false_negatives = 0
-
+    TP, TN, FP, FN = {}, {}, {}, {}
     for gt_file, pred_file in tqdm(zip(gt_files, pred_files), total=len(gt_files), desc=f"{class_id=}"):
-        gt_label = np.load(gt_file, allow_pickle=True)["arr_0"]
-        net_label = np.load(pred_file, allow_pickle=True)["arr_0"]
-
-        if gt_label.shape == ():
-            gt_label = gt_label.item()['data']
-        gt_label = convert_label2multi(gt_label, class_id)
-        net_label = convert_label2multi(net_label, class_id)
+        gt_label_raw = np.load(gt_file, allow_pickle=True)["arr_0"]
+        net_label_raw = np.load(pred_file, allow_pickle=True)["arr_0"]
+        gt_label = convert_label2multi(gt_label_raw, class_id)
+        net_label = convert_label2multi(net_label_raw, class_id)
 
         true_positives = np.count_nonzero(gt_label * net_label)
         true_negatives = np.count_nonzero((gt_label + net_label) == 0)
         false_positives = np.count_nonzero((np.array(net_label, dtype=int) - np.array(gt_label, dtype=int)) > 0)
         false_negatives = np.count_nonzero((np.array(gt_label, dtype=int) - np.array(net_label, dtype=int)) > 0)
 
-        global_true_positives += true_positives
-        global_true_negatives += true_negatives
-        global_false_positives += false_positives
-        global_false_negatives += false_negatives
-
-    global_precision = global_true_positives / (global_true_positives + global_false_positives + np.spacing(1))
-    global_recall = global_true_positives / (global_true_positives + global_false_negatives + np.spacing(1))
+        TP[gt_file.name] = true_positives
+        TN[gt_file.name] = true_negatives
+        FP[gt_file.name] = false_positives
+        FN[gt_file.name] = false_negatives
+    df = pd.DataFrame([TP, FP, TN, FN], index=["tp", "fp", "tn", "fn"]).T
+    global_TP, global_TN, global_FP, global_FN = sum(TP.values()), sum(TN.values()), sum(FP.values()), sum(FN.values())
+    global_precision = global_TP / (global_TP + global_FP + np.spacing(1))
+    global_recall = global_TP / (global_TP + global_FN + np.spacing(1))
     global_f1_score = (2 * global_precision * global_recall) / (global_precision + global_recall + np.spacing(1))
-    global_iou = global_true_positives / (global_true_positives + global_false_positives + global_false_negatives + np.spacing(1))
+    global_iou = global_TP / (global_TP + global_FP + global_FN + np.spacing(1))
 
     return (global_precision, global_recall, global_f1_score, global_iou)
 
 def join_results(args: argparse.Namespace):
-    assert args.num_classes in (7, 8, 10), args.num_classes
-    if args.num_classes == 7:
-        CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky']
-        CLASS_WEIGHTS = [0.28172092, 0.37426183, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721]
-        #[0.37426183 0.28172092 0.13341699 0.08660721 0.05987466 0.05937348 0.00474491]
-    elif args.num_classes == 8:
-        CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky', 'hill']
-        CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721, 0.06836531]
-        #[0.30589653 0.28172092 0.13341699 0.08660721 0.06836531 0.05987466 0.05937348 0.00474491]
-    elif args.num_classes == 10:
-        CLASS_NAMES = ['land', 'forest', 'low-level', 'road', 'high-level', 'cars', 'water', 'sky', 'hill', 'person']
-        CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.09954808, 0.05937348, 0.03386891, 0.00445865, 0.05987466, 0.08660721, 0.06836531, 0.00028626]
-        # [0.30589653 0.28172092 0.09954808 0.08660721 0.06836531 0.05987466 0.05937348 0.03386891 0.00445865 0.00028626]
-
-    out_path = os.path.join(args.out_dir, 'joined_results_' + str(args.num_classes) + 'classes.txt')
+    out_path = os.path.join(args.out_dir, 'joined_results_' + str(len(args.classes)) + 'classes.txt')
     out_file = open(out_path, 'w')
 
     joined_f1_scores_mean = []
     joined_iou_scores_mean = []
 
-    for CLASS_ID in range(0, len(CLASS_NAMES)):
-
+    for CLASS_ID in range(len(args.classes)):
         RESULT_FILE = os.path.join(args.out_dir, 'evaluation_dronescapes_CLASS_' + str(CLASS_ID) + '.txt')
-
         result_file_lines = open(RESULT_FILE, 'r').read().splitlines()
-
         for idx, line in enumerate(result_file_lines):
             if idx != 0:
                 splits = line.split(',')
                 f1_score = float(splits[2])
                 iou_score = float(splits[3])
 
-        out_file.write('------------------------- ' + ' CLASS ' + str(CLASS_ID) + ' - ' + CLASS_NAMES[CLASS_ID] + ' --------------------------------------------\n')
+        out_file.write('------------------------- ' + ' CLASS ' + str(CLASS_ID) + ' - ' + args.classes[CLASS_ID] + ' --------------------------------------------\n')
         # F1Score
         out_file.write('F1-Score: ' + str(round(f1_score, 4)) + '\n')
         # Mean IOU
@@ -122,13 +97,33 @@ def join_results(args: argparse.Namespace):
         out_file.write('\n\n')
 
     out_file.write('\n\n')
-    out_file.write('Weighted Mean F1-Score all classes: ' + str(round(np.sum(np.dot(joined_f1_scores_mean, CLASS_WEIGHTS)), 4)) + '\n')
-    out_file.write('Weighted Mean IOU all classes: ' + str(round(np.sum(np.dot(joined_iou_scores_mean, CLASS_WEIGHTS)), 4)) + '\n')
+    out_file.write('Weighted Mean F1-Score all classes: ' + str(round(np.sum(np.dot(joined_f1_scores_mean, args.class_weights)), 4)) + '\n')
+    out_file.write('Weighted Mean IOU all classes: ' + str(round(np.sum(np.dot(joined_iou_scores_mean, args.class_weights)), 4)) + '\n')
     out_file.write('\n\n')
 
     out_file.close()
     print(f"Written to '{out_path}'")
 
+def compat_old_txt_file(args: argparse.Namespace):
+    (tempdir := Path(tempfile.TemporaryDirectory().name)).mkdir()
+    (tempdir / "gt").mkdir()
+    (tempdir / "pred").mkdir()
+    print(f"old pattern detected. Copying files to a temp dir: {tempdir}")
+    test_files = natsorted(open(args.txt_path, "r").read().splitlines())
+    scenes = natsorted(set(os.path.dirname(x) for x in test_files))
+    assert len(scenes) == 1, scenes
+    files = natsorted([x for x in test_files if scenes[0] in x])
+    gt_files = [f"{args.gt_path}/{f.split('/')[0]}/segprop{len(args.classes)}/{f.split('/')[1]}.npz" for f in files]
+    pred_files = [f"{args.pred_path}/{f.split('/')[0]}/{int(f.split('/')[1]):06}.npz" for f in files]
+    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
+    for _file in gt_files:
+        os.symlink(_file, tempdir / "gt" / Path(_file).name)
+    for _file in pred_files:
+        os.symlink(_file, tempdir / "pred" / Path(_file).name)
+    args.gt_path = tempdir / "gt"
+    args.pred_path = tempdir / "pred"
+    args.txt_path = None
+
 def main(args: argparse.Namespace):
     gt_files = natsorted([x for x in args.gt_path.iterdir()], key=lambda x: Path(x).name)
     pred_files = natsorted([x for x in args.pred_path.iterdir()], key=lambda x: Path(x).name)
@@ -149,41 +144,38 @@ if __name__ == "__main__":
     Comana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20221208_new_comana_clip/only_manually_annotated_test_files_30.txt
     gt_path: /Date3/hpc/datasets/dronescapes/all_scenes
     pred_path: /Date3/hpc/code/Mask2Former/demo_dronescapes/outputs_dronescapes_compatible/mapillary_sseg
+    NC = 7
+    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky']
+    CLASS_WEIGHTS = [0.28172092, 0.37426183, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721]
+    NC = 8
+    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky', 'hill']
+    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721, 0.06836531]
+    NC = 10
+    CLASS_NAMES = ['land', 'forest', 'low-level', 'road', 'high-level', 'cars', 'water', 'sky', 'hill', 'person']
+    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.09954808, 0.05937348, 0.03386891, 0.00445865, 0.05987466, 0.08660721, 0.06836531, 0.00028626]
     """
     parser = argparse.ArgumentParser()
     parser.add_argument("--gt_path", type=Path, required=True)
     parser.add_argument("--pred_path", type=Path, required=True)
     parser.add_argument("--out_dir", "-o", required=True, type=Path, default=Path(__file__).parent / "out_dir")
-    parser.add_argument("--num_classes", type=int, default=8)
+    parser.add_argument("--classes", nargs="+")
+    parser.add_argument("--class_weights", type=float, nargs="+", required=True)
     parser.add_argument("--txt_path")
     parser.add_argument("--overwrite", action="store_true")
     args = parser.parse_args()
+    if args.classes is None:
+        print("Class names not provided")
+        args.classes = list(map(str, range(len(args.class_weights))))
+    assert len(args.classes) == len(args.class_weights), (args.classes, args.class_weights)
+    assert len(args.classes) in (7, 8, 10), len(args.classes)
     assert not args.out_dir.exists() or args.overwrite, f"'{args.out_dir}' exists. Use --overwrite"
     shutil.rmtree(args.out_dir, ignore_errors=True)
     os.makedirs(args.out_dir, exist_ok=True)
 
     if args.txt_path is not None:
-        (tempdir := Path(tempfile.TemporaryDirectory().name)).mkdir()
-        (tempdir / "gt").mkdir()
-        (tempdir / "pred").mkdir()
-        print(f"old pattern detected. Copying files to a temp dir: {tempdir}")
-
-        test_files = natsorted(open(args.txt_path, "r").read().splitlines())
-        scenes = natsorted(set(os.path.dirname(x) for x in test_files))
-        assert len(scenes) == 1, scenes
-        files = natsorted([x for x in test_files if scenes[0] in x])
-        gt_files = [f"{args.gt_path}/{f.split('/')[0]}/segprop{args.num_classes}/{f.split('/')[1]}.npz" for f in files]
-        pred_files = [f"{args.pred_path}/{f.split('/')[0]}/{int(f.split('/')[1]):06}.npz" for f in files]
-        assert all(Path(x).exists() for x in [*gt_files, *pred_files])
-        for _file in gt_files:
-            os.symlink(_file, tempdir / "gt" / Path(_file).name)
-        for _file in pred_files:
-            os.symlink(_file, tempdir / "pred" / Path(_file).name)
-        args.gt_path = tempdir / "gt"
-        args.pred_path = tempdir / "pred"
-        args.txt_path = None
-
-    for class_id in range(args.num_classes):
+        compat_old_txt_file(args)
+
+    for class_id in range(len(args.classes)):
         args.class_id = class_id
         main(args)
     join_results(args)
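For reference, the refactor of `process_all_video_frames` above changes the bookkeeping (per-file dicts plus a pandas DataFrame) but not the metric definitions: counts are still pooled over all frames for a class and each metric is computed once from the totals. A standalone sketch of those formulas (the `eps` term mirrors the script's `np.spacing(1)` guard; the counts in the usage line are made up):

```
import numpy as np

def global_metrics(tp: int, tn: int, fp: int, fn: int) -> tuple[float, float, float, float]:
    """Precision, recall, F1 and IoU from confusion counts pooled over all frames."""
    eps = np.spacing(1)  # tiny epsilon, avoids 0/0 exactly as in eval_script_old.py
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    f1 = (2 * precision * recall) / (precision + recall + eps)
    iou = tp / (tp + fp + fn + eps)  # true negatives never enter IoU
    return precision, recall, f1, iou

print(global_metrics(tp=900, tn=8000, fp=100, fn=200))  # illustrative counts only
```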
scripts/evaluate_semantic_segmentation.py CHANGED
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 """
 Evaluation script for semantic segmentation for dronescapes. Outputs F1Score and mIoU for the classes and each frame.
 Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir --classes C1 .. Cn [--class_weights W1 .. Wn] -o results.csv
@@ -38,7 +39,8 @@ def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str]) ->
     index = []
     for i in trange(len(reader)):
         x = reader[i]
-        y, gt = x[0]["pred"], x[0]["gt"]
+        y = x[0]["pred"].argmax(-1) if x[0]["pred"].dtype != tr.int64 else x[0]["pred"]
+        gt = x[0]["gt"].argmax(-1) if x[0]["gt"].dtype != tr.int64 else x[0]["gt"]
         res[i] = multiclass_stat_scores(y, gt, num_classes=len(classes), average=None)[:, 0:4]
         index.append(x[1])
     res = res.reshape(len(reader) * len(classes), 4)
@@ -89,13 +91,10 @@ def main(args: Namespace):
     assert (a := len(reader.all_files_per_repr["gt"])) == (b := len(reader.all_files_per_repr["pred"])), f"{a} vs {b}"
 
     # Compute TP, FP, TN, FN for each frame
-    if not args.output_path.exists():
-        raw_stats = compute_raw_stats_per_frame(reader, args.classes)
-        logger.info(f"Stored raw metrics file to: '{args.output_path}'")
-        raw_stats.to_csv(args.output_path)
-    else:
-        logger.info(f"Loading raw metrics from: '{args.output_path}'. Delete this file if you want to recompute.")
-        raw_stats = pd.read_csv(args.output_path, index_col=0)
+    raw_stats = compute_raw_stats_per_frame(reader, args.classes)
+    logger.info(f"Stored raw metrics file to: '{args.output_path}'")
+    Path(args.output_path).parent.mkdir(exist_ok=True, parents=True)
+    raw_stats.to_csv(args.output_path)
 
     # Compute Precision, Recall, F1, IoU for each class and put them together in the same df.
     metrics_per_class = pd.concat([compute_metrics_by_class(raw_stats, class_name) for class_name in args.classes])
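The `argmax` guard added to `compute_raw_stats_per_frame` lets the reader yield either integer class-index maps or float one-hot/probability maps. A minimal sketch of the same pattern in isolation (the tensor shapes are assumptions for illustration; `multiclass_stat_scores` is the torchmetrics function the script already uses, and `tr` is the script's alias for torch):

```
import torch as tr
from torchmetrics.functional.classification import multiclass_stat_scores

num_classes = 8
pred = tr.rand(540, 960, num_classes)        # float per-class scores, H x W x C
gt = tr.randint(0, num_classes, (540, 960))  # int64 class indices, H x W

# Same guard as in the diff: reduce the channel axis only for non-integer inputs.
y = pred.argmax(-1) if pred.dtype != tr.int64 else pred
gt = gt.argmax(-1) if gt.dtype != tr.int64 else gt

# One row per class: (tp, fp, tn, fn, support); the script keeps columns 0:4.
stats = multiclass_stat_scores(y.flatten(), gt.flatten(), num_classes=num_classes, average=None)
print(stats[:, 0:4].shape)  # torch.Size([8, 4])
```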