Meehai committed on
Commit
2db37a2
1 Parent(s): ee41033

little update

Browse files
scripts/evaluate_semantic_segmentation.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
  Evaluation script for semantic segmentation for dronescapes. Outputs F1Score and mIoU for the 8 classes and each frame.
3
- Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir -o results.csv
4
  """
5
  import sys
6
  import os
@@ -84,17 +84,19 @@ def main(args: Namespace):
84
  if not args.output_path.exists():
85
  sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
86
  reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
87
- df = compute_raw_stats_per_class(reader, args.classes)
88
- res = pd.concat([do_one_class(df, class_name) for class_name in args.classes])
89
- res.to_csv(args.output_path)
90
  logger.info(f"Stored raw metrics file to: '{args.output_path}'")
 
91
  else:
92
- logger.info(f"Loading raw metris from: '{args.output_path}'. Delete this file if you want to recompute.")
93
- res = pd.read_csv(args.output_path, index_col=0)
 
 
94
 
95
  final_agg = []
96
  for scene in args.scenes:
97
- final_agg.append(compute_final_per_scene(res, scene, classes=args.classes, class_weights=args.class_weights))
 
98
  final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
99
  if len(args.scenes) > 1:
100
  final_agg.loc["mean"] = final_agg.mean()
 
1
  """
2
  Evaluation script for semantic segmentation for dronescapes. Outputs F1Score and mIoU for the 8 classes and each frame.
3
+ Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir --classes C1 C2 ... Cn -o results.csv
4
  """
5
  import sys
6
  import os
 
84
  if not args.output_path.exists():
85
  sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
86
  reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
87
+ raw_stats = compute_raw_stats_per_class(reader, args.classes)
 
 
88
  logger.info(f"Stored raw metrics file to: '{args.output_path}'")
89
+ raw_stats.to_csv(args.output_path)
90
  else:
91
+ logger.info(f"Loading raw metrics from: '{args.output_path}'. Delete this file if you want to recompute.")
92
+ raw_stats = pd.read_csv(args.output_path, index_col=0)
93
+
94
+ metrics_per_class = pd.concat([do_one_class(raw_stats, class_name) for class_name in args.classes])
95
 
96
  final_agg = []
97
  for scene in args.scenes:
98
+ final_agg.append(compute_final_per_scene(metrics_per_class, scene, classes=args.classes,
99
+ class_weights=args.class_weights))
100
  final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
101
  if len(args.scenes) > 1:
102
  final_agg.loc["mean"] = final_agg.mean()