"""
Evaluation script for semantic segmentation for dronescapes. Outputs F1 score and mIoU for each class and each frame.
Usage: ./evaluate_semantic_segmentation.py y_dir gt_dir --classes C1 .. Cn [--class_weights W1 .. Wn] -o results.csv
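Example (hypothetical paths and class names): ./evaluate_semantic_segmentation.py preds/ gt/ --classes land forest water -o results.csv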
"""
import sys
import os
from loguru import logger
from pathlib import Path
from argparse import ArgumentParser, Namespace
from tempfile import TemporaryDirectory
from functools import partial
from torchmetrics.functional.classification import multiclass_stat_scores
from tqdm import trange
import torch as tr
import numpy as np
import pandas as pd

sys.path.append(Path(__file__).parents[1].__str__())
from dronescapes_reader import MultiTaskDataset, SemanticRepresentation

def compute_metrics(tp: np.ndarray, fp: np.ndarray, tn: np.ndarray, fn: np.ndarray) -> pd.DataFrame:
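    """Compute precision, recall, F1 and IoU element-wise from raw TP/FP/TN/FN counts."""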
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    iou = tp / (tp + fp + fn)
    return pd.DataFrame([precision, recall, f1, iou], index=["precision", "recall", "f1", "iou"]).T

def compute_metrics_by_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
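    """Select the rows of a single class, append an aggregated 'all' row and attach precision/recall/F1/IoU."""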
    df = df.query("class_name == @class_name").drop(columns="class_name")
    df.loc["all"] = df.sum()
    df[["precision", "recall", "f1", "iou"]] = compute_metrics(df["tp"], df["fp"], df["tn"], df["fn"])
    df.insert(0, "class_name", class_name)
    df = df.fillna(0).round(3)
    return df

def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str]) -> pd.DataFrame:
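    """Gather raw TP/FP/TN/FN counts for every (frame, class) pair of the reader."""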
    res = tr.zeros((len(reader), len(classes), 4)).long()
    index = []
    for i in trange(len(reader)):
        x = reader[i]
        y, gt = x[0]["pred"], x[0]["gt"]
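        # multiclass_stat_scores(average=None) returns a (num_classes, 5) tensor of
        # [tp, fp, tn, fn, support]; only the first four columns are kept.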
        res[i] = multiclass_stat_scores(y, gt, num_classes=len(classes), average=None)[:, 0:4]
        index.append(x[1])
    res = res.reshape(len(reader) * len(classes), 4)
    df = pd.DataFrame(res, index=np.repeat(index, len(classes)), columns=["tp", "fp", "tn", "fn"])
    df.insert(0, "class_name", np.array(classes)[:, None].repeat(len(index), 1).T.flatten())
    return df

def compute_final_per_scene(res: pd.DataFrame, scene: str, classes: list[str],
                            class_weights: list[float]) -> tuple[str, float, float]:
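    """Aggregate the counts of all frames belonging to a scene and return (scene, weighted IoU, weighted F1)."""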
    df = res.iloc[[x.startswith(scene) for x in res.index]]
    df_scene = df[["class_name", "tp", "fp", "tn", "fn"]].groupby("class_name") \
        .apply(lambda x: x.sum(), include_groups=False).loc[classes]
    df_metrics = compute_metrics(df_scene["tp"], df_scene["fp"], df_scene["tn"], df_scene["fn"])
    iou_weighted = (df_metrics["iou"] * class_weights).sum()
    f1_weighted = (df_metrics["f1"] * class_weights).sum()
    return scene, iou_weighted, f1_weighted

def get_args() -> Namespace:
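    """Parse and validate the command-line arguments."""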
    parser = ArgumentParser()
    parser.add_argument("y_dir", type=lambda p: Path(p).absolute())
    parser.add_argument("gt_dir", type=lambda p: Path(p).absolute())
    parser.add_argument("--output_path", "-o", type=Path, required=True)
    parser.add_argument("--classes", required=True, nargs="+")
    parser.add_argument("--class_weights", nargs="+", type=float)
    parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    if args.class_weights is None:
        logger.info("No class weights provided, defaulting to equal weights.")
        args.class_weights = [1 / len(args.classes)] * len(args.classes)
    assert (a := len(args.class_weights)) == (b := len(args.classes)), (a, b)
    assert np.fabs(sum(args.class_weights) - 1) < 1e-3, (args.class_weights, sum(args.class_weights))
    assert args.output_path.suffix == ".csv", f"Output file must end in .csv, got: '{args.output_path.suffix}'"
    if len(args.scenes) > 0:
        logger.info(f"Scenes: {args.scenes}")
    if args.output_path.exists() and args.overwrite:
        os.remove(args.output_path)
    return args

def main(args: Namespace):
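    # Expose the two input directories as 'pred' and 'gt' tasks under one temporary dataset root,
    # which is the layout MultiTaskDataset reads from.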
    (temp_dir := Path(TemporaryDirectory().name)).mkdir(exist_ok=False)
    os.symlink(args.y_dir, temp_dir / "pred")
    os.symlink(args.gt_dir, temp_dir / "gt")
    sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
    reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
    assert (a := len(reader.all_files_per_repr["gt"])) == (b := len(reader.all_files_per_repr["pred"])), f"{a} vs {b}"

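    # The output CSV doubles as a cache of the raw per-frame counts: reuse it if it already exists.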
    if not args.output_path.exists():
        raw_stats = compute_raw_stats_per_frame(reader, args.classes)
        raw_stats.to_csv(args.output_path)
        logger.info(f"Stored raw metrics file to: '{args.output_path}'")
    else:
        logger.info(f"Loading raw metrics from: '{args.output_path}'. Delete this file if you want to recompute.")
        raw_stats = pd.read_csv(args.output_path, index_col=0)

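    # Per-class metrics: one row per (frame, class) plus an aggregated 'all' row for each class.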
    metrics_per_class = pd.concat([compute_metrics_by_class(raw_stats, class_name) for class_name in args.classes])

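    # Reduce to a single class-weighted IoU and F1 score per requested scene.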
    final_agg = []
    for scene in args.scenes:
        final_agg.append(compute_final_per_scene(metrics_per_class, scene, args.classes, args.class_weights))
    final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
    if len(args.scenes) > 1:
        final_agg.loc["mean"] = final_agg.mean()
    final_agg = (final_agg * 100).round(3)
    print(final_agg)


if __name__ == "__main__":
    main(get_args())