|
""" |
|
The old evaluation script. |
|
To run, you first need to split the test scenes data into 3 different directories: |
|
|
|
cd /dronescapes/data |
|
scenes=(comana barsana norway); |
|
for scene in ${scenes[@]} ; do |
|
ls test_set_annotated_only | while read task; do |
|
mkdir -p test_set_annotated_only_per_scene/$scene/$task; |
|
ls test_set_annotated_only/$task | grep "$scene" | while read line; do |
|
cp test_set_annotated_only/$task/$line test_set_annotated_only_per_scene/$scene/$task/$line; |
|
done; |
|
done |
|
done |
|
|
|
Then run this: |
|
cd /dronescapes/scripts |
|
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/comana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/comana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/comana --overwrite |
|
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/barsana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/barsana/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/barsana --overwrite |
|
python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/norway/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/norway/semantic_mask2former_swin_mapillary_converted/ --class_weights 0.28172092 0.30589653 0.13341699 0.05937348 0.00474491 0.05987466 0.08660721 0.06836531 --classes land forest residential road little-objects water sky hill -o results/norway --overwrite |
|
""" |
|
|
|
from __future__ import annotations
import argparse
import os
import shutil
import tempfile
import warnings
from pathlib import Path

import numpy as np
import pandas as pd
from natsort import natsorted
from tqdm import tqdm

warnings.filterwarnings("ignore")
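
# A Python equivalent of the bash split loop from the module docstring -- a minimal
# sketch, assuming the /dronescapes/data layout described there. The helper name
# _split_test_set_per_scene is ours; it is not used by the script itself.
def _split_test_set_per_scene(data_dir: Path, scenes: tuple[str, ...] = ("comana", "barsana", "norway")):
    src_dir = data_dir / "test_set_annotated_only"
    for scene in scenes:
        for task_dir in src_dir.iterdir():
            dst_dir = data_dir / "test_set_annotated_only_per_scene" / scene / task_dir.name
            dst_dir.mkdir(parents=True, exist_ok=True)
            for item in task_dir.iterdir():
                if scene in item.name:  # same filter as grep "$scene" in the docstring
                    shutil.copy(item, dst_dir / item.name)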
|
|
|
def convert_label2multi(label: np.ndarray, class_id: int) -> np.ndarray:
    """Turn a (H, W) integer label map into a boolean mask that is True where label equals class_id."""
    return np.equal(label, class_id)
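
# For example, with a hypothetical 2x2 label map:
#   convert_label2multi(np.array([[0, 1], [1, 2]]), 1) -> array([[False, True], [True, False]])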
|
|
|
def process_all_video_frames(gt_files: list[Path], pred_files: list[Path], class_id: int):
    """Accumulate per-frame confusion counts for one class, then micro-average them over the whole video."""
    TP, TN, FP, FN = {}, {}, {}, {}
    for gt_file, pred_file in tqdm(zip(gt_files, pred_files), total=len(gt_files), desc=f"{class_id=}"):
        gt_label_raw = np.load(gt_file, allow_pickle=True)["arr_0"]
        net_label_raw = np.load(pred_file, allow_pickle=True)["arr_0"]
        gt_label = convert_label2multi(gt_label_raw, class_id)
        net_label = convert_label2multi(net_label_raw, class_id)

        TP[gt_file.name] = np.count_nonzero(gt_label & net_label)
        TN[gt_file.name] = np.count_nonzero(~gt_label & ~net_label)
        FP[gt_file.name] = np.count_nonzero(net_label & ~gt_label)
        FN[gt_file.name] = np.count_nonzero(gt_label & ~net_label)
    df = pd.DataFrame([TP, FP, TN, FN], index=["tp", "fp", "tn", "fn"]).T  # per-frame counts, handy for debugging

    global_TP, global_TN, global_FP, global_FN = sum(TP.values()), sum(TN.values()), sum(FP.values()), sum(FN.values())
    # micro-averaged metrics; np.spacing(1) is a machine-epsilon guard against division by zero
    global_precision = global_TP / (global_TP + global_FP + np.spacing(1))
    global_recall = global_TP / (global_TP + global_FN + np.spacing(1))
    global_f1_score = (2 * global_precision * global_recall) / (global_precision + global_recall + np.spacing(1))
    global_iou = global_TP / (global_TP + global_FP + global_FN + np.spacing(1))
    return (global_precision, global_recall, global_f1_score, global_iou)
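
# Worked example of the micro-averaged formulas above, on toy counts TP=8, FP=2, FN=4:
#   precision = 8 / (8 + 2) = 0.8
#   recall    = 8 / (8 + 4) ~ 0.667
#   f1        = 2 * 0.8 * 0.667 / (0.8 + 0.667) ~ 0.727
#   iou       = 8 / (8 + 2 + 4) ~ 0.571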
|
|
|
def join_results(args: argparse.Namespace):
    """Collect the per-class result files written by main() into a single human-readable summary."""
    out_path = os.path.join(args.out_dir, f"joined_results_{len(args.classes)}classes.txt")
    joined_f1_scores_mean = []
    joined_iou_scores_mean = []

    with open(out_path, "w") as out_file:
        for class_id in range(len(args.classes)):
            result_file = os.path.join(args.out_dir, f"evaluation_dronescapes_CLASS_{class_id}.txt")
            # skip the csv header; the data line is 'precision,recall,f1,iou'
            for line in open(result_file, "r").read().splitlines()[1:]:
                splits = line.split(",")
                f1_score, iou_score = float(splits[2]), float(splits[3])
                out_file.write(f"------------------------- CLASS {class_id} - {args.classes[class_id]} "
                               "--------------------------------------------\n")
                out_file.write(f"F1-Score: {round(f1_score, 4)}\n")
                out_file.write(f"IOU: {round(iou_score, 4)}\n")
                out_file.write("\n\n")
                joined_f1_scores_mean.append(f1_score)
                joined_iou_scores_mean.append(iou_score)

        out_file.write("\n\n")
        out_file.write(f"Mean F1-Score all classes: {round(float(np.mean(joined_f1_scores_mean)), 4)}\n")
        out_file.write(f"Mean IOU all classes: {round(float(np.mean(joined_iou_scores_mean)), 4)}\n")
        out_file.write("\n\n")

        out_file.write("\n\n")
        # the weighted means use the per-class weights passed on the command line
        out_file.write(f"Weighted Mean F1-Score all classes: {round(float(np.dot(joined_f1_scores_mean, args.class_weights)), 4)}\n")
        out_file.write(f"Weighted Mean IOU all classes: {round(float(np.dot(joined_iou_scores_mean, args.class_weights)), 4)}\n")
        out_file.write("\n\n")
    print(f"Written to '{out_path}'")
|
|
|
def compat_old_txt_file(args: argparse.Namespace):
    """Compatibility path: resolve an old-style txt file of test frames into flat gt/pred directories of symlinks."""
    tempdir = Path(tempfile.mkdtemp())
    (tempdir / "gt").mkdir()
    (tempdir / "pred").mkdir()
    print(f"Old pattern detected. Symlinking files to a temp dir: {tempdir}")
    test_files = natsorted(open(args.txt_path, "r").read().splitlines())
    scenes = natsorted(set(os.path.dirname(x) for x in test_files))
    assert len(scenes) == 1, scenes
    files = natsorted([x for x in test_files if scenes[0] in x])
    gt_files = [f"{args.gt_path}/{f.split('/')[0]}/segprop{len(args.classes)}/{f.split('/')[1]}.npz" for f in files]
    pred_files = [f"{args.pred_path}/{f.split('/')[0]}/{int(f.split('/')[1]):06d}.npz" for f in files]
    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
    for _file in gt_files:
        os.symlink(_file, tempdir / "gt" / Path(_file).name)
    for _file in pred_files:
        os.symlink(_file, tempdir / "pred" / Path(_file).name)
    args.gt_path = tempdir / "gt"
    args.pred_path = tempdir / "pred"
    args.txt_path = None
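
# For reference, compat_old_txt_file assumes --txt_path holds one "scene/frame" entry per
# line (inferred from the split('/') parsing above; hypothetical example):
#   barsana/1
#   barsana/13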
|
|
|
def main(args: argparse.Namespace):
    gt_files = natsorted(args.gt_path.iterdir(), key=lambda x: Path(x).name)
    pred_files = natsorted(args.pred_path.iterdir(), key=lambda x: Path(x).name)
    # zip() in process_all_video_frames would silently truncate on a length mismatch, so check explicitly
    assert len(gt_files) == len(pred_files), (len(gt_files), len(pred_files))
    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
    global_precision, global_recall, global_f1, global_iou = process_all_video_frames(gt_files, pred_files, args.class_id)

    out_path = os.path.join(args.out_dir, f"evaluation_dronescapes_CLASS_{args.class_id}.txt")
    with open(out_path, "w") as out_file:
        out_file.write("precision,recall,f1,iou\n")
        out_file.write(f"{global_precision:.6f},{global_recall:.6f},{global_f1:.6f},{global_iou:.6f}\n")
    print(f"Written to '{out_path}'")
|
|
|
if __name__ == "__main__":
    """
    Reference paths and class configurations:
    Barsana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220517_train_on_even_semisup_on_odd_validate_on_last_odd_triplet_journal_split/only_manually_annotated_test_files_36.txt
    Norce: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220810_new_norce_clip/only_manually_annotated_test_files_50.txt
    Comana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20221208_new_comana_clip/only_manually_annotated_test_files_30.txt
    gt_path: /Date3/hpc/datasets/dronescapes/all_scenes
    pred_path: /Date3/hpc/code/Mask2Former/demo_dronescapes/outputs_dronescapes_compatible/mapillary_sseg

    NC = 7
    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky']
    CLASS_WEIGHTS = [0.28172092, 0.37426183, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721]

    NC = 8
    CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky', 'hill']
    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721, 0.06836531]

    NC = 10
    CLASS_NAMES = ['land', 'forest', 'low-level', 'road', 'high-level', 'cars', 'water', 'sky', 'hill', 'person']
    CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.09954808, 0.05937348, 0.03386891, 0.00445865, 0.05987466, 0.08660721, 0.06836531, 0.00028626]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--gt_path", type=Path, required=True)
    parser.add_argument("--pred_path", type=Path, required=True)
    # required, so no default: the old default of Path(__file__).parent / "out_dir" was never reachable
    parser.add_argument("--out_dir", "-o", type=Path, required=True)
    parser.add_argument("--classes", nargs="+")
    parser.add_argument("--class_weights", type=float, nargs="+", required=True)
    parser.add_argument("--txt_path")
    parser.add_argument("--overwrite", action="store_true")
    args = parser.parse_args()
    if args.classes is None:
        print("Class names not provided, falling back to numeric labels")
        args.classes = list(map(str, range(len(args.class_weights))))
    assert len(args.classes) == len(args.class_weights), (args.classes, args.class_weights)
    assert len(args.classes) in (7, 8, 10), len(args.classes)
    assert not args.out_dir.exists() or args.overwrite, f"'{args.out_dir}' exists. Use --overwrite"
    shutil.rmtree(args.out_dir, ignore_errors=True)
    os.makedirs(args.out_dir, exist_ok=True)

    if args.txt_path is not None:
        compat_old_txt_file(args)

    # evaluate each class independently, then join the per-class files into one summary
    for class_id in range(len(args.classes)):
        args.class_id = class_id
        main(args)
    join_results(args)
|
|