# Provenance (from the original file listing): uploaded by hyzhou, commit cca9b7e ("upload everything")
import numpy as np
from skimage.metrics import contingency_table
def precision(tp, fp, fn):
    """Precision = TP / (TP + FP); returns 0 when there are no true positives."""
    if tp > 0:
        return tp / (tp + fp)
    return 0
def recall(tp, fp, fn):
    """Recall = TP / (TP + FN); returns 0 when there are no true positives."""
    if tp > 0:
        return tp / (tp + fn)
    return 0
def accuracy(tp, fp, fn):
    """Accuracy = TP / (TP + FP + FN); returns 0 when there are no true positives."""
    if tp > 0:
        return tp / (tp + fp + fn)
    return 0
def f1(tp, fp, fn):
    """F1 = 2*TP / (2*TP + FP + FN); returns 0 when there are no true positives."""
    if tp > 0:
        return (2 * tp) / (2 * tp + fp + fn)
    return 0
def _relabel(input):
_, unique_labels = np.unique(input, return_inverse=True)
return unique_labels.reshape(input.shape)
def _iou_matrix(gt, seg):
    """
    Compute the IoU matrix between ground truth and predicted instances.

    Args:
        gt (ndarray): ground truth instance segmentation
        seg (ndarray): predicted instance segmentation

    Returns:
        ndarray of shape (num_gt_labels, num_seg_labels) where entry (i, j)
        is the intersection-over-union of the i-th GT instance and the j-th
        SEG instance; rows/columns are ordered by sorted original label value,
        so index 0 presumably corresponds to the background label.
    """
    # relabel gt and seg for smaller memory footprint of contingency table
    gt = _relabel(gt)
    seg = _relabel(seg)
    # number of overlapping pixels between each GT/SEG instance pair;
    # use toarray() instead of the deprecated sparse-matrix `.A` attribute
    n_inter = contingency_table(gt, seg).toarray()
    # number of pixels per GT instance (row sums)
    n_gt = n_inter.sum(axis=1, keepdims=True)
    # number of pixels per SEG instance (column sums)
    n_seg = n_inter.sum(axis=0, keepdims=True)
    # inclusion-exclusion: |A ∪ B| = |A| + |B| - |A ∩ B|
    n_union = n_gt + n_seg - n_inter
    iou_matrix = n_inter / n_union
    # sanity check: IoU values must lie within the [0, 1] range
    assert 0 <= np.min(iou_matrix) <= np.max(iou_matrix) <= 1
    return iou_matrix
class SegmentationMetrics:
    """
    Computes precision, recall, accuracy and f1 score for a given ground truth
    and predicted segmentation.

    The IoU matrix between GT and predicted instances is computed eagerly when
    the object is constructed; per-threshold metrics are then derived from it
    on demand via `metrics`.

    Args:
        gt (ndarray): ground truth segmentation
        seg (ndarray): predicted segmentation
    """

    def __init__(self, gt, seg):
        self.iou_matrix = _iou_matrix(gt, seg)

    def metrics(self, iou_threshold):
        """
        Computes precision, recall, accuracy, f1 score at a given IoU threshold
        """
        # drop row/column 0, which correspond to the background label
        foreground_iou = self.iou_matrix[1:, 1:]
        matches = (foreground_iou > iou_threshold).astype(np.uint8)
        num_gt, num_seg = matches.shape
        if num_gt == 0 or num_seg == 0 or not matches.any():
            # empty matrix, or nothing exceeded the threshold
            tp, fp, fn = 0, 0, 0
        else:
            # a GT instance is detected iff its row contains any match
            tp = int(np.count_nonzero(matches.any(axis=1)))
            # GT instances with an all-zero row were missed
            fn = num_gt - tp
            # predicted instances with an all-zero column matched nothing
            fp = num_seg - int(np.count_nonzero(matches.any(axis=0)))
        return {
            'precision': precision(tp, fp, fn),
            'recall': recall(tp, fp, fn),
            'accuracy': accuracy(tp, fp, fn),
            'f1': f1(tp, fp, fn),
        }
class Accuracy:
    """
    Accuracy of a predicted segmentation w.r.t. the ground truth at a given
    IoU threshold, defined as: AC = TP / (TP + FP + FN).

    Kaggle DSB2018 calls this metric Precision, see:
    https://www.kaggle.com/stkbailey/step-by-step-explanation-of-scoring-metric.
    """

    def __init__(self, iou_threshold):
        # IoU value above which a GT/prediction pair counts as a match
        self.iou_threshold = iou_threshold

    def __call__(self, input_seg, gt_seg):
        all_metrics = SegmentationMetrics(gt_seg, input_seg).metrics(self.iou_threshold)
        return all_metrics['accuracy']
class AveragePrecision:
    """
    Average of the accuracy metric over the IoU range (0.5, 0.95) with a step
    of 0.05, as defined in:
    https://www.kaggle.com/stkbailey/step-by-step-explanation-of-scoring-metric
    """

    def __init__(self):
        # 10 evenly spaced thresholds: 0.50, 0.55, ..., 0.95
        self.iou_range = np.linspace(0.50, 0.95, 10)

    def __call__(self, input_seg, gt_seg):
        # build the IoU matrix once, then evaluate accuracy per threshold
        seg_metrics = SegmentationMetrics(gt_seg, input_seg)
        accuracies = [seg_metrics.metrics(t)['accuracy'] for t in self.iou_range]
        return np.mean(accuracies)