# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Model validation metrics.
"""
import math
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings
from ultralytics.utils.iou import IoU_Cal

OKS_SIGMA = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0


# Boxes

def box_area(box):
    """Return box area, where box shape is xyxy(4, n)."""
    return (box[2] - box[0]) * (box[3] - box[1])


def bbox_ioa(box1, box2, eps=1e-7):
    """
    Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.

    Args:
        box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
        box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (np.array): A numpy array of shape (n, m) representing the intersection over box2 area.
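
    Example:
        >>> # Illustrative values: a 10x10 box whose 5x5 corner overlaps box2 (area 100)
        >>> bbox_ioa(np.array([[0, 0, 10, 10]]), np.array([[5, 5, 15, 15]]))  # ~0.25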
""" | |
# Get the coordinates of bounding boxes | |
b1_x1, b1_y1, b1_x2, b1_y2 = box1.T | |
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T | |
# Intersection area | |
inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ | |
(np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) | |
# box2 area | |
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps | |
# Intersection over box2 area | |
return inter_area / box2_area | |


def box_iou(box1, box2, eps=1e-7):
    """
    Calculate intersection-over-union (IoU) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py

    Args:
        box1 (torch.Tensor): A tensor of shape (N, 4) representing N bounding boxes.
        box2 (torch.Tensor): A tensor of shape (M, 4) representing M bounding boxes.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2.
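
    Example:
        >>> # Illustrative values: a 25 px intersection over a 175 px union
        >>> box_iou(torch.tensor([[0., 0., 10., 10.]]), torch.tensor([[5., 5., 15., 15.]]))  # ~0.1429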
""" | |
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) | |
(a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) | |
inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp_(0).prod(2) | |
# IoU = inter / (area1 + area2 - inter) | |
return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) | |


def bbox_iou(box1, box2, xywh=True, type_='CIoU', eps=1e-7):
    """
    Return the IoU-based loss and IoU of box1(1, 4) to box2(n, 4) via the IoU_Cal helper.

    Args:
        box1 (torch.Tensor): A tensor representing one or more bounding boxes.
        box2 (torch.Tensor): A tensor representing n bounding boxes.
        xywh (bool, optional): If True, boxes are in (x, y, w, h) format and are converted to (x1, y1, x2, y2).
            Defaults to True.
        type_ (str, optional): The IoU variant implemented by IoU_Cal, e.g. 'CIoU'. Defaults to 'CIoU'.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (tuple): The IoU loss of the selected type and the plain IoU values.
    """
    # Get the coordinates of bounding boxes
    if xywh:  # transform from xywh to xyxy
        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
    else:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps

    # Stack the corner coordinates back into (..., 4) boxes for IoU_Cal
    b1 = torch.stack([b1_x1, b1_y1, b1_x2, b1_y2], dim=-1)
    b2 = torch.stack([b2_x1, b2_y1, b2_x2, b2_y2], dim=-1)
    self = IoU_Cal(b1, b2)
    loss = getattr(IoU_Cal, type_)(b1, b2, self=self)
    iou = 1 - self.iou  # recover IoU from IoU_Cal's loss-style iou term
    return loss, iou
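
# Illustrative usage (assumes the IoU_Cal helper in ultralytics.utils.iou exposes the method named by
# `type_`, e.g. 'CIoU'; pred_boxes and gt_boxes are hypothetical (n, 4) xyxy tensors):
# loss, iou = bbox_iou(pred_boxes, gt_boxes, xywh=False, type_='CIoU')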


def mask_iou(mask1, mask2, eps=1e-7):
    """
    Calculate masks IoU.

    Args:
        mask1 (torch.Tensor): A tensor of shape (N, n) where N is the number of ground truth objects and n is the
            product of image width and height.
        mask2 (torch.Tensor): A tensor of shape (M, n) where M is the number of predicted objects and n is the
            product of image width and height.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing masks IoU.
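
    Example:
        >>> # Illustrative values on flattened 2x2 binary masks: intersection 1, union 2
        >>> mask_iou(torch.tensor([[1., 1., 0., 0.]]), torch.tensor([[1., 0., 0., 0.]]))  # ~0.5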
""" | |
intersection = torch.matmul(mask1, mask2.T).clamp_(0) | |
union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection | |
return intersection / (union + eps) | |


def kpt_iou(kpt1, kpt2, area, sigma, eps=1e-7):
    """
    Calculate Object Keypoint Similarity (OKS).

    Args:
        kpt1 (torch.Tensor): A tensor of shape (N, 17, 3) representing ground truth keypoints.
        kpt2 (torch.Tensor): A tensor of shape (M, 17, 3) representing predicted keypoints.
        area (torch.Tensor): A tensor of shape (N,) representing areas from ground truth.
        sigma (list): A list containing 17 values representing keypoint scales.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing keypoint similarities.
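
    Example:
        >>> # Illustrative: identical, fully visible keypoints give OKS ~1.0
        >>> kpt1 = torch.zeros(1, 17, 3)
        >>> kpt1[..., 2] = 1  # mark all 17 keypoints visible
        >>> kpt_iou(kpt1, torch.zeros(1, 17, 3), area=torch.tensor([100.]), sigma=OKS_SIGMA)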
""" | |
d = (kpt1[:, None, :, 0] - kpt2[..., 0]) ** 2 + (kpt1[:, None, :, 1] - kpt2[..., 1]) ** 2 # (N, M, 17) | |
sigma = torch.tensor(sigma, device=kpt1.device, dtype=kpt1.dtype) # (17, ) | |
kpt_mask = kpt1[..., 2] != 0 # (N, 17) | |
e = d / (2 * sigma) ** 2 / (area[:, None, None] + eps) / 2 # from cocoeval | |
# e = d / ((area[None, :, None] + eps) * sigma) ** 2 / 2 # from formula | |
return (torch.exp(-e) * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps) | |


def smooth_BCE(eps=0.1):
    """Return positive, negative label smoothing BCE targets (https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441)."""
    return 1.0 - 0.5 * eps, 0.5 * eps
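
# Example: smooth_BCE(0.1) returns (0.95, 0.05), i.e. smoothed positive/negative targets instead of hard 1/0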


class ConfusionMatrix:
    """
    A class for calculating and updating a confusion matrix for object detection and classification tasks.

    Attributes:
        task (str): The type of task, either 'detect' or 'classify'.
        matrix (np.array): The confusion matrix, with dimensions depending on the task.
        nc (int): The number of classes.
        conf (float): The confidence threshold for detections.
        iou_thres (float): The Intersection over Union threshold.
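
    Example:
        >>> # Illustrative: one confident, correct class-0 detection against one class-0 label
        >>> cm = ConfusionMatrix(nc=2)
        >>> detections = torch.tensor([[0., 0., 10., 10., 0.9, 0.]])  # x1, y1, x2, y2, conf, class
        >>> labels = torch.tensor([[0., 0., 0., 10., 10.]])  # class, x1, y1, x2, y2
        >>> cm.process_batch(detections, labels)
        >>> cm.matrix[0, 0]  # 1.0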
""" | |
def __init__(self, nc, conf=0.25, iou_thres=0.45, task='detect'): | |
"""Initialize attributes for the YOLO model.""" | |
self.task = task | |
self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == 'detect' else np.zeros((nc, nc)) | |
self.nc = nc # number of classes | |
self.conf = conf | |
self.iou_thres = iou_thres | |
def process_cls_preds(self, preds, targets): | |
""" | |
Update confusion matrix for classification task | |
Args: | |
preds (Array[N, min(nc,5)]): Predicted class labels. | |
targets (Array[N, 1]): Ground truth class labels. | |
""" | |
preds, targets = torch.cat(preds)[:, 0], torch.cat(targets) | |
for p, t in zip(preds.cpu().numpy(), targets.cpu().numpy()): | |
self.matrix[p][t] += 1 | |
    def process_batch(self, detections, labels):
        """
        Update confusion matrix for object detection task.

        Args:
            detections (Array[N, 6]): Detected bounding boxes and their associated information.
                Each row should contain (x1, y1, x2, y2, conf, class).
            labels (Array[M, 5]): Ground truth bounding boxes and their associated class labels.
                Each row should contain (class, x1, y1, x2, y2).
        """
        if detections is None:
            gt_classes = labels.int()
            for gc in gt_classes:
                self.matrix[self.nc, gc] += 1  # background FN
            return

        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(int)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # true background

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # predicted background
    def matrix(self):
        """Returns the confusion matrix."""
        return self.matrix

    def tp_fp(self):
        """Returns true positives and false positives."""
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
        return (tp[:-1], fp[:-1]) if self.task == 'detect' else (tp, fp)  # remove background class if task=detect
    @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
    @plt_settings()
    def plot(self, normalize=True, save_dir='', names=(), on_plot=None):
        """
        Plot the confusion matrix using seaborn and save it to a file.

        Args:
            normalize (bool): Whether to normalize the confusion matrix.
            save_dir (str): Directory where the plot will be saved.
            names (tuple): Names of classes, used as labels on the plot.
            on_plot (func): An optional callback to pass plots path and data when they are rendered.
        """
        import seaborn as sn

        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
        nc, nn = self.nc, len(names)  # number of classes, names
        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
        ticklabels = (list(names) + ['background']) if labels else 'auto'
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
            sn.heatmap(array,
                       ax=ax,
                       annot=nc < 30,
                       annot_kws={'size': 8},
                       cmap='Blues',
                       fmt='.2f' if normalize else '.0f',
                       square=True,
                       vmin=0.0,
                       xticklabels=ticklabels,
                       yticklabels=ticklabels).set_facecolor((1, 1, 1))
        title = 'Confusion Matrix' + ' Normalized' * normalize
        ax.set_xlabel('True')
        ax.set_ylabel('Predicted')
        ax.set_title(title)
        plot_fname = Path(save_dir) / f'{title.lower().replace(" ", "_")}.png'
        fig.savefig(plot_fname, dpi=250)
        plt.close(fig)
        if on_plot:
            on_plot(plot_fname)
    def print(self):
        """Print the confusion matrix to the console."""
        for i in range(self.nc + 1):
            LOGGER.info(' '.join(map(str, self.matrix[i])))


def smooth(y, f=0.05):
    """Box filter of fraction f."""
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed
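
# Example: smooth(np.array([0., 1., 0., 1.]), f=0.5) applies a 3-tap box filter (nf = 3) with edge
# padding, yielding array([0.3333, 0.3333, 0.6667, 0.6667])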


@plt_settings()
def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=(), on_plot=None):
    """Plots a precision-recall curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title('Precision-Recall Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


@plt_settings()
def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric', on_plot=None):
    """Plots a metric-confidence curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = smooth(py.mean(0), 0.05)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
    ax.set_title(f'{ylabel}-Confidence Curve')
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


def compute_ap(recall, precision):
    """
    Compute the average precision (AP) given the recall and precision curves.

    Args:
        recall (list): The recall curve.
        precision (list): The precision curve.

    Returns:
        (float): Average precision.
        (np.ndarray): Precision envelope curve.
        (np.ndarray): Modified recall curve with sentinel values added at the beginning and end.
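
    Example:
        >>> # Illustrative: a short monotone recall curve with decaying precision
        >>> ap, mpre, mrec = compute_ap([0.0, 0.5, 1.0], [1.0, 0.8, 0.6])
        >>> round(float(ap), 2)  # ~0.8 with 101-point COCO interpolation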
""" | |
# Append sentinel values to beginning and end | |
mrec = np.concatenate(([0.0], recall, [1.0])) | |
mpre = np.concatenate(([1.0], precision, [0.0])) | |
# Compute the precision envelope | |
mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) | |
# Integrate area under curve | |
method = 'interp' # methods: 'continuous', 'interp' | |
if method == 'interp': | |
x = np.linspace(0, 1, 101) # 101-point interp (COCO) | |
ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate | |
else: # 'continuous' | |
i = np.where(mrec[1:] != mrec[:-1])[0] # points where x-axis (recall) changes | |
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve | |
return ap, mpre, mrec | |


def ap_per_class(tp,
                 conf,
                 pred_cls,
                 target_cls,
                 plot=False,
                 on_plot=None,
                 save_dir=Path(),
                 names=(),
                 eps=1e-16,
                 prefix=''):
    """
    Computes the average precision per class for object detection evaluation.

    Args:
        tp (np.ndarray): Binary array indicating whether the detection is correct (True) or not (False).
        conf (np.ndarray): Array of confidence scores of the detections.
        pred_cls (np.ndarray): Array of predicted classes of the detections.
        target_cls (np.ndarray): Array of true classes of the detections.
        plot (bool, optional): Whether to plot PR curves or not. Defaults to False.
        on_plot (func, optional): A callback to pass plots path and data when they are rendered. Defaults to None.
        save_dir (Path, optional): Directory to save the PR curves. Defaults to an empty path.
        names (dict, optional): Dict mapping class indices to class names, used to plot PR curves. Defaults to an
            empty tuple.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-16.
        prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string.

    Returns:
        (tuple): A tuple of six arrays and one array of unique classes, where:
            tp (np.ndarray): True positive counts for each class.
            fp (np.ndarray): False positive counts for each class.
            p (np.ndarray): Precision values at each confidence threshold.
            r (np.ndarray): Recall values at each confidence threshold.
            f1 (np.ndarray): F1-score values at each confidence threshold.
            ap (np.ndarray): Average precision for each class at different IoU thresholds.
            unique_classes (np.ndarray): An array of unique classes that have data.
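
    Example:
        >>> # Illustrative: three class-0 detections scored against two class-0 labels at one IoU threshold
        >>> tp, conf = np.array([[True], [False], [True]]), np.array([0.9, 0.8, 0.7])
        >>> results = ap_per_class(tp, conf, np.zeros(3), np.zeros(2), names={0: 'person'})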
""" | |
# Sort by objectness | |
i = np.argsort(-conf) | |
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] | |
# Find unique classes | |
unique_classes, nt = np.unique(target_cls, return_counts=True) | |
nc = unique_classes.shape[0] # number of classes, number of detections | |
# Create Precision-Recall curve and compute AP for each class | |
px, py = np.linspace(0, 1, 1000), [] # for plotting | |
ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) | |
for ci, c in enumerate(unique_classes): | |
i = pred_cls == c | |
n_l = nt[ci] # number of labels | |
n_p = i.sum() # number of predictions | |
if n_p == 0 or n_l == 0: | |
continue | |
# Accumulate FPs and TPs | |
fpc = (1 - tp[i]).cumsum(0) | |
tpc = tp[i].cumsum(0) | |
# Recall | |
recall = tpc / (n_l + eps) # recall curve | |
r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases | |
# Precision | |
precision = tpc / (tpc + fpc) # precision curve | |
p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score | |
# AP from recall-precision curve | |
for j in range(tp.shape[1]): | |
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) | |
if plot and j == 0: | |
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 | |
# Compute F1 (harmonic mean of precision and recall) | |
f1 = 2 * p * r / (p + r + eps) | |
names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data | |
names = dict(enumerate(names)) # to dict | |
if plot: | |
plot_pr_curve(px, py, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot) | |
plot_mc_curve(px, f1, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot) | |
plot_mc_curve(px, p, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot) | |
plot_mc_curve(px, r, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot) | |
i = smooth(f1.mean(0), 0.1).argmax() # max F1 index | |
p, r, f1 = p[:, i], r[:, i], f1[:, i] | |
tp = (r * nt).round() # true positives | |
fp = (tp / (p + eps) - tp).round() # false positives | |
return tp, fp, p, r, f1, ap, unique_classes.astype(int) | |


class Metric(SimpleClass):
    """
    Class for computing evaluation metrics for YOLOv8 model.

    Attributes:
        p (list): Precision for each class. Shape: (nc,).
        r (list): Recall for each class. Shape: (nc,).
        f1 (list): F1 score for each class. Shape: (nc,).
        all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10).
        ap_class_index (list): Index of class for each AP score. Shape: (nc,).
        nc (int): Number of classes.

    Methods:
        ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
        ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
        mp(): Mean precision of all classes. Returns: Float.
        mr(): Mean recall of all classes. Returns: Float.
        map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float.
        map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float.
        map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float.
        mean_results(): Mean of results, returns mp, mr, map50, map.
        class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i].
        maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,).
        fitness(): Model fitness as a weighted combination of metrics. Returns: Float.
        update(results): Update metric attributes with new evaluation results.
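
    Example:
        >>> # Illustrative: seed the metric with precomputed per-class results for a single class
        >>> m = Metric()
        >>> m.update((np.array([0.8]), np.array([0.7]), np.array([0.75]), np.full((1, 10), 0.5), np.array([0])))
        >>> m.map50, m.map  # (0.5, 0.5)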
""" | |
def __init__(self) -> None: | |
self.p = [] # (nc, ) | |
self.r = [] # (nc, ) | |
self.f1 = [] # (nc, ) | |
self.all_ap = [] # (nc, 10) | |
self.ap_class_index = [] # (nc, ) | |
self.nc = 0 | |
    @property
    def ap50(self):
        """
        Returns the Average Precision (AP) at an IoU threshold of 0.5 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50 values per class, or an empty list if not available.
        """
        return self.all_ap[:, 0] if len(self.all_ap) else []

    @property
    def ap(self):
        """
        Returns the Average Precision (AP) at IoU thresholds of 0.5-0.95 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50-95 values per class, or an empty list if not available.
        """
        return self.all_ap.mean(1) if len(self.all_ap) else []

    @property
    def mp(self):
        """
        Returns the Mean Precision of all classes.

        Returns:
            (float): The mean precision of all classes.
        """
        return self.p.mean() if len(self.p) else 0.0

    @property
    def mr(self):
        """
        Returns the Mean Recall of all classes.

        Returns:
            (float): The mean recall of all classes.
        """
        return self.r.mean() if len(self.r) else 0.0

    @property
    def map50(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.5.

        Returns:
            (float): The mAP at an IoU threshold of 0.5.
        """
        return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0

    @property
    def map75(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.75.

        Returns:
            (float): The mAP at an IoU threshold of 0.75.
        """
        return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0

    @property
    def map(self):
        """
        Returns the mean Average Precision (mAP) over IoU thresholds of 0.5 - 0.95 in steps of 0.05.

        Returns:
            (float): The mAP over IoU thresholds of 0.5 - 0.95 in steps of 0.05.
        """
        return self.all_ap.mean() if len(self.all_ap) else 0.0

    def mean_results(self):
        """Mean of results, return mp, mr, map50, map."""
        return [self.mp, self.mr, self.map50, self.map]

    def class_result(self, i):
        """Class-aware result, return p[i], r[i], ap50[i], ap[i]."""
        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

    @property
    def maps(self):
        """mAP of each class."""
        maps = np.zeros(self.nc) + self.map
        for i, c in enumerate(self.ap_class_index):
            maps[c] = self.ap[i]
        return maps

    def fitness(self):
        """Model fitness as a weighted combination of metrics."""
        w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
        return (np.array(self.mean_results()) * w).sum()
    def update(self, results):
        """
        Args:
            results (tuple): A tuple of (p, r, f1, all_ap, ap_class_index) used to update the metric attributes.
        """
        self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results


class DetMetrics(SimpleClass):
    """
    This class is a utility class for computing detection metrics such as precision, recall, and mean average precision
    (mAP) of an object detection model.

    Args:
        save_dir (Path): A path to the directory where the output plots will be saved. Defaults to current directory.
        plot (bool): A flag that indicates whether to plot precision-recall curves for each class. Defaults to False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (dict): A dict mapping class indices to class names. Defaults to an empty tuple.

    Attributes:
        save_dir (Path): A path to the directory where the output plots will be saved.
        plot (bool): A flag that indicates whether to plot the precision-recall curves for each class.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (dict): A dict mapping class indices to class names.
        box (Metric): An instance of the Metric class for storing the results of the detection metrics.
        speed (dict): A dictionary for storing the execution time of different parts of the detection process.

    Methods:
        process(tp, conf, pred_cls, target_cls): Updates the metric results with the latest batch of predictions.
        keys: Returns a list of keys for accessing the computed detection metrics.
        mean_results: Returns a list of mean values for the computed detection metrics.
        class_result(i): Returns a list of values for the computed detection metrics for a specific class.
        maps: Returns the mean average precision (mAP) values per class.
        fitness: Computes the fitness score based on the computed detection metrics.
        ap_class_index: Returns a list of class indices sorted by their average precision (AP) values.
        results_dict: Returns a dictionary that maps detection metric keys to their computed values.
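
    Example:
        >>> # Illustrative: five all-correct class-0 detections evaluated at 10 IoU thresholds
        >>> dm = DetMetrics(names={0: 'person'})
        >>> dm.process(np.ones((5, 10), dtype=bool), np.linspace(0.9, 0.5, 5), np.zeros(5), np.zeros(5))
        >>> dm.results_dict['metrics/mAP50(B)']  # ~1.0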
""" | |
def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: | |
self.save_dir = save_dir | |
self.plot = plot | |
self.on_plot = on_plot | |
self.names = names | |
self.box = Metric() | |
self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} | |
def process(self, tp, conf, pred_cls, target_cls): | |
"""Process predicted results for object detection and update metrics.""" | |
results = ap_per_class(tp, | |
conf, | |
pred_cls, | |
target_cls, | |
plot=self.plot, | |
save_dir=self.save_dir, | |
names=self.names, | |
on_plot=self.on_plot)[2:] | |
self.box.nc = len(self.names) | |
self.box.update(results) | |
    @property
    def keys(self):
        """Returns a list of keys for accessing specific metrics."""
        return ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)']

    def mean_results(self):
        """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95."""
        return self.box.mean_results()

    def class_result(self, i):
        """Return the result of evaluating the performance of an object detection model on a specific class."""
        return self.box.class_result(i)

    @property
    def maps(self):
        """Returns mean Average Precision (mAP) scores per class."""
        return self.box.maps

    @property
    def fitness(self):
        """Returns the fitness of box object."""
        return self.box.fitness()

    @property
    def ap_class_index(self):
        """Returns the average precision index per class."""
        return self.box.ap_class_index

    @property
    def results_dict(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))


class SegmentMetrics(SimpleClass):
    """
    Calculates and aggregates detection and segmentation metrics over a given set of classes.

    Args:
        save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory.
        plot (bool): Whether to save the detection and segmentation plots. Default is False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (dict): Dict mapping class indices to class names. Default is an empty tuple.

    Attributes:
        save_dir (Path): Path to the directory where the output plots should be saved.
        plot (bool): Whether to save the detection and segmentation plots.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (dict): Dict mapping class indices to class names.
        box (Metric): An instance of the Metric class to calculate box detection metrics.
        seg (Metric): An instance of the Metric class to calculate mask segmentation metrics.
        speed (dict): Dictionary to store the time taken in different phases of inference.

    Methods:
        process(tp_b, tp_m, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
        mean_results(): Returns the mean of the detection and segmentation metrics over all the classes.
        class_result(i): Returns the detection and segmentation metrics of class `i`.
        maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
        fitness: Returns the fitness scores, which are a single weighted combination of metrics.
        ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP).
        results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score.
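
    Example:
        >>> # Illustrative: identical box and mask true-positive arrays for a single class
        >>> sm = SegmentMetrics(names={0: 'person'})
        >>> tp = np.ones((5, 10), dtype=bool)
        >>> sm.process(tp, tp, np.linspace(0.9, 0.5, 5), np.zeros(5), np.zeros(5))
        >>> len(sm.mean_results())  # 8: box P, R, mAP50, mAP50-95, then the same four for masks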
""" | |
def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: | |
self.save_dir = save_dir | |
self.plot = plot | |
self.on_plot = on_plot | |
self.names = names | |
self.box = Metric() | |
self.seg = Metric() | |
self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} | |
def process(self, tp_b, tp_m, conf, pred_cls, target_cls): | |
""" | |
Processes the detection and segmentation metrics over the given set of predictions. | |
Args: | |
tp_b (list): List of True Positive boxes. | |
tp_m (list): List of True Positive masks. | |
conf (list): List of confidence scores. | |
pred_cls (list): List of predicted classes. | |
target_cls (list): List of target classes. | |
""" | |
results_mask = ap_per_class(tp_m, | |
conf, | |
pred_cls, | |
target_cls, | |
plot=self.plot, | |
on_plot=self.on_plot, | |
save_dir=self.save_dir, | |
names=self.names, | |
prefix='Mask')[2:] | |
self.seg.nc = len(self.names) | |
self.seg.update(results_mask) | |
results_box = ap_per_class(tp_b, | |
conf, | |
pred_cls, | |
target_cls, | |
plot=self.plot, | |
on_plot=self.on_plot, | |
save_dir=self.save_dir, | |
names=self.names, | |
prefix='Box')[2:] | |
self.box.nc = len(self.names) | |
self.box.update(results_box) | |
    @property
    def keys(self):
        """Returns a list of keys for accessing metrics."""
        return [
            'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)',
            'metrics/precision(M)', 'metrics/recall(M)', 'metrics/mAP50(M)', 'metrics/mAP50-95(M)']

    def mean_results(self):
        """Return the mean metrics for bounding box and segmentation results."""
        return self.box.mean_results() + self.seg.mean_results()

    def class_result(self, i):
        """Returns the detection and segmentation results for a specified class index."""
        return self.box.class_result(i) + self.seg.class_result(i)

    @property
    def maps(self):
        """Returns mAP scores per class for box detection and mask segmentation."""
        return self.box.maps + self.seg.maps

    @property
    def fitness(self):
        """Get the fitness score for both segmentation and bounding box models."""
        return self.seg.fitness() + self.box.fitness()

    @property
    def ap_class_index(self):
        """Boxes and masks have the same ap_class_index."""
        return self.box.ap_class_index

    @property
    def results_dict(self):
        """Returns results of object detection model for evaluation."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))


class PoseMetrics(SegmentMetrics):
    """
    Calculates and aggregates detection and pose metrics over a given set of classes.

    Args:
        save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory.
        plot (bool): Whether to save the detection and pose plots. Default is False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (dict): Dict mapping class indices to class names. Default is an empty tuple.

    Attributes:
        save_dir (Path): Path to the directory where the output plots should be saved.
        plot (bool): Whether to save the detection and pose plots.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (dict): Dict mapping class indices to class names.
        box (Metric): An instance of the Metric class to calculate box detection metrics.
        pose (Metric): An instance of the Metric class to calculate keypoint (pose) metrics.
        speed (dict): Dictionary to store the time taken in different phases of inference.

    Methods:
        process(tp_b, tp_p, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
        mean_results(): Returns the mean of the detection and pose metrics over all the classes.
        class_result(i): Returns the detection and pose metrics of class `i`.
        maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
        fitness: Returns the fitness scores, which are a single weighted combination of metrics.
        ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP).
        results_dict: Returns the dictionary containing all the detection and pose metrics and fitness score.
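
    Example:
        >>> # Illustrative: identical box and keypoint true-positive arrays for a single class
        >>> pm = PoseMetrics(names={0: 'person'})
        >>> tp = np.ones((5, 10), dtype=bool)
        >>> pm.process(tp, tp, np.linspace(0.9, 0.5, 5), np.zeros(5), np.zeros(5))
        >>> pm.keys[:2]  # ['metrics/precision(B)', 'metrics/recall(B)']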
""" | |
def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None: | |
super().__init__(save_dir, plot, names) | |
self.save_dir = save_dir | |
self.plot = plot | |
self.on_plot = on_plot | |
self.names = names | |
self.box = Metric() | |
self.pose = Metric() | |
self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} | |
    def __getattr__(self, attr):
        """Raises an AttributeError if an invalid attribute is accessed."""
        name = self.__class__.__name__
        raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
    def process(self, tp_b, tp_p, conf, pred_cls, target_cls):
        """
        Processes the detection and pose metrics over the given set of predictions.

        Args:
            tp_b (list): List of True Positive boxes.
            tp_p (list): List of True Positive keypoints.
            conf (list): List of confidence scores.
            pred_cls (list): List of predicted classes.
            target_cls (list): List of target classes.
        """
        results_pose = ap_per_class(tp_p,
                                    conf,
                                    pred_cls,
                                    target_cls,
                                    plot=self.plot,
                                    on_plot=self.on_plot,
                                    save_dir=self.save_dir,
                                    names=self.names,
                                    prefix='Pose')[2:]
        self.pose.nc = len(self.names)
        self.pose.update(results_pose)
        results_box = ap_per_class(tp_b,
                                   conf,
                                   pred_cls,
                                   target_cls,
                                   plot=self.plot,
                                   on_plot=self.on_plot,
                                   save_dir=self.save_dir,
                                   names=self.names,
                                   prefix='Box')[2:]
        self.box.nc = len(self.names)
        self.box.update(results_box)
    @property
    def keys(self):
        """Returns list of evaluation metric keys."""
        return [
            'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)',
            'metrics/precision(P)', 'metrics/recall(P)', 'metrics/mAP50(P)', 'metrics/mAP50-95(P)']

    def mean_results(self):
        """Return the mean results of box and pose."""
        return self.box.mean_results() + self.pose.mean_results()

    def class_result(self, i):
        """Return the class-wise detection results for a specific class i."""
        return self.box.class_result(i) + self.pose.class_result(i)

    @property
    def maps(self):
        """Returns the mean average precision (mAP) per class for both box and pose detections."""
        return self.box.maps + self.pose.maps

    @property
    def fitness(self):
        """Return the fitness score as the combined pose and box fitness."""
        return self.pose.fitness() + self.box.fitness()


class ClassifyMetrics(SimpleClass):
    """
    Class for computing classification metrics including top-1 and top-5 accuracy.

    Attributes:
        top1 (float): The top-1 accuracy.
        top5 (float): The top-5 accuracy.
        speed (Dict[str, float]): A dictionary containing the time taken for each step in the pipeline.

    Properties:
        fitness (float): The fitness of the model, computed as the mean of top-1 and top-5 accuracy.
        results_dict (Dict[str, Union[float, str]]): A dictionary containing the classification metrics and fitness.
        keys (List[str]): A list of keys for the results_dict.

    Methods:
        process(targets, pred): Processes the targets and predictions to compute classification metrics.
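
    Example:
        >>> # Illustrative: two images whose top-1 predictions match the targets
        >>> cm = ClassifyMetrics()
        >>> cm.process([torch.tensor([0, 1])], [torch.tensor([[0, 2, 3, 4, 1], [1, 0, 2, 3, 4]])])
        >>> cm.top1, cm.top5  # (1.0, 1.0)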
""" | |
def __init__(self) -> None: | |
self.top1 = 0 | |
self.top5 = 0 | |
self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} | |
def process(self, targets, pred): | |
"""Target classes and predicted classes.""" | |
pred, targets = torch.cat(pred), torch.cat(targets) | |
correct = (targets[:, None] == pred).float() | |
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy | |
self.top1, self.top5 = acc.mean(0).tolist() | |
    @property
    def fitness(self):
        """Returns mean of top-1 and top-5 accuracies as fitness score."""
        return (self.top1 + self.top5) / 2

    @property
    def results_dict(self):
        """Returns a dictionary with model's performance metrics and fitness score."""
        return dict(zip(self.keys + ['fitness'], [self.top1, self.top5, self.fitness]))

    @property
    def keys(self):
        """Returns a list of keys for the results_dict property."""
        return ['metrics/accuracy_top1', 'metrics/accuracy_top5']