import copy
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.utils.data as data
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
# Typings
_TYPING_BOX = Tuple[float, float, float, float]
_TYPING_IMAGES = List[Dict[str, int]]
_TYPING_ANNOTATIONS = List[Dict[str, Union[int, _TYPING_BOX]]]
_TYPING_CATEGORIES = List[Dict[str, Union[int, str]]]
_TYPING_JSON_COCO = Dict[
str, Union[_TYPING_IMAGES, _TYPING_ANNOTATIONS, _TYPING_CATEGORIES]
]
_TYPING_SCORES = List[float]
_TYPING_LABELS = List[int]
_TYPING_BOXES = List[_TYPING_BOX]
_TYPING_PRED_REF = Union[_TYPING_SCORES, _TYPING_LABELS, _TYPING_BOXES]
_TYPING_PREDICTION = Dict[str, _TYPING_PRED_REF]
_acc_box_format = ['xywh', 'xyx2y2']
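# Illustrative only: the shape of a single _TYPING_PREDICTION dict as used by
# the helpers below (the values are made-up placeholders, not real output):
#
#   example_prediction: _TYPING_PREDICTION = {
#       "scores": [0.92, 0.85],
#       "labels": [1, 3],
#       "boxes": [(10.0, 20.0, 50.0, 80.0), (15.0, 25.0, 40.0, 60.0)],  # xyx2y2
#   }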
def draw_rectangles(
    image: Image.Image,
    boxes: Union[torch.Tensor, List[_TYPING_BOX], None],
    box_format: str = "xyx2y2",
    color_bbx: Tuple[int, int, int] = (255, 0, 0),
    color_txt: Tuple[int, int, int] = (255, 255, 255),
    thickness: int = 1,
    labels: Optional[List[str]] = None,
    confidences: Optional[List[float]] = None,
    draw_confidence: bool = True,
) -> Image.Image:
"""
Draw rectangles around objects in an image.
Args:
image (Image): Image object to draw on.
boxes (List[torch.Tensor]): List of bounding boxes in (xywh or xyx2y2) format.
color_bbx (Tuple[int, int, int]): RGB color tuple for bounding box outlines. Default \
is (255, 0, 0) (red).
color_txt (Tuple[int, int, int]): RGB color tuple for text. Default is \
(255, 255, 255) (white).
thickness (int): Thickness of the bounding box outline. Default is 1.
labels (List[str]): List of labels for each object. Default is None.
confidences (List[float]): List of confidences for each object. Default is None.
draw_confidence (bool): Whether to draw confidence values. Default is True.
Returns:
Image: Image with rectangles drawn around objects.
"""
    # Text anchor and font references:
    # https://pillow.readthedocs.io/en/stable/handbook/text-anchors.html
    # https://pillow.readthedocs.io/en/stable/reference/ImageFont.html
    assert box_format in _acc_box_format, f"box_format must be one of {_acc_box_format}"
    offset = 0.05
    font = ImageFont.load_default()
    # Make copies to avoid mutating the caller's data
    if isinstance(boxes, torch.Tensor):
        _boxes = boxes.tolist()
    elif isinstance(boxes, list):
        _boxes = copy.deepcopy(boxes)
    else:
        _boxes = []
    if isinstance(confidences, torch.Tensor):
        _confidences = confidences.tolist()
    elif isinstance(confidences, list):
        _confidences = copy.deepcopy(confidences)
    else:
        # Without confidences there is nothing to append to the labels
        draw_confidence = False
        _confidences = ["" for _ in _boxes]
    # Without labels, pair each box with an empty string so zip still works
    _labels = copy.deepcopy(labels) if labels is not None else ["" for _ in _boxes]
ret_image = image.copy()
img_draw = ImageDraw.Draw(ret_image)
    for box, label, confidence in zip(_boxes, _labels, _confidences):
        # Work on a mutable copy so tuples are also accepted
        box = list(box)
        if box_format == "xywh":
            # Convert xywh -> xyx2y2
            box[2] = box[0] + box[2]
            box[3] = box[1] + box[3]
text = f"{label}"
if draw_confidence:
text += f" ({100*confidence:.2f}%)"
text = " " + text + " "
_, _, txt_w, txt_h = font.getbbox(text)
offset_y = txt_h * offset
x, y, _, _ = box
box_txt = (x, y - txt_h - (2 * offset_y), x + txt_w, y)
pos_text = (x, y - txt_h - (offset_y))
# Draws rectangle around object
img_draw.rectangle(box, outline=color_bbx, width=thickness)
# Draws filled rectangle for text
img_draw.rectangle(box_txt, fill=color_bbx, width=thickness)
# Draws text
img_draw.text(pos_text, text, fill=color_txt, anchor="ma", font=font)
return ret_image
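# Minimal usage sketch for draw_rectangles (assumptions: "example.jpg" is a
# hypothetical local file, and the box, label, and confidence are made up):
#
#   img = Image.open("example.jpg").convert("RGB")
#   out = draw_rectangles(
#       img,
#       boxes=[[10.0, 20.0, 150.0, 180.0]],
#       box_format="xyx2y2",
#       labels=["dog"],
#       confidences=[0.87],
#   )
#   out.save("example_with_boxes.jpg")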
def val_formatted_anns(
image_id: int, objects: _TYPING_PREDICTION, feat_name: str = "category"
) -> List[_TYPING_PREDICTION]:
"""
This function formats annotations the same way they are for training, without the need \
for data augmentation.
Args:
image_id (int): The id of the image.
objects (_TYPING_PREDICTION): The dictionary containing object annotations.
feat_name (str): The name of the feature containing the category id.
Returns:
List[Dict[str, Union[int, _TYPING_BOX]]]: List of dictionaries with formatted annotations.
"""
annotations = []
    for i in range(len(objects["id"])):
new_ann = {
"id": objects["id"][i],
"category_id": objects[feat_name][i],
"iscrowd": objects["iscrowd"][i],
"image_id": image_id,
"area": objects["area"][i],
"bbox": objects["bbox"][i],
}
annotations.append(new_ann)
return annotations
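# Minimal usage sketch for val_formatted_anns (the objects dict below mirrors
# the per-image "objects" feature of a detection dataset; all values are
# made-up placeholders):
#
#   objects = {
#       "id": [1], "category": [0], "iscrowd": [0],
#       "area": [1200.0], "bbox": [[10.0, 20.0, 30.0, 40.0]],  # xywh
#   }
#   anns = val_formatted_anns(image_id=0, objects=objects, feat_name="category")
#   # -> [{"id": 1, "category_id": 0, "iscrowd": 0, "image_id": 0,
#   #      "area": 1200.0, "bbox": [10.0, 20.0, 30.0, 40.0]}]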
def create_json_COCO_format(
dataset: data.Dataset, round_approx: Optional[int] = None
) -> Tuple[Dict[int, int], _TYPING_JSON_COCO]:
"""
Function to create a JSON in COCO format.
Args:
dataset (Dataset): The dataset to be converted to COCO format.
round_approx (Optional[int]): The number of decimal places to round the boxes.
Returns:
A tuple of a dictionary mapping image_id to index in dataset and a dictionary \
in COCO format.
"""
    feature = dataset.features["objects"].feature
    # Look for the feature holding the category ids
    for feat_name in ["category", "label"]:
        if feat_name in feature:
            break
    else:
        raise KeyError("dataset 'objects' feature has no 'category' or 'label' field")
    categories = feature[feat_name].names
    id2label = dict(enumerate(categories))
categories_json = [
{"supercategory": "none", "id": id, "name": id2label[id]} for id in id2label
]
    output_json = {"images": [], "annotations": []}
# Collecting outputs from dataset
ids_mapping = {}
pbar = tqdm(dataset, desc="Collecting ground-truth annotations from dataset")
for idx, example in enumerate(pbar):
ids_mapping[example["image_id"]] = idx
ann = val_formatted_anns(example["image_id"], example["objects"], feat_name)
output_json["images"].append(
{
"id": example["image_id"],
"width": example["image"].width,
"height": example["image"].height,
}
)
if round_approx is not None:
for annotation in ann:
annotation["bbox"] = [round(val, round_approx) for val in annotation["bbox"]]
output_json["annotations"].extend(ann)
output_json["categories"] = categories_json
return ids_mapping, output_json
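# Minimal usage sketch for create_json_COCO_format (assumption: a Hugging Face
# object-detection dataset whose "objects" feature carries the id, area, bbox,
# iscrowd, and category/label fields accessed above; the dataset name below is
# illustrative, not a real reference):
#
#   from datasets import load_dataset
#   ds = load_dataset("some-detection-dataset", split="validation")
#   ids_mapping, coco_json = create_json_COCO_format(ds, round_approx=2)
#   print(len(coco_json["images"]), len(coco_json["annotations"]))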