|
|
|
|
|
from typing import Any, Callable, Dict, List, Optional, Tuple, Union |
|
|
|
import torch |
|
from datasets import Dataset |
|
from PIL import Image |
|
from torchvision.datasets.vision import VisionDataset |
|
|
|
# A single bounding box as four floats.
# NOTE(review): presumably COCO-style (x, y, width, height) — confirm against the
# producer of the loaded json; nothing in this file fixes the convention.
_TYPING_BOXES = Tuple[float, float, float, float]

# One COCO annotation record: string keys mapping to ints, strings, or a box tuple.
_TYPING_ANNOTS = Dict[str, Union[int, str, _TYPING_BOXES]]

# Preprocessed target fields keyed by name, each a tensor.
_TYPING_LABELS = Dict[str, torch.Tensor]
|
|
|
class COCODataset(VisionDataset):
    """
    A class that extends VisionDataset and represents a COCO detection dataset.

    Wraps a ``datasets.Dataset`` of images together with COCO-style annotation
    json, indexing annotations by image id so ``__getitem__`` does O(1) lookups.
    """

    def __init__(
        self,
        loaded_json: Dict[str, List[_TYPING_ANNOTS]],
        ids_mapping: Dict[int, int],
        dataset: Dataset,
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        """
        Arguments:
            loaded_json: A dictionary that contains the loaded COCO json; must
                provide the ``"images"`` and ``"annotations"`` keys.
            ids_mapping (Dict[int, int]): A dictionary that maps an image id to
                the corresponding row index in ``dataset``.
            dataset (Dataset): The data which is going to be used.
            transforms (Optional): A function/transform that takes in an PIL image
                and returns a transformed version.
            transform (Optional): A function/transform that takes in an PIL image
                and returns a transformed version. E.g, ``transforms.RandomCrop``.
            target_transform (Optional): A function/transform that takes in the
                target and transforms it.
        """
        # Backed by an in-memory Dataset rather than a directory on disk, so
        # the VisionDataset root is unused.
        root = ""
        super().__init__(root, transforms, transform, target_transform)

        self.ids_mapping = ids_mapping
        self.dataset = dataset

        # Index images by id; self.ids is the sorted list of image ids and
        # defines the dataset ordering used by __getitem__.
        self.images = {img["id"]: img for img in loaded_json["images"]}
        self.ids = sorted(self.images)

        # Group annotation records by their image id for O(1) lookup.
        self.annotations: Dict[int, List[_TYPING_ANNOTS]] = {}
        for annot in loaded_json["annotations"]:
            self.annotations.setdefault(annot["image_id"], []).append(annot)

    def _load_image(self, idx: int) -> Image.Image:
        """
        Load an image given its id.

        Arguments:
            idx: COCO id of the image to be loaded (a key of ``ids_mapping``).

        Returns:
            PIL Image instance converted to RGB.
        """
        # Map the COCO image id to the dataset row index. Named `row_index`
        # (not `id`) to avoid shadowing the builtin.
        row_index = self.ids_mapping[idx]
        return self.dataset[row_index]["image"].convert("RGB")

    def _load_target(self, idx: int) -> List[_TYPING_ANNOTS]:
        """
        Load the annotations of an image given its id.

        Arguments:
            idx: COCO id of the image to load its annotations.

        Returns:
            List containing the annotations of the image; empty if the image
            has no annotations.
        """
        return self.annotations.get(idx, [])

    def __len__(self) -> int:
        """
        Returns the number of elements in the dataset.

        Returns:
            int: Number of images in the dataset.
        """
        return len(self.ids)

    def __getitem__(self, index: int) -> Dict[str, Union[torch.Tensor, _TYPING_LABELS]]:
        """
        Given an index, it preprocesses and returns the image and its associated \
        annotations at that index.

        Arguments:
            index: Index of the image (position in the sorted id list, not the
                COCO image id).

        Returns:
            Dictionary containing preprocessed image as pixel values and its associated \
            annotations as labels.
        """
        image_id = self.ids[index]
        image = self._load_image(image_id)
        annot_dicts = self._load_target(image_id)

        target = {"image_id": image_id, "annotations": annot_dicts}
        return {"image": image, "target": target}
|
|