import collections
import json
import logging
import os

import datasets

logger = logging.getLogger(__name__)

_VERSION = datasets.Version("1.0.0", "")

_URL = "https://cocodataset.org/#home"

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_CITATION = """\
@article{DBLP:journals/corr/LinMBHPRDZ14,
  author    = {Tsung{-}Yi Lin and
               Michael Maire and
               Serge J. Belongie and
               Lubomir D. Bourdev and
               Ross B. Girshick and
               James Hays and
               Pietro Perona and
               Deva Ramanan and
               Piotr Doll{\'{a}}r and
               C. Lawrence Zitnick},
  title     = {Microsoft {COCO:} Common Objects in Context},
  journal   = {CoRR},
  volume    = {abs/1405.0312},
  year      = {2014},
  url       = {http://arxiv.org/abs/1405.0312},
  archivePrefix = {arXiv},
  eprint    = {1405.0312},
  timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_DESCRIPTION = """COCO is a large-scale object detection, segmentation, and captioning dataset.

Note:
 * Some images from the train and validation sets don't have annotations.
 * Coco 2014 and 2017 use the same images, but different train/val/test splits.
 * The test split doesn't have any annotations (only images).
 * Coco defines 91 classes, but the data only uses 80 classes.
 * Panoptic annotations define 200 classes, but only 133 are used.
"""

# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
_CONFIG_DESCRIPTION = """
This version contains images, bounding boxes and labels for the {year} version.
"""
""" Split = collections.namedtuple( 'Split', ['name', 'images', 'annotations', 'annotation_type'] ) # stuffing class 'none' for index 0 CAT = [ "none", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "street sign", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "hat", "backpack", "umbrella", "shoe", "eye glasses", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "plate", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "mirror", "dining table", "window", "desk", "toilet", "door", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "blender", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush", "hair brush", ] CAT_PANOPTIC = CAT + [ "banner", "blanket", "none1", "bridge", "none2", "none3", "none4", "none5", "cardboard", "none6", "none7", "none8", "none9", "none10", "none11", "counter", "none12", "curtain", "none13", "none14", "door-stuff", "none15", "none16", "none17", "none18", "none19", "floor-wood", "flower", "none20", "none21", "fruit", "none22", "none23", "gravel", "none24", "none25", "house", "none26", "light", "none27", "none28", "mirror-stuff", "none29", "none30", "none31", "none32", "net", "none33", "none34", "pillow", "none35", "none36", "platform", "playingfield", "none37", "railroad", "river", "road", "none38", "roof", "none39", "none40", "sand", "sea", "shelf", "none41", "none42", "snow", "none43", "stairs", "none44", "none45", "none46", "none47", "tent", "none48", "towel", "none49", "none50", "wall-brick", "none51", "none52", "none53", "wall-stone", "wall-tile", "wall-wood", "water-other", "none54", "window-blind", "window-other", "none55", "none56", "tree-merged", "fence-merged", "ceiling-merged", "sky-other-merged", "cabinet-merged", "table-merged", "floor-other-merged", "pavement-merged", "mountain-merged", "grass-merged", "dirt-merged", "paper-merged", "food-other-merged", "building-other-merged", "rock-merged", "wall-other-merged", "rug-merged", ] SUPER_CAT = [ "none", "person", "vehicle", "outdoor", "animal", "accessory", "sports", "kitchen", "food", "furniture", "electronic", "appliance", "indoor", ] SUPER_CAT_PANOPTIC = SUPER_CAT + [ "textile", "building", "raw-material", "furniture-stuff", "floor", "plant", "food-stuff", "ground", "structural", "water", "wall", "window", "ceiling", "sky", "solid", ] CAT2SUPER_CAT = [ "none", "person", "vehicle", "vehicle", "vehicle", "vehicle", "vehicle", "vehicle", "vehicle", "vehicle", "outdoor", "outdoor", "outdoor", "outdoor", "outdoor", "outdoor", "animal", "animal", "animal", "animal", "animal", "animal", "animal", "animal", "animal", "animal", "accessory", "accessory", "accessory", "accessory", "accessory", "accessory", "accessory", "accessory", "sports", "sports", "sports", "sports", "sports", "sports", "sports", "sports", "sports", "sports", "kitchen", "kitchen", "kitchen", "kitchen", "kitchen", "kitchen", "kitchen", "kitchen", "food", "food", "food", "food", "food", "food", "food", "food", "food", "food", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", 
"furniture", "electronic", "electronic", "electronic", "electronic", "electronic", "electronic", "appliance", "appliance", "appliance", "appliance", "appliance", "appliance", "indoor", "indoor", "indoor", "indoor", "indoor", "indoor", "indoor", "indoor", "textile", "textile", "none", "building", "none", "none", "none", "none", "raw-material", "none", "none", "none", "none", "none", "none", "furniture-stuff", "none", "textile", "none", "none", "furniture-stuff", "none", "none", "none", "none", "none", "floor", "plant", "none", "none", "food-stuff", "none", "none", "ground", "none", "none", "building", "none", "furniture-stuff", "none", "none", "furniture-stuff", "none", "none", "none", "none", "structural", "none", "none", "textile", "none", "none", "ground", "ground", "none", "ground", "water", "ground", "none", "building", "none", "none", "ground", "water", "furniture-stuff", "none", "none", "ground", "none", "furniture-stuff", "none", "none", "none", "none", "building", "none", "textile", "none", "none", "wall", "none", "none", "none", "wall", "wall", "wall", "water", "none", "window", "window", "none", "none", "plant", "structural", "ceiling", "sky", "furniture-stuff", "furniture-stuff", "floor", "ground", "solid", "plant", "ground", "raw-material", "food-stuff", "building", "solid", "wall", "textile", ] class AnnotationType(object): """Enum of the annotation format types. Splits are annotated with different formats. """ BBOXES = 'bboxes' PANOPTIC = 'panoptic' NONE = 'none' DETECTION_FEATURE = datasets.Features( { "image": datasets.Image(), "image/filename": datasets.Value("string"), "image/id": datasets.Value("int64"), "objects": datasets.Sequence(feature=datasets.Features({ "id": datasets.Value("int64"), "area": datasets.Value("float32"), "bbox": datasets.Sequence( feature=datasets.Value("float32") ), "label": datasets.ClassLabel(names=CAT), "super_cat_label": datasets.ClassLabel(names=SUPER_CAT), "is_crowd": datasets.Value("bool"), })), } ) PANOPTIC_FEATURE = datasets.Features( { "image": datasets.Image(), "image/filename": datasets.Value("string"), "image/id": datasets.Value("int64"), "panoptic_objects": datasets.Sequence(feature=datasets.Features({ "id": datasets.Value("int64"), "area": datasets.Value("float32"), "bbox": datasets.Sequence( feature=datasets.Value("float32") ), "label": datasets.ClassLabel(names=CAT_PANOPTIC), "super_cat_label": datasets.ClassLabel(names=SUPER_CAT_PANOPTIC), "is_crowd": datasets.Value("bool"), })), "panoptic_image": datasets.Image(), "panoptic_image/filename": datasets.Value("string"), } ) # More info could be added, like segmentation (as png mask), captions, # person key-points, more metadata (original flickr url,...). 
# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
class CocoConfig(datasets.BuilderConfig):
    """BuilderConfig for the Coco dataset builder."""

    def __init__(self, features, splits=None, has_panoptic=False,
                 skip_empty_annotations=False, **kwargs):
        super(CocoConfig, self).__init__(**kwargs)
        self.features = features
        self.splits = splits
        self.has_panoptic = has_panoptic
        self.skip_empty_annotations = skip_empty_annotations


# Copied from https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/object_detection/coco.py
class Coco(datasets.GeneratorBasedBuilder):
    """Base MS Coco dataset."""

    BUILDER_CONFIGS = [
        CocoConfig(
            name='2014',
            features=DETECTION_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2014),
            version=_VERSION,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2014',
                    annotations='annotations_trainval2014',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2014',
                    annotations='annotations_trainval2014',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.TEST,
                    images='test2014',
                    annotations='image_info_test2014',
                    annotation_type=AnnotationType.NONE,
                ),
                # Coco2014 contains an extra test split.
                Split(
                    name='test2015',
                    images='test2015',
                    annotations='image_info_test2015',
                    annotation_type=AnnotationType.NONE,
                ),
            ],
        ),
        CocoConfig(
            name='2017',
            features=DETECTION_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.TEST,
                    images='test2017',
                    annotations='image_info_test2017',
                    annotation_type=AnnotationType.NONE,
                ),
            ],
        ),
        CocoConfig(
            name='2017_panoptic',
            features=PANOPTIC_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            has_panoptic=True,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
            ],
        ),
        CocoConfig(
            name='2017_skip',
            features=DETECTION_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            skip_empty_annotations=True,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='annotations_trainval2017',
                    annotation_type=AnnotationType.BBOXES,
                ),
                Split(
                    name=datasets.Split.TEST,
                    images='test2017',
                    annotations='image_info_test2017',
                    annotation_type=AnnotationType.NONE,
                ),
            ],
        ),
        CocoConfig(
            name='2017_panoptic_skip',
            features=PANOPTIC_FEATURE,
            description=_CONFIG_DESCRIPTION.format(year=2017),
            version=_VERSION,
            has_panoptic=True,
            skip_empty_annotations=True,
            splits=[
                Split(
                    name=datasets.Split.TRAIN,
                    images='train2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
                Split(
                    name=datasets.Split.VALIDATION,
                    images='val2017',
                    annotations='panoptic_annotations_trainval2017',
                    annotation_type=AnnotationType.PANOPTIC,
                ),
            ],
        ),
    ]

    DEFAULT_CONFIG_NAME = "2017"
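
    # Config overview: '2014' and '2017' yield detection examples with an
    # 'objects' field; '2017_panoptic' yields 'panoptic_objects' plus a
    # 'panoptic_image'; the '*_skip' variants additionally drop images that
    # have no annotations.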
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=None,  # Probably needs to be fixed.
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # The DownloadManager memoizes the urls, so duplicate urls are only
        # downloaded once.
        if dl_manager.manual_dir is None:
            # Merge the urls from all splits together.
            urls = {}
            for split in self.config.splits:
                urls['{}_images'.format(split.name)] = 'zips/{}.zip'.format(split.images)
                urls['{}_annotations'.format(split.name)] = 'annotations/{}.zip'.format(
                    split.annotations
                )
            logger.info("download and extract coco dataset")
            root_url = 'http://images.cocodataset.org/'
            extracted_paths = dl_manager.download_and_extract(
                {key: root_url + url for key, url in urls.items()}
            )
        else:
            logger.info(f"use manual directory: {dl_manager.manual_dir}")
            extracted_paths = {}
            for split in self.config.splits:
                extracted_paths['{}_images'.format(split.name)] = dl_manager.manual_dir
                extracted_paths['{}_annotations'.format(split.name)] = dl_manager.manual_dir

        splits = []
        for split in self.config.splits:
            image_dir = extracted_paths['{}_images'.format(split.name)]
            annotations_dir = extracted_paths['{}_annotations'.format(split.name)]
            if self.config.has_panoptic:
                if dl_manager.manual_dir is None:
                    logger.info("extract panoptic data")
                    panoptic_image_zip_path = os.path.join(
                        annotations_dir,
                        'annotations',
                        'panoptic_{}.zip'.format(split.images),
                    )
                    panoptic_dir = dl_manager.extract(panoptic_image_zip_path)
                    panoptic_dir = os.path.join(
                        panoptic_dir, 'panoptic_{}'.format(split.images)
                    )
                else:
                    logger.info("use extracted data")
                    panoptic_dir = os.path.join(
                        annotations_dir,
                        'annotations',
                        'panoptic_{}.zip'.format(split.images),
                    )
            else:
                panoptic_dir = None

            splits.append(
                datasets.SplitGenerator(
                    name=split.name,
                    gen_kwargs={
                        'image_dir': image_dir,
                        'annotation_dir': annotations_dir,
                        'split_name': split.images,
                        'annotation_type': split.annotation_type,
                        'panoptic_dir': panoptic_dir,
                    },
                )
            )
        return splits

    def _generate_examples(self, image_dir, annotation_dir, split_name,
                           annotation_type, panoptic_dir):
        """Generate examples as dicts.

        Args:
          image_dir: `str`, directory containing the images.
          annotation_dir: `str`, directory containing the annotations.
          split_name: `str`, the image subdirectory (e.g. train2014, val2017).
          annotation_type: `AnnotationType`, the annotation format
            (NONE, BBOXES, PANOPTIC).
          panoptic_dir: if annotation_type is PANOPTIC, the directory
            containing the panoptic images.

        Yields:
          example key and data
        """
        if annotation_type == AnnotationType.BBOXES:
            instance_filename = 'instances_{}.json'
        elif annotation_type == AnnotationType.PANOPTIC:
            instance_filename = 'panoptic_{}.json'
        elif annotation_type == AnnotationType.NONE:  # No annotations for test sets
            instance_filename = 'image_info_{}.json'
        skip_empty_annotations = self.config.skip_empty_annotations
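
        # For example, with the '2017' config and the validation split,
        # annotation_type is BBOXES and the path below resolves to
        # <annotation_dir>/annotations/instances_val2017.json.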
        # Load the annotations (label names, images metadata, ...).
        instance_path = os.path.join(
            annotation_dir,
            'annotations',
            instance_filename.format(split_name),
        )
        coco_annotation = ANNOTATION_CLS[annotation_type](instance_path)
        # Each image is a dict:
        # {
        #     'id': 262145,
        #     'file_name': 'COCO_train2017_000000262145.jpg',
        #     'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
        #     'coco_url': 'http://images.cocodataset.org/train2017/xyz.jpg',
        #     'license': 2,
        #     'date_captured': '2013-11-20 02:07:55',
        #     'height': 427,
        #     'width': 640,
        # }
        images = coco_annotation.images

        # TODO(b/121375022): ClassLabel names should also contain 'id' and
        # 'supercategory' (in addition to 'name').
        # Warning: As Coco only uses 80 out of the 91 labels, the c['id'] and
        # dataset name ids won't match.
        if self.config.has_panoptic:
            objects_key = 'panoptic_objects'
        else:
            objects_key = 'objects'
        # self.info.features[objects_key]['label'].names = [
        #     c['name'] for c in categories
        # ]
        # TODO(b/121375022): Conversion should be done by ClassLabel:
        # categories_id2name = {c['id']: c['name'] for c in categories}

        # Iterate over all images.
        annotation_skipped = 0
        for image_info in sorted(images, key=lambda x: x['id']):
            if annotation_type == AnnotationType.BBOXES:
                # Each instance annotation is a dict:
                # {
                #     'iscrowd': 0,
                #     'bbox': [116.95, 305.86, 285.3, 266.03],
                #     'image_id': 480023,
                #     'segmentation': [[312.29, 562.89, 402.25, ...]],
                #     'category_id': 58,
                #     'area': 54652.9556,
                #     'id': 86,
                # }
                instances = coco_annotation.get_annotations(img_id=image_info['id'])
            elif annotation_type == AnnotationType.PANOPTIC:
                # Each panoptic annotation is a dict:
                # {
                #     'file_name': '000000037777.png',
                #     'image_id': 37777,
                #     'segments_info': [
                #         {
                #             'area': 353,
                #             'category_id': 52,
                #             'iscrowd': 0,
                #             'id': 6202563,
                #             'bbox': [221, 179, 37, 27],
                #         },
                #         ...
                #     ],
                # }
                panoptic_annotation = coco_annotation.get_annotations(
                    img_id=image_info['id']
                )
                instances = panoptic_annotation['segments_info']
            else:
                instances = []  # No annotations

            if not instances:
                annotation_skipped += 1
                if skip_empty_annotations:
                    continue

            def build_bbox(x, y, width, height):
                # Convert a COCO box [x, y, width, height] into
                # [x_min, y_min, x_max, y_max].
                return [
                    x,
                    y,
                    x + width,
                    y + height,
                ]

            example = {
                'image': os.path.abspath(
                    os.path.join(image_dir, split_name, image_info['file_name'])
                ),
                'image/filename': image_info['file_name'],
                'image/id': image_info['id'],
                objects_key: [
                    {  # pylint: disable=g-complex-comprehension
                        'id': instance['id'],
                        'area': instance['area'],
                        'bbox': build_bbox(*instance['bbox']),
                        'label': instance['category_id'],
                        'super_cat_label': SUPER_CAT_PANOPTIC.index(
                            CAT2SUPER_CAT[instance['category_id']]
                        ),
                        'is_crowd': bool(instance['iscrowd']),
                    }
                    for instance in instances
                ],
            }
            if self.config.has_panoptic:
                panoptic_filename = panoptic_annotation['file_name']
                panoptic_image_path = os.path.join(panoptic_dir, panoptic_filename)
                example['panoptic_image'] = panoptic_image_path
                example['panoptic_image/filename'] = panoptic_filename

            yield image_info['file_name'], example

        logger.info(
            '%d/%d images do not contain any annotations',
            annotation_skipped,
            len(images),
        )


class CocoAnnotation(object):
    """Coco annotation helper base class."""

    def __init__(self, annotation_path):
        with open(annotation_path, "r") as f:
            data = json.load(f)
        self._data = data

    @property
    def categories(self):
        """Return the category dicts, as sorted in the file."""
        return self._data['categories']

    @property
    def images(self):
        """Return the image dicts, as sorted in the file."""
        return self._data['images']
    def get_annotations(self, img_id):
        """Return all annotations associated with the image id."""
        raise NotImplementedError  # AnnotationType.NONE doesn't have annotations


class CocoAnnotationBBoxes(CocoAnnotation):
    """Coco bounding-box annotation helper class."""

    def __init__(self, annotation_path):
        super(CocoAnnotationBBoxes, self).__init__(annotation_path)
        img_id2annotations = collections.defaultdict(list)
        for a in self._data['annotations']:
            img_id2annotations[a['image_id']].append(a)
        self._img_id2annotations = {
            k: list(sorted(v, key=lambda a: a['id']))
            for k, v in img_id2annotations.items()
        }

    def get_annotations(self, img_id):
        """Return all annotations associated with the image id."""
        # Some images don't have any annotations. Return an empty list instead.
        return self._img_id2annotations.get(img_id, [])


class CocoAnnotationPanoptic(CocoAnnotation):
    """Coco panoptic annotation helper class."""

    def __init__(self, annotation_path):
        super(CocoAnnotationPanoptic, self).__init__(annotation_path)
        self._img_id2annotations = {
            a['image_id']: a for a in self._data['annotations']
        }

    def get_annotations(self, img_id):
        """Return all annotations associated with the image id."""
        return self._img_id2annotations[img_id]


ANNOTATION_CLS = {
    AnnotationType.NONE: CocoAnnotation,
    AnnotationType.BBOXES: CocoAnnotationBBoxes,
    AnnotationType.PANOPTIC: CocoAnnotationPanoptic,
}
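
# Minimal usage sketch, kept as a comment because running it downloads the
# full COCO archives. The local script path and annotation file below are
# hypothetical:
#
#   import datasets
#   ds = datasets.load_dataset("./coco.py", "2017", split="validation")
#   ds[0]["objects"]["bbox"]  # boxes as [x_min, y_min, x_max, y_max]
#
# The annotation helpers can also be used standalone on an extracted
# annotation file:
#
#   ann = ANNOTATION_CLS[AnnotationType.BBOXES](
#       "annotations/instances_val2017.json")
#   first = sorted(ann.images, key=lambda x: x["id"])[0]
#   ann.get_annotations(img_id=first["id"])  # list of instance dicts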