"""Hugging Face `datasets` loading script for the Aerial-Semantic-Segmentation-Cactis
instance segmentation dataset (COCO-format annotations exported from Roboflow Universe)."""

import collections
import json
import os

import datasets

_HOMEPAGE = "https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep/dataset/1"
_LICENSE = "CC BY 4.0"
_CITATION = """\
@misc{ instance-segmentation-kgvep_dataset,
    title = { Instance Segmentation Dataset },
    type = { Open Source Dataset },
    author = { UAI },
    howpublished = { \\url{ https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep } },
    url = { https://universe.roboflow.com/uai-63qde/instance-segmentation-kgvep },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2023 },
    month = { nov },
    note = { visited on 2023-11-04 },
}
"""
_CATEGORIES = ["copiapoa", "copiapoa-v2"]
# COCO-format annotation file expected at the root of each extracted split folder.
_ANNOTATION_FILENAME = "_annotations.coco.json"


class AERIALSEMANTICSEGMENTATIONCACTISConfig(datasets.BuilderConfig):
    """BuilderConfig for Aerial-Semantic-Segmentation-Cactis."""

    def __init__(self, data_urls, **kwargs):
        """
        BuilderConfig for Aerial-Semantic-Segmentation-Cactis.

        Args:
            data_urls: `dict`, split name to URL of the zip file to download.
            **kwargs: keyword arguments forwarded to super.
        """
        super(AERIALSEMANTICSEGMENTATIONCACTISConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls


class AERIALSEMANTICSEGMENTATIONCACTIS(datasets.GeneratorBasedBuilder):
    """Aerial-Semantic-Segmentation-Cactis instance segmentation dataset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        AERIALSEMANTICSEGMENTATIONCACTISConfig(
            name="full",
            description="Full version of Aerial-Semantic-Segmentation-Cactis dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/test.zip",
            },
        ),
        AERIALSEMANTICSEGMENTATIONCACTISConfig(
            name="mini",
            description="Mini version of Aerial-Semantic-Segmentation-Cactis dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/aghent/Aerial-Semantic-Segmentation-Cactis/resolve/main/data/valid-mini.zip",
            },
        ),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "segmentation": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download and extract one zip archive per split; each extracted folder contains
        # the images and a single `_annotations.coco.json` file.
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "folder_dir": data_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "folder_dir": data_files["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "folder_dir": data_files["test"],
                },
            ),
        ]

    def _generate_examples(self, folder_dir):
        def process_annot(annot, category_id_to_category):
            # Convert a raw COCO annotation into the feature layout declared in `_info`.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "segmentation": annot["segmentation"],
                "category": category_id_to_category[annot["category_id"]],
            }

        idx = 0

        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        with open(annotation_filepath, "r") as f:
            annotations = json.load(f)

        # Build lookup tables: category id -> name, image id -> its annotations,
        # and file name -> image record.
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}

        for filename in os.listdir(folder_dir):
            filepath = os.path.join(folder_dir, filename)
            if filename in filename_to_image:
                image = filename_to_image[filename]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                with open(filepath, "rb") as f:
                    image_bytes = f.read()
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": filepath, "bytes": image_bytes},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1