Datasets:
Tasks: Image Classification
Size: < 1K
import os | |
import datasets | |
from datasets.tasks import ImageClassification | |
_HOMEPAGE = "https://universe.roboflow.com/yolo-po0ro/proj-2-qmdk0/dataset/3" | |
_LICENSE = "CC BY 4.0" | |
_CITATION = """\ | |
@misc{ proj-2-qmdk0_dataset, | |
title = { proj 2 Dataset }, | |
type = { Open Source Dataset }, | |
author = { Yolo }, | |
howpublished = { \\url{ https://universe.roboflow.com/yolo-po0ro/proj-2-qmdk0 } }, | |
url = { https://universe.roboflow.com/yolo-po0ro/proj-2-qmdk0 }, | |
journal = { Roboflow Universe }, | |
publisher = { Roboflow }, | |
year = { 2023 }, | |
month = { oct }, | |
note = { visited on 2023-10-18 }, | |
} | |
""" | |
_CATEGORIES = ['Thermostat', 'Housing', 'Insert'] | |
class THERMOCLASSIFICATIONConfig(datasets.BuilderConfig):
    """Configuration for the thermo-classification dataset builder.

    Args:
        data_urls: `dict` mapping split name to the URL of the zip archive
            to download for that split.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, data_urls, **kwargs):
        # Every config is pinned to the same dataset version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
class THERMOCLASSIFICATION(datasets.GeneratorBasedBuilder):
    """thermo-classification image classification dataset.

    Images are expected to be laid out as ``<class-name>/<image-file>``
    inside each split's zip archive; the parent directory name is used
    as the class label.
    """

    VERSION = datasets.Version("1.0.0")

    # Accepted image extensions; matched case-insensitively in
    # _generate_examples so files such as ``IMG_01.JPG`` are not skipped.
    _IMAGE_EXTENSIONS = (".jpg", ".png", ".jpeg", ".bmp", ".tif", ".tiff")

    BUILDER_CONFIGS = [
        THERMOCLASSIFICATIONConfig(
            name="full",
            description="Full version of thermo-classification dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/test.zip",
            },
        ),
        THERMOCLASSIFICATIONConfig(
            name="mini",
            description="Mini version of thermo-classification dataset.",
            # NOTE(review): all three splits deliberately point at the same
            # small archive — presumably a quick-smoke-test config; confirm.
            data_urls={
                "train": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/sargishunanyan/thermo-classification/resolve/main/data/valid-mini.zip",
            },
        ),
    ]

    def _info(self):
        """Return the dataset metadata (features, homepage, citation, task)."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_CATEGORIES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the per-split archives and wire up the splits.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch and extract
                the zip files declared in the active config.
        """
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["validation"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                },
            ),
        ]

    def _generate_examples(self, files):
        """Yield ``(key, example)`` pairs for every image file in *files*.

        Non-image files are skipped. The label is the name of the directory
        directly containing the image (the archive's class folder).
        """
        for idx, path in enumerate(files):
            file_name = os.path.basename(path)
            # Case-insensitive extension check (fix: the original matched
            # only lowercase extensions and dropped e.g. ``.JPG`` files).
            if file_name.lower().endswith(self._IMAGE_EXTENSIONS):
                yield idx, {
                    "image_file_path": path,
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)),
                }