Dataset: keremberke/pokemon-classification
Tasks: Image Classification
Size: 1K - 10K
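The dataset ships with the following Hugging Face datasets loading script, which defines the class names, the download location of each split, and how examples are generated from the extracted image folders.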
import os

import datasets
from datasets.tasks import ImageClassification
_HOMEPAGE = "https://universe.roboflow.com/robert-demo-qvail/pokedex/dataset/14"
_LICENSE = "Public Domain"
_CITATION = """\
@misc{ pokedex_dataset,
    title = { Pokedex Dataset },
    type = { Open Source Dataset },
    author = { Lance Zhang },
    howpublished = { \\url{ https://universe.roboflow.com/robert-demo-qvail/pokedex } },
    url = { https://universe.roboflow.com/robert-demo-qvail/pokedex },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { dec },
    note = { visited on 2023-01-14 },
}
"""
# The 150 Pokemon classes present in the dataset (one folder per class in the image archives).
_CATEGORIES = ['Porygon', 'Goldeen', 'Hitmonlee', 'Hitmonchan', 'Gloom', 'Aerodactyl', 'Mankey', 'Seadra', 'Gengar', 'Venonat', 'Articuno', 'Seaking', 'Dugtrio', 'Machop', 'Jynx', 'Oddish', 'Dodrio', 'Dragonair', 'Weedle', 'Golduck', 'Flareon', 'Krabby', 'Parasect', 'Ninetales', 'Nidoqueen', 'Kabutops', 'Drowzee', 'Caterpie', 'Jigglypuff', 'Machamp', 'Clefairy', 'Kangaskhan', 'Dragonite', 'Weepinbell', 'Fearow', 'Bellsprout', 'Grimer', 'Nidorina', 'Staryu', 'Horsea', 'Electabuzz', 'Dratini', 'Machoke', 'Magnemite', 'Squirtle', 'Gyarados', 'Pidgeot', 'Bulbasaur', 'Nidoking', 'Golem', 'Dewgong', 'Moltres', 'Zapdos', 'Poliwrath', 'Vulpix', 'Beedrill', 'Charmander', 'Abra', 'Zubat', 'Golbat', 'Wigglytuff', 'Charizard', 'Slowpoke', 'Poliwag', 'Tentacruel', 'Rhyhorn', 'Onix', 'Butterfree', 'Exeggcute', 'Sandslash', 'Pinsir', 'Rattata', 'Growlithe', 'Haunter', 'Pidgey', 'Ditto', 'Farfetchd', 'Pikachu', 'Raticate', 'Wartortle', 'Vaporeon', 'Cloyster', 'Hypno', 'Arbok', 'Metapod', 'Tangela', 'Kingler', 'Exeggutor', 'Kadabra', 'Seel', 'Voltorb', 'Chansey', 'Venomoth', 'Ponyta', 'Vileplume', 'Koffing', 'Blastoise', 'Tentacool', 'Lickitung', 'Paras', 'Clefable', 'Cubone', 'Marowak', 'Nidorino', 'Jolteon', 'Muk', 'Magikarp', 'Slowbro', 'Tauros', 'Kabuto', 'Spearow', 'Sandshrew', 'Eevee', 'Kakuna', 'Omastar', 'Ekans', 'Geodude', 'Magmar', 'Snorlax', 'Meowth', 'Pidgeotto', 'Venusaur', 'Persian', 'Rhydon', 'Starmie', 'Charmeleon', 'Lapras', 'Alakazam', 'Graveler', 'Psyduck', 'Rapidash', 'Doduo', 'Magneton', 'Arcanine', 'Electrode', 'Omanyte', 'Poliwhirl', 'Mew', 'Alolan Sandslash', 'Mewtwo', 'Weezing', 'Gastly', 'Victreebel', 'Ivysaur', 'MrMime', 'Shellder', 'Scyther', 'Diglett', 'Primeape', 'Raichu']
class POKEMONCLASSIFICATIONConfig(datasets.BuilderConfig):
    """Builder Config for pokemon-classification"""

    def __init__(self, data_urls, **kwargs):
        """
        BuilderConfig for pokemon-classification.

        Args:
            data_urls: `dict`, mapping from split name to the URL of the zip file to download.
            **kwargs: keyword arguments forwarded to super.
        """
        super(POKEMONCLASSIFICATIONConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
class POKEMONCLASSIFICATION(datasets.GeneratorBasedBuilder):
    """pokemon-classification image classification dataset"""

    VERSION = datasets.Version("1.0.0")

    # The "full" config downloads the real train/validation/test archives; the "mini"
    # config points all three splits at the small valid-mini archive.
    BUILDER_CONFIGS = [
        POKEMONCLASSIFICATIONConfig(
            name="full",
            description="Full version of pokemon-classification dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/test.zip",
            },
        ),
        POKEMONCLASSIFICATIONConfig(
            name="mini",
            description="Mini version of pokemon-classification dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/keremberke/pokemon-classification/resolve/main/data/valid-mini.zip",
            },
        ),
    ]
    def _info(self):
        # Each example exposes the original file path, the decoded image, and its class label.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_CATEGORIES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )
    def _split_generators(self, dl_manager):
        # Download and extract the zip archive of each split, then iterate lazily over the extracted files.
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["validation"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                },
            ),
        ]
    def _generate_examples(self, files):
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            # Keep only image files; the label is the name of the directory containing the image.
            if file_name.endswith((".jpg", ".png", ".jpeg", ".bmp", ".tif", ".tiff")):
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)),
                }
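
A minimal usage sketch, for reference. It assumes the script above is hosted as the keremberke/pokemon-classification repository (the id that appears in the download URLs), and a datasets 2.x release that still loads dataset scripts; newer releases gate scripts behind trust_remote_code=True or drop script support entirely.

from datasets import load_dataset

# "mini" is the small config defined above; use name="full" for the complete dataset.
# Drop trust_remote_code on datasets releases older than 2.16, which do not accept it.
dataset = load_dataset("keremberke/pokemon-classification", name="mini", split="train", trust_remote_code=True)

example = dataset[0]
print(example["image_file_path"])                              # path of the source image file
print(dataset.features["labels"].int2str(example["labels"]))   # class name for this image
print(example["image"].size)                                   # decoded PIL image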