import sys
import os
import os.path as op
import base64
import zipfile
from getpass import getpass
from tqdm import tqdm
import platform
import subprocess
from urllib.parse import urlparse
import datasets
from datasets.filesystems import S3FileSystem
import boto3
from botocore import UNSIGNED
from botocore.client import Config
# Citation for the dataset (from the PNAS paper)
_CITATION = """\
@article{mehrer2021ecologically,
  title={An ecologically motivated image dataset for deep learning yields better models of human vision},
  author={Mehrer, Johannes and Spoerer, Courtney J and Jones, Emer C and Kriegeskorte, Nikolaus and Kietzmann, Tim C},
  journal={Proceedings of the National Academy of Sciences},
  volume={118},
  number={8},
  year={2021},
  publisher={National Acad Sciences}
}
"""
# Official description of the dataset
_DESCRIPTION = """\
Tired of all the dogs in ImageNet (ILSVRC)? Then ecoset is here for you. 1.5m images
from 565 basic level categories, chosen to be both (i) frequent in linguistic usage,
and (ii) rated by human observers as concrete (e.g. ‘table’ is concrete, ‘romance’
is not). Here we collect resources associated with ecoset. This includes the dataset,
trained deep neural network models, code to interact with them, and published papers
using it.
"""
# Official homepage for the dataset
_HOMEPAGE = "https://www.kietzmannlab.org/ecoset/"
# License for the dataset
_LICENSE = "CC BY-NC-SA 2.0"
# Dataset URLs
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
_URLS = {
    # "codeocean": "https://files.codeocean.com/datasets/verified/0ab003f4-ff2d-4de3-b4f8-b6e349c0e5e5/ecoset.zip?download",  # codeocean cancels after 50GB
    "codeocean": "s3://codeocean-datasets/0ab003f4-ff2d-4de3-b4f8-b6e349c0e5e5/ecoset.zip",
}
# Define the labels available for ecoset
labels = ['cymbals', 'bison', 'lemonade', 'crib', 'chestnut', 'mosquito', 'aloe', 'extinguisher', 'onion', 'starfish', 'basket', 'jar', 'snail', 'mushroom', 'coffin', 'joystick', 'raspberry', 'gearshift', 'tyrannosaurus', 'stadium', 'telescope', 'blueberry', 'hippo', 'cannabis', 'hairbrush', 'river', 'artichoke', 'wallet', 'city', 'bee', 'rifle', 'boar', 'bib', 'envelope', 'silverfish', 'shower', 'curtain', 'pinwheel', 'guillotine', 'snowplow', 'hut', 'jukebox', 'gecko', 'marshmallow', 'lobster', 'flashlight', 'breadfruit', 'cow', 'spoon', 'blender', 'croissant', 'greenhouse', 'church', 'antenna', 'monkey', 'zucchini', 'snake', 'manatee', 'child', 'table', 'winterberry', 'sloth', 'cannon', 'baguette', 'persimmon', 'candelabra', 'necklace', 'flag', 'geyser', 'thermos', 'tweezers', 'chandelier', 'kebab', 'mailbox', 'steamroller', 'crayon', 'lawnmower', 'pomegranate', 'fire', 'violin', 'matchstick', 'train', 'hamster', 'bobsleigh', 'boat', 'bullet', 'forklift', 'clock', 'saltshaker', 'anteater', 'crowbar', 'lightbulb', 'pier', 'muffin', 'paintbrush', 'crawfish', 'bench', 'nectarine', 'eyedropper', 'backpack', 'goat', 'hotplate', 'fishnet', 'robot', 'rice', 'shovel', 'candle', 'blimp', 'bridge', 'mountain', 'coleslaw', 'stagecoach', 'waterfall', 'ladle', 'radiator', 'drain', 'tray', 'house', 'key', 'skunk', 'lake', 'earpiece', 'gazebo', 'blackberry', 'groundhog', 'paperclip', 'cookie', 'milk', 'rug', 'thermostat', 'milkshake', 'scoreboard', 'bean', 'giraffe', 'antelope', 'newsstand', 'camcorder', 'sawmill', 'balloon', 'ladder', 'videotape', 'microphone', 'coin', 'hay', 'moth', 'octopus', 'honeycomb', 'wrench', 'cane', 'bobcat', 'banner', 'newspaper', 'reef', 'worm', 'cucumber', 'beach', 'couch', 'streetlamp', 'rhino', 'ceiling', 'cupcake', 'hourglass', 'caterpillar', 'tamale', 'asparagus', 'flower', 'frog', 'dog', 'knife', 'lamp', 'walnut', 'grape', 'scone', 'peanut', 'ferret', 'kettle', 'elephant', 'oscilloscope', 'weasel', 'guava', 'gramophone', 'stove', 'bamboo', 'chicken', 'guacamole', 'toolbox', 'tractor', 'tiger', 'butterfly', 'coffeepot', 'bus', 'meteorite', 'fish', 'graveyard', 'blowtorch', 'grapefruit', 'cat', 'jellyfish', 'carousel', 'wheat', 'tadpole', 'kazoo', 'raccoon', 'typewriter', 'scissors', 'pothole', 'earring', 'drawers', 'cup', 'warthog', 'wall', 'lighthouse', 'burrito', 'cassette', 'nacho', 'sink', 'seashell', 'bed', 'noodles', 'woman', 'rabbit', 'fence', 'pistachio', 'pencil', 'hotdog', 'ball', 'ship', 'strawberry', 'pan', 'custard', 'dolphin', 'tent', 'bun', 'tortilla', 'tumbleweed', 'playground', 'scallion', 'anchor', 'hare', 'waterspout', 'dough', 'burner', 'kale', 'razor', 'chocolate', 'doughnut', 'squeegee', 'bandage', 'beaver', 'refrigerator', 'cork', 'anvil', 'microchip', 'banana', 'thumbtack', 'chair', 'sharpener', 'bird', 'castle', 'wand', 'doormat', 'celery', 'steak', 'ant', 'apple', 'cave', 'scaffolding', 'bell', 'towel', 'mantis', 'thimble', 'bowl', 'chess', 'pickle', 'lollypop', 'leek', 'barrel', 'dollhouse', 'tapioca', 'spareribs', 'fig', 'apricot', 'strongbox', 'brownie', 'beaker', 'manhole', 'piano', 'whale', 'hammer', 'dishrag', 'pecan', 'highlighter', 'pretzel', 'earwig', 'cogwheel', 'trashcan', 'syringe', 'turnip', 'pear', 'lettuce', 'hedgehog', 'guardrail', 'bubble', 'pineapple', 'burlap', 'moon', 'spider', 'fern', 'binoculars', 'gravel', 'plum', 'scorpion', 'cube', 'squirrel', 'book', 'crouton', 'bag', 'lantern', 'parsley', 'jaguar', 'thyme', 'oyster', 'kumquat', 'chinchilla', 'cherry', 'umbrella', 'bicycle', 'eggbeater', 'pig', 'kitchen', 'fondue', 
'treadmill', 'casket', 'papaya', 'beetle', 'shredder', 'grasshopper', 'anthill', 'chili', 'bottle', 'calculator', 'gondola', 'pizza', 'compass', 'mop', 'hamburger', 'chipmunk', 'bagel', 'outhouse', 'pliers', 'wolf', 'matchbook', 'corn', 'salamander', 'lasagna', 'stethoscope', 'eggroll', 'avocado', 'eggplant', 'mouse', 'walrus', 'sprinkler', 'glass', 'cauldron', 'parsnip', 'canoe', 'pancake', 'koala', 'deer', 'chalk', 'urinal', 'toilet', 'cabbage', 'platypus', 'lizard', 'leopard', 'cake', 'hammock', 'defibrillator', 'sundial', 'beet', 'popcorn', 'spinach', 'cauliflower', 'canyon', 'spacecraft', 'teapot', 'tunnel', 'porcupine', 'jail', 'spearmint', 'dustpan', 'calipers', 'toast', 'drum', 'phone', 'wire', 'alligator', 'vase', 'motorcycle', 'toothpick', 'coconut', 'lion', 'turtle', 'cheetah', 'bugle', 'casino', 'fountain', 'pie', 'bread', 'meatball', 'windmill', 'gun', 'projector', 'chameleon', 'tomato', 'nutmeg', 'plate', 'bulldozer', 'camel', 'sphinx', 'mall', 'hanger', 'ukulele', 'wheelbarrow', 'ring', 'dildo', 'loudspeaker', 'odometer', 'ruler', 'mousetrap', 'breadbox', 'parachute', 'bolt', 'bracelet', 'library', 'otter', 'airplane', 'pea', 'tongs', 'cactus', 'knot', 'shrimp', 'computer', 'sheep', 'television', 'melon', 'kangaroo', 'helicopter', 'birdcage', 'pumpkin', 'dishwasher', 'crocodile', 'stairs', 'garlic', 'barnacle', 'crate', 'lime', 'axe', 'hairpin', 'egg', 'emerald', 'candy', 'stegosaurus', 'broom', 'mistletoe', 'submarine', 'fireworks', 'peach', 'ape', 'chalkboard', 'bumblebee', 'potato', 'battery', 'guitar', 'opossum', 'volcano', 'llama', 'ashtray', 'sieve', 'coliseum', 'cinnamon', 'moose', 'tree', 'donkey', 'wasp', 'corkscrew', 'gargoyle', 'taco', 'macadamia', 'camera', 'mandolin', 'kite', 'cranberry', 'thermometer', 'tofu', 'closet', 'hovercraft', 'escalator', 'horseshoe', 'wristwatch', 'lemon', 'sushi', 'rat', 'rainbow', 'pillow', 'radish', 'granola', 'okra', 'pastry', 'mango', 'dragonfly', 'flashbulb', 'chalice', 'acorn', 'birdhouse', 'gooseberry', 'locker', 'padlock', 'missile', 'clarinet', 'panda', 'iceberg', 'road', 'flea', 'hazelnut', 'cockroach', 'needle', 'omelet', 'desert', 'condom', 'graffiti', 'iguana', 'bucket', 'photocopier', 'blanket', 'microscope', 'horse', 'nest', 'screwdriver', 'toaster', 'car', 'doll', 'salsa', 'man', 'zebra', 'stapler', 'grate', 'truck', 'bear', 'carrot', 'auditorium', 'cashew', 'shield', 'crown', 'altar', 'pudding', 'cheese', 'rhubarb', 'broccoli', 'tower', 'cumin', 'elevator', 'wheelchair', 'flyswatter']
# Handle password entry
_PWD_MSG = "\nIn order to use ecoset, please read the README and License agreement found under:\nhttps://codeocean.com/capsule/9570390\nand enter the mentioned password.\n\nPlease Enter Password:\n"
def check_pass(pw):
    """Compare the base64 encoding of the entered password against the stored reference."""
    if base64.b64encode(pw.encode("ascii")) != b"ZWNvc2V0X21zamtr":
        raise AttributeError("Wrong password! Please try again.")
    else:
        print("Password correct.\n")
# The name of the dataset class usually matches the script name in CamelCase instead of snake_case.
class Ecoset(datasets.GeneratorBasedBuilder):
    """Ecoset is a large, clean and ecologically valid image dataset."""

    VERSION = datasets.Version("1.1.0")

    # If you don't need several sub-sets of your dataset, you can omit the
    # BUILDER_CONFIG_CLASS and BUILDER_CONFIGS attributes. For complex,
    # configurable sub-parts, define your own builder configuration class
    # inheriting from datasets.BuilderConfig. Each configuration can then be
    # loaded by name, e.g.
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Full", version=VERSION, description="We could do different splits of the dataset here. But we don't"),
    ]

    DEFAULT_CONFIG_NAME = "Full"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    def _info(self):
        # Define the dataset features.
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=labels),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset, if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
            # Task template for image classification.
            task_templates=[datasets.tasks.ImageClassification(image_column="image", label_column="label")],
        )
    def _split_generators(self, dl_manager):
        # The user is asked for a password inside the download helpers below.
        # This could also be handled through the dataset config.
        def abslist(path):
            """Helper function returning the entries of os.listdir as absolute paths."""
            return [op.join(path, p) for p in os.listdir(path)]
        def subprocess_call_print(command_list):
            """Execute a subprocess while printing its command-line output."""
            p = subprocess.Popen(command_list, stdout=subprocess.PIPE)
            while True:
                line = p.stdout.readline()
                if not line:
                    break
                # readline() returns bytes, so decode before printing
                print(line.decode(errors="replace").strip())
                sys.stdout.flush()
            p.wait()
        def s3_zipfile_download(source_url, target_dir):
            """Extremely slow download"""
            # ask password
            password = getpass(_PWD_MSG)
            check_pass(password)
            # download and unzip
            print('Using slow Python-based download and unzipping. This can take up to 70h on a typical computer. Sorry.')
            s3 = S3FileSystem(anon=True, use_ssl=False, default_block_size=int(15 * 2**20))
            with s3.open(source_url, "rb") as raw_file:
                with zipfile.ZipFile(raw_file, compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zip_file:
                    member_list = zip_file.namelist()
                    for member in tqdm(member_list, total=len(member_list), desc="Extracting ecoset to disk"):
                        zip_file.extract(member, target_dir, pwd=password.encode("ascii"))
        def subprocess_download(source_url, target_dir):
            """Moderately slow download"""
            # ask password
            password = getpass(_PWD_MSG)
            check_pass(password)
            # download
            print('Using native OS unzipping. This will take about 15h on a typical Linux/Mac and 8h on a typical Windows computer.')
            # for an s3:// URL, urlinfo.netloc is the bucket and urlinfo.path[1:] the key
            urlinfo = urlparse(source_url, allow_fragments=False)
            # create the destination path if it does not exist
            if not op.exists(target_dir):
                os.makedirs(target_dir)
            # download the zip file if it does not exist
            zip_path = op.join(target_dir, "ecoset.zip")
            if not op.exists(zip_path):
                s3 = boto3.client(urlinfo.scheme, config=Config(signature_version=UNSIGNED))
                object_size = s3.head_object(Bucket=urlinfo.netloc, Key=urlinfo.path[1:])["ContentLength"]
                # object_size is in bytes, so let tqdm scale the unit automatically
                with tqdm(total=object_size, unit="B", unit_scale=True, desc="Downloading ecoset") as pbar:
                    s3.download_file(Bucket=urlinfo.netloc, Key=urlinfo.path[1:], Filename=zip_path,
                                     Callback=lambda bytes_transferred: pbar.update(bytes_transferred))
            # unzip using a platform-dependent subprocess
            if platform.system() in ("Linux", "Darwin"):
                subprocess_call_print(["unzip", "-n", "-P", password, "-o", zip_path, "-d", target_dir])
            else:
                subprocess_call_print(["tar.exe", "-xf", zip_path, "-C", target_dir, "--passphrase", password])
        def subprocess_download2(source_url, target_dir):
            """Variant of subprocess_download without a progress bar (currently unused)."""
            # ask password
            password = getpass(_PWD_MSG)
            check_pass(password)
            # download
            print('Using native OS unzipping. This will take about 15h on a typical Linux/Mac and 8h on a typical Windows computer.')
            urlinfo = urlparse(source_url, allow_fragments=False)
            # create the destination path if it does not exist
            if not op.exists(target_dir):
                os.makedirs(target_dir)
            # download the zip file if it does not exist
            zip_path = op.join(target_dir, "ecoset.zip")
            if not op.exists(zip_path):
                s3 = boto3.client(urlinfo.scheme, config=Config(signature_version=UNSIGNED))
                s3.download_file(urlinfo.netloc, urlinfo.path[1:], zip_path)
            # unzip using a platform-dependent subprocess
            if platform.system() in ("Linux", "Darwin"):
                subprocess.call(["unzip", "-n", "-P", password, "-o", zip_path, "-d", target_dir], shell=False)
            else:
                subprocess.call(["tar.exe", "-xf", zip_path, "-C", target_dir, "--passphrase", password], shell=False)
        # Download and unzip using a subprocess. The pure-S3 download was discontinued for being extremely slow.
        archives = dl_manager.download_custom(_URLS["codeocean"], subprocess_download)
        print("Ecoset files are stored under: \n", archives)
        # create a dict mapping each split to its image file paths
        split_dict = {split: [] for split in ("train", "val", "test")}
        for split in split_dict.keys():
            fnames = abslist(op.join(archives, split))
            for f in fnames:
                split_dict[split].extend(abslist(f))
        # return the data splits
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archives": split_dict["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archives": split_dict["val"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "archives": split_dict["test"],
                    "split": "test",
                },
            ),
        ]
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, archives, split):
        """Yields examples."""
        idx = 0
        for archive in archives:
            if any(archive.endswith(i) for i in (".JPEG", ".JPG", ".jpeg", ".jpg")):
                # parent folder names have the form "<synset_id>_<label>";
                # use os.path helpers instead of splitting on "/" for portability
                synset_id, label = op.basename(op.dirname(archive)).split("_")
                # read the image bytes and close the file handle again
                with open(archive, "rb") as file:
                    ex = {"image": {"path": archive, "bytes": file.read()}, "label": label}
                yield idx, ex
                idx += 1
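# A minimal usage sketch (not part of the loading script itself): assuming this
# file is saved as "ecoset.py", the dataset could be loaded with the Hugging Face
# `datasets` library roughly as follows. Note that the first call triggers the
# full password-protected download described above.
#
#   import datasets
#   ecoset = datasets.load_dataset("ecoset.py", "Full")
#   example = ecoset["train"][0]
#   example["image"]   # a decoded PIL image
#   example["label"]   # an integer class index; recover the name with
#                      # ecoset["train"].features["label"].int2str(example["label"])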