# coding=utf-8
"""Snacks Data Set"""
import os
import json
import datasets
from datasets.tasks import ImageClassification
from sklearn.model_selection import train_test_split
_CITATION = """
@misc{helff2023vlol,
title={V-LoL: A Diagnostic Dataset for Visual Logical Learning},
author={Lukas Helff and Wolfgang Stammer and Hikaru Shindo and Devendra Singh Dhami and Kristian Kersting},
journal={Dataset available from https://sites.google.com/view/v-lol},
year={2023},
eprint={2306.07743},
archivePrefix={arXiv},
primaryClass={cs.AI}
}
"""
_DESCRIPTION = "This is a diagnostic dataset for visual logical learning. " \
"It consists of 2D images of trains, where each train is either going eastbound or westbound. " \
"The trains are composed of multiple wagons, which are composed of multiple properties. " \
"The task is to predict the direction of the train. " \
"The dataset is designed to test the ability of machine learning models to learn logical rules from visual input."
_HOMEPAGE = "https://huggingface.co/datasets/LukasHug/v-lol-trains/"
_LICENSE = "cc-by-4.0"
_IMAGES_URL = "https://huggingface.co/datasets/LukasHug/v-lol-trains/resolve/main/data"
# _DIR = './data'
_DIR = _IMAGES_URL
_URL_DATA = {
"V-LoL-Trains-TheoryX": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-Numerical": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-Complex": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-Numerical": f"{_DIR}/SimpleObjects_numerical_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-Complex": f"{_DIR}/SimpleObjects_complex_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-TheoryX-len7": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7.zip",
"V-LoL-Trains-Numerical-len7": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7.zip",
"V-LoL-Trains-Complex-len7": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7.zip",
"V-LoL-Random-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_2-4.zip",
"V-LoL-Random-Trains-TheoryX": f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_2-4.zip",
}
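# Each archive is expected to unpack into a `data/` folder containing `images/` and
# `all_scenes/all_scenes.json` (see `_split_generators` below).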
_NAMES = ["westbound", "eastbound"]
class VLoLConfig(datasets.BuilderConfig):
"""Builder Config for Food-101"""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for Food-101.
Args:
data_url: `string`, url to download the zip file from.
metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
**kwargs: keyword arguments forwarded to super.
"""
super(VLoLConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.data_url = data_url
class vloltrains(datasets.GeneratorBasedBuilder):
    """V-LoL-Trains dataset builder."""
BUILDER_CONFIGS = [
VLoLConfig(
name=name,
description=name,
data_url=data_url,
) for name, data_url in _URL_DATA.items()
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.features.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
archive_path = dl_manager.download_and_extract(self.config.data_url)
image_dir = os.path.join(archive_path, "data", "images")
metadata_pth = os.path.join(archive_path, "data", "all_scenes", "all_scenes.json")
        self.images, self.y = [], []
        # load scene metadata
with open(metadata_pth, 'r') as f:
all_scenes = json.load(f)
for scene in all_scenes['scenes']:
self.images.append(scene['image_filename'])
train = scene['train']
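                # The 'train' entry is a textual description that starts with the direction:
                # 'east' maps to 1 ("eastbound"), anything else to 0 ("westbound"), matching _NAMES.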
y = int(train.split(' ')[0] == 'east')
self.y.append(y)
# split y and images into train and test
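        # (80/20 split with a fixed random_state so train/test membership is reproducible)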
        self.y_train, self.y_test, self.images_train, self.images_test = train_test_split(
            self.y, self.images, test_size=0.2, random_state=0
        )
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"image_dir": image_dir, "labels": self.y_train, "images": self.images_train}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"image_dir": image_dir, "labels": self.y_test, "images": self.images_test}
),
]
def _generate_examples(self, image_dir, labels, images):
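        # Yielding the image file path is enough here: the `Image()` feature declared in
        # `_info` takes care of reading and decoding the file when an example is accessed.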
for i, (image, label) in enumerate(zip(images, labels)):
yield i, {"image": os.path.join(image_dir, image), "label": label}
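# Quick local smoke test (a sketch, not part of the Hub loading path): run this file directly
# to build one configuration end-to-end. Assumes network access to the Hub and a `datasets`
# version that still supports loading from a local script path.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, "V-LoL-Trains-TheoryX")
    print(ds)
    print(ds["train"][0]["label"])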