# coding=utf-8
"""Snacks Data Set"""
import os
import json
import datasets
from datasets.tasks import ImageClassification
_CITATION = """
@misc{helff2023vlol,
title={V-LoL: A Diagnostic Dataset for Visual Logical Learning},
author={Lukas Helff and Wolfgang Stammer and Hikaru Shindo and Devendra Singh Dhami and Kristian Kersting},
journal={Dataset available from https://sites.google.com/view/v-lol},
year={2023},
eprint={2306.07743},
archivePrefix={arXiv},
primaryClass={cs.AI}
}
"""
_DESCRIPTION = "This is a diagnostic dataset for visual logical learning. " \
"It consists of 2D images of trains, where each train is either going eastbound or westbound. " \
"The trains are composed of multiple wagons, which are composed of multiple properties. " \
"The task is to predict the direction of the train. " \
"The dataset is designed to test the ability of machine learning models to learn logical rules from visual input."
_HOMEPAGE = "https://huggingface.co/datasets/LukasHug/v-lol-trains/"
_LICENSE = "cc-by-4.0"
_IMAGES_URL = "https://huggingface.co/datasets/LukasHug/v-lol-trains/resolve/main/data"
_DIR = _IMAGES_URL
_URL_DATA = {
"V-LoL-Trains-TheoryX": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-Numerical": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-Complex": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-Numerical": f"{_DIR}/SimpleObjects_numerical_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Blocks-Complex": f"{_DIR}/SimpleObjects_complex_MichalskiTrains_base_scene_len_2-4.zip",
"V-LoL-Trains-TheoryX-len7":
{'train': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
'test': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7-7.zip"},
"V-LoL-Trains-Numerical-len7":
{'train': f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_2-4.zip",
'test': f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7-7.zip"},
"V-LoL-Trains-Complex-len7":
{'train': f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_2-4.zip",
'test': f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7-7.zip"},
"V-LoL-Random-Blocks-TheoryX":
{'train': f"{_DIR}/SimpleObjects_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
'test': f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_2-4.zip"},
"V-LoL-Random-Trains-TheoryX":
{'train': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
'test': f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_2-4.zip"},
# "V-LoL-Trains-TheoryX-len7": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7.zip",
# "V-LoL-Trains-Numerical-len7": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7.zip",
# "V-LoL-Trains-Complex-len7": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7.zip",
# "V-LoL-Random-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_2-4.zip",
# "V-LoL-Random-Trains-TheoryX": f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_2-4.zip",
}
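# Configs mapped to a single archive URL are split 80/20 into train/test in
# _split_generators; configs mapped to a {'train': ..., 'test': ...} dict use a
# separate test archive, e.g. the *-len7 and V-LoL-Random-* generalization settings.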
_NAMES = ["westbound", "eastbound"]
class VLoLConfig(datasets.BuilderConfig):
"""Builder Config for Food-101"""
def __init__(self, data_url, **kwargs):
"""BuilderConfig for Food-101.
Args:
metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
**kwargs: keyword arguments forwarded to super.
"""
super(VLoLConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
if isinstance(data_url, dict):
self.metadata_urls = data_url
else:
self.metadata_urls = {'train': data_url, 'test': None}
class vloltrains(datasets.GeneratorBasedBuilder):
    """V-LoL-Trains Data Set."""
BUILDER_CONFIGS = [
VLoLConfig(
name=name,
description=name,
data_url=data_url,
) for name, data_url in _URL_DATA.items()
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"label": datasets.features.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "label"),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
)
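    # Expected layout of each extracted archive, inferred from the fields read in
    # get_data below (a hedged sketch; the concrete file names are illustrative):
    #
    #   <archive>/images/<image_filename>
    #   <archive>/all_scenes/all_scenes.json
    #
    # where all_scenes.json roughly looks like
    #   {"scenes": [{"image_filename": "...", "train": "east ..."}, ...]}
    # and the first token of "train" ("east" or "west") encodes the label.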
def get_data(self, dl_manager, url):
archive_path = os.path.join(dl_manager.download_and_extract(url), url.split('/')[-1].split('.')[0])
        image_dir = os.path.join(archive_path, "images")
        metadata_pth = os.path.join(archive_path, "all_scenes", "all_scenes.json")
        images, y = [], []
        # load the scene metadata; the first token of each scene's 'train' string ('east'/'west') encodes the label
with open(metadata_pth, 'r') as f:
all_scenes = json.load(f)
for scene in all_scenes['scenes']:
images.append(scene['image_filename'])
train = scene['train']
y.append(int(train.split(' ')[0] == 'east'))
return image_dir, y, images
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if self.config.metadata_urls['test'] is None:
image_dir, y, images = self.get_data(dl_manager, self.config.metadata_urls['train'])
image_dir_train, image_dir_test = image_dir, image_dir
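            # scikit-learn is only required for configs without a dedicated test archive:
            # the single archive is split 80/20 into train/test with a fixed seed for reproducibility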
from sklearn.model_selection import train_test_split
y_train, y_test, images_train, images_test = train_test_split(y, images, test_size=0.2, random_state=0)
else:
image_dir_train, y_train, images_train = self.get_data(dl_manager, self.config.metadata_urls['train'])
image_dir_test, y_test, images_test = self.get_data(dl_manager, self.config.metadata_urls['test'])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"image_dir": image_dir_train, "labels": y_train, "images": images_train}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"image_dir": image_dir_test, "labels": y_test, "images": images_test}
),
]
def _generate_examples(self, image_dir, labels, images):
for i, (image, label) in enumerate(zip(images, labels)):
yield i, {"image": os.path.join(image_dir, image), "label": label}