# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High-Level dataset."""

import json
from pathlib import Path

import datasets

_CITATION = """\
@misc{}
"""

_DESCRIPTION = """\
High-level Dataset
"""

# github link
_HOMEPAGE = ""

_LICENSE = "Apache 2.0"

# NOTE: the Hub serves raw file bytes under /resolve/; /blob/ returns an HTML
# viewer page, which would make dl_manager download HTML instead of the data.
_IMG = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/images.zip"
_TRAIN = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/train.jsonl"
_TEST = "https://huggingface.co/datasets/michelecafagna26/hl/resolve/main/data/annotations/test.jsonl"


class HL(datasets.GeneratorBasedBuilder):
    """High Level Dataset.

    Each example pairs one COCO-style image (streamed out of a zip archive)
    with its high-level caption annotations: scene, action, rationale and
    object descriptions, plus per-axis annotator confidence scores.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the DatasetInfo describing the example schema."""
        features = datasets.Features(
            {
                "file_name": datasets.Value("string"),
                "image": datasets.Image(),
                # Multiple reference captions per axis.
                "scene": datasets.Sequence(datasets.Value("string")),
                "action": datasets.Sequence(datasets.Value("string")),
                "rationale": datasets.Sequence(datasets.Value("string")),
                "object": datasets.Sequence(datasets.Value("string")),
                # One confidence score per caption, per axis.
                "confidence": {
                    "scene": datasets.Sequence(datasets.Value("float32")),
                    "action": datasets.Sequence(datasets.Value("float32")),
                    "rationale": datasets.Sequence(datasets.Value("float32")),
                    "object": datasets.Sequence(datasets.Value("float32")),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and annotations; define train/test splits.

        The zip archive is downloaded once (not extracted) and streamed with
        ``iter_archive`` for each split.
        """
        # Bug fix: the original bound the download to `image_files` but then
        # referenced an undefined name `archive`, raising NameError.
        image_archive = dl_manager.download(_IMG)
        annotation_files = dl_manager.download_and_extract([_TRAIN, _TEST])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotation_file_path": annotation_files[0],
                    "images": dl_manager.iter_archive(image_archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotation_file_path": annotation_files[1],
                    "images": dl_manager.iter_archive(image_archive),
                },
            ),
        ]

    def _generate_examples(self, annotation_file_path, images):
        """Yield (index, example) pairs.

        Args:
            annotation_file_path: path to a JSONL file; each line is a record
                with a ``file_name`` key and a ``captions`` mapping.
            images: iterator of ``(path_in_archive, file_object)`` pairs from
                ``dl_manager.iter_archive``.
        """
        # Index annotations by image file name, parsing each line exactly once
        # (the original called json.loads twice per line).
        metadata = {}
        with open(annotation_file_path, "r", encoding="utf-8") as fp:
            for line in fp:
                record = json.loads(line)
                metadata[record["file_name"]] = record

        for idx, (img_file_path, img_obj) in enumerate(images):
            file_name = Path(img_file_path).name
            captions = metadata[file_name]["captions"]
            yield idx, {
                "file_name": file_name,
                "image": {"path": img_file_path, "bytes": img_obj.read()},
                "scene": captions["scene"],
                "action": captions["action"],
                "rationale": captions["rationale"],
                "object": captions["object"],
                # NOTE(review): confidence is read from inside "captions";
                # verify this matches the JSONL schema on the Hub.
                "confidence": captions["confidence"],
            }