import json
import os

import datasets
from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Image, Sequence, Value

_DESCRIPTION = """\
The PatFig Dataset is a curated collection of over 18,000 patent images from more than
7,000 European patent applications, spanning the year 2020. It aims to provide a
comprehensive resource for research and applications in image captioning, abstract
reasoning, patent analysis, and automated document processing. The overarching goal of
this dataset is to advance research in visually situated language understanding towards
more holistic consumption of the visual and textual data.
"""

_BASE_URL = "https://huggingface.co/datasets/lcolonn/patfig/resolve/main/"
_METADATA_URLS = {
    "annotations_train": "train/annotations_train.zip",
    "annotations_test": "test/annotations_test.zip",
}
_IMAGES_URLS = {
    "train_images": "train/train_images.tar.gz",
    "test_images": "test/test_images.tar.gz",
}
_URLS = {
    "train_images": "train/train_images.tar.gz",
    "test_images": "test/test_images.tar.gz",
    "annotations_train": "train/annotations_train.zip",
    "annotations_test": "test/annotations_test.zip",
}


class PatFig(GeneratorBasedBuilder):
    """DatasetBuilder for the PatFig dataset."""

    def _info(self):
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "image": Image(),
                "image_name": Value("string"),
                "pub_number": Value("string"),
                "title": Value("string"),
                "figs_norm": Sequence(feature=Value("string"), length=-1),
                "short_description": Sequence(feature=Value("string"), length=-1),
                "long_description": Sequence(feature=Value("string"), length=-1),
                "short_description_token_count": Value("int64"),
                "long_description_token_count": Value("int64"),
                "draft_class": Value("string"),
                "cpc_class": Value("string"),
                "relevant_terms": [{
                    "element_identifier": Value("string"),
                    "terms": Sequence(feature=Value("string"), length=-1),
                }],
                "associated_claims": Value("string"),
                "compound": Value("bool"),
                "references": Sequence(feature=Value("string"), length=-1),
            }),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Resolve the relative archive paths in _URLS against the dataset repository,
        # then download and extract them in one call.
        urls_to_download = {key: _BASE_URL + fname for key, fname in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images_dir": f'{downloaded_files["train_images"]}/train',
                    "annotations_dir": f'{downloaded_files["annotations_train"]}/annotations_train.json',
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images_dir": f'{downloaded_files["test_images"]}/test',
                    "annotations_dir": f'{downloaded_files["annotations_test"]}/annotations_test.json',
                },
            ),
        ]

    def _generate_examples(self, images_dir: str, annotations_dir: str):
        # The annotations JSON maps an example id to the image file name and its
        # metadata; image files are grouped in per-publication subdirectories.
        with open(annotations_dir, "r") as f:
            data = json.load(f)

        for idx, record in data.items():
            image_path = os.path.join(images_dir, record["pub_number"], record["image_name"])
            yield idx, {
                "image": image_path,
                **record,
            }
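

# A minimal usage sketch, assuming this script lives in the `lcolonn/patfig` dataset
# repository referenced by _BASE_URL; the split names come from the TRAIN/TEST
# generators defined in _split_generators above. Depending on the installed
# `datasets` version, passing trust_remote_code=True may be required to run a
# script-based loader. Illustrative only, not part of the loading script.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("lcolonn/patfig", split="test")
    example = ds[0]
    # "image" is decoded by the Image() feature into a PIL image; the remaining
    # fields mirror the Features declared in _info.
    print(example["image_name"], example["title"], example["cpc_class"])
    print(example["image"].size)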