# patfig.py — Hugging Face dataset loading script for lcolonn/patfig.
# Restored by commit e22ef0a: Revert "feat: remove loading script".
import datasets
from datasets import load_dataset, Dataset, Value, Sequence, Features, DatasetInfo, GeneratorBasedBuilder, Image
from pathlib import Path
import os
import pandas as pd
# Dataset card description surfaced through `DatasetInfo.description`.
# NOTE: the original opened with `"""\ ` (backslash + space), an invalid
# escape sequence that left a literal backslash in the string instead of
# continuing the line; it also split "7,000" across a line break and had
# the typos "documentprocessing" and "hollistic". All fixed below.
_DESCRIPTION = """\
The PatFig Dataset is a curated collection of over 18,000 patent images from more than 7,000
European patent applications, spanning the year 2020. It aims to provide a comprehensive resource for research
and applications in image captioning, abstract reasoning, patent analysis, and automated document processing. The
overarching goal of this dataset is to advance the research in visually situated language understanding towards more
holistic consumption of the visual and textual data.
"""
# Base URL of the dataset repository on the Hugging Face Hub; file names
# from _URLS are appended to it in PatFig._split_generators.
_URL = "https://huggingface.co/datasets/lcolonn/patfig/resolve/main/"
# Per-split payloads: image archives (extracted by dl_manager) and the
# CSV annotation files that _generate_examples reads with pandas.
_URLS = {
"train_images": "train_images.tar.gz",
"test_images": "test_images.tar.gz",
"annotations_train": "annotations_train.csv",
"annotations_test": "annotations_test.csv",
}
class PatFig(GeneratorBasedBuilder):
    """Dataset builder for the PatFig patent-figure dataset.

    Downloads per-split image archives and CSV annotation files from the
    Hugging Face Hub, then yields one example per annotation row, joining
    each row with the path of its figure image on disk.
    """

    def _info(self):
        """Return the dataset metadata, including the feature schema."""
        schema = Features({
            "image": Image(),
            "image_name": Value("string"),
            "pub_number": Value("string"),
            "title": Value("string"),
            "figs_norm": Sequence(feature=Value("string"), length=-1),
            "short_description": Sequence(feature=Value("string"), length=-1),
            "long_description": Sequence(feature=Value("string"), length=-1),
            "short_description_token_count": Value("int64"),
            "long_description_token_count": Value("int64"),
            "draft_class": Value("string"),
            "cpc_class": Value("string"),
            "relevant_terms": [{'element_identifier': Value("string"), "terms": Sequence(feature=Value("string"), length=-1)}],
            "associated_claims": Value("string"),
            "compound": Value("bool"),
            "references": Sequence(feature=Value(dtype='string'), length=-1),
        })
        return DatasetInfo(description=_DESCRIPTION, features=schema)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download/extract all payloads and describe the train/test splits."""
        # FIXME: downloads every file regardless of which split is requested.
        remote = {key: _URL + fname for key, fname in _URLS.items()}
        local = dl_manager.download_and_extract(remote)
        train = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "images_dir": local["train_images"],
                "annotations_dir": local["annotations_train"],
            },
        )
        # NOTE: only the test archive extracts into a nested "test" folder.
        test = datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "images_dir": f'{local["test_images"]}/test',
                "annotations_dir": local["annotations_test"],
            },
        )
        return [train, test]

    def _generate_examples(self, images_dir: str, annotations_dir: str):
        """Yield (row_index, example) pairs from one split's annotation CSV.

        The image path is built as <images_dir>/<pub_number>/<image_name>;
        if the CSV itself carries an "image" column, its value wins over
        the computed path (dict-merge order preserved from the original).
        """
        annotations = pd.read_csv(annotations_dir)
        for row_index, record in annotations.iterrows():
            figure_path = os.path.join(
                images_dir, record["pub_number"], record["image_name"]
            )
            yield row_index, {"image": figure_path, **record.to_dict()}