|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""DUDE dataset loader""" |
|
|
|
import copy
import json
import logging
import os
from pathlib import Path
from typing import List

import pdf2image
from tqdm import tqdm

import datasets
|
|
|
|
|
_CITATION = """ |
|
@inproceedings{dude2023icdar, |
|
title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)}, |
|
author={Van Landeghem, Jordy et . al.}, |
|
booktitle={Proceedings of the ICDAR}, |
|
year={2023} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
DUDE requires models to reason and understand about document layouts in multi-page images/PDFs to answer questions about them. |
|
Specifically, models need to incorporate a new modality of layout present in the images/PDFs and reason |
|
over it to answer DUDE questions. DUDE Contains X questions and Y and ... |
|
""" |
|
|
|
_HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23" |
|
|
|
_LICENSE = "CC BY 4.0" |
|
|
|
_SPLITS = ["sample"] |
|
|
|
_URLS = {} |
|
for split in _SPLITS: |
|
_URLS[ |
|
f"{split}_annotations" |
|
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_dataset.json" |
|
_URLS[ |
|
f"{split}_pdfs" |
|
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_pdfs.tar.gz" |
|
_URLS[ |
|
f"{split}_OCR" |
|
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_OCR.tar.gz" |
|
|
|
|
|
def batched_conversion(pdf_file, batch_size=10, dpi=200):
    """Render every page of a PDF to a PIL image, in batches.

    Rendering in batches of ``batch_size`` pages bounds poppler's peak
    memory use on long documents.

    Args:
        pdf_file: path to the PDF file on disk.
        batch_size: number of pages converted per ``convert_from_path``
            call (default 10, the original hard-coded chunk size).
        dpi: render resolution (default 200, as before).

    Returns:
        List of ``PIL.Image`` objects, one per page, in page order.
    """
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    max_pages = info["Pages"]

    # BUG FIX: the original referenced an undefined global ``logger``
    # (NameError on every call); use a module logger instead.
    logging.getLogger(__name__).info("%s has %s pages", pdf_file, max_pages)

    images = []
    for first_page in range(1, max_pages + 1, batch_size):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file,
                dpi=dpi,
                first_page=first_page,
                last_page=min(first_page + batch_size - 1, max_pages),
            )
        )
    return images
|
|
|
|
|
def open_pdf_binary(pdf_file):
    """Return the raw bytes of ``pdf_file``."""
    return Path(pdf_file).read_bytes()
|
|
|
|
|
class DUDE(datasets.GeneratorBasedBuilder):
    """DUDE dataset: question answering over multi-page PDF documents.

    Each example pairs a question with the raw bytes of a multi-page PDF,
    its OCR output, the gold answers/variants, and (optionally) bounding
    boxes locating the answer evidence on the pages.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="DUDE",
            version=datasets.Version("0.0.1"),
            description=_DESCRIPTION,
        )
    ]

    DEFAULT_CONFIG_NAME = "DUDE"

    def _info(self):
        """Declare the feature schema shared by all splits."""
        features = datasets.Features(
            {
                "docId": datasets.Value("string"),
                "questionId": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.Sequence(datasets.Value("string")),
                # One record per evidence box; None when no boxes were annotated.
                "answers_page_bounding_boxes": datasets.Sequence(
                    {
                        "left": datasets.Value("int32"),
                        "top": datasets.Value("int32"),
                        "width": datasets.Value("int32"),
                        "height": datasets.Value("int32"),
                        "page": datasets.Value("int32"),
                    }
                ),
                "answers_variants": datasets.Sequence(datasets.Value("string")),
                "answer_type": datasets.Value("string"),
                "data_split": datasets.Value("string"),
                "document": datasets.Value("binary"),  # raw PDF bytes
                "OCR": datasets.Value("binary"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download each split's assets and build its SplitGenerator.

        Returns one ``datasets.SplitGenerator`` per entry in ``_SPLITS``.
        """
        splits = []
        for split in _SPLITS:
            annotations = {}
            if f"{split}_annotations" in _URLS:
                # BUG FIX: the annotations JSON lives at a remote URL, so it
                # must go through dl_manager.download first — the original
                # passed the URL string straight to open(), which raises
                # FileNotFoundError.  Also close the file via a context manager.
                annotations_path = dl_manager.download(_URLS[f"{split}_annotations"])
                with open(annotations_path, "r") as f:
                    annotations = json.load(f)
            pdfs_archive_path = dl_manager.download(_URLS[f"{split}_pdfs"])
            pdfs_archive = dl_manager.iter_archive(pdfs_archive_path)
            OCR_archive_path = dl_manager.download(_URLS[f"{split}_OCR"])
            OCR_archive = dl_manager.iter_archive(OCR_archive_path)
            splits.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "pdfs_archive": pdfs_archive,
                        "OCR_archive": OCR_archive,
                        "annotations": annotations,
                        "split": split,
                    },
                )
            )
        return splits

    def _generate_examples(self, pdfs_archive, OCR_archive, annotations, split):
        """Yield ``(index, example)`` pairs for one split.

        BUG FIX: the original scanned the archives linearly once per
        question, but ``dl_manager.iter_archive`` yields a single-pass
        generator — every lookup resumed where the previous one stopped, so
        any document ordered before the current archive position could never
        be found again.  Both archives are now indexed into dicts (keyed by
        the document md5) exactly once.  This holds the whole archive in
        memory, which is acceptable for the released sample split.
        """

        def _index_pdfs(archive):
            # Map md5 -> raw PDF bytes.  Member paths look like "<dir>/<md5>.<ext>".
            # os.path.splitext instead of str.split(".") so paths containing
            # extra dots do not crash the unpacking.
            docs = {}
            for file_path, file_obj in archive:
                stem, _ext = os.path.splitext(file_path)
                md5 = stem.split("/")[-1]
                docs[md5] = file_obj.read()
            return docs

        def _index_ocr(archive):
            # Map md5 -> parsed OCR JSON; only the "original" OCR variant is
            # kept, matching the original filter.  Filenames look like
            # "<dir>/<md5>_<variant>.<ext>".
            # NOTE(review): the schema declares "OCR" as binary, yet this
            # stores the json.loads result (as the original code did) —
            # confirm whether raw bytes are expected instead.
            ocr = {}
            for file_path, file_obj in archive:
                stem, _ext = os.path.splitext(file_path)
                filename = stem.split("/")[-1]
                md5 = filename.split("_")[0]
                if "original" in filename:
                    ocr[md5] = json.loads(file_obj.read())
            return ocr

        documents = _index_pdfs(pdfs_archive)
        ocr_results = _index_ocr(OCR_archive)

        for i, a in enumerate(annotations):
            a["data_split"] = split
            a["document"] = documents.get(a["docId"])
            a["OCR"] = ocr_results.get(a["docId"])

            # Normalise empty box annotations to None, and unwrap the extra
            # list nesting some annotations carry ([[{...}]] -> [{...}]).
            if a["answers_page_bounding_boxes"] in [[], [[]]]:
                a["answers_page_bounding_boxes"] = None
            elif isinstance(a["answers_page_bounding_boxes"][0], list):
                a["answers_page_bounding_boxes"] = a["answers_page_bounding_boxes"][0]

            yield i, a
|
|