# DUDE_loader/DUDE_loader.py — Hugging Face dataset loading script
# (web-viewer chrome removed; provenance: commit 10355f2, "close to done for bboxes", ~6.84 kB)
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DUDE dataset loader"""
import copy
import json
import logging
import os
from pathlib import Path
from typing import List

import pdf2image
from tqdm import tqdm

import datasets
_CITATION = """
@inproceedings{dude2023icdar,
title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
author={Van Landeghem, Jordy et . al.},
booktitle={Proceedings of the ICDAR},
year={2023}
}
"""
_DESCRIPTION = """\
DUDE requires models to reason and understand about document layouts in multi-page images/PDFs to answer questions about them.
Specifically, models need to incorporate a new modality of layout present in the images/PDFs and reason
over it to answer DUDE questions. DUDE Contains X questions and Y and ...
"""
_HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"
_LICENSE = "CC BY 4.0"
_SPLITS = ["sample"] # ["train", "val", "test"]
_URLS = {}
for split in _SPLITS:
_URLS[
f"{split}_annotations"
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_dataset.json"
_URLS[
f"{split}_pdfs"
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_pdfs.tar.gz"
_URLS[
f"{split}_OCR"
] = f"https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_{split}_OCR.tar.gz"
def batched_conversion(pdf_file):
info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
maxPages = info["Pages"]
logger.info(f"{pdf_file} has {str(maxPages)} pages")
images = []
for page in range(1, maxPages + 1, 10):
images.extend(
pdf2image.convert_from_path(
pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages)
)
)
return images
def open_pdf_binary(pdf_file):
with open(pdf_file, "rb") as f:
return f.read()
class DUDE(datasets.GeneratorBasedBuilder):
"""DUDE dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="DUDE",
version=datasets.Version("0.0.1"),
description=_DESCRIPTION,
)
]
DEFAULT_CONFIG_NAME = "DUDE"
def _info(self):
features = datasets.Features(
{
"docId": datasets.Value("string"),
"questionId": datasets.Value("string"),
"question": datasets.Value("string"),
"answers": datasets.Sequence(datasets.Value("string")),
"answers_page_bounding_boxes": datasets.Sequence(
{
"left": datasets.Value("int32"),
"top": datasets.Value("int32"),
"width": datasets.Value("int32"),
"height": datasets.Value("int32"),
"page": datasets.Value("int32"),
}
),
"answers_variants": datasets.Sequence(datasets.Value("string")),
"answer_type": datasets.Value("string"),
"data_split": datasets.Value("string"),
"document": datasets.Value("binary"),
"OCR": datasets.Value("binary"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
splits = []
for split in _SPLITS:
annotations = {}
if f"{split}_annotations" in _URLS: # blind test set
annotations = json.load(open(_URLS[f"{split}_annotations"], "r"))
pdfs_archive_path = dl_manager.download(_URLS[f"{split}_pdfs"])
pdfs_archive = dl_manager.iter_archive(pdfs_archive_path)
OCR_archive_path = dl_manager.download(_URLS[f"{split}_OCR"])
OCR_archive = dl_manager.iter_archive(OCR_archive_path)
splits.append(
datasets.SplitGenerator(
name=split,
gen_kwargs={
"pdfs_archive": pdfs_archive,
"OCR_archive": OCR_archive,
"annotations": annotations,
"split": split,
},
)
)
return splits
def _generate_examples(self, pdfs_archive, OCR_archive, annotations, split):
def retrieve_doc(pdfs_archive, docid):
for file_path, file_obj in pdfs_archive:
path, ext = file_path.split(".")
md5 = path.split("/")[-1]
if md5 == docid:
# images = pdf2image.convert_from_bytes(file_obj.read())
return file_obj.read() # binary
def retrieve_OCR(OCR_archive, docid):
for file_path, file_obj in OCR_archive:
# /DUDE_sample_OCR/OCR/Amazon Textract/md5_{original,due}.json
path, ext = file_path.split(".")
filename = path.split("/")[-1]
md5 = filename.split("_")[0]
if md5 == docid and "original" in filename:
return json.loads(file_obj.read()) # binary
question = self.info.features["question"]
answers = self.info.features["answers"]
extensions = {"pdf", "PDF"}
for i, a in enumerate(annotations):
a["data_split"] = split
a["document"] = retrieve_doc(pdfs_archive, a["docId"])
a["OCR"] = retrieve_OCR(OCR_archive, a["docId"])
# FIXES for faulty generation
#a.pop("answers_page_bounding_boxes") # fix later
if a["answers_page_bounding_boxes"] in [ [], [[]] ]:
a["answers_page_bounding_boxes"] = None
else:
if isinstance(a['answers_page_bounding_boxes'][0], list):
a["answers_page_bounding_boxes"] = a['answers_page_bounding_boxes'][0]
yield i, a