# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""OCR-IDL: OCR annotations for the Industry Document Library."""
import csv
import json

import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{biten2022ocr,
    title = {OCR-IDL: OCR annotations for Industry Document Library dataset},
    author = {{Biten}, Ali Furkan and {Tito}, Ruben and {Gomez}, Lluis and {Valveny}, Ernest and {Karatzas}, Dimosthenis},
    journal = {arXiv preprint arXiv:2202.12985},
    year = 2022,
    eid = {arXiv:2202.12985},
    pages = {arXiv:2202.12985},
    archivePrefix = {arXiv},
    eprint = {2202.12985},
}
"""
_DESCRIPTION = """\
The OCR-IDL Dataset contains the OCR annotations of 26M pages of the Industry Document Library (IDL).\
It is especially intended for text-layout self-supervised tasks such as Masked Language Modeling or Text De-noising.\
However, we also include the URL of each document so that it can be downloaded for image-text alignment tasks.
"""
_URL = "http://datasets.cvc.uab.es/UCSF_IDL/"
_PROJECT_URL = "https://github.com/furkanbiten/idl_data"
# NOTE: These SQuAD-style file names are leftovers from the template this script
# was adapted from; `_split_generators` below does not use them.
_URLS = {
    "train": _URL + "train-v1.1.json",
    "dev": _URL + "dev-v1.1.json",
}

class OCRIDLConfig(datasets.BuilderConfig):
    """BuilderConfig for OCR-IDL."""

    def __init__(self, **kwargs):
        """BuilderConfig for OCR-IDL.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(OCRIDLConfig, self).__init__(**kwargs)

class OCR_IDL(datasets.GeneratorBasedBuilder):
    """OCR-IDL: OCR annotations for the Industry Document Library. Version 1.0.0."""

    BUILDER_CONFIGS = [
        OCRIDLConfig(
            name="OCR-IDL",
            version=datasets.Version("1.0.0", ""),
            description=_DESCRIPTION,  # Only one config, so reuse the global dataset description.
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "document_url": datasets.Value("string"),
                    "page_id": datasets.Value("string"),
                    "page_height": datasets.Value("int32"),
                    "page_width": datasets.Value("int32"),
                    # ASSUMPTION: the original script declared the five per-word
                    # features below as empty lists, which `datasets` does not
                    # accept; plausible sequence types are filled in instead.
                    "words": datasets.Sequence(datasets.Value("string")),
                    "boxes": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                    "word_lines_id": datasets.Sequence(datasets.Value("int32")),
                    "text_types": datasets.Sequence(datasets.Value("string")),
                    "recog_conf": datasets.Sequence(datasets.Value("float32")),
                }
            ),
            # No default supervised_keys: the dataset is meant for self-supervised pre-training.
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download("https://huggingface.co/datasets/rubentito/OCR-IDL/resolve/main/val.csv")
        # data_dir = dl_manager.download_and_extract('http://datasets.cvc.uab.es/UCSF_IDL/Samples/imdb_sample_v2.tar.gz')
        # NOTE: As in the original script, both splits currently read the same downloaded file.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        # NOTE: This body is a minimal sketch. The original script still carried
        # the SQuAD template parsing code here, which matched neither the
        # features declared in `_info` nor the CSV index downloaded in
        # `_split_generators`. It assumes one page per CSV row, with the
        # list-valued columns ("words", "boxes", "word_lines_id", "text_types",
        # "recog_conf") stored as JSON-encoded strings.
        with open(filepath, encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            for key, row in enumerate(reader):
                yield key, {
                    "document_id": row["document_id"],
                    "document_url": row["document_url"],
                    "page_id": row["page_id"],
                    "page_height": int(row["page_height"]),
                    "page_width": int(row["page_width"]),
                    "words": json.loads(row["words"]),
                    "boxes": json.loads(row["boxes"]),
                    "word_lines_id": json.loads(row["word_lines_id"]),
                    "text_types": json.loads(row["text_types"]),
                    "recog_conf": json.loads(row["recog_conf"]),
                }
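

# Usage sketch: how this loading script would typically be invoked. The
# repository path "rubentito/OCR-IDL" is an assumption based on the download
# URL in `_split_generators`; adjust it if the script is hosted elsewhere.
if __name__ == "__main__":
    dataset = datasets.load_dataset("rubentito/OCR-IDL", split="validation")
    example = dataset[0]
    print(example["document_id"], example["page_id"])
    print(example["words"][:10])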