# Lint as: python3
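"""Loading script for the XFUND multilingual form-understanding dataset.

Builds one Hugging Face `datasets` configuration per language. Each example
carries a document's annotated entities (boxes, text, labels, linking),
image metadata, and a resized 224x224 image tensor.
"""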
import json
import logging
import os
import datasets
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList
from torch import tensor


def load_image(image_path):
    """Read an image, resize it to 224x224, and return it as a CHW uint8
    tensor together with the original (width, height)."""
    image = read_image(image_path, format="BGR")
    h = image.shape[0]
    w = image.shape[1]
    img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)])
    # HWC -> CHW; .copy() makes the resized array writeable for torch
    image = tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1)
    return image, (w, h)

_URL = "https://github.com/doc-analysis/XFUND/releases/download/v1.0/"
_LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"]
logger = logging.getLogger(__name__)

class XFUNDConfig(datasets.BuilderConfig):
    """BuilderConfig for XFUND."""

    def __init__(self, lang, additional_langs=None, **kwargs):
        """
        Args:
            lang: string, language for the input text
            additional_langs: optional "+"-joined string of extra training
                languages, or "all" for every language other than `lang`
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.lang = lang
        self.additional_langs = additional_langs

_LABELS = ["header", "question", "answer", "other"]  # entity classes used by ClassLabel below


def _get_box_feature():
    """A bounding box, stored as a sequence of int64 coordinates."""
    return datasets.Sequence(datasets.Value("int64"))


class XFUND(datasets.GeneratorBasedBuilder):
    """XFUND dataset."""

    BUILDER_CONFIGS = [XFUNDConfig(name=f"xfund.{lang}", lang=lang) for lang in _LANG]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "uid": datasets.Value("string"),
                    "document": datasets.Sequence(
                        {
                            "id": datasets.Value("int64"),
                            "box": _get_box_feature(),
                            "text": datasets.Value("string"),
                            "label": datasets.ClassLabel(names=_LABELS),
                            "words": datasets.Sequence(
                                {
                                    "box": _get_box_feature(),
                                    "text": datasets.Value("string"),
                                }
                            ),
                            "linking": datasets.Sequence(
                                datasets.Sequence(datasets.Value("int64"))
                            ),
                        }
                    ),
                    "img_meta": {
                        "fname": datasets.Value("string"),
                        "width": datasets.Value("int64"),
                        "height": datasets.Value("int64"),
                        "format": datasets.Value("string"),
                    },
                    # has to be at the root level, crashes otherwise
                    "img_data": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                },
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": [
                f"{_URL}{self.config.lang}.train.json",
                f"{_URL}{self.config.lang}.train.zip",
            ],
            "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
            # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # each entry is a [json_path, extracted_image_dir] pair for one language
        train_files_for_many_langs = [downloaded_files["train"]]
        val_files_for_many_langs = [downloaded_files["val"]]
        # test_files_for_many_langs = [downloaded_files["test"]]
        if self.config.additional_langs:
            additional_langs = self.config.additional_langs.split("+")
            if "all" in additional_langs:
                additional_langs = [lang for lang in _LANG if lang != self.config.lang]
            for lang in additional_langs:
                urls_to_download = {
                    "train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]
                }
                additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
                train_files_for_many_langs.append(additional_downloaded_files["train"])

        logger.info(
            f"Training on {self.config.lang} with additional langs ({self.config.additional_langs})"
        )
        logger.info(f"Evaluating on {self.config.lang}")
        logger.info(f"Testing on {self.config.lang}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
            ),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
        ]

    def _generate_examples(self, filepaths):
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            # filepath is a [json_path, extracted_image_dir] pair
            with open(filepath[0], encoding="utf-8") as f:
                data = json.load(f)

            for doc in data["documents"]:
                fpath = os.path.join(filepath[1], doc["img"]["fname"])
                image, size = load_image(fpath)
                expected_size = (doc["img"]["width"], doc["img"]["height"])
                if size != expected_size:
                    raise ValueError(
                        f"image has unexpected size: {size}, expected: {expected_size}"
                    )
                doc["img_meta"] = doc.pop("img")
                # read_image loaded the file as BGR and the channels were
                # never swapped, so record the tensor's actual channel order
                doc["img_meta"]["format"] = "BGR"
                doc["img_data"] = image
                yield doc["id"], doc
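

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original script):
    # point `datasets.load_dataset` at this file on disk. "xfund.py" is a
    # placeholder path, and depending on your `datasets` version you may
    # need trust_remote_code=True for script-based builders.
    ds = datasets.load_dataset("xfund.py", name="xfund.zh", trust_remote_code=True)
    print(ds["train"][0]["img_meta"])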