Add dataset loading script
synthtiger.py +221 -0
synthtiger.py
ADDED
@@ -0,0 +1,221 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SynthTiger synthetic OCR dataset in huggingface format.

https://github.com/clovaai/synthtiger
https://drive.google.com/drive/folders/1faHxo6gVeUmmFKJf8dxFZf_yRjamUL96
synthtiger_v1.1.zip (38G) (md5: b2757a7e2b5040b14ed64c473533b592)

Unpacked dataset structure:
```
gt.txt
images/
    0/
        0.jpg
        1.jpg
        ...
        9998.jpg
        9999.jpg
    1/
        ...
    998/
        ...
    999/
        9990000.jpg
        9990001.jpg
        ...
        9999998.jpg
        9999999.jpg
```

gt.txt format (tab delimited):
```
images/0/0.jpg	10
images/0/1.jpg	date:
...
images/999/9999998.jpg	STUFFIER
images/999/9999999.jpg	Re:
```"""
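# If the archive referenced in the docstring above is fetched manually, it can
# be checked against the md5 given there (illustrative shell command only, not
# used by this script):
#   $ md5sum synthtiger_v1.1.zip
#   b2757a7e2b5040b14ed64c473533b592  synthtiger_v1.1.zip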
import os
import shutil

from PIL import Image

import datasets


_CITATION = """\
@inproceedings{yim2021synthtiger,
  title={Synthtiger: Synthetic text image generator towards better text recognition models},
  author={Yim, Moonbin and Kim, Yoonsik and Cho, Han-Cheol and Park, Sungrae},
  booktitle={International Conference on Document Analysis and Recognition},
  pages={109--124},
  year={2021},
  organization={Springer}
}
"""

_DESCRIPTION = """\
A synthetic scene text OCR dataset derived from the
[SynthTIGER](https://github.com/clovaai/synthtiger) generator.
"""

_HOMEPAGE = "https://github.com/clovaai/synthtiger"

_LICENSE = """\
SynthTIGER
Copyright (c) 2021-present NAVER Corp.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

_FOLDER_URL = "https://drive.google.com/drive/folders/18wjIad7_R9AXqXlQ4bhgOxMZY_8XRCdh?usp=sharing"
_PART_URLS = {
    "synthtiger_v1.1.zip.00": "https://drive.google.com/file/d/1ELxTy95sAFQ1ZD81QqS_dbMBnqi6N7aR/view?usp=sharing",
    "synthtiger_v1.1.zip.01": "https://drive.google.com/file/d/1mf9rNBlAVC_xMRUkXmjJSyIBhVgPstPR/view?usp=sharing",
    "synthtiger_v1.1.zip.02": "https://drive.google.com/file/d/1AN0d96JaCZRq37UP3ZHTnEjl-1YwXajR/view?usp=sharing",
    "synthtiger_v1.1.zip.03": "https://drive.google.com/file/d/1WCj-S7GXDlRpLEcHRdnIyjVZTcoY1zJU/view?usp=sharing",
    "synthtiger_v1.1.zip.04": "https://drive.google.com/file/d/1iWOh-dUUSTDOD9jYbSSe5Iq1qiDyYtOg/view?usp=sharing",
    "synthtiger_v1.1.zip.05": "https://drive.google.com/file/d/1QtEoZpNsTCUtq4DuZ5IwdJpuFpeNJCXK/view?usp=sharing",
    "synthtiger_v1.1.zip.06": "https://drive.google.com/file/d/1lTlRRxqkhkgA3CMJ5cSz2-6n_F07KnJ-/view?usp=sharing",
    "synthtiger_v1.1.zip.07": "https://drive.google.com/file/d/1iERfjCHzX9_i-WchYazaK9mF6--41r6b/view?usp=sharing",
    "synthtiger_v1.1.zip.08": "https://drive.google.com/file/d/1pxhGbTP3tDMh8cJTz-d7A-gq4_nWSZIO/view?usp=sharing",
    "synthtiger_v1.1.zip.09": "https://drive.google.com/file/d/1WJ-QrHsp6joWAU5JVs3TgpMCvz-E4YKq/view?usp=sharing",
    "synthtiger_v1.1.zip.10": "https://drive.google.com/file/d/1Pp3TRa60oUDHLIhN0oQ_y4SukXxfIjcw/view?usp=sharing",
}
_ARCHIVE_ROOT = "synthtiger_v1.1"


def _get_google_drive_url(url):
    """Re-format the "Get link" version of a Google Drive URL."""
    base_url = "https://drive.google.com/uc?id="
    split_url = url.split("/")
    return base_url + split_url[5]
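# Illustrative example of the reformatting above (hypothetical file id, not one
# of the real ids in _PART_URLS):
#   _get_google_drive_url("https://drive.google.com/file/d/FILE_ID/view?usp=sharing")
#   -> "https://drive.google.com/uc?id=FILE_ID"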


class Synthtiger(datasets.GeneratorBasedBuilder):
    """A synthetic scene text OCR dataset generated by SynthTIGER."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Define dataset metadata and feature types."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "transcript": datasets.Value("string"),
                "height": datasets.Value("uint32"),
                "width": datasets.Value("uint32"),
                "aspect_ratio": datasets.Value("float32"),
                "script": datasets.Value("string"),
                "lang": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "transcript"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
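
    # The features above map to records like the following, as yielded by
    # _generate_examples() below (the image size and aspect ratio are
    # hypothetical; the transcript is one of the gt.txt examples from the
    # module docstring, and "script"/"lang" are constants):
    #   {"image": <PIL.Image.Image>, "transcript": "date:", "height": 32,
    #    "width": 128, "aspect_ratio": 4.0, "script": "Latn", "lang": "en"}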

    def _split_generators(self, dl_manager):
        """Build split metadata and fetch data if needed."""
        folder_url = _FOLDER_URL
        part_urls = {k: _get_google_drive_url(v) for k, v in _PART_URLS.items()}

        def custom_download(src_url: str, dest_path: str):
            """Internal utility to download and combine archive parts.

            As of October 23, 2021, the SynthTIGER dataset v1.1 is distributed
            via Google Drive. It is a ~38 GB zip file split into 11 parts of
            ~3.5 GB each with the `split` utility. This means that the parts
            are not usable archives on their own; they must be concatenated
            back into the whole zip file before any of the contents can be
            extracted.

            This inner function provides a closure around the `dl_manager` and
            `part_urls` variables so that the download manager's custom
            download function can track and cache the download of the parts as
            well as the concatenated version. This makes it easier to restart
            an interrupted download of the parts at the cost of disk space.

            The `src_url` is chosen to be the viewable folder URL for Google
            Drive, but it serves no purpose except to be a hashable string
            that allows the concatenated archive to be cached.

            The parts of the download will not be cleaned up. This would be
            easy to add if needed, since the paths are known, but keeping them
            makes the download manager's stats more accurate. The parts should
            not be needed once the full archive has been cached and can be
            deleted manually.
            """
            downloaded_part_paths = dl_manager.download(part_urls)
            with open(dest_path, "wb") as concatenated_file:
                for name, path in downloaded_part_paths.items():
                    with open(path, "rb") as part_file:
                        shutil.copyfileobj(part_file, concatenated_file)
            return dest_path
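
        # Note: custom_download concatenates the parts in _PART_URLS insertion
        # order (.00 through .10), which reproduces the original archive. The
        # rough shell equivalent would be
        # `cat synthtiger_v1.1.zip.* > synthtiger_v1.1.zip`
        # (illustrative command only, not used by this script).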
        archive_path = dl_manager.download_custom(folder_url, custom_download)
        extracted_dir = dl_manager.extract(archive_path, num_proc=1)
        data_dir = os.path.join(extracted_dir, _ARCHIVE_ROOT)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, data_dir, split):
        """Iterate over dataset images and annotations."""
        gt_path = os.path.join(data_dir, "gt.txt")
        with open(gt_path, encoding="utf-8") as gt_file:
            for key, line in enumerate(gt_file):
                rel_path, transcript = line.strip().split("\t")
                image_path = os.path.join(data_dir, rel_path)
                image = Image.open(image_path)
                width, height = image.size
                aspect_ratio = width / height
                yield key, {
                    "image": image,
                    "transcript": transcript,
                    "height": height,
                    "width": width,
                    "aspect_ratio": aspect_ratio,
                    "script": "Latn",
                    "lang": "en",
                }
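
A minimal loading sketch for this script; the repository id below is a placeholder, not something defined by this commit:

```
from datasets import load_dataset

# "<namespace>/synthtiger" is a placeholder for wherever this script is hosted.
dataset = load_dataset("<namespace>/synthtiger", split="train")
example = dataset[0]
print(example["transcript"], example["width"], example["height"])
```

Note that the first call triggers the full ~38 GB download and concatenation described in `_split_generators`, so expect significant disk usage and download time.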