Datasets:

Modalities:
Image
Text
Formats:
parquet
Libraries:
Datasets
Dask
License:
Thibault Clérice committed on
Commit
3cb5323
0 Parent(s):

First loading attempt

Browse files
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ data.tar.gz filter=lfs diff=lfs merge=lfs -text
2
+ *.parquet filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ env
2
+ .idea
3
+ *.json
4
+ *.arrow
README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ task_categories:
3
+ - object-detection
4
+ license: cc-by-4.0
5
+ pretty_name: LADaS
6
+ size_categories:
7
+ - 1K<n<10K
8
+ ---
9
+
10
+ # LADaS: Layout Analysis Dataset with Segmonto
11
+
build.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datasets import load_dataset
3
+ from datasets import config
4
+ from datasets.utils.py_utils import convert_file_size_to_int
5
+ from datasets.table import embed_table_storage
6
+ from tqdm import tqdm
7
+
8
+
9
def build_parquet(split):
    """Export one split of the LADaS dataset to sharded parquet files under ./data.

    Loads the split through the local loading script, re-encodes each shard so
    the referenced image files are embedded in the table (making the parquet
    files self-contained), then writes one parquet file per shard.

    Parameters
    ----------
    split : str
        Split name understood by the loader: "train", "validation" or "test".

    Source: https://discuss.huggingface.co/t/how-to-save-audio-dataset-with-parquet-format-on-disk/66179
    """
    dataset = load_dataset("./src/LADaS.py", split=split, trust_remote_code=True)

    # NOTE(review): _estimate_nbytes() is a private `datasets` API — revisit on upgrades.
    dataset_nbytes = dataset._estimate_nbytes()
    # Original computed `convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)`
    # right after assigning max_shard_size = '500MB', so the fallback was dead code.
    max_shard_size = convert_file_size_to_int("500MB")
    num_shards = max(int(dataset_nbytes / max_shard_size) + 1, 1)

    shards = (
        dataset.shard(num_shards=num_shards, index=i, contiguous=True)
        for i in range(num_shards)
    )

    def embed_external_files(shard_iter):
        # Re-encode each shard with embed_table_storage so external image
        # files become inline bytes, then restore the caller-visible format.
        for shard in shard_iter:
            prev_format = shard.format  # renamed: don't shadow the `format` builtin
            shard = shard.with_format("arrow")
            shard = shard.map(
                embed_table_storage,
                batched=True,
                batch_size=1000,
                keep_in_memory=True,
            )
            yield shard.with_format(**prev_format)

    shards = embed_external_files(shards)

    os.makedirs("data", exist_ok=True)

    for index, shard in tqdm(
        enumerate(shards),
        desc="Save the dataset shards",
        total=num_shards,
    ):
        shard_path = f"data/{split}-{index:05d}-of-{num_shards:05d}.parquet"
        shard.to_parquet(shard_path)
44
+
45
+
46
if __name__ == "__main__":
    # Export every split of the dataset to sharded parquet files.
    for split_name in ("train", "validation", "test"):
        build_parquet(split_name)
50
+
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25468ecc49668b733cfbbf449c8cf86e833206694bb8775e88e7954d9900c4f2
3
+ size 99720003
data/train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:136ed281a7d0ff7c9b282d4d02197cacfed7b874bababbf94a14a836ae2c3141
3
+ size 474001898
data/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bda0ba4fe11591a96b3cfb5938ba797e174cf87a5a2b6cbdf26430311addac01
3
+ size 542842553
data/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9729c1c52872d237e63bbc17e3881e97a9693d438b21407ea3b8fdf36706bf23
3
+ size 144611550
data/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:985cdfdc856ea74290ce3683227a51eb73cf6a60cca24f24b79a26000621b876
3
+ size 126443927
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85ee6fe09e615473daabc1d7c97973a4adbc71cd92d9229bad0311b0d9d0e67d
3
+ size 240644642
src/LADaS.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ from typing import List, Any
4
+
5
+ import yaml
6
+ import datasets
7
+ from PIL import Image
8
+
9
+
10
# Tagged release of the LADaS GitHub repository that this loader downloads.
_VERSION = "2024-07-17"
_URL = f"https://github.com/DEFI-COLaF/LADaS/archive/refs/tags/{_VERSION}.tar.gz"
_HOMEPAGE = "https://github.com/DEFI-COLaF/LADaS"
_LICENSE = "CC BY 4.0"
# BibTeX entry surfaced through DatasetInfo.citation.
_CITATION = """\
@misc{Clerice_Layout_Analysis_Dataset,
    author = {Clérice, Thibault and Janès, Juliette and Scheithauer, Hugo and Bénière, Sarah and Romary, Laurent and Sagot, Benoit and Bougrelle, Roxane},
    title = {{Layout Analysis Dataset with SegmOnto (LADaS)}},
    url = {https://github.com/DEFI-COLaF/LADaS}
}
"""

# SegmOnto layout-zone labels. The list ORDER defines the integer ids of the
# ClassLabel feature AND is the mapping used to decode the numeric class ids
# found in the label files (_CATEGORIES[int(cls)]) — do not reorder.
_CATEGORIES: list[str] = ["AdvertisementZone", "DigitizationArtefactZone", "DropCapitalZone", "FigureZone",
                          "FigureZone-FigDesc", "FigureZone-Head", "GraphicZone", "GraphicZone-Decoration",
                          "GraphicZone-FigDesc", "GraphicZone-Head", "GraphicZone-Maths", "GraphicZone-Part",
                          "GraphicZone-TextualContent", "MainZone-Date", "MainZone-Entry", "MainZone-Entry-Continued",
                          "MainZone-Form", "MainZone-Head", "MainZone-Lg", "MainZone-Lg-Continued", "MainZone-List",
                          "MainZone-List-Continued", "MainZone-Other", "MainZone-P", "MainZone-P-Continued",
                          "MainZone-Signature", "MainZone-Sp", "MainZone-Sp-Continued",
                          "MarginTextZone-ManuscriptAddendum", "MarginTextZone-Notes", "MarginTextZone-Notes-Continued",
                          "NumberingZone", "TitlePageZone", "TitlePageZone-Index", "QuireMarksZone", "RunningTitleZone",
                          "StampZone", "StampZone-Sticker", "TableZone", "TableZone-Continued", "TableZone-Head"]
32
+
33
+
34
class LadasConfig(datasets.BuilderConfig):
    """BuilderConfig for LADaS; currently adds no options of its own."""

    def __init__(self, *args, **kwargs):
        # Forward everything to BuilderConfig unchanged — the subclass exists
        # only as a hook for future LADaS-specific configuration options.
        super().__init__(*args, **kwargs)
38
+
39
+
40
class LadasDataset(datasets.GeneratorBasedBuilder):
    """Layout Analysis Dataset with Segmonto (LADaS).

    Page images paired with YOLO-style bounding-box annotations whose classes
    are SegmOnto layout zones (see _CATEGORIES).
    """

    VERSION = datasets.Version(_VERSION.replace("-", "."))
    BUILDER_CONFIGS = [
        LadasConfig(
            name="full",
            description="Full version of the dataset",
        )
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Declare the feature schema plus homepage, citation and license."""
        features = datasets.Features({
            "image_path": datasets.Value("string"),
            "image": datasets.Image(),
            "width": datasets.Value("int32"),
            "height": datasets.Value("int32"),
            "objects": datasets.Sequence(
                {
                    # 4 floats per box, parsed straight from the label files;
                    # presumably YOLO-normalized (cx, cy, w, h) — TODO confirm.
                    "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                    "category": datasets.ClassLabel(names=_CATEGORIES),
                }
            ),
        })
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the release tarball and declare the three splits.

        Note: the on-disk folder for the validation split is named "valid",
        while the datasets split name is VALIDATION.
        """
        local_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"local_dir": local_dir, "split": folder},
            )
            for split_name, folder in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "valid"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, local_dir: str, split: str):
        """Yield (key, example) pairs for one split.

        Reads every label file matching
        <local_dir>/*/data/*/<split>/labels/*.txt and pairs it with the .jpg
        of the same stem in the sibling "images" directory.
        """
        idx = 0

        for label_file in glob.glob(
            os.path.join(local_dir, "*", "data", "*", split, "labels", "*.txt")
        ):
            # Each label line is: "<class-id> <f1> <f2> <f3> <f4>".
            objects = []
            with open(label_file) as f:
                for line in f:
                    cls, *bbox = line.strip().split()
                    objects.append(
                        {"category": _CATEGORIES[int(cls)], "bbox": list(map(float, bbox))}
                    )

            # The image lives next to the labels folder: .../images/<stem>.jpg
            parts = os.path.normpath(label_file).split(os.sep)
            image_path = os.path.join(
                *parts[:-2], "images", parts[-1].replace(".txt", ".jpg")
            )
            # os.path.join(*parts) drops the leading separator of an absolute
            # path; restore it so the file can be opened.
            if label_file.startswith("/") and not image_path.startswith("/"):
                image_path = "/" + image_path

            with open(image_path, "rb") as f:
                image_bytes = f.read()

            with Image.open(image_path) as im:
                width, height = im.size

            # BUG FIX: the original yielded an "image_id" key that is absent
            # from the features declared in _info() (which expects
            # "image_path"), and it indexed CHARACTERS of the joined path
            # string (image_path[-4], image_path[-1]) instead of path parts.
            # Yield the declared "image_path" built from the path components.
            yield idx, {
                "image_path": f"{parts[-4]}/{parts[-1]}",
                "image": {"path": image_path, "bytes": image_bytes},
                "width": width,
                "height": height,
                "objects": objects,
            }
            idx += 1