parquet-converter committed on
Commit
5dbb5cf
·
1 Parent(s): 0c96f19

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,37 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.model filter=lfs diff=lfs merge=lfs -text
11
- *.msgpack filter=lfs diff=lfs merge=lfs -text
12
- *.onnx filter=lfs diff=lfs merge=lfs -text
13
- *.ot filter=lfs diff=lfs merge=lfs -text
14
- *.parquet filter=lfs diff=lfs merge=lfs -text
15
- *.pb filter=lfs diff=lfs merge=lfs -text
16
- *.pt filter=lfs diff=lfs merge=lfs -text
17
- *.pth filter=lfs diff=lfs merge=lfs -text
18
- *.rar filter=lfs diff=lfs merge=lfs -text
19
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
- *.tar.* filter=lfs diff=lfs merge=lfs -text
21
- *.tflite filter=lfs diff=lfs merge=lfs -text
22
- *.tgz filter=lfs diff=lfs merge=lfs -text
23
- *.wasm filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
funsd-layoutlmv3.py DELETED
@@ -1,143 +0,0 @@
1
- # coding=utf-8
2
- '''
3
- Reference: https://huggingface.co/datasets/nielsr/funsd/blob/main/funsd.py
4
- '''
5
- import json
6
- import os
7
-
8
- from PIL import Image
9
-
10
- import datasets
11
-
12
def load_image(image_path):
    """Load an image from disk as RGB.

    Args:
        image_path: Path to the image file.

    Returns:
        Tuple of (PIL.Image in RGB mode, (width, height) in pixels).
    """
    # Use a context manager so the underlying file handle is closed
    # promptly instead of leaking until garbage collection; convert()
    # returns a new, fully-loaded image, so closing the original is safe.
    with Image.open(image_path) as img:
        image = img.convert("RGB")
    return image, image.size
16
-
17
def normalize_bbox(bbox, size):
    """Rescale an absolute pixel box to the 0-1000 LayoutLM coordinate grid.

    Args:
        bbox: Absolute [x0, y0, x1, y1] coordinates in pixels.
        size: Image (width, height) in pixels.

    Returns:
        The four coordinates scaled to 0-1000, truncated to ints.
    """
    width, height = size
    # x coordinates divide by width, y coordinates by height.
    divisors = (width, height, width, height)
    return [int(1000 * coord / dim) for coord, dim in zip(bbox, divisors)]
24
-
25
# Module-level logger following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# BibTeX entry for the FUNSD paper (Jaume et al., ICDARW 2019).
_CITATION = """\
@article{Jaume2019FUNSDAD,
title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
year={2019},
volume={2},
pages={1-6}
}
"""

# Dataset homepage; also used as the short description in DatasetInfo.
_DESCRIPTION = """\
https://guillaumejaume.github.io/FUNSD/
"""
42
-
43
-
44
class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD."""

    def __init__(self, **kwargs):
        """BuilderConfig for FUNSD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() — the file already uses f-strings, so
        # Python 3.6+ syntax is available; the legacy two-argument form
        # is redundant here.
        super().__init__(**kwargs)
54
-
55
-
56
class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD dataset builder: tokens, word boxes, and BIO NER tags for
    form understanding in noisy scanned documents (LayoutLMv3 variant
    with segment-level bounding boxes and the page image)."""

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]

    def _info(self):
        """Return dataset metadata: features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="https://guillaumejaume.github.io/FUNSD/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the FUNSD archive; return train/test splits."""
        downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Merge the word boxes of one text line into a shared enclosing box.

        Args:
            bboxs: Non-empty list of normalized [x0, y0, x1, y1] word boxes.

        Returns:
            A list of the same length where every entry is the single box
            enclosing all inputs — LayoutLMv3 uses segment-level positions,
            so each word on a line shares the line's box.
        """
        # Even indices of each box are x coordinates, odd indices are y.
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs for one split.

        Args:
            filepath: Split root directory containing `annotations/`
                (JSON files) and `images/` (matching PNG files).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # Swap only the file extension; the previous
            # str.replace("json", "png") would also corrupt any path
            # component that happened to contain the substring "json".
            image_path = os.path.splitext(os.path.join(img_dir, file))[0] + ".png"
            image, size = load_image(image_path)
            for item in data["form"]:
                cur_line_bboxes = []
                words, label = item["words"], item["label"]
                # Drop whitespace-only words before tagging.
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label == "other":
                    # "other" segments: every word gets the outside tag.
                    for w in words:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # BIO scheme: first word is B-<LABEL>, the rest I-<LABEL>.
                    tokens.append(words[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                    for w in words[1:]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                # Replace per-word boxes with the shared line-level box.
                cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
                bboxes.extend(cur_line_bboxes)
            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image": image}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
funsd/funsd-layoutlmv3-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbc79890390ce9800cf3b7ca6836fe9ba80f4b18255e78e59e2b6412a3095152
3
+ size 9537996
funsd/funsd-layoutlmv3-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c7c349594ef6142fa97ec16d87d9019c4c1b4aae95fa56ef15e792639e112d0
3
+ size 26288911