Commit 67d795c (parent: 746da61)
holylovenia committed

Upload indolem_ud_id_gsd.py with huggingface_hub

Files changed (1):
  1. indolem_ud_id_gsd.py +223 -0

indolem_ud_id_gsd.py ADDED
@@ -0,0 +1,223 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Dataset loading script for the IndoLEM UD Indonesian-GSD treebank.
+
+ The treebank is distributed as CoNLL-U files and is exposed through two
+ schemas: `source`, which preserves the raw CoNLL-U fields, and `nusantara_kb`,
+ the Nusantara knowledge-base schema used across the NusaCrowd repository.
+ """
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.common_parser import load_ud_data, load_ud_data_as_nusantara_kb
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{mcdonald-etal-2013-universal,
+     title = "{U}niversal {D}ependency Annotation for Multilingual Parsing",
+     author = {McDonald, Ryan and
+       Nivre, Joakim and
+       Quirmbach-Brundage, Yvonne and
+       Goldberg, Yoav and
+       Das, Dipanjan and
+       Ganchev, Kuzman and
+       Hall, Keith and
+       Petrov, Slav and
+       Zhang, Hao and
+       T{\"a}ckstr{\"o}m, Oscar and
+       Bedini, Claudia and
+       Bertomeu Castell{\'o}, N{\'u}ria and
+       Lee, Jungmee},
+     booktitle = "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
+     month = aug,
+     year = "2013",
+     address = "Sofia, Bulgaria",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/P13-2017",
+     pages = "92--97",
+ }
+
+ @article{DBLP:journals/corr/abs-2011-00677,
+     author = {Fajri Koto and
+       Afshin Rahimi and
+       Jey Han Lau and
+       Timothy Baldwin},
+     title = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language
+       Model for Indonesian {NLP}},
+     journal = {CoRR},
+     volume = {abs/2011.00677},
+     year = {2020},
+     url = {https://arxiv.org/abs/2011.00677},
+     eprinttype = {arXiv},
+     eprint = {2011.00677},
+     timestamp = {Fri, 06 Nov 2020 15:32:47 +0100},
+     biburl = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib},
+     bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+
+ _DATASETNAME = "indolem_ud_id_gsd"
+
+ _DESCRIPTION = """\
+ The Indonesian-GSD treebank consists of 5598 sentences and 122k words, split into train/dev/test of 97k/12k/11k words.
+ The treebank was originally converted from the content-head version of the universal dependency treebank v2.0 (legacy) in 2015.
+ To comply with the latest Indonesian annotation guidelines, the treebank underwent a major revision between UD releases v2.8 and v2.9 (2021).
+ """
+
+ _HOMEPAGE = "https://indolem.github.io/"
+
+ _LICENSE = "Creative Commons Attribution 4.0"
+
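+ # Raw CoNLL-U files for each split, served from the IndoLEM GitHub repository.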
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-train.conllu",
+         "validation": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-dev.conllu",
+         "test": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-test.conllu",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.DEPENDENCY_PARSING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class IndolemUdIdGsdDataset(datasets.GeneratorBasedBuilder):
+     """The Indonesian-GSD treebank, part of the Universal Dependencies project, consisting of 5598 sentences and 122k words."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         NusantaraConfig(
+             name=f"{_DATASETNAME}_nusantara_kb",
+             version=NUSANTARA_VERSION,
+             description=f"{_DATASETNAME} Nusantara KB schema",
+             schema="nusantara_kb",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     # metadata
+                     "sent_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     # tokens
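+                     # (one list entry per token; these ten fields mirror the ten
+                     # columns of the CoNLL-U format, https://universaldependencies.org/format.html)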
+                     "id": [datasets.Value("string")],
+                     "form": [datasets.Value("string")],
+                     "lemma": [datasets.Value("string")],
+                     "upos": [datasets.Value("string")],
+                     "xpos": [datasets.Value("string")],
+                     "feats": [datasets.Value("string")],
+                     "head": [datasets.Value("string")],
+                     "deprel": [datasets.Value("string")],
+                     "deps": [datasets.Value("string")],
+                     "misc": [datasets.Value("string")],
+                 }
+             )
+
+         elif self.config.schema == "nusantara_kb":
+             features = schemas.kb_features
+
+         else:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[self.config.subset_id]
+         data_dir = dl_manager.download(urls)
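+         # `download` preserves the structure of `urls`, so `data_dir` is a dict
+         # mapping "train"/"validation"/"test" to local file paths.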
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
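+         # Pick the parser for the active schema: `load_ud_data` yields one dict per
+         # CoNLL-U sentence in the source layout, while `load_ud_data_as_nusantara_kb`
+         # converts each sentence into the Nusantara KB representation.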
+
+         try:
+             generator_fn = {
+                 "source": load_ud_data,
+                 "nusantara_kb": load_ud_data_as_nusantara_kb,
+             }[self.config.schema]
+         except KeyError:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         for key, example in enumerate(generator_fn(filepath)):
+             yield key, example
+
+
+ # if __name__ == "__main__":
+ #     datasets.load_dataset(__file__)