holylovenia committed
Commit d700fb4
1 Parent(s): 380af90

Upload etos.py with huggingface_hub
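
For reference, a commit like this can be produced programmatically with huggingface_hub. A minimal sketch (the local path and repo id below are illustrative assumptions, not taken from the commit):

from huggingface_hub import HfApi

api = HfApi()
# Each upload_file call creates one commit on the Hub.
api.upload_file(
    path_or_fileobj="etos.py",  # local script path (assumed)
    path_in_repo="etos.py",
    repo_id="SEACrowd/etos",  # target dataset repo (assumed)
    repo_type="dataset",
    commit_message="Upload etos.py with huggingface_hub",
)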

Files changed (1)
  1. etos.py +205 -0
etos.py ADDED
@@ -0,0 +1,205 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import conllu
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @INPROCEEDINGS{10053062,
+   author={Samsuri, Mukhlizar Nirwan and Yuliawati, Arlisa and Alfina, Ika},
+   booktitle={2022 5th International Seminar on Research of Information Technology and Intelligent Systems (ISRITI)},
+   title={A Comparison of Distributed, PAM, and Trie Data Structure Dictionaries in Automatic Spelling Correction for Indonesian Formal Text},
+   year={2022},
+   pages={525-530},
+   keywords={Seminars;Dictionaries;Data structures;Intelligent systems;Information technology;automatic spelling correction;distributed dictionary;non-word error;trie data structure;Partition Around Medoids},
+   doi={10.1109/ISRITI56927.2022.10053062},
+   url={https://ieeexplore.ieee.org/document/10053062},
+ }
+ """
+
+ _DATASETNAME = "etos"
+
+ _DESCRIPTION = """\
+ ETOS (Ejaan oTOmatiS) is a dataset for part-of-speech (POS) tagging of formal Indonesian
+ text. It consists of 200 sentences, with 4,323 tokens in total, annotated following the
+ CoNLL-U format.
+ """
+
+ _HOMEPAGE = "https://github.com/ir-nlp-csui/etos"
+
+ _LANGUAGES = ["ind"]
+
+ _LICENSE = Licenses.AGPL_3_0.value
+
+ _LOCAL = False
+
+ _URLS = "https://raw.githubusercontent.com/ir-nlp-csui/etos/main/gold_standard.conllu"
+
+ _SUPPORTED_TASKS = [Tasks.POS_TAGGING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ETOSDataset(datasets.GeneratorBasedBuilder):
+     """
+     ETOS is an Indonesian part-of-speech (POS) tagging dataset from https://github.com/ir-nlp-csui/etos.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     UPOS_TAGS = [
+         "NOUN",
+         "PUNCT",
+         "ADP",
+         "NUM",
+         "SYM",
+         "SCONJ",
+         "ADJ",
+         "PART",
+         "DET",
+         "CCONJ",
+         "PROPN",
+         "PRON",
+         "X",
+         "_",
+         "ADV",
+         "INTJ",
+         "VERB",
+         "AUX",
+     ]
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_seq_label",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} sequence labeling schema",
+             schema="seacrowd_seq_label",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "sent_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "lemmas": datasets.Sequence(datasets.Value("string")),
+                     "upos": datasets.Sequence(datasets.features.ClassLabel(names=self.UPOS_TAGS)),
+                     "xpos": datasets.Sequence(datasets.Value("string")),
+                     "feats": datasets.Sequence(datasets.Value("string")),
+                     "head": datasets.Sequence(datasets.Value("string")),
+                     "deprel": datasets.Sequence(datasets.Value("string")),
+                     "deps": datasets.Sequence(datasets.Value("string")),
+                     "misc": datasets.Sequence(datasets.Value("string")),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_seq_label":
+             features = schemas.seq_label_features(self.UPOS_TAGS)
+
+         else:
+             raise ValueError(f"Invalid schema: '{self.config.schema}'")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """
+         Returns SplitGenerators.
+         """
+
+         train_path = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": train_path,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """
+         Yields examples as (key, example) tuples.
+         """
+
+         with open(filepath, "r", encoding="utf-8") as data_file:
+             tokenlist = list(conllu.parse_incr(data_file))
+
+         for idx, sent in enumerate(tokenlist):
+             if "sent_id" in sent.metadata:
+                 sent_id = sent.metadata["sent_id"]
+             else:
+                 sent_id = idx
+
+             tokens = [token["form"] for token in sent]
+
+             if "text" in sent.metadata:
+                 txt = sent.metadata["text"]
+             else:
+                 txt = " ".join(tokens)
+
+             if self.config.schema == "source":
+                 yield idx, {
+                     "sent_id": str(sent_id),
+                     "text": txt,
+                     "tokens": tokens,
+                     "lemmas": [token["lemma"] for token in sent],
+                     "upos": [token["upos"] for token in sent],
+                     "xpos": [token["xpos"] for token in sent],
+                     "feats": [str(token["feats"]) for token in sent],
+                     "head": [str(token["head"]) for token in sent],
+                     "deprel": [str(token["deprel"]) for token in sent],
+                     "deps": [str(token["deps"]) for token in sent],
+                     "misc": [str(token["misc"]) for token in sent],
+                 }
+
+             elif self.config.schema == "seacrowd_seq_label":
+                 yield idx, {
+                     "id": str(sent_id),
+                     "tokens": tokens,
+                     "labels": [token["upos"] for token in sent],
+                 }
+
+             else:
+                 raise ValueError(f"Invalid schema: '{self.config.schema}'")
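
With the script uploaded, the dataset can be loaded through the standard datasets API. A minimal usage sketch, assuming the repo id SEACrowd/etos (the config names come from BUILDER_CONFIGS above; recent datasets releases require trust_remote_code=True for script-based datasets):

import datasets

# "etos_source" exposes the full CoNLL-U fields;
# "etos_seacrowd_seq_label" gives tokens with aligned UPOS labels.
dset = datasets.load_dataset(
    "SEACrowd/etos",  # repo id (assumed)
    name="etos_seacrowd_seq_label",
    trust_remote_code=True,
)

example = dset["train"][0]
print(example["tokens"])  # word forms for one sentence
print(example["labels"])  # aligned UPOS tags

The only split is "train" (see _split_generators), so all 200 sentences land there.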