holylovenia committed on
Commit
26a952b
1 Parent(s): c2e6187

Upload idner_news_2k.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. idner_news_2k.py +219 -0
idner_news_2k.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ A dataset of Indonesian News for Named-Entity Recognition task.
18
+ This dataset re-annotated the dataset previously provided by Syaifudin & Nurwidyantoro (2016)
19
+ (https://github.com/yusufsyaifudin/Indonesia-ner) with a more standardized NER tags.
20
+ There are three subsets, namely train.txt, dev.txt, and test.txt.
21
+ Each file consists of three columns which are Tokens, PoS Tag, and NER Tag respectively.
22
+ The format is following CoNLL dataset. The NER tag use the IOB format.
23
+ The PoS tag using UDPipe (http://ufal.mff.cuni.cz/udpipe),
24
+ a pipeline for tokenization, tagging, lemmatization and dependency parsing
25
+ whose model is trained on UD Treebanks.
26
+ """
27
+
28
+ from pathlib import Path
29
+ from typing import Dict, List, Tuple
30
+
31
+ import datasets
32
+ import pandas as pd
33
+
34
+ from seacrowd.utils import schemas
35
+ from seacrowd.utils.configs import SEACrowdConfig
36
+ from seacrowd.utils.constants import Licenses, Tasks
37
+
38
# BibTeX citation for the paper that introduced this re-annotated NER dataset.
_CITATION = """\
@inproceedings{khairunnisa-etal-2020-towards,
title = "Towards a Standardized Dataset on {I}ndonesian Named Entity Recognition",
author = "Khairunnisa, Siti Oryza and
Imankulova, Aizhan and
Komachi, Mamoru",
editor = "Shmueli, Boaz and
Huang, Yin Jou",
booktitle = "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics
and the 10th International Joint Conference on Natural Language Processing: Student Research Workshop",
month = dec,
year = "2020",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.aacl-srw.10",
pages = "64--71",
abstract = "In recent years, named entity recognition (NER) tasks in the Indonesian language
have undergone extensive development. There are only a few corpora for Indonesian NER;
hence, recent Indonesian NER studies have used diverse datasets. Although an open dataset is available,
it includes only approximately 2,000 sentences and contains inconsistent annotations,
thereby preventing accurate training of NER models without reliance on pre-trained models.
Therefore, we re-annotated the dataset and compared the two annotations{'} performance
using the Bidirectional Long Short-Term Memory and Conditional Random Field (BiLSTM-CRF) approach.
Fixing the annotation yielded a more consistent result for the organization tag and improved the prediction score
by a large margin. Moreover, to take full advantage of pre-trained models, we compared different feature embeddings
to determine their impact on the NER task for the Indonesian language.",
}
"""

# Canonical SEACrowd identifier for this dataset/dataloader.
_DATASETNAME = "idner_news_2k"

# Human-readable summary used in the generated DatasetInfo / dataset card.
_DESCRIPTION = """\
A dataset of Indonesian News for Named-Entity Recognition task.
This dataset re-annotated the dataset previously provided by Syaifudin & Nurwidyantoro (2016)
(https://github.com/yusufsyaifudin/Indonesia-ner) with a more standardized NER tags.
There are three subsets, namely train.txt, dev.txt, and test.txt.
Each file consists of three columns which are Tokens, PoS Tag, and NER Tag respectively.
The format is following CoNLL dataset. The NER tag use the IOB format.
The PoS tag using UDPipe (http://ufal.mff.cuni.cz/udpipe),
a pipeline for tokenization, tagging, lemmatization and dependency parsing
whose model is trained on UD Treebanks.
"""

# Upstream repository hosting the raw train/dev/test files.
_HOMEPAGE = "https://github.com/khairunnisaor/idner-news-2k"

# Languages covered, as ISO 639-3 codes ("ind" = Indonesian).
_LANGUAGES = ["ind"]

# License of the upstream data.
_LICENSE = Licenses.MIT.value

# False: the data is fetched from public URLs; no local copy is required.
_LOCAL = False

# Raw-file download URLs per split, keyed by dataset name.
_URLS = {
    _DATASETNAME: {
        "train": "https://raw.githubusercontent.com/khairunnisaor/idner-news-2k/main/train.txt",
        "dev": "https://raw.githubusercontent.com/khairunnisaor/idner-news-2k/main/dev.txt",
        "test": "https://raw.githubusercontent.com/khairunnisaor/idner-news-2k/main/test.txt",
    },
}

# SEACrowd task tags supported by this dataloader.
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

# Version of the original (source) data release.
_SOURCE_VERSION = "1.0.0"

# Version of the SEACrowd dataloader/schema release.
_SEACROWD_VERSION = "2024.06.20"
102
+
103
+
104
class IdNerNews2kDataset(datasets.GeneratorBasedBuilder):
    """Loader for the IDNER-News-2k Indonesian NER dataset.

    The train/dev/test files are CoNLL-style: one "token pos_tag ner_tag"
    triple per space-separated line, with sentences separated by blank lines.
    NER tags use the IOB scheme; PoS tags come from UDPipe.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "seq_label"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo; the feature set depends on the selected schema."""
        NAMED_ENTITIES = ["B-LOC", "I-LOC", "B-ORG", "I-ORG", "B-PER", "I-PER", "O"]
        POS_TAGS = ["PROPN", "AUX", "NUM", "NOUN", "ADP", "PRON", "VERB", "ADV", "ADJ", "PUNCT", "DET", "PART", "SCONJ", "CCONJ", "SYM", "X"]

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    # Fix: _generate_examples yields an "id" key, but it was
                    # missing from the declared features; declare it so the
                    # examples and the schema agree.
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(datasets.ClassLabel(names=POS_TAGS)),
                    "ner_tags": datasets.Sequence(datasets.ClassLabel(names=NAMED_ENTITIES)),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            # Shared SEACrowd sequence-labeling schema (id / tokens / labels).
            features = schemas.seq_label.features(NAMED_ENTITIES)
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the three split files and wire them to SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        train_path = dl_manager.download_and_extract(urls["train"])
        dev_path = dl_manager.download_and_extract(urls["dev"])
        test_path = dl_manager.download_and_extract(urls["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dev_path,
                    "split": "dev",
                },
            ),
        ]

    @staticmethod
    def _parse_conll(filepath: Path):
        """Yield one (tokens, pos_tags, ner_tags) triple per sentence in the file."""
        # skip_blank_lines=False keeps the empty separator rows (parsed as NaN)
        # so sentence boundaries remain detectable below.
        df = pd.read_csv(filepath, delimiter=" ", header=None, skip_blank_lines=False)

        tokens, pos_tags, ner_tags = [], [], []
        for _, row in df.iterrows():
            if pd.isnull(row[0]):
                # Blank separator line -> end of the current sentence.
                if tokens:
                    yield tokens, pos_tags, ner_tags
                    tokens, pos_tags, ner_tags = [], [], []
            else:
                tokens.append(row[0])
                pos_tags.append(row[1])
                ner_tags.append(row[2])

        # Bug fix: the original implementation only emitted a sentence when it
        # hit a blank line, silently dropping the last sentence of a file that
        # does not end with one. Flush any remainder here.
        if tokens:
            yield tokens, pos_tags, ner_tags

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples for the configured schema.

        Both schemas share one parser (_parse_conll); example ids are
        sequential per-sentence indices, serialized as strings to match the
        declared Value("string") feature.
        """
        if self.config.schema == "source":
            for idx, (tokens, pos_tags, ner_tags) in enumerate(self._parse_conll(filepath)):
                yield idx, {"id": str(idx), "tokens": tokens, "pos_tags": pos_tags, "ner_tags": ner_tags}
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            # The seacrowd seq_label schema carries only tokens + NER labels
            # (PoS tags are intentionally dropped).
            for idx, (tokens, _, ner_tags) in enumerate(self._parse_conll(filepath)):
                yield idx, {"id": str(idx), "tokens": tokens, "labels": ner_tags}
        else:
            raise ValueError(f"Invalid config: {self.config.name}")