Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Indonesian
Size: 10K<n<100K
License:
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- id_nergrit_corpus.py +36 -40
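For context (not stated in this commit, but consistent with the 1.16.0 release notes): loading scripts were migrated from dl_manager.download_and_extract plus os.path.join to dl_manager.download plus dl_manager.iter_archive, so archive members are read lazily and the dataset can be streamed without extracting the archive first. A minimal, self-contained sketch of that pattern, using a hypothetical archive URL:

from datasets import DownloadManager

# Hypothetical URL standing in for the real corpus archive.
URL = "https://example.com/nergrit-corpus.tgz"

dl_manager = DownloadManager()
archive = dl_manager.download(URL)  # downloads, but does not extract

# iter_archive lazily yields (path-inside-archive, binary file object) pairs.
for path, f in dl_manager.iter_archive(archive):
    if path.endswith("train_corrected.txt"):
        print(f.readline().decode("utf-8"))  # first "token tag" line
        break

The diffs below show this same pattern applied to id_nergrit_corpus.py.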
README.md CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
---
|
|
|
2 |
annotations_creators:
|
3 |
- expert-generated
|
4 |
language_creators:
|
|
|
1 |
---
|
2 |
+
pretty_name: Nergrit Corpus
|
3 |
annotations_creators:
|
4 |
- expert-generated
|
5 |
language_creators:
|
id_nergrit_corpus.py CHANGED
@@ -15,8 +15,6 @@
 """Nergrit Corpus"""
 
 
-import os
-
 import datasets
 
 
@@ -180,62 +178,60 @@ class IdNergritCorpus(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         my_urls = _URLs[0]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/train_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/train_corrected.txt",
                     "split": "train",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/test_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/test_corrected.txt",
                     "split": "test",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "nergrit-corpus/{}/data/valid_corrected.txt".format(self.config.name)
-                    ),
+                    "filepath": f"nergrit-corpus/{self.config.name}/data/valid_corrected.txt",
                     "split": "dev",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
-        [... old body not shown in the rendered diff ...]
+    def _generate_examples(self, filepath, split, files):
+        for path, f in files:
+            if path == filepath:
+                guid = 0
+                tokens = []
+                ner_tags = []
+                for line in f:
+                    splits = line.decode("utf-8").strip().split()
+                    if len(splits) != 2:
+                        if tokens:
+                            assert len(tokens) == len(ner_tags), "word len doesn't match label length"
+                            yield guid, {
+                                "id": str(guid),
+                                "tokens": tokens,
+                                "ner_tags": ner_tags,
+                            }
+                            guid += 1
+                            tokens = []
+                            ner_tags = []
+                    else:
+                        tokens.append(splits[0])
+                        ner_tags.append(splits[1].rstrip())
+                # last example
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "ner_tags": ner_tags,
+                }
+                break