Datasets: Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- polyglot_ner.py +49 -58
README.md
CHANGED
```diff
@@ -1,4 +1,5 @@
 ---
+pretty_name: Polyglot-NER
 paperswithcode_id: polyglot-ner
 ---
 
```
polyglot_ner.py
CHANGED
```diff
@@ -16,9 +16,6 @@
 # Lint as: python3
 """The Polyglot-NER Dataset."""
 
-
-import os
-
 import datasets
 
 
@@ -76,16 +73,6 @@ _LANGUAGES = [
     "uk",
 ]
 
-_LANG_FILEPATHS = {
-    lang: os.path.join(
-        "acl_datasets",
-        lang,
-        "data" if lang != "zh" else "",  # they're all lang/data/lang_wiki.conll except "zh"
-        f"{lang}_wiki.conll",
-    )
-    for lang in _LANGUAGES
-}
-
 _DESCRIPTION = """\
 Polyglot-NER
 A training dataset automatically generated from Wikipedia and Freebase the task
@@ -107,10 +94,7 @@ class PolyglotNERConfig(datasets.BuilderConfig):
     def __init__(self, *args, languages=None, **kwargs):
         super().__init__(*args, version=datasets.Version(_VERSION, ""), **kwargs)
         self.languages = languages
-
-    @property
-    def filepaths(self):
-        return [_LANG_FILEPATHS[lang] for lang in self.languages]
+        assert all(lang in _LANGUAGES for lang in languages), f"Invalid languages. Please use a subset of {_LANGUAGES}"
 
 
 class PolyglotNER(datasets.GeneratorBasedBuilder):
```
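With `_LANG_FILEPATHS` and the `filepaths` property gone, language validation now happens up front in the config constructor. A quick sketch of the effect (hypothetical calls; `languages` is the `PolyglotNERConfig` kwarg shown above, and the error text comes from the new assertion):

```python
from datasets import load_dataset

# The named per-language configs work as before:
ds = load_dataset("polyglot_ner", "en", split="train")

# A typo in a custom language list now fails at config-construction time
# instead of surfacing later during generation:
#   load_dataset("polyglot_ner", languages=["english"])
#   AssertionError: Invalid languages. Please use a subset of [...]
```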
The final hunk (@@ -145,47 +129,54 @@) rewrites `_split_generators` and `_generate_examples` to stream examples directly out of the downloaded archive via `dl_manager.iter_archive`, which yields `(path, file object)` pairs, instead of resolving per-language paths inside an extracted copy. The new implementation:
```python
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive = dl_manager.download(_DATA_URL)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive)})
        ]

    def _generate_examples(self, files):
        languages = list(self.config.languages)
        sentence_counter = 0
        for path, f in files:
            if not languages:
                break
            if path.endswith("_wiki.conll"):
                lang = path.split("/")[1]
                if lang in languages:
                    languages.remove(lang)
                    current_words = []
                    current_ner = []
                    for row in f:
                        row = row.decode("utf-8").rstrip()
                        if row:
                            token, label = row.split("\t")
                            current_words.append(token)
                            current_ner.append(label)
                        else:
                            # New sentence
                            if not current_words:
                                # Consecutive empty lines will cause empty sentences
                                continue
                            assert len(current_words) == len(current_ner), "💔 between len of words & ner"
                            sentence = (
                                sentence_counter,
                                {
                                    "id": str(sentence_counter),
                                    "lang": lang,
                                    "words": current_words,
                                    "ner": current_ner,
                                },
                            )
                            sentence_counter += 1
                            current_words = []
                            current_ner = []
                            yield sentence
                    # Don't forget last sentence in dataset 🧐
                    if current_words:
                        yield sentence_counter, {
                            "id": str(sentence_counter),
                            "lang": lang,
                            "words": current_words,
                            "ner": current_ner,
                        }
```
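Because nothing is extracted to disk anymore, the script should also work in streaming mode, which is the point of the 1.16.0 archive-iteration refactor. A minimal sketch (assuming `datasets>=1.16.0`; the printed record shape follows the dataset's features and is shown here illustratively):

```python
from datasets import load_dataset

# Iterate over the train split without materializing the archive locally.
ds = load_dataset("polyglot_ner", "en", split="train", streaming=True)
print(next(iter(ds)))
# Illustrative output: {"id": "0", "lang": "en", "words": [...], "ner": [...]}
```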