ShkalikovOleh committed
Commit 9bd4e59 • 1 Parent(s): a69690d
Add loader script
This commit adds a loader script for the dataset, which downloads the data from the original GitHub repository and generates the HF dataset.
- europarl_ner_loader.py +137 -0
europarl_ner_loader.py
ADDED
@@ -0,0 +1,137 @@
# coding=utf-8

"""The HF Datasets adapter for Evaluation Corpus for Named Entity Recognition using Europarl"""

import datasets

_CITATION = """@inproceedings{agerri-etal-2018-building,
    title = "Building Named Entity Recognition Taggers via Parallel Corpora",
    author = "Agerri, Rodrigo and
      Chung, Yiling and
      Aldabe, Itziar and
      Aranberri, Nora and
      Labaka, Gorka and
      Rigau, German",
    editor = "Calzolari, Nicoletta and
      Choukri, Khalid and
      Cieri, Christopher and
      Declerck, Thierry and
      Goggi, Sara and
      Hasida, Koiti and
      Isahara, Hitoshi and
      Maegaard, Bente and
      Mariani, Joseph and
      Mazo, H{\'e}l{\`e}ne and
      Moreno, Asuncion and
      Odijk, Jan and
      Piperidis, Stelios and
      Tokunaga, Takenobu",
    booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
    month = may,
    year = "2018",
    address = "Miyazaki, Japan",
    publisher = "European Language Resources Association (ELRA)",
    url = "https://aclanthology.org/L18-1557",
}"""

_DESCRIPTION = """This dataset contains a gold-standard test set created from the
Europarl corpus. The test set consists of 799 sentences manually annotated using
four entity types and following the CoNLL 2002 and 2003 guidelines for 4 languages:
English, German, Italian and Spanish."""

_DATA_URLs = {
    "en": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/en-europarl.test.conll02",
    "de": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/de-europarl.test.conll02",
    "es": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/es-europarl.test.conll02",
    "it": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/it-europarl.test.conll02",
}
_HOMEPAGE = "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl"
_VERSION = "1.0.0"
_LANGS = ["en", "de", "es", "it"]


class EuroparlNERConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(EuroparlNERConfig, self).__init__(
            version=datasets.Version(_VERSION, ""), **kwargs
        )


class EuroparlNER(datasets.GeneratorBasedBuilder):
    """EuroparlNER is a multilingual named entity recognition dataset consisting of
    a manually annotated part of the European Parliament Proceedings Parallel Corpus
    1996-2011 with LOC, PER, ORG and MISC tags"""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        EuroparlNERConfig(
            name=lang, description=f"EuroparlNER examples in language {lang}"
        )
        for lang in _LANGS
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                            "B-MISC",
                            "I-MISC",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang = self.config.name
        dl_dir = dl_manager.download(_DATA_URLs[lang])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        guid_index = 1
        with open(filepath, encoding="utf-8") as f:
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    if tokens:
                        yield guid_index, {
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid_index += 1
                        tokens = []
                        ner_tags = []
                else:
                    # EuroparlNER data is tab separated
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    if len(splits) > 1:
                        ner_tags.append(splits[1].replace("\n", ""))
                    else:
                        # examples have no label in test set
                        ner_tags.append("O")
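
For reference, a minimal usage sketch of the new loader. This is not part of the commit: it assumes the script is reachable locally as europarl_ner_loader.py (or via the dataset's Hub repo id), and newer datasets releases may additionally require trust_remote_code=True for script-based loading.

    # Usage sketch (assumption: loader script available locally as europarl_ner_loader.py)
    from datasets import load_dataset

    # Build the German configuration; the script downloads de-europarl.test.conll02
    # from the original GitHub repository and exposes it as a single "test" split.
    dataset = load_dataset("europarl_ner_loader.py", name="de")

    example = dataset["test"][0]
    print(example["tokens"])    # list of token strings for the first sentence
    print(example["ner_tags"])  # parallel list of ClassLabel ids (O, B-PER, I-PER, ...)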