# coding=utf-8
"""The HF Datasets adapter for Evaluation Corpus for Named Entity Recognition using Europarl"""
import datasets
_CITATION = """@inproceedings{agerri-etal-2018-building,
title = "Building Named Entity Recognition Taggers via Parallel Corpora",
author = "Agerri, Rodrigo and
Chung, Yiling and
Aldabe, Itziar and
Aranberri, Nora and
Labaka, Gorka and
Rigau, German",
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Cieri, Christopher and
Declerck, Thierry and
Goggi, Sara and
Hasida, Koiti and
Isahara, Hitoshi and
Maegaard, Bente and
Mariani, Joseph and
Mazo, H{\'e}l{\`e}ne and
Moreno, Asuncion and
Odijk, Jan and
Piperidis, Stelios and
Tokunaga, Takenobu",
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
month = may,
year = "2018",
address = "Miyazaki, Japan",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L18-1557",
}"""
_DESCRIPTION = """This dataset contains a gold-standard test set created from the
Europarl corpus. The test set consists of 799 sentences manually annotated using
four entity types and following the CoNLL 2002 and 2003 guidelines for 4 languages:
English, German, Italian and Spanish."""
_DATA_URLs = {
    "en": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/en-europarl.test.conll02",
    "de": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/de-europarl.test.conll02",
    "es": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/es-europarl.test.conll02",
    "it": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/it-europarl.test.conll02",
}
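
# Each raw file uses the CoNLL-2002 column format that _generate_examples below
# parses: one token per line, followed by a tab and its BIO tag, with a blank
# line separating sentences. A hypothetical fragment for illustration (not
# copied from the corpus):
#
#   European      B-ORG
#   Parliament    I-ORG
#   in            O
#   Strasbourg    B-LOC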
_HOMEPAGE = "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl"
_VERSION = "1.0.0"
_LANGS = ["en", "de", "es", "it"]


class EuroparlNERConfig(datasets.BuilderConfig):
    """BuilderConfig for EuroparlNER; one config per language."""

    def __init__(self, **kwargs):
        super(EuroparlNERConfig, self).__init__(
            version=datasets.Version(_VERSION, ""), **kwargs
        )


class EuroparlNER(datasets.GeneratorBasedBuilder):
    """EuroparlNER is a multilingual named entity recognition dataset consisting of
    a manually annotated portion of the European Parliament Proceedings Parallel
    Corpus 1996-2011, labeled with LOC, PER, ORG and MISC tags."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        EuroparlNERConfig(
            name=lang, description=f"EuroparlNER examples in language {lang}"
        )
        for lang in _LANGS
    ]
    DEFAULT_CONFIG_NAME = "en"
    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                            "B-MISC",
                            "I-MISC",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
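
    # ClassLabel stores the tags as integer ids in the order listed above,
    # e.g. "O" -> 0, "B-PER" -> 1, ..., "I-MISC" -> 8; the string names stay
    # recoverable via features["ner_tags"].feature.int2str.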
    def _split_generators(self, dl_manager):
        lang = self.config.name
        dl_dir = dl_manager.download(_DATA_URLs[lang])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir},
            ),
        ]
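
    # The corpus is evaluation-only, so each language exposes a single "test"
    # split; there are no train or validation files upstream.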
    def _generate_examples(self, filepath):
        guid_index = 1
        with open(filepath, encoding="utf-8") as f:
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # A blank line marks a sentence boundary
                    if tokens:
                        yield guid_index, {
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid_index += 1
                        tokens = []
                        ner_tags = []
                else:
                    # EuroparlNER data is tab separated: token<TAB>tag
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    if len(splits) > 1:
                        ner_tags.append(splits[1].replace("\n", ""))
                    else:
                        # lines with no tag column default to the "O" label
                        ner_tags.append("O")
            # Emit the final sentence in case the file lacks a trailing blank line
            if tokens:
                yield guid_index, {
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }