# europarl-ner / europarl-ner.py
# coding=utf-8
"""The HF Datasets adapter for Evaluation Corpus for Named Entity Recognition using Europarl"""
import datasets
# BibTeX entry for the paper introducing the corpus (Agerri et al., LREC 2018).
_CITATION = """@inproceedings{agerri-etal-2018-building,
title = "Building Named Entity Recognition Taggers via Parallel Corpora",
author = "Agerri, Rodrigo and
Chung, Yiling and
Aldabe, Itziar and
Aranberri, Nora and
Labaka, Gorka and
Rigau, German",
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Cieri, Christopher and
Declerck, Thierry and
Goggi, Sara and
Hasida, Koiti and
Isahara, Hitoshi and
Maegaard, Bente and
Mariani, Joseph and
Mazo, H{\'e}l{\`e}ne and
Moreno, Asuncion and
Odijk, Jan and
Piperidis, Stelios and
Tokunaga, Takenobu",
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
month = may,
year = "2018",
address = "Miyazaki, Japan",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L18-1557",
}"""
# Human-readable summary shown on the dataset card.
_DESCRIPTION = """This dataset contains a gold-standard test set created from the
Europarl corpus. The test set consists of 799 sentences manually annotated using
four entity types and following the CoNLL 2002 and 2003 guidelines for 4 languages:
English, German, Italian and Spanish."""
# Per-language raw CoNLL-2002 test files in the upstream GitHub repository.
_DATA_URLs = {
"en": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/en-europarl.test.conll02",
"de": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/de-europarl.test.conll02",
"es": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/es-europarl.test.conll02",
"it": "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl/raw/master/it-europarl.test.conll02",
}
# Upstream project homepage.
_HOMEPAGE = "https://github.com/ixa-ehu/ner-evaluation-corpus-europarl"
# Dataset version string, reused by every builder config below.
_VERSION = "1.0.0"
# Languages covered; each becomes one builder config name.
_LANGS = ["en", "de", "es", "it"]
class EuroparlNERConfig(datasets.BuilderConfig):
    """BuilderConfig for one EuroparlNER language.

    Forwards all keyword arguments to ``datasets.BuilderConfig`` while
    pinning the config version to the module-level ``_VERSION``.
    """

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
class EuroparlNER(datasets.GeneratorBasedBuilder):
    """EuroparlNER is a multilingual named entity recognition dataset consisting of
    a manually annotated part of the European Parliament Proceedings Parallel Corpus
    1996-2011 with LOC, PER, ORG and MISC tags."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        EuroparlNERConfig(
            name=lang, description=f"EuroparlNER examples in language {lang}"
        )
        for lang in _LANGS
    ]
    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Declare the features: token sequence plus aligned BIO tag sequence."""
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                            "B-MISC",
                            "I-MISC",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single per-language test file; only a TEST split exists."""
        lang = self.config.name
        dl_dir = dl_manager.download(_DATA_URLs[lang])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a CoNLL-2002 style file.

        Format: one token per line, token and tag tab-separated, sentences
        separated by blank lines.
        """
        guid_index = 1
        tokens = []
        ner_tags = []
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                if line == "" or line == "\n":
                    if tokens:
                        yield guid_index, {
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid_index += 1
                        tokens = []
                        ner_tags = []
                else:
                    # EuroparlNER data is tab separated; strip the newline
                    # before splitting so an untagged token is not stored
                    # with a trailing "\n" (bug in the original).
                    splits = line.rstrip("\n").split("\t")
                    tokens.append(splits[0])
                    if len(splits) > 1:
                        ner_tags.append(splits[1])
                    else:
                        # Examples without a label default to "O".
                        ner_tags.append("O")
            # Bug fix: emit the final sentence when the file does not end
            # with a blank line — the original silently dropped it.
            if tokens:
                yield guid_index, {
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }