|
|
|
""" No Language Left Behind Multi-Domain Evaluation Dataset |
|
""" |
|
|
|
import os |
|
import sys |
|
import datasets |
|
from collections import defaultdict |
|
from pathlib import Path |
|
from typing import Union, List, Optional |
|
|
|
|
|
# BibTeX entry for the NLLB paper; surfaced to users via
# `DatasetInfo.citation` in `_info`.
_CITATION = """
@article{nllb2022,
author = {NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Jeff Wang},
title = {No Language Left Behind: Scaling Human-Centered Machine Translation},
year = {2022}
}
"""

# Human-readable summary; surfaced via `DatasetInfo.description`.
_DESCRIPTION = """\
NLLB Multi Domain is a set of professionally-translated sentences in News, Unscripted informal speech, and Health domains. It is designed to enable assessment of out-of-domain performance and to study domain adaptation for machine translation. Each domain has approximately 3000 sentences.
"""

_HOMEPAGE = "https://github.com/facebookresearch/flores"

_LICENSE = "CC-BY-SA-4.0"

# Non-English languages covered by NLLB-MD; each is paired with eng_Latn
# to form the builder config names below (e.g. "eng_Latn-ayr_Latn").
_LANGUAGES = [
    "ayr_Latn", "bho_Deva", "dyu_Latn", "fur_Latn", "rus_Cyrl", "wol_Latn"
]

# One downloadable archive per domain.
_URLS = {
    "chat" : "https://tinyurl.com/NLLBMDchat",
    "news" : "https://tinyurl.com/NLLBMDnews",
    "health" : "https://tinyurl.com/NLLBMDhealth"
}

_SPLITS = ["train", "valid", "test"]

_DOMAINS = ["chat", "news", "health"]

# Nested lookup: "eng_Latn-<lang>" -> domain -> split -> language code ->
# relative path of the plain-text sentence file inside the extracted
# "NLLB-MD" archive (one sentence per line).
_SENTENCES_PATHS = {
    f"eng_Latn-{lang}": {
        domain : {
            split: {
                lang : os.path.join("NLLB-MD", domain, f"{split}.eng_Latn-{lang}.{lang}"),
                "eng_Latn" : os.path.join("NLLB-MD", domain, f"{split}.eng_Latn-{lang}.eng_Latn")
            }
            for split in _SPLITS
        } for domain in _DOMAINS
    } for lang in _LANGUAGES
}
|
|
|
|
|
|
|
from itertools import permutations |
|
|
|
def _pairings(iterable, r=2): |
|
previous = tuple() |
|
for p in permutations(sorted(iterable), r): |
|
if p > previous: |
|
previous = p |
|
yield p |
|
|
|
|
|
class NLLBMultiDomainConfig(datasets.BuilderConfig):
    """BuilderConfig for the NLLB Multi-Domain dataset."""

    def __init__(self, lang: str, lang2: Optional[str] = None, **kwargs):
        """
        Args:
            lang: first language code of the pair (here always "eng_Latn").
            lang2: second language code of the pair, or None. (Annotation
                fixed from the implicit-Optional `lang2: str = None`.)
            **kwargs: keyword arguments forwarded to super.
        """
        # Every config shares a single dataset version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.lang = lang
        self.lang2 = lang2
|
|
|
|
|
class NLLBMultiDomain(datasets.GeneratorBasedBuilder):
    """NLLB-MD dataset: professionally-translated eng_Latn-<lang> sentence
    pairs in three domains (chat, news, health)."""

    # One bilingual config per non-English language, named "eng_Latn-<lang>".
    BUILDER_CONFIGS = [
        NLLBMultiDomainConfig(
            name=f"eng_Latn-{lang}",
            description=f"NLLB-MD: {lang} subset.",
            lang="eng_Latn",
            lang2=lang
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        """Build the `DatasetInfo`; the feature set depends on the config name."""
        features = {
            "id": datasets.Value("int32"),
            "domain": datasets.Value("string")
        }
        if self.config.name != "all" and "-" not in self.config.name:
            # Monolingual config: a single "sentence" column.
            # NOTE(review): unreachable with the current BUILDER_CONFIGS,
            # which are all bilingual "eng_Latn-<lang>" pairs — same for the
            # "all" branch below.
            features["sentence"] = datasets.Value("string")
        elif "-" in self.config.name:
            # Bilingual config: one sentence column per language of the pair.
            for lang in [self.config.lang, self.config.lang2]:
                features[f"sentence_{lang}"] = datasets.Value("string")
        else:
            for lang in _LANGUAGES:
                features[f"sentence_{lang}"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the three per-domain archives and emit one
        SplitGenerator per split in `_SPLITS`."""
        dl_dir = dl_manager.download_and_extract(_URLS)

        def _get_sentence_paths(split):
            # Language-major ordering — all domains for `lang`, then all
            # domains for `lang2`. This MUST stay in sync with the
            # `langs_domains` ordering in `_generate_examples`.
            if isinstance(self.config.lang, str) and isinstance(self.config.lang2, str):
                sentence_paths = [
                    os.path.join(
                        dl_dir[domain],
                        _SENTENCES_PATHS[self.config.lang + "-" + self.config.lang2][domain][split][lang],
                    )
                    for lang in (self.config.lang, self.config.lang2)
                    for domain in _DOMAINS
                ]
            else:
                raise ValueError("Please specify two languages.")
            return sentence_paths

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "sentence_paths": _get_sentence_paths(split),
                }
            ) for split in _SPLITS
        ]

    def _generate_examples(self, sentence_paths: Union[str, List[str]], langs: Optional[List[str]] = None):
        """Yields examples as (key, example) tuples.

        The dataset files are UTF-8 text (Cyrillic, Devanagari, ...), so
        every `open` passes `encoding="utf-8"` explicitly — relying on the
        platform default encoding corrupts or crashes on non-UTF-8 locales.
        """
        if isinstance(sentence_paths, str):
            # Monolingual case: a single file, one sentence per line.
            # NOTE(review): this branch omits the "domain" feature declared
            # in `_info`; it is unreachable with the current bilingual
            # configs — confirm before enabling monolingual configs.
            with open(sentence_paths, "r", encoding="utf-8") as sentences_file:
                for id_, sentence in enumerate(sentences_file):
                    sentence = sentence.strip()
                    yield id_, {
                        "id": id_ + 1,
                        "sentence": sentence,
                    }
        else:
            # domain -> language code -> list of sentences (line-aligned
            # across the two languages of the pair).
            sentences = defaultdict(dict)

            # Mirrors the path ordering produced by `_get_sentence_paths`.
            langs_domains = [
                (lang, domain)
                for lang in (self.config.lang, self.config.lang2)
                for domain in _DOMAINS
            ]

            _idx = 0
            for path, (lang, domain) in zip(sentence_paths, langs_domains):
                with open(path, "r", encoding="utf-8") as sent_file:
                    sentences[domain][lang] = [l.strip() for l in sent_file]
            for domain in _DOMAINS:
                # Files are parallel corpora: line i of each language's file
                # is the same sentence, so zip pairs translations.
                for s1, s2 in zip(sentences[domain][self.config.lang], sentences[domain][self.config.lang2]):
                    _idx += 1
                    yield _idx, {
                        "id": _idx,
                        "domain": domain,
                        f"sentence_{self.config.lang}": s1,
                        f"sentence_{self.config.lang2}": s2
                    }
|
|
|
|
|
|