Peter Henderson committed on
Commit fb8ae3c
Parent: e9c1f5b

Fixes, getting it working.

Files changed (1)
  1. nllb_multi_domain.py +163 -0
nllb_multi_domain.py ADDED
@@ -0,0 +1,163 @@
# coding=utf-8
"""No Language Left Behind Multi-Domain Evaluation Dataset."""


import os
from collections import defaultdict
from itertools import permutations
from typing import List, Optional, Union

import datasets


_CITATION = """
@article{nllb2022,
  author = {NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Jeff Wang},
  title = {No Language Left Behind: Scaling Human-Centered Machine Translation},
  year = {2022}
}
"""

_DESCRIPTION = """\
NLLB Multi-Domain is a set of professionally translated sentences in the news, unscripted informal speech (chat), and health domains. It is designed to enable assessment of out-of-domain performance and to study domain adaptation for machine translation. Each domain has approximately 3,000 sentences.
"""

_HOMEPAGE = "https://github.com/facebookresearch/flores"

_LICENSE = "CC-BY-SA-4.0"

# Non-English sides of the released pairs; English (eng_Latn) is always the
# other side of each pair.
_LANGUAGES = [
    "ayr_Latn", "bho_Deva", "dyu_Latn", "fur_Latn", "rus_Cyrl", "wol_Latn"
]

_URLS = {
    "chat": "https://tinyurl.com/NLLBMDchat",
    "news": "https://tinyurl.com/NLLBMDnews",
    "health": "https://tinyurl.com/NLLBMDhealth",
}

_SPLITS = ["train", "valid", "test"]

_DOMAINS = ["chat", "news", "health"]

# Maps "eng_Latn-<lang>" -> domain -> split -> language -> relative file path
# inside the extracted archive for that domain.
_SENTENCES_PATHS = {
    f"eng_Latn-{lang}": {
        domain: {
            split: {
                lang: os.path.join("NLLB-MD", domain, f"{split}.eng_Latn-{lang}.{lang}"),
                "eng_Latn": os.path.join("NLLB-MD", domain, f"{split}.eng_Latn-{lang}.eng_Latn"),
            }
            for split in _SPLITS
        }
        for domain in _DOMAINS
    }
    for lang in _LANGUAGES
}
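# For example (illustrative), _SENTENCES_PATHS["eng_Latn-rus_Cyrl"]["news"]["train"]["rus_Cyrl"]
# resolves to "NLLB-MD/news/train.eng_Latn-rus_Cyrl.rus_Cyrl", and the matching
# English side lives at "NLLB-MD/news/train.eng_Latn-rus_Cyrl.eng_Latn".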


def _pairings(iterable, r=2):
    """Yield unique ordered pairings from `iterable`; currently unused here."""
    previous = tuple()
    for p in permutations(sorted(iterable), r):
        if p > previous:
            previous = p
            yield p
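# Example (illustrative): list(_pairings(["b", "a", "a"])) returns
#   [('a', 'a'), ('a', 'b'), ('b', 'a')]
# Repeated pairings compare equal to `previous` and are filtered out, since
# permutations of a sorted sequence arrive in non-decreasing order.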


class NLLBMultiDomainConfig(datasets.BuilderConfig):
    """BuilderConfig for the NLLB Multi-Domain dataset."""

    def __init__(self, lang: str, lang2: Optional[str] = None, **kwargs):
        """
        Args:
            lang: first language of the pair (always "eng_Latn" here).
            lang2: second language of the pair.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.lang = lang
        self.lang2 = lang2


class NLLBMultiDomain(datasets.GeneratorBasedBuilder):
    """NLLB-MD dataset."""

    BUILDER_CONFIGS = [
        NLLBMultiDomainConfig(
            name=f"eng_Latn-{lang}",
            description=f"NLLB-MD: {lang} subset.",
            lang="eng_Latn",
            lang2=lang,
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        features = {
            "id": datasets.Value("int32"),
            "domain": datasets.Value("string"),
        }
        if self.config.name != "all" and "-" not in self.config.name:
            # Monolingual config: a single sentence column.
            features["sentence"] = datasets.Value("string")
        elif "-" in self.config.name:
            # Bilingual config: one column per side of the pair.
            for lang in [self.config.lang, self.config.lang2]:
                features[f"sentence_{lang}"] = datasets.Value("string")
        else:
            # "all" config: one column per language.
            for lang in _LANGUAGES:
                features[f"sentence_{lang}"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
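    # For a pair config such as "eng_Latn-rus_Cyrl", _info() therefore declares:
    #   {"id": int32, "domain": string,
    #    "sentence_eng_Latn": string, "sentence_rus_Cyrl": string}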

    def _split_generators(self, dl_manager):
        # One archive per domain; dl_dir maps domain -> extracted directory.
        dl_dir = dl_manager.download_and_extract(_URLS)

        def _get_sentence_paths(split):
            if isinstance(self.config.lang, str) and isinstance(self.config.lang2, str):
                pair = f"{self.config.lang}-{self.config.lang2}"
                sentence_paths = [
                    os.path.join(dl_dir[domain], _SENTENCES_PATHS[pair][domain][split][lang])
                    for lang in (self.config.lang, self.config.lang2)
                    for domain in _DOMAINS
                ]
            else:
                raise ValueError("Please specify two languages.")
            return sentence_paths

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "sentence_paths": _get_sentence_paths(split),
                },
            )
            for split in _SPLITS
        ]
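    # Note: the order of `sentence_paths` (languages outermost, domains
    # innermost) must stay in sync with the (lang, domain) pairing order
    # rebuilt in _generate_examples below; the two lists are zipped together.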

    def _generate_examples(self, sentence_paths: Union[str, List[str]], langs: Optional[List[str]] = None):
        """Yields examples as (key, example) tuples."""
        if isinstance(sentence_paths, str):
            # Monolingual fallback: a single file, one sentence per line.
            with open(sentence_paths, "r", encoding="utf-8") as sentences_file:
                for id_, sentence in enumerate(sentences_file):
                    yield id_, {
                        "id": id_ + 1,
                        "sentence": sentence.strip(),
                    }
        else:
            # Bilingual case: read every (language, domain) file, then pair
            # the two sides line by line within each domain.
            sentences = defaultdict(dict)

            langs_domains = [
                (lang, domain)
                for lang in (self.config.lang, self.config.lang2)
                for domain in _DOMAINS
            ]

            _idx = 0
            for path, (lang, domain) in zip(sentence_paths, langs_domains):
                with open(path, "r", encoding="utf-8") as sent_file:
                    sentences[domain][lang] = [line.strip() for line in sent_file]
            for domain in _DOMAINS:
                for s1, s2 in zip(sentences[domain][self.config.lang], sentences[domain][self.config.lang2]):
                    _idx += 1
                    yield _idx, {
                        "id": _idx,
                        "domain": domain,
                        f"sentence_{self.config.lang}": s1,
                        f"sentence_{self.config.lang2}": s2,
                    }
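# A minimal usage sketch (assuming this script is saved locally as
# nllb_multi_domain.py, a `datasets` version that still supports local
# loading scripts, and reachable download mirrors):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("nllb_multi_domain.py", "eng_Latn-rus_Cyrl")
#   print(ds["valid"][0])
#   # -> {'id': 1, 'domain': 'chat', 'sentence_eng_Latn': '...',
#   #     'sentence_rus_Cyrl': '...'}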