holylovenia committed on
Commit
eaaf221
1 Parent(s): 3909ce3

Upload tico_19.py with huggingface_hub

Files changed (1)
  1. tico_19.py +298 -0
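
The commit message notes the file was pushed with the huggingface_hub client. As a minimal sketch of how such an upload is typically done (the repo id below is hypothetical, and an authenticated token is assumed):

from huggingface_hub import HfApi

api = HfApi()
# Upload the loader script to the root of the dataset repository.
api.upload_file(
    path_or_fileobj="tico_19.py",
    path_in_repo="tico_19.py",
    repo_id="holylovenia/tico_19",  # hypothetical repo id
    repo_type="dataset",
)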
tico_19.py ADDED
@@ -0,0 +1,298 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import csv
+ import os
+ import re
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ from translate.storage.tmx import tmxfile
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{anastasopoulos-etal-2020-tico,
+     title = "{TICO}-19: the Translation Initiative for {CO}vid-19",
+     author = {Anastasopoulos, Antonios and
+       Cattelan, Alessandro and
+       Dou, Zi-Yi and
+       Federico, Marcello and
+       Federmann, Christian and
+       Genzel, Dmitriy and
+       Guzm{\'a}n, Francisco and
+       Hu, Junjie and
+       Hughes, Macduff and
+       Koehn, Philipp and
+       Lazar, Rosie and
+       Lewis, Will and
+       Neubig, Graham and
+       Niu, Mengmeng and
+       {\"O}ktem, Alp and
+       Paquin, Eric and
+       Tang, Grace and
+       Tur, Sylwia},
+     booktitle = "Proceedings of the 1st Workshop on {NLP} for {COVID}-19 (Part 2) at {EMNLP} 2020",
+     month = dec,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2020.nlpcovid19-2.5",
+     doi = "10.18653/v1/2020.nlpcovid19-2.5",
+ }
+ """
+
+ # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LANGUAGES = ["ind", "ara", "spa", "fra", "hin", "por", "rus", "zho", "eng"]
+ _LOCAL = False
+ _SUPPORTED_LANG_PAIRS = [
+     ("ind", "ara"), ("ind", "spa"), ("ind", "fra"), ("ind", "hin"), ("ind", "por"), ("ind", "rus"), ("ind", "zho"), ("ind", "eng"),
+     ("ara", "ind"), ("spa", "ind"), ("fra", "ind"), ("hin", "ind"), ("por", "ind"), ("rus", "ind"), ("zho", "ind"), ("eng", "ind"),
+ ]
+
+ _LANG_CODE_MAP = {
+     "ind": "id",
+     "ara": "ar",
+     "spa": "es-LA",
+     "fra": "fr",
+     "hin": "hi",
+     "por": "pt-BR",
+     "rus": "ru",
+     "zho": "zh",
+     "eng": "en",
+ }
+
+ _DATASETNAME = "tico_19"
+
+ _DESCRIPTION = """\
+ TICO-19 (Translation Initiative for COVID-19) is sampled from a variety of public sources containing
+ COVID-19 related content, representing different domains (e.g., news, wiki articles, and others). TICO-19
+ includes 30 documents (3071 sentences, 69.7k words) translated from English into 36 languages: Amharic,
+ Arabic (Modern Standard), Bengali, Chinese (Simplified), Dari, Dinka, Farsi, French (European), Hausa,
+ Hindi, Indonesian, Kanuri, Khmer (Central), Kinyarwanda, Kurdish Kurmanji, Kurdish Sorani, Lingala,
+ Luganda, Malay, Marathi, Myanmar, Nepali, Nigerian Fulfulde, Nuer, Oromo, Pashto, Portuguese (Brazilian),
+ Russian, Somali, Spanish (Latin American), Swahili, Congolese Swahili, Tagalog, Tamil, Tigrinya, Urdu, Zulu.
+ """
+
+ _HOMEPAGE = "https://tico-19.github.io"
+
+ _LICENSE = "CC0"
+
+ _URLS = {
+     "evaluation": "https://tico-19.github.io/data/tico19-testset.zip",
+     "all": "https://tico-19.github.io/data/TM/all.{lang_pairs}.tmx.zip",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ def nusantara_config_constructor(lang_source, lang_target, schema, version):
+     """Construct a NusantaraConfig named tico_19_{lang_source}_{lang_target}_{schema}."""
+     if schema not in ("source", "nusantara_t2t"):
+         raise ValueError(f"Invalid schema: {schema}")
+
+     if lang_source == "" and lang_target == "":
+         return NusantaraConfig(
+             name="tico_19_{schema}".format(schema=schema),
+             version=datasets.Version(version),
+             description="tico_19 {schema} schema for the default language pair (eng-ind)".format(schema=schema),
+             schema=schema,
+             subset_id="tico_19",
+         )
+     else:
+         return NusantaraConfig(
+             name="tico_19_{src}_{tgt}_{schema}".format(src=lang_source, tgt=lang_target, schema=schema),
+             version=datasets.Version(version),
+             description="tico_19 {schema} schema for the {src}-{tgt} language pair".format(src=lang_source, tgt=lang_target, schema=schema),
+             schema=schema,
+             subset_id="tico_19",
+         )
+
+
+ class Tico19(datasets.GeneratorBasedBuilder):
+     """TICO-19 is an MT dataset sampled from a variety of public sources containing COVID-19 related content."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         nusantara_config_constructor(src, tgt, schema, version)
+         for src, tgt in [("", "")] + _SUPPORTED_LANG_PAIRS
+         for schema, version in zip(["source", "nusantara_t2t"], [_SOURCE_VERSION, _NUSANTARA_VERSION])
+     ]
+
+     DEFAULT_CONFIG_NAME = "tico_19_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "sourceLang": datasets.Value("string"),
+                     "targetLang": datasets.Value("string"),
+                     "sourceString": datasets.Value("string"),
+                     "targetString": datasets.Value("string"),
+                     "stringID": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "license": datasets.Value("string"),
+                     "translatorId": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         try:
+             lang_pairs_config = re.search("tico_19_(.+?)_(source|nusantara_t2t)", self.config.name).group(1)
+             lang_src, lang_tgt = lang_pairs_config.split("_")
+         except AttributeError:
+             # Default configs (e.g., "tico_19_source") carry no explicit language pair.
+             lang_src, lang_tgt = "eng", "ind"
+
+         lang_pairs = _LANG_CODE_MAP[lang_src] + "-" + _LANG_CODE_MAP[lang_tgt]
+
+         # The dev & test splits are only available for the eng-ind language pair.
+         if lang_pairs in ["en-id", "id-en"]:
+             data_dir = dl_manager.download_and_extract(_URLS["evaluation"])
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "filepath": os.path.join(data_dir, "tico19-testset", "test", "test.en-id.tsv"),
+                         "lang_source": lang_src,
+                         "lang_target": lang_tgt,
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "filepath": os.path.join(data_dir, "tico19-testset", "dev", "dev.en-id.tsv"),
+                         "lang_source": lang_src,
+                         "lang_target": lang_tgt,
+                     },
+                 ),
+             ]
+         else:
+             data_dir = dl_manager.download_and_extract(_URLS["all"].format(lang_pairs=lang_pairs))
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": os.path.join(data_dir, f"all.{lang_pairs}.tmx"),
+                         "lang_source": lang_src,
+                         "lang_target": lang_tgt,
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, filepath: Path, lang_source: str, lang_target: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             # The eng-ind language pair is provided in .tsv format.
+             if (lang_source == "eng" and lang_target == "ind") or (lang_source == "ind" and lang_target == "eng"):
+                 with open(filepath, encoding="utf-8") as f:
+                     reader = csv.reader(f, delimiter="\t", quotechar='"')
+                     for id_, row in enumerate(reader):
+                         # Skip the header row.
+                         if id_ == 0:
+                             continue
+                         if lang_source == "eng":
+                             source_lang = row[0]
+                             target_lang = row[1]
+                             source_string = row[2]
+                             target_string = row[3]
+                         else:
+                             source_lang = row[1]
+                             target_lang = row[0]
+                             source_string = row[3]
+                             target_string = row[2]
+                         yield id_, {
+                             "sourceLang": source_lang,
+                             "targetLang": target_lang,
+                             "sourceString": source_string,
+                             "targetString": target_string,
+                             "stringID": row[4],
+                             "url": row[5],
+                             "license": row[6],
+                             "translatorId": row[7],
+                         }
+
+             # All other language pairs are provided in .tmx format.
+             else:
+                 with open(filepath, "rb") as f:
+                     tmx_file = tmxfile(f)
+
+                 for id_, node in enumerate(tmx_file.unit_iter()):
+                     try:
+                         url = [text for text in node.xmlelement.itertext("prop")][0]
+                     except IndexError:
+                         # Some units carry no <prop> element.
+                         url = ""
+                     yield id_, {
+                         "sourceLang": _LANG_CODE_MAP[lang_source],
+                         "targetLang": _LANG_CODE_MAP[lang_target],
+                         "sourceString": node.source,
+                         "targetString": node.target,
+                         "stringID": node.getid(),
+                         "url": url,
+                         "license": "",
+                         "translatorId": "",
+                     }
+
+         elif self.config.schema == "nusantara_t2t":
+             if (lang_source == "eng" and lang_target == "ind") or (lang_source == "ind" and lang_target == "eng"):
+                 with open(filepath, encoding="utf-8") as f:
+                     reader = csv.reader(f, delimiter="\t", quotechar='"')
+                     for id_, row in enumerate(reader):
+                         # Skip the header row.
+                         if id_ == 0:
+                             continue
+                         if lang_source == "eng":
+                             source_string = row[2]
+                             target_string = row[3]
+                         else:
+                             source_string = row[3]
+                             target_string = row[2]
+                         yield id_, {
+                             "id": row[4],
+                             "text_1": source_string,
+                             "text_2": target_string,
+                             "text_1_name": lang_source,
+                             "text_2_name": lang_target,
+                         }
+             else:
+                 with open(filepath, "rb") as f:
+                     tmx_file = tmxfile(f)
+
+                 for id_, node in enumerate(tmx_file.unit_iter()):
+                     yield id_, {
+                         "id": node.getid(),
+                         "text_1": node.source,
+                         "text_2": node.target,
+                         "text_1_name": lang_source,
+                         "text_2_name": lang_target,
+                     }
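
To try the loader end to end, here is a minimal usage sketch, assuming the script sits in the working directory and a datasets version that still supports script-based loaders; the config names follow the tico_19_{src}_{tgt}_{schema} pattern produced by nusantara_config_constructor:

import datasets

# The default config ("tico_19_source") covers eng-ind and yields validation/test splits.
tico = datasets.load_dataset("tico_19.py", name="tico_19_source")
print(tico["test"][0]["sourceString"])

# Explicit pairs such as ind-ara are parsed from TMX and yield a single train split.
tico_t2t = datasets.load_dataset("tico_19.py", name="tico_19_ind_ara_nusantara_t2t")
print(tico_t2t["train"][0]["text_1"])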