holylovenia committed
Commit d80e97d
1 Parent(s): 2e8c122

Upload gatitos.py with huggingface_hub

Files changed (1):
  1. gatitos.py +140 -0
gatitos.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The GATITOS (Google's Additional Translations Into Tail-languages: Often Short) dataset is a high-quality, multi-way parallel dataset of tokens and short phrases.
The dataset consists of 4,000 English segments (4,500 tokens) that have been translated into each of 173 languages, 170 of which are low-resource and 23 of which are spoken in Southeast Asia.
It contains primarily short segments: 93% are single tokens, and only 23 sentences (0.6%) have over 5 tokens.
As such it is best thought of as a multilingual lexicon rather than a parallel training corpus.
The source text consists of frequent English words, along with some common phrases and short sentences.
Care has been taken to ensure good coverage of numbers, months, days of the week, Swadesh words, and names of the languages themselves (including the endonym).
"""
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@misc{jones2023bilex,
    title={Bilex Rx: Lexical Data Augmentation for Massively Multilingual Machine Translation},
    author={Alex Jones and Isaac Caswell and Ishank Saxena and Orhan Firat},
    year={2023},
    eprint={2303.15265},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DATASETNAME = "gatitos"

_DESCRIPTION = """\
The GATITOS (Google's Additional Translations Into Tail-languages: Often Short) dataset is a high-quality, multi-way parallel dataset of tokens and short phrases.
The dataset consists of 4,000 English segments (4,500 tokens) that have been translated into each of 173 languages, 170 of which are low-resource and 23 of which are spoken in Southeast Asia.
It contains primarily short segments: 93% are single tokens, and only 23 sentences (0.6%) have over 5 tokens.
As such it is best thought of as a multilingual lexicon rather than a parallel training corpus.
The source text consists of frequent English words, along with some common phrases and short sentences.
Care has been taken to ensure good coverage of numbers, months, days of the week, Swadesh words, and names of the languages themselves (including the endonym).
"""

_HOMEPAGE = "https://github.com/google-research/url-nlp/blob/main/gatitos/README.md"

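# ISO 639-3 codes for the 23 Southeast Asian languages covered by this loader.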
_LANGUAGES = ["ace", "ban", "bbc", "bew", "bjn", "bts", "btx", "bug", "cnh", "hil", "iba", "ilo", "kac", "lus", "mad", "mak", "meo", "min", "pag", "pam", "shn", "tet", "war"]

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

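# Each language pair is a two-column TSV on GitHub; the file names use "en"
# rather than the ISO 639-3 "eng" for English, hence the mapping in
# _split_generators below.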
_URLs = "https://raw.githubusercontent.com/google-research/url-nlp/main/gatitos/{src}_{tgt}.tsv"

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class GATITOSDataset(datasets.GeneratorBasedBuilder):
    """The GATITOS (Google's Additional Translations Into Tail-languages: Often Short) dataset is a high-quality, multi-way parallel dataset of tokens and short phrases."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

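    # Two directions (eng -> lang and lang -> eng) x two schemas (source,
    # seacrowd_t2t) for each of the 23 languages: 92 configs in total.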
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{src_lang}_{tgt_lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{src_lang}_{tgt_lang}",
        )
        for (src_lang, tgt_lang) in [("eng", lang) for lang in _LANGUAGES] + [(lang, "eng") for lang in _LANGUAGES]
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{src_lang}_{tgt_lang}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}_{src_lang}_{tgt_lang}",
        )
        for (src_lang, tgt_lang) in [("eng", lang) for lang in _LANGUAGES] + [(lang, "eng") for lang in _LANGUAGES]
    ]

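    # Note: no config named "gatitos_source" is generated above, so the default
    # points at the first English -> target source config instead.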
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_eng_{_LANGUAGES[0]}_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features({"id": datasets.Value("string"), "src_text": datasets.Value("string"), "tgt_text": datasets.Value("string")})

        elif self.config.schema == "seacrowd_t2t":
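            # The SEACrowd t2t schema provides: id, text_1, text_2, text_1_name, text_2_name.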
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

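        # subset_id has the form "gatitos_{src_lang}_{tgt_lang}".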
        _, src_lang, tgt_lang = self.config.subset_id.split("_")

        filepath = dl_manager.download_and_extract(_URLs.format(src=src_lang.replace("eng", "en"), tgt=tgt_lang.replace("eng", "en")))

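        # GATITOS provides a single file per language pair, so all examples
        # are exposed as one train split.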
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={"filepath": filepath, "src_lang": src_lang, "tgt_lang": tgt_lang},
            )
        ]

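    # Each TSV row holds one tab-separated "source<TAB>target" pair; examples
    # are keyed by their zero-based line index.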
    def _generate_examples(self, src_lang: str, tgt_lang: str, filepath: Path) -> Tuple[int, Dict]:
        if self.config.schema == "source":
            with open(filepath, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    src_text, tgt_text = row.strip().split("\t")
                    yield row_id, {"id": row_id, "src_text": src_text, "tgt_text": tgt_text}

        elif self.config.schema == "seacrowd_t2t":
            with open(filepath, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    src_text, tgt_text = row.strip().split("\t")
                    yield row_id, {"id": row_id, "text_1": src_text, "text_2": tgt_text, "text_1_name": src_lang, "text_2_name": tgt_lang}
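

if __name__ == "__main__":
    # Minimal smoke test, not part of the loader itself. A sketch assuming the
    # `datasets` and `seacrowd` packages are installed and that this file is
    # run directly; recent versions of `datasets` also require
    # `trust_remote_code=True` to execute a local loading script.
    dset = datasets.load_dataset(
        __file__,
        name=f"{_DATASETNAME}_eng_ilo_source",
        split="train",
        trust_remote_code=True,
    )
    # Prints a dict like {"id": "0", "src_text": ..., "tgt_text": ...}.
    print(dset[0])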