holylovenia committed on
Commit
351a67d
1 Parent(s): 33e265f

Upload talpco.py with huggingface_hub

Files changed (1)
  1. talpco.py +160 -0
talpco.py ADDED
@@ -0,0 +1,160 @@
+ import os
+ from pathlib import Path
+ from typing import Dict, List
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @article{published_papers/22434604,
+     title = {TUFS Asian Language Parallel Corpus (TALPCo)},
+     author = {Hiroki Nomoto and Kenji Okano and David Moeljadi and Hideo Sawada},
+     journal = {言語処理学会 第24回年次大会 発表論文集},
+     pages = {436--439},
+     year = {2018}
+ }
+ @article{published_papers/22434603,
+     title = {Interpersonal meaning annotation for Asian language corpora: The case of TUFS Asian Language Parallel Corpus (TALPCo)},
+     author = {Hiroki Nomoto and Kenji Okano and Sunisa Wittayapanyanon and Junta Nomura},
+     journal = {言語処理学会 第25回年次大会 発表論文集},
+     pages = {846--849},
+     year = {2019}
+ }
+ """
+ _DATASETNAME = "talpco"
+ _DESCRIPTION = """\
+ The TUFS Asian Language Parallel Corpus (TALPCo) is an open parallel corpus consisting of Japanese sentences
+ and their translations into Korean, Burmese (Myanmar; the official language of the Republic of the Union of Myanmar),
+ Malay (the national language of Malaysia, Singapore and Brunei), Indonesian, Thai, Vietnamese and English.
+ """
+ _HOMEPAGE = "https://github.com/matbahasa/TALPCo"
+ _LOCAL = False
+ _LANGUAGES = ["eng", "ind", "jpn", "kor", "myn", "tha", "vie", "zsm"]
+ _LICENSE = "CC-BY 4.0"
+ _URLS = {
+     _DATASETNAME: "https://github.com/matbahasa/TALPCo/archive/refs/heads/master.zip",
+ }
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ def nusantara_config_constructor(lang_source, lang_target, schema, version):
+     """Construct a NusantaraConfig with talpco_{lang_source}_{lang_target}_{schema} as the name format"""
+     if schema != "source" and schema != "nusantara_t2t":
+         raise ValueError(f"Invalid schema: {schema}")
+
+     if lang_source == "" and lang_target == "":
+         return NusantaraConfig(
+             name="talpco_{schema}".format(schema=schema),
+             version=datasets.Version(version),
+             description="talpco with {schema} schema for all 7 language pairs from/to the ind language".format(schema=schema),
+             schema=schema,
+             subset_id="talpco",
+         )
+     else:
+         return NusantaraConfig(
+             name="talpco_{lang_source}_{lang_target}_{schema}".format(lang_source=lang_source, lang_target=lang_target, schema=schema),
+             version=datasets.Version(version),
+             description="talpco with {schema} schema for {lang_source} source language and {lang_target} target language".format(lang_source=lang_source, lang_target=lang_target, schema=schema),
+             schema=schema,
+             subset_id="talpco",
+         )
+
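+ # For illustration: nusantara_config_constructor("jpn", "ind", "source", "1.0.0")
+ # yields the config named "talpco_jpn_ind_source", while empty language codes
+ # yield the aggregate "talpco_source" / "talpco_nusantara_t2t" configs covering
+ # every pair from/to ind.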
+
+ class TALPCo(datasets.GeneratorBasedBuilder):
+     """TALPCo contains 1,372 parallel sentences in 8 languages"""
+
+     BUILDER_CONFIGS = (
+         [nusantara_config_constructor(lang1, lang2, "source", _SOURCE_VERSION) for lang1 in _LANGUAGES for lang2 in _LANGUAGES if lang1 != lang2]
+         + [nusantara_config_constructor(lang1, lang2, "nusantara_t2t", _NUSANTARA_VERSION) for lang1 in _LANGUAGES for lang2 in _LANGUAGES if lang1 != lang2]
+         + [nusantara_config_constructor("", "", "source", _SOURCE_VERSION), nusantara_config_constructor("", "", "nusantara_t2t", _NUSANTARA_VERSION)]
+     )
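+     # 8 languages give 8 * 7 = 56 ordered pairs, so the list above holds
+     # 56 configs per schema plus the 2 aggregate configs: 114 configs in total.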
+
+     DEFAULT_CONFIG_NAME = "talpco_jpn_ind_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source" or self.config.schema == "nusantara_t2t":
+             features = schemas.text2text_features
+         else:
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+         base_path = Path(dl_manager.download_and_extract(urls)) / "TALPCo-master"
+         data = {}
+         for lang in _LANGUAGES:
+             lang_file_name = "data_" + lang + ".txt"
+             lang_file_path = base_path / lang / lang_file_name
+             if os.path.isfile(lang_file_path):
+                 # Read explicitly as UTF-8 so the non-Latin scripts (jpn, kor,
+                 # tha, ...) load correctly regardless of the platform default.
+                 with open(lang_file_path, "r", encoding="utf-8") as file:
+                     data[lang] = file.read().strip("\n").split("\n")
+
+         # The corpus ships as one unsplit file per language, so everything
+         # goes into a single train split.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data": data,
+                     "split": "train",
+                 },
+             ),
+         ]
+
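+     # `data`, as passed to _generate_examples below, maps each available
+     # language code to its raw rows; each row is expected to look like
+     # "<sentence id>\t<sentence text>", which is how
+     # generate_language_pair_data parses it.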
+     def _generate_examples(self, data: Dict, split: str):
+         if self.config.schema != "source" and self.config.schema != "nusantara_t2t":
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         if self.config.name == "talpco_source" or self.config.name == "talpco_nusantara_t2t":
+             # Aggregate configs: load all 7 language pairs from/to the ind language
+             lang_target = "ind"
+             for lang_source in _LANGUAGES:
+                 if lang_source == lang_target:
+                     continue
+                 for language_pair_data in self.generate_language_pair_data(lang_source, lang_target, data):
+                     yield language_pair_data
+
+             lang_source = "ind"
+             for lang_target in _LANGUAGES:
+                 if lang_source == lang_target:
+                     continue
+                 for language_pair_data in self.generate_language_pair_data(lang_source, lang_target, data):
+                     yield language_pair_data
+         else:
+             # Pair-specific configs: recover the language codes from the config name
+             _, lang_source, lang_target = self.config.name.replace(f"_{self.config.schema}", "").split("_")
+             for language_pair_data in self.generate_language_pair_data(lang_source, lang_target, data):
+                 yield language_pair_data
+
+     def generate_language_pair_data(self, lang_source, lang_target, data):
+         # Index each side by sentence id; split on the first tab only, in case
+         # a sentence itself contains a tab.
+         dict_source = {}
+         for row in data[lang_source]:
+             sent_id, text = row.split("\t", 1)
+             dict_source[sent_id] = text
+
+         dict_target = {}
+         for row in data[lang_target]:
+             sent_id, text = row.split("\t", 1)
+             dict_target[sent_id] = text
+
+         # Align on sentence id; a missing translation on either side becomes None.
+         all_ids = set(dict_source) | set(dict_target)
+         dict_merged = {k: [dict_source.get(k), dict_target.get(k)] for k in all_ids}
+
+         for sent_id in sorted(all_ids):
+             ex = {
+                 "id": lang_source + "_" + lang_target + "_" + sent_id,
+                 "text_1": dict_merged[sent_id][0],
+                 "text_2": dict_merged[sent_id][1],
+                 "text_1_name": lang_source,
+                 "text_2_name": lang_target,
+             }
+             yield lang_source + "_" + lang_target + "_" + sent_id, ex
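
For reference, a minimal sketch of how this loader can be used, assuming the script is saved locally as talpco.py, the nusacrowd package is installed, and your datasets version supports script-based loading (recent versions may require trust_remote_code=True). Config names follow the talpco_{source}_{target}_{schema} pattern defined above.

    import datasets

    # Japanese -> Indonesian pair with the source schema (the default config).
    talpco = datasets.load_dataset("talpco.py", name="talpco_jpn_ind_source")

    row = talpco["train"][0]
    # row["text_1"] is the jpn sentence, row["text_2"] its ind translation,
    # and row["id"] is "jpn_ind_<sentence id>".
    print(row["id"], row["text_1"], row["text_2"])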