holylovenia committed on
Commit
311a022
1 Parent(s): 02c51f9

Upload korpus_nusantara.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. korpus_nusantara.py +217 -0
korpus_nusantara.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import List
3
+
4
+ import re
5
+ import datasets
6
+ import pandas as pd
7
+
8
+ from nusacrowd.utils import schemas
9
+ from nusacrowd.utils.configs import NusantaraConfig
10
+ from nusacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_NUSANTARA_VIEW_NAME
11
+
12
_DATASETNAME = "korpus_nusantara"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

# Language-family codes paired with Indonesian ("ind", always first). Note that
# "khek" and "tiociu" are not ISO639-3 codes; see the grouping note further down.
_LANGUAGES = ["ind", "jav", "xdy", "bug", "sun", "mad", "bjn", "bbc", "khek", "msa", "min", "tiociu"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False
_CITATION = """\
@article{sujaini2020improving,
title={Improving the role of language model in statistical machine translation (Indonesian-Javanese)},
author={Sujaini, Herry},
journal={International Journal of Electrical and Computer Engineering},
volume={10},
number={2},
pages={2102},
year={2020},
publisher={IAES Institute of Advanced Engineering and Science}
}
"""

_DESCRIPTION = """\
This parallel corpus was collected from several studies, assignments, and thesis of
students of the Informatics Study Program, Tanjungpura University. Some of the corpus
are used in the translation machine from Indonesian to local languages http://nustor.untan.ac.id/cammane/.
This corpus can be used freely for research purposes by citing the paper
https://ijece.iaescore.com/index.php/IJECE/article/download/20046/13738.

The dataset is a combination of multiple machine translation works from the author,
Herry Sujaini, covering Indonesian to 25 local dialects in Indonesia. Since not all
dialects have ISO639-3 standard coding, as agreed with Pak Herry , we decided to
group the dataset into the closest language family, i.e.: Javanese, Dayak, Buginese,
Sundanese, Madurese, Banjar, Batak Toba, Khek, Malay, Minangkabau, and Tiociu.
"""

_HOMEPAGE = "https://github.com/herrysujaini/korpusnusantara"
_LICENSE = "Unknown"
# NOTE: the download URL genuinely contains a space ("korpus nusantara.xlsx");
# do not "fix" it or the download will 404.
_URLS = {
    _DATASETNAME: "https://github.com/herrysujaini/korpusnusantara/raw/main/korpus nusantara.xlsx",
}
_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


# Maintainer's note (kept as a bare string, as in the original): how the 25 raw
# dialect sheets in the workbook are grouped into language-family subsets.
"""
A collection of all the dialects are: javanese, javanese kromo, javanese ngoko, dayak ahe,
dayak iban, dayak pesaguan, dayak taman, buginese kelolau, buginese wajo, sundanese,
madurese, banjar, batak toba, khek pontianak, kapuas hulu, melayu kembayan, melayu ketapang,
melayu melawi, melayu pontianak, melayu putussibau, melayu sambas, melayu sintang, padang,
tiociu pontianak.

In this project, we group the dialects into several subsets:

Javanese (jav) : javanese, javanese kromo, javanese ngoko
Dayak (day) : dayak ahe, dayak iban, dayak pesaguan, dayak taman
Buginese (bug) : buginese kelolau, buginese wajo
Sundanese (sun) : sundanese
Madurese (mad) : madurese
Banjar (bjn) : banjar
Batak Toba (bbc) : batak toba
Khek (khek) : khek pontianak, kapuas hulu
Malay (msa) : melayu kembayan, melayu ketapang, melayu melawi, melayu pontianak, melayu putussibau, melayu sambas, melayu sintang
Minangkabau (min): padang
Tiociu (tiociu) : tiociu pontianak
"""

# Maps each language-family code to the workbook sheet names merged under it.
# The values are used as keys into the {sheet_name: DataFrame} dict returned by
# pd.read_excel(..., sheet_name=None), so capitalization must match the xlsx.
Domain2Subsets = {
    "jav": ['jawa', 'jawa kromo', 'jawa ngoko'],
    "xdy": ['dayak ahe', 'dayak iban', 'dayak pesaguan', 'dayak taman'],
    "bug": ['bugis kelolao', 'bugis wajo'],
    "sun": ['sunda'],
    "mad": ['madura'],
    "bjn": ['banjar'],
    "bbc": ['Batak'],
    "khek": ['kapuas hulu', 'Khek Pontianak'],
    "msa": ['melayu kembayan', 'melayu ketapang', 'melayu melawi', 'melayu pontianak', 'melayu putussibau', 'melayu sambas', 'melayu sintang'],
    "min": ['padang'],
    "tiociu": ['Tiociu Pontianak'],
}
91
+
92
class KorpusNusantara(datasets.GeneratorBasedBuilder):
    """Korpus Nusantara machine-translation dataset builder.

    Parallel sentences between Indonesian and 11 local language families of
    Indonesia (see ``Domain2Subsets``), collected from studies, assignments,
    and theses of students of Tanjungpura University.

    (The previous docstring was a copy-paste from the Bible En-Id loader and
    described the wrong dataset.)
    """

    # Four config families: ind->xx and xx->ind, each in the "source" and
    # "nusantara_t2t" schemas. _LANGUAGES[0] is "ind" itself, so only
    # _LANGUAGES[1:] are paired with Indonesian.
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name=f"korpus_nusantara_ind_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"Korpus_Nusantara ind2{subset} source schema",
            schema="source",
            subset_id="korpus_nusantara",
        )
        for subset in _LANGUAGES[1:]
    ] + [
        NusantaraConfig(
            name=f"korpus_nusantara_ind_{subset}_nusantara_t2t",
            version=datasets.Version(_NUSANTARA_VERSION),
            description=f"Korpus_Nusantara ind2{subset} Nusantara schema",
            schema="nusantara_t2t",
            subset_id="korpus_nusantara",
        )
        for subset in _LANGUAGES[1:]
    ] + [
        NusantaraConfig(
            name=f"korpus_nusantara_{subset}_ind_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"Korpus_Nusantara {subset}2ind source schema",
            schema="source",
            subset_id="korpus_nusantara",
        )
        for subset in _LANGUAGES[1:]
    ] + [
        NusantaraConfig(
            name=f"korpus_nusantara_{subset}_ind_nusantara_t2t",
            version=datasets.Version(_NUSANTARA_VERSION),
            description=f"Korpus_Nusantara {subset}2ind Nusantara schema",
            schema="nusantara_t2t",
            subset_id="korpus_nusantara",
        )
        for subset in _LANGUAGES[1:]
    ]

    DEFAULT_CONFIG_NAME = "korpus_nusantara_jav_ind_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features
        else:
            # Fail fast with a clear error instead of the NameError the
            # original raised when `features` was never assigned.
            raise ValueError(f"Invalid schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        The dataset has no predetermined split, so everything is TRAIN.
        The download is a single xlsx workbook with one sheet per dialect.
        """
        urls = _URLS[_DATASETNAME]
        base_dir = Path(dl_manager.download(urls))
        data_files = {"train": base_dir}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                },
            ),
        ]

    def _merge_subsets(self, dfs, subsets, revert=False):
        """Concatenate the per-dialect sheets named in `subsets` into one frame.

        The first two columns of each sheet are renamed to ("text", "label"),
        or ("label", "text") when `revert` is True (local language -> ind
        direction). Returns None when `subsets` is falsy.
        """
        if not subsets:
            return None
        pair = ["label", "text"] if revert else ["text", "label"]
        frames = []
        for subset in subsets:
            sub_df = dfs[subset]
            sub_df.columns = pair + sub_df.columns.tolist()[2:]
            frames.append(sub_df)
        # Single concat instead of the original pairwise-concat loop, which
        # copied the accumulated frame on every iteration (quadratic).
        return pd.concat(frames, axis=0, sort=False)

    def get_domain_data(self, dfs):
        """Parse src/tgt languages from the config name and build the frame.

        Config names look like korpus_nusantara_<src>_<tgt>_<schema>.
        Returns (src_lang, tgt_lang, merged_dataframe).
        """
        domain = self.config.name
        matched_domain = re.findall(r"korpus_nusantara_.*?_.*?_", domain)

        assert len(matched_domain) == 1
        domain = matched_domain[0][:-1].replace("korpus_nusantara_", "").split("_")
        src_lang, tgt_lang = domain[0], domain[1]

        # One side is always "ind"; the non-ind side selects the sheet group.
        subsets = Domain2Subsets.get(src_lang if src_lang != "ind" else tgt_lang, None)
        return src_lang, tgt_lang, self._merge_subsets(dfs, subsets, revert=(src_lang != "ind"))

    def _generate_examples(self, filepath: Path):
        """Yields examples as (key, example) tuples."""
        # sheet_name=None loads every sheet into a {sheet_name: DataFrame} dict.
        dfs = pd.read_excel(filepath, sheet_name=None, header=None)
        src_lang, tgt_lang, df = self.get_domain_data(dfs)

        if self.config.schema == "source":
            for idx, row in enumerate(df.itertuples()):
                yield idx, {
                    "id": str(idx),
                    "text": row.text,
                    "label": row.label,
                }
        elif self.config.schema == "nusantara_t2t":
            for idx, row in enumerate(df.itertuples()):
                yield idx, {
                    "id": str(idx),
                    "text_1": row.text,
                    "text_2": row.label,
                    "text_1_name": src_lang,
                    "text_2_name": tgt_lang,
                }
        else:
            raise ValueError(f"Invalid config: {self.config.name}")