holylovenia committed
Commit
85e4e3d
1 Parent(s): c91f030

Upload indonesian_madurese_bible_translation.py with huggingface_hub

indonesian_madurese_bible_translation.py ADDED
@@ -0,0 +1,180 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The Madurese Parallel Corpus Dataset was created by scraping content from the online Bible, resulting in 30,013 Indonesian-Madurese sentence pairs.
This corpus is distinct from a previous Madurese dataset, which was gathered from physical documents such as the Kamus Lengkap Bahasa Madura-Indonesia.
The dataset provides bilingual sentences, allowing for comparison between Indonesian and Madurese, and aims to supplement existing Madurese
corpora, enabling further research and development focused on regional languages in Indonesia. Unlike the prior dataset, which included information
such as lemmas, pronunciation, linguistic descriptions, part of speech, loanwords, dialects, and various structures, this corpus focuses primarily
on bilingual sentence pairs, potentially broadening the scope for linguistic studies and language technology advancements in Madurese.
"""
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import jsonlines

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{sulistyo2023autogenerated,
    author = {Sulistyo, Danang Arbian and Wibawa, Aji Prasetya and Prasetya, Didik Dwi and Nafalski, Andrew},
    title = {Autogenerated Indonesian-Madurese Parallel Corpus Dataset Using Neural Machine Translation},
    journal = {Available at SSRN 4644430},
    year = {2023},
    url = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4644430},
}
"""

_DATASETNAME = "indonesian_madurese_bible_translation"

_DESCRIPTION = """\
The Madurese Parallel Corpus Dataset was created by scraping content from the online Bible, resulting in 30,013 Indonesian-Madurese sentence pairs.
This corpus is distinct from a previous Madurese dataset, which was gathered from physical documents such as the Kamus Lengkap Bahasa Madura-Indonesia.
The dataset provides bilingual sentences, allowing for comparison between Indonesian and Madurese, and aims to supplement existing Madurese
corpora, enabling further research and development focused on regional languages in Indonesia. Unlike the prior dataset, which included information
such as lemmas, pronunciation, linguistic descriptions, part of speech, loanwords, dialects, and various structures, this corpus focuses primarily
on bilingual sentence pairs, potentially broadening the scope for linguistic studies and language technology advancements in Madurese.
"""

_HOMEPAGE = "https://data.mendeley.com/datasets/cgtg4bhrtf/3"
_LANGUAGES = ["ind", "mad"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LICENSE = Licenses.CC_BY_4_0.value
_LOCAL = False
_URLS = {
    _DATASETNAME: "https://prod-dcd-datasets-cache-zipfiles.s3.eu-west-1.amazonaws.com/cgtg4bhrtf-3.zip",
}
_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class IndonesianMadureseBibleTranslationDataset(datasets.GeneratorBasedBuilder):
    """This corpus consists of 30,013 Indonesian-Madurese sentence pairs scraped from the online Bible."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_t2t",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        data_dir = os.path.join(data_dir, "Bahasa Madura Corpus Dataset/Indonesian-Madurese Corpus")
        # Collect the per-book .txt files that make up the corpus.
        all_path = [os.path.join(data_dir, item) for item in os.listdir(data_dir) if item.endswith(".txt")]
        all_data = []
        idx = 0
        for path in all_path:
            data = self._read_txt(path)
            for line in data:
                # Each data line holds a tab-separated Indonesian/Madurese pair;
                # skip blank or malformed lines to avoid an IndexError on split.
                if "\t" in line:
                    all_data.append({"src": line.split("\t")[0], "tgt": line.split("\t")[1], "id": idx})
                    idx += 1
        # Consolidate all pairs into a single JSONL file read back by _generate_examples.
        self._write_jsonl(os.path.join(data_dir, "train.jsonl"), all_data)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema == "source":
            i = 0
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "src": each_data["src"],
                        "tgt": each_data["tgt"],
                    }
                    yield i, ex
                    i += 1

        elif self.config.schema == "seacrowd_t2t":
            i = 0
            with jsonlines.open(filepath) as f:
                for each_data in f.iter():
                    ex = {
                        "id": each_data["id"],
                        "text_1": each_data["src"].strip(),
                        "text_2": each_data["tgt"].strip(),
                        "text_1_name": "ind",
                        "text_2_name": "mad",
                    }
                    yield i, ex
                    i += 1

    def _write_jsonl(self, filepath, values):
        with jsonlines.open(filepath, "w") as writer:
            for line in values:
                writer.write(line)

    def _read_txt(self, filepath):
        # Read with an explicit encoding so non-ASCII Madurese text loads
        # consistently across platforms.
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
        return lines
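
A minimal usage sketch for this script (an illustration, not part of the commit: it assumes the `seacrowd` package is installed so the imports above resolve, that the script is saved locally under its own filename, and that your `datasets` version accepts `trust_remote_code=True` for script-based datasets):

    from datasets import load_dataset

    # Source schema: each example is {"id", "src", "tgt"}.
    ds = load_dataset(
        "indonesian_madurese_bible_translation.py",
        name="indonesian_madurese_bible_translation_source",
        trust_remote_code=True,
    )
    print(ds["train"][0])

    # SEACrowd t2t schema: {"id", "text_1", "text_2", "text_1_name", "text_2_name"},
    # with text_1 in Indonesian ("ind") and text_2 in Madurese ("mad").
    ds_t2t = load_dataset(
        "indonesian_madurese_bible_translation.py",
        name="indonesian_madurese_bible_translation_seacrowd_t2t",
        trust_remote_code=True,
    )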