holylovenia committed on
Commit
8bc5179
1 Parent(s): 9839b57

Upload indo4b_plus.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. indo4b_plus.py +180 -0
indo4b_plus.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from posixpath import split
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+
21
+ from nusacrowd.utils import schemas
22
+ from nusacrowd.utils.configs import NusantaraConfig
23
+ from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
24
+ DEFAULT_SOURCE_VIEW_NAME, Tasks)
25
+
26
# --- Dataset identity and view names used by the NusaCrowd loader machinery ---
_DATASETNAME = "indo4b_plus"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

# Corpus is fetched from a public URL, not supplied by the user locally.
_LOCAL = False
_LANGUAGES = ["ind", "sun", "jav"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

# BibTeX entry for the IndoNLG paper (EMNLP 2021) that introduced Indo4B-Plus.
_CITATION = """\
@inproceedings{cahyawijaya-etal-2021-indonlg,
    title = "{I}ndo{NLG}: Benchmark and Resources for Evaluating {I}ndonesian Natural Language Generation",
    author = "Cahyawijaya, Samuel  and
      Winata, Genta Indra  and
      Wilie, Bryan  and
      Vincentio, Karissa  and
      Li, Xiaohong  and
      Kuncoro, Adhiguna  and
      Ruder, Sebastian  and
      Lim, Zhi Yuan  and
      Bahar, Syafri  and
      Khodra, Masayu  and
      Purwarianti, Ayu  and
      Fung, Pascale",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.699",
    doi = "10.18653/v1/2021.emnlp-main.699",
    pages = "8875--8898",
    abstract = "Natural language generation (NLG) benchmarks provide an important avenue to measure progress
    and develop better NLG systems. Unfortunately, the lack of publicly available NLG benchmarks for low-resource
    languages poses a challenging barrier for building NLG systems that work well for languages with limited
    amounts of data. Here we introduce IndoNLG, the first benchmark to measure natural language generation (NLG)
    progress in three low-resource{---}yet widely spoken{---}languages of Indonesia: Indonesian, Javanese, and Sundanese.
    Altogether, these languages are spoken by more than 100 million native speakers, and hence constitute an important
    use case of NLG systems today. Concretely, IndoNLG covers six tasks: summarization, question answering, chit-chat,
    and three different pairs of machine translation (MT) tasks. We collate a clean pretraining corpus of Indonesian,
    Sundanese, and Javanese datasets, Indo4B-Plus, which is used to pretrain our models: IndoBART and IndoGPT.
    We show that IndoBART and IndoGPT achieve competitive performance on all tasks{---}despite using only one-fifth
    the parameters of a larger multilingual model, mBART-large (Liu et al., 2020). This finding emphasizes
    the importance of pretraining on closely related, localized languages to achieve more efficient learning and faster inference
    at very low-resource languages like Javanese and Sundanese.",
}
"""

_DESCRIPTION = """\
Indo4B-Plus is an extension of Indo4B, a large-scale Indonesian self-supervised pre-training corpus.
Indo4B-Plus extend Indo4B by adding two low-resource Indonesian local languages to the corpus, i.e., Sundanese and Javanese.
Indo4B-Plus adds 82,582,025 words (∼2.07%) of Sundanese sentences and 331,041,877 words (∼8.29%) of Javanese
"""

_HOMEPAGE = "https://github.com/IndoNLP/indonlu"

_LICENSE = "CC0"

# Map from ISO639-3 codes (used in _LANGUAGES) to the 2-letter codes.
# NOTE(review): this map is not referenced by the visible code below —
# presumably consumed by external NusaCrowd tooling; verify before removing.
_LANGUAGES_MAP = {
    "ind": "id",
    "jav": "jv",
    "sun": "su",
}

# Single zipped, preprocessed, uncased text file containing the whole corpus.
_URLS = {
    "indo4b": "https://storage.googleapis.com/babert-pretraining/IndoNLG_finals/IndoNLG_ALL_new_dataset_preprocessed_uncased.txt.zip",
}

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

# Versions for the two config schemas exposed in BUILDER_CONFIGS below.
_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"
97
+
98
class Indo4BPlus(datasets.GeneratorBasedBuilder):
    """Indo4B-Plus is a large-scale Indonesian self-supervised pre-training corpus consists
    of around 4B words, covering three languages, i.e., Indonesian, Sundanese, and Javanese."""

    DEFAULT_CONFIG_NAME = "indo4b_plus_source"

    # Two configs over the same underlying text: the raw "source" view and the
    # unified NusaCrowd self-supervised-pretraining ("nusantara_ssp") view.
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="indo4b_plus_source",
            version=_SOURCE_VERSION,
            description="Indo4B-Plus source schema",
            schema="source",
            subset_id="indo4b_plus",
        ),
        NusantaraConfig(
            name="indo4b_plus_nusantara_ssp",
            version=_NUSANTARA_VERSION,
            description="Indo4B-Plus Nusantara schema",
            schema="nusantara_ssp",
            subset_id="indo4b_plus",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata, selecting features by the active config schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        The corpus ships as a single zip containing one preprocessed text
        file, and is exposed as a single TRAIN split.
        """
        url = _URLS["indo4b"]
        # download_and_extract returns the extraction directory; the corpus is
        # the one known text file inside it.
        path = dl_manager.download_and_extract(url) + "/IndoNLG_ALL_new_dataset_preprocessed_uncased.txt"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Both the "source" and "nusantara_ssp" schemas produce the identical
        {"id", "text"} record, so one loop serves both (the original code
        duplicated the loop verbatim per schema). Blank lines are skipped;
        keys are the 0-based line numbers, so they stay unique even when
        blank lines are dropped.
        """
        # Unknown schemas intentionally yield nothing, matching the
        # original branch-per-schema behavior.
        if self.config.schema in ("source", "nusantara_ssp"):
            with open(filepath, encoding="utf-8") as f:
                for counter, row in enumerate(f):
                    text = row.strip()  # strip once instead of twice per line
                    if text:
                        yield counter, {
                            "id": str(counter),
                            "text": text,
                        }