holylovenia committed on
Commit
a972941
1 Parent(s): fc711bc

Upload indo4b.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. indo4b.py +187 -0
indo4b.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from posixpath import split
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+
21
+ from nusacrowd.utils import schemas
22
+ from nusacrowd.utils.configs import NusantaraConfig
23
+ from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
24
+ DEFAULT_SOURCE_VIEW_NAME, Tasks)
25
+ import glob
26
+
27
# NusaCrowd identifiers for this dataset and its two schema views.
_DATASETNAME = "indo4b"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

# Corpus is downloaded from a public URL (see _URLS), not supplied locally.
_LOCAL = False
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

# BibTeX entry for the IndoNLU paper that introduced the Indo4B corpus.
_CITATION = """\
@inproceedings{wilie-etal-2020-indonlu,
    title = "{I}ndo{NLU}: Benchmark and Resources for Evaluating {I}ndonesian
    Natural Language Understanding",
    author = "Wilie, Bryan and
      Vincentio, Karissa and
      Winata, Genta Indra and
      Cahyawijaya, Samuel and
      Li, Xiaohong and
      Lim, Zhi Yuan and
      Soleman, Sidik and
      Mahendra, Rahmad and
      Fung, Pascale and
      Bahar, Syafri and
      Purwarianti, Ayu",
    booktitle = "Proceedings of the 1st Conference of the Asia-Pacific Chapter of the
    Association for Computational Linguistics and the 10th International Joint
    Conference on Natural Language Processing",
    month = dec,
    year = "2020",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.aacl-main.85",
    pages = "843--857",
    abstract = "Although Indonesian is known to be the fourth most frequently used language
    over the internet, the research progress on this language in natural language processing (NLP)
    is slow-moving due to a lack of available resources. In response, we introduce the first-ever vast
    resource for training, evaluation, and benchmarking on Indonesian natural language understanding
    (IndoNLU) tasks. IndoNLU includes twelve tasks, ranging from single sentence classification to
    pair-sentences sequence labeling with different levels of complexity. The datasets for the tasks
    lie in different domains and styles to ensure task diversity. We also provide a set of Indonesian
    pre-trained models (IndoBERT) trained from a large and clean Indonesian dataset (Indo4B) collected
    from publicly available sources such as social media texts, blogs, news, and websites.
    We release baseline models for all twelve tasks, as well as the framework for benchmark evaluation,
    thus enabling everyone to benchmark their system performances.",
}
"""

_DESCRIPTION = """\
Indo4B is a large-scale Indonesian self-supervised pre-training corpus
consists of around 3.6B words, with around 250M sentences. The corpus
covers both formal and colloquial Indonesian sentences compiled from
12 sources, of which two cover Indonesian colloquial language, eight
cover formal Indonesian language, and the rest have a mixed style of
both colloquial and formal.
"""

_HOMEPAGE = "https://github.com/IndoNLP/indonlu"

_LICENSE = "CC0"

# ISO639-3 -> ISO639-1 code map; only "ind" appears in _LANGUAGES above,
# so the other entries are unused here — presumably kept for consistency
# with sibling NusaCrowd loaders (TODO confirm).
_LANGUAGES_MAP = {
    "ind": "id",
    "jav": "jv",
    "sun": "su",
}

# Download location of the preprocessed, uncased corpus archive (.tar.xz).
_URLS = {
    "indo4b": "https://storage.googleapis.com/babert-pretraining/IndoNLU_finals/dataset/preprocessed/dataset_wot_uncased_blanklines.tar.xz",
}

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

# Version strings for the raw source view and the unified Nusantara view.
_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"
101
class Indo4B(datasets.GeneratorBasedBuilder):
    """Loader for Indo4B, a large-scale Indonesian self-supervised
    pre-training corpus of around 3.6B words and 250M sentences.

    Two configs are exposed: ``indo4b_source`` (raw ``{id, text}`` rows)
    and ``indo4b_nusantara_ssp`` (the unified Nusantara self-supervised
    pretraining schema). Both schemas yield identical records.
    """

    DEFAULT_CONFIG_NAME = "indo4b_source"

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="indo4b_source",
            version=_SOURCE_VERSION,
            description="Indo4B source schema",
            schema="source",
            subset_id="indo4b",
        ),
        NusantaraConfig(
            name="indo4b_nusantara_ssp",
            version=_NUSANTARA_VERSION,
            description="Indo4B Nusantara schema",
            schema="nusantara_ssp",
            subset_id="indo4b",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata; the feature spec depends on the schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            # Unified schema shared by all NusaCrowd SSP datasets.
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators. Indo4B provides only a train split."""
        url = _URLS["indo4b"]
        # The archive extracts into a 'processed_uncased_blanklines'
        # directory holding the plain-text corpus files.
        path = dl_manager.download_and_extract(url) + "/processed_uncased_blanklines"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        The 'source' and 'nusantara_ssp' schemas produce identical
        ``{id, text}`` records, so a single loop serves both (the original
        duplicated the same loop per schema). Blank lines are skipped, and
        files are read in sorted order so example ids are deterministic
        regardless of filesystem enumeration order.
        """
        if self.config.schema not in ("source", "nusantara_ssp"):
            # Fail loudly instead of silently yielding zero examples.
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        counter = 0
        for txt_path in sorted(glob.glob(f"{filepath}/*.txt")):
            with open(txt_path, encoding="utf-8") as f:
                for row in f:
                    text = row.strip()
                    if text:
                        yield counter, {
                            "id": str(counter),
                            "text": text,
                        }
                        counter += 1