holylovenia committed
Commit 39fcc3a
Parent: f1bbf19

Upload cc100.py with huggingface_hub

Files changed (1): cc100.py (+51 -34)
cc100.py CHANGED
@@ -26,24 +26,24 @@ corpus.
 
 This contains the Indonesian (ind), the Javanese (jav), and the Sundanese (sun) subset.
 
-[nusantara_schema_name] = ssp
+[seacrowd_schema_name] = ssp
 """
 
-from posixpath import split
 from typing import Dict, List, Tuple
 
 import datasets
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
-                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
+                                      DEFAULT_SOURCE_VIEW_NAME, Tasks, TASK_TO_SCHEMA)
 
 _DATASETNAME = "cc100"
 _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
-_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
 
-_LANGUAGES = ["ind", "jav", "sun"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+# We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+_LANGUAGES = ["ind", "jav", "sun", "mya", "mya_zaw", "lao", "khm", "tgl", "vie", "tha", "zlm"]
 _LOCAL = False
 
 _CITATION = """\
@@ -135,9 +135,17 @@ _HOMEPAGE = "https://data.statmt.org/cc-100/"
 _LICENSE = "MIT"
 
 _LANGUAGES_MAP = {
-    "ind": "id",
-    "jav": "jv",
-    "sun": "su",
+    "ind": "id",  # Indonesian
+    "jav": "jv",  # Javanese
+    "sun": "su",  # Sundanese
+    "mya": "my",  # Burmese
+    "mya_zaw": "my_zaw",  # Burmese (Zawgyi)
+    "lao": "lo",  # Lao
+    "khm": "km",  # Central Khmer, Khmer
+    "tgl": "tl",  # Tagalog
+    "vie": "vi",  # Vietnamese
+    "tha": "th",  # Thai
+    "zlm": "ms",  # Malay
 }
 
 _URLS = {
@@ -146,19 +154,28 @@ _URLS = {
 
 _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
 
+_SEACROWD_SCHEMA_NAME = TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()
+
 _SOURCE_VERSION = "2018.12.01"
 
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
+
 
-def nusantara_config_constructor(lang, schema, version):
-    """Construct NusantaraConfig with cc100_{lang}_{schema} as the name format."""
-    if schema != "source" and schema != "nusantara_ssp":
+def seacrowd_config_constructor(lang, schema, version):
+    """Construct SEACrowdConfig with cc100_{lang}_{schema} as the name format."""
+    if schema != "source" and schema != f"seacrowd_{_SEACROWD_SCHEMA_NAME}":
         raise ValueError(f"Invalid schema: {schema}")
 
     if lang == "":
-        raise ValueError(f"Language is required. Choose one of these languages: {_LANGUAGES}.")
+        return SEACrowdConfig(
+            name=f"cc100_{schema}",
+            version=datasets.Version(version),
+            description=f"CC100 with {schema} schema for all languages",
+            schema=schema,
+            subset_id="cc100",
+        )
     elif lang in _LANGUAGES:
-        return NusantaraConfig(
+        return SEACrowdConfig(
             name=f"cc100_{lang}_{schema}",
             version=datasets.Version(version),
             description=f"CC100 with {schema} schema for {lang} language",
@@ -171,14 +188,15 @@ def nusantara_config_constructor(lang, schema, version):
 
 class CC100(datasets.GeneratorBasedBuilder):
     """Monolingual Datasets from Web Crawl Data."""
-
-    DEFAULT_CONFIG_NAME = "cc100_jav_source"
-
-    BUILDER_CONFIGS = [
-        nusantara_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGES_MAP
-    ] + [
-        nusantara_config_constructor(lang, "nusantara_ssp", _NUSANTARA_VERSION) for lang in _LANGUAGES_MAP
-    ]
+
+    BUILDER_CONFIGS = (
+        [seacrowd_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGES_MAP]
+        + [seacrowd_config_constructor(lang, f"seacrowd_{_SEACROWD_SCHEMA_NAME}", _SEACROWD_VERSION) for lang in _LANGUAGES_MAP]
+        + [
+            seacrowd_config_constructor("", "source", _SOURCE_VERSION),
+            seacrowd_config_constructor("", f"seacrowd_{_SEACROWD_SCHEMA_NAME}", _SEACROWD_VERSION),
+        ]
+    )
 
     def _info(self) -> datasets.DatasetInfo:
         if self.config.schema == "source":
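With eleven entries in _LANGUAGES_MAP, the builder now registers 24 configs instead of the previous 6: a source and a seacrowd_ssp config per language, plus the two aggregate configs. A quick sanity check, under the same import assumption as above:

    from cc100 import CC100

    names = [cfg.name for cfg in CC100.BUILDER_CONFIGS]
    assert len(names) == 2 * 11 + 2  # 24 configs
    print(names[0], names[-1])  # cc100_ind_source ... cc100_seacrowd_ssp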
@@ -188,7 +206,7 @@ class CC100(datasets.GeneratorBasedBuilder):
                     "text": datasets.Value("string"),
                 }
             )
-        elif self.config.schema == "nusantara_ssp":
+        elif self.config.schema == f"seacrowd_{_SEACROWD_SCHEMA_NAME}":
             features = schemas.self_supervised_pretraining.features
 
         return datasets.DatasetInfo(
@@ -201,14 +219,13 @@ class CC100(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
         """Returns SplitGenerators."""
-
         split_name = self.config.name.split("_")
-        if split_name[1] == "source" or split_name[1] == "nusantara":
-            lang = _DEFAULT_LANGUAGE
+        if self.config.name == "cc100_source" or self.config.name == f"cc100_seacrowd_{_SEACROWD_SCHEMA_NAME}":
+            # Load all languages
+            path = dl_manager.download_and_extract([_URLS["train"].format(lang=_LANGUAGES_MAP[lang]) for lang in _LANGUAGES_MAP])
         else:
-            lang = split_name[1]
-        url = _URLS["train"].format(lang=_LANGUAGES_MAP[lang])
-        path = dl_manager.download_and_extract(url)
+            url = _URLS["train"].format(lang=_LANGUAGES_MAP[split_name[1]])
+            path = dl_manager.download_and_extract(url)
 
         return [
             datasets.SplitGenerator(
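Two details worth keeping in mind here. First, datasets.DownloadManager.download_and_extract mirrors the structure of its argument, so path is a list of local paths for the aggregate configs and a single path for a per-language config; downstream code has to account for both shapes. Second, the language lookup leans on the config-name layout, as a plain-Python illustration shows:

    # The second underscore-separated token is used as the language key.
    name = "cc100_vie_source"
    print(name.split("_")[1])  # "vie" -> _LANGUAGES_MAP["vie"] == "vi"

    # Caveat: a multi-token code such as "mya_zaw" splits into "mya" and
    # "zaw", so split("_")[1] alone would pick the wrong key for it.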
@@ -234,7 +251,7 @@ class CC100(datasets.GeneratorBasedBuilder):
                         "text": row.strip(),
                     },
                 )
-        elif self.config.schema == "nusantara_ssp":
+        elif self.config.schema == f"seacrowd_{_SEACROWD_SCHEMA_NAME}":
             for counter, row in enumerate(f):
                 if row.strip() != "":
                     yield (
@@ -243,4 +260,4 @@ class CC100(datasets.GeneratorBasedBuilder):
                         "id": str(counter),
                         "text": row.strip(),
                     },
-                )
+                )
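Taken together, the commit makes the loader usable both per language and for the full set. A hypothetical end-to-end call, assuming the script is saved locally as cc100.py and the CC100 data is reachable:

    import datasets

    # Single language, source schema (id/text records).
    vie = datasets.load_dataset("cc100.py", name="cc100_vie_source", split="train")

    # All eleven languages under the unified seacrowd ssp schema.
    all_ssp = datasets.load_dataset("cc100.py", name="cc100_seacrowd_ssp", split="train")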