RaymondAISG committed on
Commit
2aba7af
1 Parent(s): e520af1

Upload SEA_mC4.py with huggingface_hub
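
For context, an upload like this is typically done with the huggingface_hub client. A minimal sketch, assuming the script sits in the current directory and the target is the aisingapore/sea-pile dataset repo (both assumptions, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login`

# Push the loading script to the dataset repo; repo_id and paths are assumptions.
api.upload_file(
    path_or_fileobj="SEA_mC4.py",
    path_in_repo="SEA_mC4.py",
    repo_id="aisingapore/sea-pile",
    repo_type="dataset",
    commit_message="Upload SEA_mC4.py with huggingface_hub",
)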

Files changed (1)
  1. SEA_mC4.py +97 -0
SEA_mC4.py ADDED
@@ -0,0 +1,97 @@
+ """South East Asia mC4 dataset."""
+ import gzip
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+ _DESCRIPTION = """
+ South East Asia mC4 dataset."""
+ _CITATION = """EMPTY"""
+ _URL = "EMPTY"
+ _DATA_URL = "https://huggingface.co/datasets/aisingapore/sea-pile/resolve/main/sea-pile-mc4/{language}/mc4-{language}-{index:05d}-of-{n_shards:05d}.json.gz"
+
+ _N_SHARDS_PER_LANGUAGES = {
+     "zh": 468,
+     "id": 21,
+     "ms": 4,
+     "tl": 6,
+     "my": 11,
+     "vi": 329,
+     "th": 74,
+     "lo": 2,
+     "km": 9,
+     "ta": 29,
+ }
+
+
+ class SEAPileConfig(datasets.BuilderConfig):
+     """BuilderConfig for SEAmC4."""
+
+     def __init__(self, *args, languages, **kwargs):
+         """BuilderConfig for SEAmC4.
+         Args:
+             languages (:obj:`List[str]`): list of languages to load
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="+".join(languages),
+             **kwargs,
+         )
+         self.languages = languages
+
+
+ class SEAPile(datasets.GeneratorBasedBuilder):
+     """South East Asia mC4 dataset."""
+
+     BUILDER_CONFIGS = [
+         SEAPileConfig(languages=[lang]) for lang in _N_SHARDS_PER_LANGUAGES
+     ]
+     BUILDER_CONFIG_CLASS = SEAPileConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_urls = {}
+         for split in ["train"]:
+             data_urls[split] = [
+                 _DATA_URL.format(
+                     language=lang,
+                     index=index,
+                     n_shards=_N_SHARDS_PER_LANGUAGES[lang],
+                 )
+                 for lang in self.config.languages
+                 for index in range(0, _N_SHARDS_PER_LANGUAGES[lang])
+             ]
+         train_downloaded_files = dl_manager.download(data_urls["train"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": train_downloaded_files},
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
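
Once the script is in place, each language is exposed as its own config, named by the language code (from `"+".join(languages)` in SEAPileConfig). A minimal usage sketch, assuming a local copy of SEA_mC4.py and a datasets release that still supports loading scripts (recent 2.x versions may additionally ask for trust_remote_code=True):

import datasets

# Stream the Vietnamese config ("vi") so its 329 shards are not all downloaded up front.
vi_train = datasets.load_dataset(
    "SEA_mC4.py",   # path to the loading script from this commit
    "vi",           # config name = language code from _N_SHARDS_PER_LANGUAGES
    split="train",
    streaming=True,
)

# Peek at a few records; the features are declared as "id" and "text" strings.
for i, example in enumerate(vi_train):
    print(example["id"], example["text"][:80])
    if i == 2:
        break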