asahi417 committed on
Commit
9570129
1 Parent(s): 4d4e88c

Update mc4_validation.py

Files changed (1)
  1. mc4_validation.py +329 -0
mc4_validation.py CHANGED
@@ -0,0 +1,329 @@
+ """mC4 dataset based on Common Crawl."""
+
+
+ import gzip
+ import json
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _DESCRIPTION = """\
+ A colossal, cleaned version of Common Crawl's web crawl corpus.
+
+ Based on the Common Crawl dataset: "https://commoncrawl.org".
+
+ This is the processed version of Google's mC4 dataset by AllenAI.
+ """
+
+ _CITATION = """
+ @article{2019t5,
+     author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+     title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+     journal = {arXiv e-prints},
+     year = {2019},
+     archivePrefix = {arXiv},
+     eprint = {1910.10683},
+ }
+ """
+
+ _URL = "https://github.com/allenai/allennlp/discussions/5056"
+
+ _DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
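+ # For illustration, with language="en", split_suffix="-validation", index=0, and
+ # n_shards=128 (see _N_SHARDS_PER_SPLIT below), the template above resolves to
+ # https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-en-validation.tfrecord-00000-of-00128.json.gz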
+
+ _LANGUAGES = [
+     "af",
+     "am",
+     "ar",
+     "az",
+     "be",
+     "bg",
+     "bg-Latn",
+     "bn",
+     "ca",
+     "ceb",
+     "co",
+     "cs",
+     "cy",
+     "da",
+     "de",
+     "el",
+     "el-Latn",
+     "en",
+     "eo",
+     "es",
+     "et",
+     "eu",
+     "fa",
+     "fi",
+     "fil",
+     "fr",
+     "fy",
+     "ga",
+     "gd",
+     "gl",
+     "gu",
+     "ha",
+     "haw",
+     "hi",
+     "hi-Latn",
+     "hmn",
+     "ht",
+     "hu",
+     "hy",
+     "id",
+     "ig",
+     "is",
+     "it",
+     "iw",
+     "ja",
+     "ja-Latn",
+     "jv",
+     "ka",
+     "kk",
+     "km",
+     "kn",
+     "ko",
+     "ku",
+     "ky",
+     "la",
+     "lb",
+     "lo",
+     "lt",
+     "lv",
+     "mg",
+     "mi",
+     "mk",
+     "ml",
+     "mn",
+     "mr",
+     "ms",
+     "mt",
+     "my",
+     "ne",
+     "nl",
+     "no",
+     "ny",
+     "pa",
+     "pl",
+     "ps",
+     "pt",
+     "ro",
+     "ru",
+     "ru-Latn",
+     "sd",
+     "si",
+     "sk",
+     "sl",
+     "sm",
+     "sn",
+     "so",
+     "sq",
+     "sr",
+     "st",
+     "su",
+     "sv",
+     "sw",
+     "ta",
+     "te",
+     "tg",
+     "th",
+     "tr",
+     "uk",
+     "und",
+     "ur",
+     "uz",
+     "vi",
+     "xh",
+     "yi",
+     "yo",
+     "zh",
+     "zh-Latn",
+     "zu",
+ ]
+
+ _N_SHARDS_PER_SPLIT = {
+     'af': {'validation': 1},
+     'am': {'validation': 1},
+     'ar': {'validation': 4},
+     'az': {'validation': 1},
+     'be': {'validation': 1},
+     'bg': {'validation': 1},
+     'bg-Latn': {'validation': 1},
+     'bn': {'validation': 1},
+     'ca': {'validation': 1},
+     'ceb': {'validation': 1},
+     'co': {'validation': 1},
+     'cs': {'validation': 2},
+     'cy': {'validation': 1},
+     'da': {'validation': 1},
+     'de': {'validation': 16},
+     'el': {'validation': 2},
+     'el-Latn': {'validation': 1},
+     'en': {'validation': 128},
+     'eo': {'validation': 1},
+     'es': {'validation': 16},
+     'et': {'validation': 1},
+     'eu': {'validation': 1},
+     'fa': {'validation': 2},
+     'fi': {'validation': 1},
+     'fil': {'validation': 1},
+     'fr': {'validation': 16},
+     'fy': {'validation': 1},
+     'ga': {'validation': 1},
+     'gd': {'validation': 1},
+     'gl': {'validation': 1},
+     'gu': {'validation': 1},
+     'ha': {'validation': 1},
+     'haw': {'validation': 1},
+     'hi': {'validation': 2},
+     'hi-Latn': {'validation': 1},
+     'hmn': {'validation': 1},
+     'ht': {'validation': 1},
+     'hu': {'validation': 2},
+     'hy': {'validation': 1},
+     'id': {'validation': 4},
+     'ig': {'validation': 1},
+     'is': {'validation': 1},
+     'it': {'validation': 8},
+     'iw': {'validation': 1},
+     'ja': {'validation': 8},
+     'ja-Latn': {'validation': 1},
+     'jv': {'validation': 1},
+     'ka': {'validation': 1},
+     'kk': {'validation': 1},
+     'km': {'validation': 1},
+     'kn': {'validation': 1},
+     'ko': {'validation': 1},
+     'ku': {'validation': 1},
+     'ky': {'validation': 1},
+     'la': {'validation': 1},
+     'lb': {'validation': 1},
+     'lo': {'validation': 1},
+     'lt': {'validation': 1},
+     'lv': {'validation': 1},
+     'mg': {'validation': 1},
+     'mi': {'validation': 1},
+     'mk': {'validation': 1},
+     'ml': {'validation': 1},
+     'mn': {'validation': 1},
+     'mr': {'validation': 1},
+     'ms': {'validation': 1},
+     'mt': {'validation': 1},
+     'my': {'validation': 1},
+     'ne': {'validation': 1},
+     'nl': {'validation': 4},
+     'no': {'validation': 1},
+     'ny': {'validation': 1},
+     'pa': {'validation': 1},
+     'pl': {'validation': 4},
+     'ps': {'validation': 1},
+     'pt': {'validation': 4},
+     'ro': {'validation': 2},
+     'ru': {'validation': 32},
+     'ru-Latn': {'validation': 1},
+     'sd': {'validation': 1},
+     'si': {'validation': 1},
+     'sk': {'validation': 1},
+     'sl': {'validation': 1},
+     'sm': {'validation': 1},
+     'sn': {'validation': 1},
+     'so': {'validation': 1},
+     'sq': {'validation': 1},
+     'sr': {'validation': 1},
+     'st': {'validation': 1},
+     'su': {'validation': 1},
+     'sv': {'validation': 2},
+     'sw': {'validation': 1},
+     'ta': {'validation': 1},
+     'te': {'validation': 1},
+     'tg': {'validation': 1},
+     'th': {'validation': 1},
+     'tr': {'validation': 4},
+     'uk': {'validation': 2},
+     'und': {'validation': 32},
+     'ur': {'validation': 1},
+     'uz': {'validation': 1},
+     'vi': {'validation': 4},
+     'xh': {'validation': 1},
+     'yi': {'validation': 1},
+     'yo': {'validation': 1},
+     'zh': {'validation': 2},
+     'zh-Latn': {'validation': 1},
+     'zu': {'validation': 1}
+ }
+
+
+ class Mc4Config(datasets.BuilderConfig):
+     """BuilderConfig for mC4."""
+
+     def __init__(self, *args, languages, **kwargs):
+         """BuilderConfig for mC4.
+
+         Args:
+             languages (:obj:`List[str]`): list of languages to load
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="+".join(languages),
+             **kwargs,
+         )
+         self.languages = languages
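+
+ # Note: the config name is the language list joined with "+", e.g.
+ # Mc4Config(languages=["en", "fr"]) produces the config name "en+fr".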
+
+
+ class Mc4(datasets.GeneratorBasedBuilder):
+     """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""
+
+     BUILDER_CONFIGS = [Mc4Config(languages=[lang]) for lang in _LANGUAGES]
+     BUILDER_CONFIG_CLASS = Mc4Config
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "timestamp": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_urls = {}
+         for split in ["validation"]:
+             data_urls[split] = [
+                 _DATA_URL.format(
+                     language=lang,
+                     split_suffix="-validation" if split == "validation" else "",
+                     index=index,
+                     n_shards=_N_SHARDS_PER_SPLIT[lang][split],
+                 )
+                 for lang in self.config.languages
+                 for index in range(_N_SHARDS_PER_SPLIT[lang][split])
+             ]
+         # This script exposes only the validation split of mC4, so only the
+         # validation shards are downloaded.
+         validation_downloaded_files = dl_manager.download(data_urls["validation"])
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """This function returns the examples in the raw (text) form by iterating on all the files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
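
As a usage note, here is a minimal sketch of loading this script with the datasets library. It assumes the file is saved locally as mc4_validation.py and that the installed datasets version still supports script-based loading; the path and the choice of language are illustrative, not part of the commit.

import datasets

# Load the validation split for one language; the `languages` keyword is
# forwarded to Mc4Config, so the resulting config name is "af".
dataset = datasets.load_dataset(
    "mc4_validation.py",
    languages=["af"],
    split="validation",
)
print(dataset[0]["url"], dataset[0]["timestamp"])
print(dataset[0]["text"][:200])

Passing several languages (e.g. languages=["af", "am"]) selects the config named "af+am" and yields the shards of every listed language in a single validation split.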