clemsadand commited on
Commit
559789d
1 Parent(s): 0a6c822

Duplicate legacy-datasets/mc4

Browse files
Files changed (3) hide show
  1. .gitattributes +2 -33
  2. README.md +534 -0
  3. mc4.py +334 -0
.gitattributes CHANGED
@@ -1,58 +1,27 @@
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
4
  *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
  *.model filter=lfs diff=lfs merge=lfs -text
14
  *.msgpack filter=lfs diff=lfs merge=lfs -text
15
- *.npy filter=lfs diff=lfs merge=lfs -text
16
- *.npz filter=lfs diff=lfs merge=lfs -text
17
  *.onnx filter=lfs diff=lfs merge=lfs -text
18
  *.ot filter=lfs diff=lfs merge=lfs -text
19
  *.parquet filter=lfs diff=lfs merge=lfs -text
20
  *.pb filter=lfs diff=lfs merge=lfs -text
21
- *.pickle filter=lfs diff=lfs merge=lfs -text
22
- *.pkl filter=lfs diff=lfs merge=lfs -text
23
  *.pt filter=lfs diff=lfs merge=lfs -text
24
  *.pth filter=lfs diff=lfs merge=lfs -text
25
  *.rar filter=lfs diff=lfs merge=lfs -text
26
- *.safetensors filter=lfs diff=lfs merge=lfs -text
27
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
  *.tar.* filter=lfs diff=lfs merge=lfs -text
29
- *.tar filter=lfs diff=lfs merge=lfs -text
30
  *.tflite filter=lfs diff=lfs merge=lfs -text
31
  *.tgz filter=lfs diff=lfs merge=lfs -text
32
- *.wasm filter=lfs diff=lfs merge=lfs -text
33
  *.xz filter=lfs diff=lfs merge=lfs -text
34
  *.zip filter=lfs diff=lfs merge=lfs -text
35
- *.zst filter=lfs diff=lfs merge=lfs -text
36
  *tfevents* filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - uncompressed
38
- *.pcm filter=lfs diff=lfs merge=lfs -text
39
- *.sam filter=lfs diff=lfs merge=lfs -text
40
- *.raw filter=lfs diff=lfs merge=lfs -text
41
- # Audio files - compressed
42
- *.aac filter=lfs diff=lfs merge=lfs -text
43
- *.flac filter=lfs diff=lfs merge=lfs -text
44
- *.mp3 filter=lfs diff=lfs merge=lfs -text
45
- *.ogg filter=lfs diff=lfs merge=lfs -text
46
- *.wav filter=lfs diff=lfs merge=lfs -text
47
- # Image files - uncompressed
48
- *.bmp filter=lfs diff=lfs merge=lfs -text
49
- *.gif filter=lfs diff=lfs merge=lfs -text
50
- *.png filter=lfs diff=lfs merge=lfs -text
51
- *.tiff filter=lfs diff=lfs merge=lfs -text
52
- # Image files - compressed
53
- *.jpg filter=lfs diff=lfs merge=lfs -text
54
- *.jpeg filter=lfs diff=lfs merge=lfs -text
55
- *.webp filter=lfs diff=lfs merge=lfs -text
56
- # Video files - compressed
57
- *.mp4 filter=lfs diff=lfs merge=lfs -text
58
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
  *.7z filter=lfs diff=lfs merge=lfs -text
2
  *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
  *.bz2 filter=lfs diff=lfs merge=lfs -text
 
6
  *.ftz filter=lfs diff=lfs merge=lfs -text
7
  *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
 
 
11
  *.model filter=lfs diff=lfs merge=lfs -text
12
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
13
  *.onnx filter=lfs diff=lfs merge=lfs -text
14
  *.ot filter=lfs diff=lfs merge=lfs -text
15
  *.parquet filter=lfs diff=lfs merge=lfs -text
16
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
17
  *.pt filter=lfs diff=lfs merge=lfs -text
18
  *.pth filter=lfs diff=lfs merge=lfs -text
19
  *.rar filter=lfs diff=lfs merge=lfs -text
 
20
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
  *.tar.* filter=lfs diff=lfs merge=lfs -text
 
22
  *.tflite filter=lfs diff=lfs merge=lfs -text
23
  *.tgz filter=lfs diff=lfs merge=lfs -text
 
24
  *.xz filter=lfs diff=lfs merge=lfs -text
25
  *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md ADDED
@@ -0,0 +1,534 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: mC4
3
+ annotations_creators:
4
+ - no-annotation
5
+ language_creators:
6
+ - found
7
+ language:
8
+ - af
9
+ - am
10
+ - ar
11
+ - az
12
+ - be
13
+ - bg
14
+ - bn
15
+ - ca
16
+ - ceb
17
+ - co
18
+ - cs
19
+ - cy
20
+ - da
21
+ - de
22
+ - el
23
+ - en
24
+ - eo
25
+ - es
26
+ - et
27
+ - eu
28
+ - fa
29
+ - fi
30
+ - fil
31
+ - fr
32
+ - fy
33
+ - ga
34
+ - gd
35
+ - gl
36
+ - gu
37
+ - ha
38
+ - haw
39
+ - he
40
+ - hi
41
+ - hmn
42
+ - ht
43
+ - hu
44
+ - hy
45
+ - id
46
+ - ig
47
+ - is
48
+ - it
49
+ - iw
50
+ - ja
51
+ - jv
52
+ - ka
53
+ - kk
54
+ - km
55
+ - kn
56
+ - ko
57
+ - ku
58
+ - ky
59
+ - la
60
+ - lb
61
+ - lo
62
+ - lt
63
+ - lv
64
+ - mg
65
+ - mi
66
+ - mk
67
+ - ml
68
+ - mn
69
+ - mr
70
+ - ms
71
+ - mt
72
+ - my
73
+ - ne
74
+ - nl
75
+ - 'no'
76
+ - ny
77
+ - pa
78
+ - pl
79
+ - ps
80
+ - pt
81
+ - ro
82
+ - ru
83
+ - sd
84
+ - si
85
+ - sk
86
+ - sl
87
+ - sm
88
+ - sn
89
+ - so
90
+ - sq
91
+ - sr
92
+ - st
93
+ - su
94
+ - sv
95
+ - sw
96
+ - ta
97
+ - te
98
+ - tg
99
+ - th
100
+ - tr
101
+ - uk
102
+ - und
103
+ - ur
104
+ - uz
105
+ - vi
106
+ - xh
107
+ - yi
108
+ - yo
109
+ - zh
110
+ - zu
111
+ language_bcp47:
112
+ - bg-Latn
113
+ - el-Latn
114
+ - hi-Latn
115
+ - ja-Latn
116
+ - ru-Latn
117
+ - zh-Latn
118
+ license:
119
+ - odc-by
120
+ multilinguality:
121
+ - multilingual
122
+ size_categories:
123
+ - n<1K
124
+ - 1K<n<10K
125
+ - 10K<n<100K
126
+ - 100K<n<1M
127
+ - 1M<n<10M
128
+ - 10M<n<100M
129
+ - 100M<n<1B
130
+ - 1B<n<10B
131
+ source_datasets:
132
+ - original
133
+ task_categories:
134
+ - text-generation
135
+ - fill-mask
136
+ task_ids:
137
+ - language-modeling
138
+ - masked-language-modeling
139
+ paperswithcode_id: mc4
140
+ viewer: false
141
+ ---
142
+
143
+ <div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400">
144
+ <p><b>Deprecated:</b> Dataset "mc4" is deprecated and will be deleted. Use "<a href="https://huggingface.co/datasets/allenai/c4">allenai/c4</a>" instead.</p>
145
+ </div>
146
+
147
+ # Dataset Card for mC4
148
+
149
+ ## Table of Contents
150
+
151
+ - [Dataset Card for mC4](#dataset-card-for-mc4)
152
+ - [Table of Contents](#table-of-contents)
153
+ - [Dataset Description](#dataset-description)
154
+ - [Dataset Summary](#dataset-summary)
155
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
156
+ - [Languages](#languages)
157
+ - [Dataset Structure](#dataset-structure)
158
+ - [Data Instances](#data-instances)
159
+ - [Data Fields](#data-fields)
160
+ - [Data Splits](#data-splits)
161
+ - [Dataset Creation](#dataset-creation)
162
+ - [Curation Rationale](#curation-rationale)
163
+ - [Source Data](#source-data)
164
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
165
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
166
+ - [Annotations](#annotations)
167
+ - [Annotation process](#annotation-process)
168
+ - [Who are the annotators?](#who-are-the-annotators)
169
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
170
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
171
+ - [Social Impact of Dataset](#social-impact-of-dataset)
172
+ - [Discussion of Biases](#discussion-of-biases)
173
+ - [Other Known Limitations](#other-known-limitations)
174
+ - [Additional Information](#additional-information)
175
+ - [Dataset Curators](#dataset-curators)
176
+ - [Licensing Information](#licensing-information)
177
+ - [Citation Information](#citation-information)
178
+ - [Contributions](#contributions)
179
+
180
+ ## Dataset Description
181
+
182
+ - **Homepage:** https://huggingface.co/datasets/allenai/c4
183
+ - **Paper:** https://arxiv.org/abs/1910.10683
184
+
185
+ ### Dataset Summary
186
+
187
+ A multilingual colossal, cleaned version of Common Crawl's web crawl corpus. Based on Common Crawl dataset: "https://commoncrawl.org".
188
+
189
+ This is the version prepared by AllenAI, hosted at this address: https://huggingface.co/datasets/allenai/c4
190
+
191
+ 108 languages are available and are reported in the table below.
192
+
193
+ Note that the languages that end with "-Latn" are simply romanized variants, i.e. written using the Latin script.
194
+
195
+ | language code | language name |
196
+ |:----------------|:---------------------|
197
+ | af | Afrikaans |
198
+ | am | Amharic |
199
+ | ar | Arabic |
200
+ | az | Azerbaijani |
201
+ | be | Belarusian |
202
+ | bg | Bulgarian |
203
+ | bg-Latn | Bulgarian (Latin) |
204
+ | bn | Bangla |
205
+ | ca | Catalan |
206
+ | ceb | Cebuano |
207
+ | co | Corsican |
208
+ | cs | Czech |
209
+ | cy | Welsh |
210
+ | da | Danish |
211
+ | de | German |
212
+ | el | Greek |
213
+ | el-Latn | Greek (Latin) |
214
+ | en | English |
215
+ | eo | Esperanto |
216
+ | es | Spanish |
217
+ | et | Estonian |
218
+ | eu | Basque |
219
+ | fa | Persian |
220
+ | fi | Finnish |
221
+ | fil | Filipino |
222
+ | fr | French |
223
+ | fy | Western Frisian |
224
+ | ga | Irish |
225
+ | gd | Scottish Gaelic |
226
+ | gl | Galician |
227
+ | gu | Gujarati |
228
+ | ha | Hausa |
229
+ | haw | Hawaiian |
230
+ | hi | Hindi |
231
+ | hi-Latn | Hindi (Latin script) |
232
+ | hmn | Hmong, Mong |
233
+ | ht | Haitian |
234
+ | hu | Hungarian |
235
+ | hy | Armenian |
236
+ | id | Indonesian |
237
+ | ig | Igbo |
238
+ | is | Icelandic |
239
+ | it | Italian |
240
+ | iw              | Hebrew (deprecated code) |
241
+ | ja | Japanese |
242
+ | ja-Latn | Japanese (Latin) |
243
+ | jv | Javanese |
244
+ | ka | Georgian |
245
+ | kk | Kazakh |
246
+ | km | Khmer |
247
+ | kn | Kannada |
248
+ | ko | Korean |
249
+ | ku | Kurdish |
250
+ | ky | Kyrgyz |
251
+ | la | Latin |
252
+ | lb | Luxembourgish |
253
+ | lo | Lao |
254
+ | lt | Lithuanian |
255
+ | lv | Latvian |
256
+ | mg | Malagasy |
257
+ | mi | Maori |
258
+ | mk | Macedonian |
259
+ | ml | Malayalam |
260
+ | mn | Mongolian |
261
+ | mr | Marathi |
262
+ | ms | Malay |
263
+ | mt | Maltese |
264
+ | my | Burmese |
265
+ | ne | Nepali |
266
+ | nl | Dutch |
267
+ | no | Norwegian |
268
+ | ny | Nyanja |
269
+ | pa | Punjabi |
270
+ | pl | Polish |
271
+ | ps | Pashto |
272
+ | pt | Portuguese |
273
+ | ro | Romanian |
274
+ | ru | Russian |
275
+ | ru-Latn | Russian (Latin) |
276
+ | sd | Sindhi |
277
+ | si | Sinhala |
278
+ | sk | Slovak |
279
+ | sl | Slovenian |
280
+ | sm | Samoan |
281
+ | sn | Shona |
282
+ | so | Somali |
283
+ | sq | Albanian |
284
+ | sr | Serbian |
285
+ | st | Southern Sotho |
286
+ | su | Sundanese |
287
+ | sv | Swedish |
288
+ | sw | Swahili |
289
+ | ta | Tamil |
290
+ | te | Telugu |
291
+ | tg | Tajik |
292
+ | th | Thai |
293
+ | tr | Turkish |
294
+ | uk | Ukrainian |
295
+ | und | Unknown language |
296
+ | ur | Urdu |
297
+ | uz | Uzbek |
298
+ | vi | Vietnamese |
299
+ | xh | Xhosa |
300
+ | yi | Yiddish |
301
+ | yo | Yoruba |
302
+ | zh | Chinese |
303
+ | zh-Latn | Chinese (Latin) |
304
+ | zu | Zulu |
305
+
306
+ You can load the mC4 subset of any language like this:
307
+
308
+ ```python
309
+ from datasets import load_dataset
310
+
311
+ en_mc4 = load_dataset("mc4", "en")
312
+ ```
313
+
314
+ And you can even specify a list of languages:
315
+
316
+ ```python
317
+ from datasets import load_dataset
318
+
319
+ mc4_subset_with_five_languages = load_dataset("mc4", languages=["en", "fr", "es", "de", "zh"])
320
+ ```
321
+
322
+ ### Supported Tasks and Leaderboards
323
+
324
+ mC4 is mainly intended to pretrain language models and word representations.
325
+
326
+ ### Languages
327
+
328
+ The dataset supports 108 languages.
329
+
330
+ ## Dataset Structure
331
+
332
+ ### Data Instances
333
+
334
+ An example from the `en` config is:
335
+
336
+ ```
337
+ {'timestamp': '2018-06-24T01:32:39Z',
338
+ 'text': 'Farm Resources in Plumas County\nShow Beginning Farmer Organizations & Professionals (304)\nThere are 304 resources serving Plumas County in the following categories:\nMap of Beginning Farmer Organizations & Professionals serving Plumas County\nVictoria Fisher - Office Manager - Loyalton, CA\nAmy Lynn Rasband - UCCE Plumas-Sierra Administrative Assistant II - Quincy , CA\nShow Farm Income Opportunities Organizations & Professionals (353)\nThere are 353 resources serving Plumas County in the following categories:\nFarm Ranch And Forest Retailers (18)\nMap of Farm Income Opportunities Organizations & Professionals serving Plumas County\nWarner Valley Wildlife Area - Plumas County\nShow Farm Resources Organizations & Professionals (297)\nThere are 297 resources serving Plumas County in the following categories:\nMap of Farm Resources Organizations & Professionals serving Plumas County\nThere are 57 resources serving Plumas County in the following categories:\nMap of Organic Certification Organizations & Professionals serving Plumas County',
339
+ 'url': 'http://www.californialandcan.org/Plumas/Farm-Resources/'}
340
+ ```
341
+
342
+ ### Data Fields
343
+
344
+ The data have several fields:
345
+
346
+ - `url`: url of the source as a string
347
+ - `text`: text content as a string
348
+ - `timestamp`: timestamp as a string
349
+
350
+ ### Data Splits
351
+
352
+ To build mC4, the authors used [CLD3](https://github.com/google/cld3) to identify over 100 languages. The resulting mC4 subsets for each language are reported in this table:
353
+
354
+ | config | train | validation |
355
+ |:---------|:--------|:-------------|
356
+ | af | ? | ? |
357
+ | am | ? | ? |
358
+ | ar | ? | ? |
359
+ | az | ? | ? |
360
+ | be | ? | ? |
361
+ | bg | ? | ? |
362
+ | bg-Latn | ? | ? |
363
+ | bn | ? | ? |
364
+ | ca | ? | ? |
365
+ | ceb | ? | ? |
366
+ | co | ? | ? |
367
+ | cs | ? | ? |
368
+ | cy | ? | ? |
369
+ | da | ? | ? |
370
+ | de | ? | ? |
371
+ | el | ? | ? |
372
+ | el-Latn | ? | ? |
373
+ | en | ? | ? |
374
+ | eo | ? | ? |
375
+ | es | ? | ? |
376
+ | et | ? | ? |
377
+ | eu | ? | ? |
378
+ | fa | ? | ? |
379
+ | fi | ? | ? |
380
+ | fil | ? | ? |
381
+ | fr | ? | ? |
382
+ | fy | ? | ? |
383
+ | ga | ? | ? |
384
+ | gd | ? | ? |
385
+ | gl | ? | ? |
386
+ | gu | ? | ? |
387
+ | ha | ? | ? |
388
+ | haw | ? | ? |
389
+ | hi | ? | ? |
390
+ | hi-Latn | ? | ? |
391
+ | hmn | ? | ? |
392
+ | ht | ? | ? |
393
+ | hu | ? | ? |
394
+ | hy | ? | ? |
395
+ | id | ? | ? |
396
+ | ig | ? | ? |
397
+ | is | ? | ? |
398
+ | it | ? | ? |
399
+ | iw | ? | ? |
400
+ | ja | ? | ? |
401
+ | ja-Latn | ? | ? |
402
+ | jv | ? | ? |
403
+ | ka | ? | ? |
404
+ | kk | ? | ? |
405
+ | km | ? | ? |
406
+ | kn | ? | ? |
407
+ | ko | ? | ? |
408
+ | ku | ? | ? |
409
+ | ky | ? | ? |
410
+ | la | ? | ? |
411
+ | lb | ? | ? |
412
+ | lo | ? | ? |
413
+ | lt | ? | ? |
414
+ | lv | ? | ? |
415
+ | mg | ? | ? |
416
+ | mi | ? | ? |
417
+ | mk | ? | ? |
418
+ | ml | ? | ? |
419
+ | mn | ? | ? |
420
+ | mr | ? | ? |
421
+ | ms | ? | ? |
422
+ | mt | ? | ? |
423
+ | my | ? | ? |
424
+ | ne | ? | ? |
425
+ | nl | ? | ? |
426
+ | no | ? | ? |
427
+ | ny | ? | ? |
428
+ | pa | ? | ? |
429
+ | pl | ? | ? |
430
+ | ps | ? | ? |
431
+ | pt | ? | ? |
432
+ | ro | ? | ? |
433
+ | ru | ? | ? |
434
+ | ru-Latn | ? | ? |
435
+ | sd | ? | ? |
436
+ | si | ? | ? |
437
+ | sk | ? | ? |
438
+ | sl | ? | ? |
439
+ | sm | ? | ? |
440
+ | sn | ? | ? |
441
+ | so | ? | ? |
442
+ | sq | ? | ? |
443
+ | sr | ? | ? |
444
+ | st | ? | ? |
445
+ | su | ? | ? |
446
+ | sv | ? | ? |
447
+ | sw | ? | ? |
448
+ | ta | ? | ? |
449
+ | te | ? | ? |
450
+ | tg | ? | ? |
451
+ | th | ? | ? |
452
+ | tr | ? | ? |
453
+ | uk | ? | ? |
454
+ | und | ? | ? |
455
+ | ur | ? | ? |
456
+ | uz | ? | ? |
457
+ | vi | ? | ? |
458
+ | xh | ? | ? |
459
+ | yi | ? | ? |
460
+ | yo | ? | ? |
461
+ | zh | ? | ? |
462
+ | zh-Latn | ? | ? |
463
+ | zu | ? | ? |
464
+
465
+ ## Dataset Creation
466
+
467
+ ### Curation Rationale
468
+
469
+ [More Information Needed]
470
+
471
+ ### Source Data
472
+
473
+ #### Initial Data Collection and Normalization
474
+
475
+ [More Information Needed]
476
+
477
+ #### Who are the source language producers?
478
+
479
+ [More Information Needed]
480
+
481
+ ### Annotations
482
+
483
+ #### Annotation process
484
+
485
+ [More Information Needed]
486
+
487
+ #### Who are the annotators?
488
+
489
+ [More Information Needed]
490
+
491
+ ### Personal and Sensitive Information
492
+
493
+ [More Information Needed]
494
+
495
+ ## Considerations for Using the Data
496
+
497
+ ### Social Impact of Dataset
498
+
499
+ [More Information Needed]
500
+
501
+ ### Discussion of Biases
502
+
503
+ [More Information Needed]
504
+
505
+ ### Other Known Limitations
506
+
507
+ [More Information Needed]
508
+
509
+ ## Additional Information
510
+
511
+ ### Dataset Curators
512
+
513
+ [More Information Needed]
514
+
515
+ ### Licensing Information
516
+
517
+ AllenAI are releasing this dataset under the terms of ODC-BY. By using this, you are also bound by the Common Crawl terms of use in respect of the content contained in the dataset.
518
+
519
+ ### Citation Information
520
+
521
+ ```
522
+ @article{2019t5,
523
+ author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
524
+ title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
525
+ journal = {arXiv e-prints},
526
+ year = {2019},
527
+ archivePrefix = {arXiv},
528
+ eprint = {1910.10683},
529
+ }
530
+ ```
531
+
532
+ ### Contributions
533
+
534
+ Thanks to [@dirkgr](https://github.com/dirkgr) and [@lhoestq](https://github.com/lhoestq) for adding this dataset.
mc4.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """mC4 dataset based on Common Crawl."""
2
+
3
+
4
+ import gzip
5
+ import json
6
+ import warnings
7
+
8
+ import datasets
9
+
10
+
11
+ logger = datasets.logging.get_logger(__name__)
12
+
13
+
14
+ _DESCRIPTION = """\
15
+ A colossal, cleaned version of Common Crawl's web crawl corpus.
16
+
17
+ Based on Common Crawl dataset: "https://commoncrawl.org".
18
+
19
+ This is the processed version of Google's mC4 dataset by AllenAI.
20
+ """
21
+
22
+ _CITATION = """
23
+ @article{2019t5,
24
+ author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
25
+ title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
26
+ journal = {arXiv e-prints},
27
+ year = {2019},
28
+ archivePrefix = {arXiv},
29
+ eprint = {1910.10683},
30
+ }
31
+ """
32
+
33
+ _URL = "https://github.com/allenai/allennlp/discussions/5056"
34
+
35
+ _DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-{language}{split_suffix}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
36
+
37
+ _LANGUAGES = [
38
+ "af",
39
+ "am",
40
+ "ar",
41
+ "az",
42
+ "be",
43
+ "bg",
44
+ "bg-Latn",
45
+ "bn",
46
+ "ca",
47
+ "ceb",
48
+ "co",
49
+ "cs",
50
+ "cy",
51
+ "da",
52
+ "de",
53
+ "el",
54
+ "el-Latn",
55
+ "en",
56
+ "eo",
57
+ "es",
58
+ "et",
59
+ "eu",
60
+ "fa",
61
+ "fi",
62
+ "fil",
63
+ "fr",
64
+ "fy",
65
+ "ga",
66
+ "gd",
67
+ "gl",
68
+ "gu",
69
+ "ha",
70
+ "haw",
71
+ "hi",
72
+ "hi-Latn",
73
+ "hmn",
74
+ "ht",
75
+ "hu",
76
+ "hy",
77
+ "id",
78
+ "ig",
79
+ "is",
80
+ "it",
81
+ "iw",
82
+ "ja",
83
+ "ja-Latn",
84
+ "jv",
85
+ "ka",
86
+ "kk",
87
+ "km",
88
+ "kn",
89
+ "ko",
90
+ "ku",
91
+ "ky",
92
+ "la",
93
+ "lb",
94
+ "lo",
95
+ "lt",
96
+ "lv",
97
+ "mg",
98
+ "mi",
99
+ "mk",
100
+ "ml",
101
+ "mn",
102
+ "mr",
103
+ "ms",
104
+ "mt",
105
+ "my",
106
+ "ne",
107
+ "nl",
108
+ "no",
109
+ "ny",
110
+ "pa",
111
+ "pl",
112
+ "ps",
113
+ "pt",
114
+ "ro",
115
+ "ru",
116
+ "ru-Latn",
117
+ "sd",
118
+ "si",
119
+ "sk",
120
+ "sl",
121
+ "sm",
122
+ "sn",
123
+ "so",
124
+ "sq",
125
+ "sr",
126
+ "st",
127
+ "su",
128
+ "sv",
129
+ "sw",
130
+ "ta",
131
+ "te",
132
+ "tg",
133
+ "th",
134
+ "tr",
135
+ "uk",
136
+ "und",
137
+ "ur",
138
+ "uz",
139
+ "vi",
140
+ "xh",
141
+ "yi",
142
+ "yo",
143
+ "zh",
144
+ "zh-Latn",
145
+ "zu",
146
+ ]
147
+
148
+ _N_SHARDS_PER_SPLIT = {
149
+ "af": {"train": 64, "validation": 1},
150
+ "am": {"train": 16, "validation": 1},
151
+ "ar": {"train": 1024, "validation": 4},
152
+ "az": {"train": 256, "validation": 1},
153
+ "be": {"train": 128, "validation": 1},
154
+ "bg": {"train": 1024, "validation": 1},
155
+ "bg-Latn": {"train": 4, "validation": 1},
156
+ "bn": {"train": 512, "validation": 1},
157
+ "ca": {"train": 512, "validation": 1},
158
+ "ceb": {"train": 8, "validation": 1},
159
+ "co": {"train": 8, "validation": 1},
160
+ "cs": {"train": 1024, "validation": 2},
161
+ "cy": {"train": 256, "validation": 1},
162
+ "da": {"train": 1024, "validation": 1},
163
+ "de": {"train": 2048, "validation": 16},
164
+ "el": {"train": 1024, "validation": 2},
165
+ "el-Latn": {"train": 16, "validation": 1},
166
+ "en": {"train": 11264, "validation": 128},
167
+ "eo": {"train": 32, "validation": 1},
168
+ "es": {"train": 2048, "validation": 16},
169
+ "et": {"train": 256, "validation": 1},
170
+ "eu": {"train": 64, "validation": 1},
171
+ "fa": {"train": 1024, "validation": 2},
172
+ "fi": {"train": 1024, "validation": 1},
173
+ "fil": {"train": 64, "validation": 1},
174
+ "fr": {"train": 2048, "validation": 16},
175
+ "fy": {"train": 16, "validation": 1},
176
+ "ga": {"train": 16, "validation": 1},
177
+ "gd": {"train": 16, "validation": 1},
178
+ "gl": {"train": 128, "validation": 1},
179
+ "gu": {"train": 64, "validation": 1},
180
+ "ha": {"train": 8, "validation": 1},
181
+ "haw": {"train": 2, "validation": 1},
182
+ "hi": {"train": 1024, "validation": 2},
183
+ "hi-Latn": {"train": 16, "validation": 1},
184
+ "hmn": {"train": 8, "validation": 1},
185
+ "ht": {"train": 8, "validation": 1},
186
+ "hu": {"train": 1024, "validation": 2},
187
+ "hy": {"train": 128, "validation": 1},
188
+ "id": {"train": 1024, "validation": 4},
189
+ "ig": {"train": 4, "validation": 1},
190
+ "is": {"train": 128, "validation": 1},
191
+ "it": {"train": 1024, "validation": 8},
192
+ "iw": {"train": 1024, "validation": 1},
193
+ "ja": {"train": 1024, "validation": 8},
194
+ "ja-Latn": {"train": 8, "validation": 1},
195
+ "jv": {"train": 8, "validation": 1},
196
+ "ka": {"train": 256, "validation": 1},
197
+ "kk": {"train": 256, "validation": 1},
198
+ "km": {"train": 64, "validation": 1},
199
+ "kn": {"train": 64, "validation": 1},
200
+ "ko": {"train": 1024, "validation": 1},
201
+ "ku": {"train": 16, "validation": 1},
202
+ "ky": {"train": 64, "validation": 1},
203
+ "la": {"train": 64, "validation": 1},
204
+ "lb": {"train": 32, "validation": 1},
205
+ "lo": {"train": 8, "validation": 1},
206
+ "lt": {"train": 512, "validation": 1},
207
+ "lv": {"train": 256, "validation": 1},
208
+ "mg": {"train": 8, "validation": 1},
209
+ "mi": {"train": 4, "validation": 1},
210
+ "mk": {"train": 128, "validation": 1},
211
+ "ml": {"train": 128, "validation": 1},
212
+ "mn": {"train": 128, "validation": 1},
213
+ "mr": {"train": 1024, "validation": 1},
214
+ "ms": {"train": 512, "validation": 1},
215
+ "mt": {"train": 128, "validation": 1},
216
+ "my": {"train": 64, "validation": 1},
217
+ "ne": {"train": 256, "validation": 1},
218
+ "nl": {"train": 1024, "validation": 4},
219
+ "no": {"train": 1024, "validation": 1},
220
+ "ny": {"train": 4, "validation": 1},
221
+ "pa": {"train": 32, "validation": 1},
222
+ "pl": {"train": 1024, "validation": 4},
223
+ "ps": {"train": 16, "validation": 1},
224
+ "pt": {"train": 1024, "validation": 4},
225
+ "ro": {"train": 1024, "validation": 2},
226
+ "ru": {"train": 4096, "validation": 32},
227
+ "ru-Latn": {"train": 32, "validation": 1},
228
+ "sd": {"train": 64, "validation": 1},
229
+ "si": {"train": 64, "validation": 1},
230
+ "sk": {"train": 512, "validation": 1},
231
+ "sl": {"train": 256, "validation": 1},
232
+ "sm": {"train": 4, "validation": 1},
233
+ "sn": {"train": 8, "validation": 1},
234
+ "so": {"train": 64, "validation": 1},
235
+ "sq": {"train": 128, "validation": 1},
236
+ "sr": {"train": 256, "validation": 1},
237
+ "st": {"train": 2, "validation": 1},
238
+ "su": {"train": 4, "validation": 1},
239
+ "sv": {"train": 1024, "validation": 2},
240
+ "sw": {"train": 32, "validation": 1},
241
+ "ta": {"train": 256, "validation": 1},
242
+ "te": {"train": 128, "validation": 1},
243
+ "tg": {"train": 64, "validation": 1},
244
+ "th": {"train": 1024, "validation": 1},
245
+ "tr": {"train": 1024, "validation": 4},
246
+ "uk": {"train": 1024, "validation": 2},
247
+ "und": {"train": 3072, "validation": 32},
248
+ "ur": {"train": 128, "validation": 1},
249
+ "uz": {"train": 32, "validation": 1},
250
+ "vi": {"train": 1024, "validation": 4},
251
+ "xh": {"train": 2, "validation": 1},
252
+ "yi": {"train": 16, "validation": 1},
253
+ "yo": {"train": 2, "validation": 1},
254
+ "zh": {"train": 1024, "validation": 2},
255
+ "zh-Latn": {"train": 8, "validation": 1},
256
+ "zu": {"train": 8, "validation": 1},
257
+ }
258
+
259
+
260
+ class Mc4Config(datasets.BuilderConfig):
261
+ """BuilderConfig for mC4."""
262
+
263
+ def __init__(self, *args, languages, **kwargs):
264
+ """BuilderConfig for mC4.
265
+ Args:
266
+ languages (:obj:`List[str]`): list of languages to load
267
+ **kwargs: keyword arguments forwarded to super.
268
+ """
269
+ super().__init__(
270
+ *args,
271
+ name="+".join(languages),
272
+ **kwargs,
273
+ )
274
+ self.languages = languages
275
+
276
+
277
+ class Mc4(datasets.GeneratorBasedBuilder):
278
+ """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""
279
+
280
+ BUILDER_CONFIGS = [Mc4Config(languages=[lang]) for lang in _LANGUAGES]
281
+ BUILDER_CONFIG_CLASS = Mc4Config
282
+
283
+ def _info(self):
284
+ warnings.warn(
285
+ "Dataset 'mc4' is deprecated and will be deleted. Use 'allenai/c4' instead.",
286
+ FutureWarning,
287
+ )
288
+ return datasets.DatasetInfo(
289
+ description=_DESCRIPTION,
290
+ features=datasets.Features(
291
+ {
292
+ "text": datasets.Value("string"),
293
+ "timestamp": datasets.Value("string"),
294
+ "url": datasets.Value("string"),
295
+ }
296
+ ),
297
+ supervised_keys=None,
298
+ homepage=_URL,
299
+ citation=_CITATION,
300
+ )
301
+
302
+ def _split_generators(self, dl_manager):
303
+ data_urls = {}
304
+ for split in ["train", "validation"]:
305
+ data_urls[split] = [
306
+ _DATA_URL.format(
307
+ language=lang,
308
+ split_suffix="-validation" if split == "validation" else "",
309
+ index=index,
310
+ n_shards=_N_SHARDS_PER_SPLIT[lang][split],
311
+ )
312
+ for lang in self.config.languages
313
+ for index in range(_N_SHARDS_PER_SPLIT[lang][split])
314
+ ]
315
+ train_downloaded_files = dl_manager.download(data_urls["train"])
316
+ validation_downloaded_files = dl_manager.download(data_urls["validation"])
317
+ return [
318
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
319
+ datasets.SplitGenerator(
320
+ name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
321
+ ),
322
+ ]
323
+
324
+ def _generate_examples(self, filepaths):
325
+ """This function returns the examples in the raw (text) form by iterating on all the files."""
326
+ id_ = 0
327
+ for filepath in filepaths:
328
+ logger.info("generating examples from = %s", filepath)
329
+ with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
330
+ for line in f:
331
+ if line:
332
+ example = json.loads(line)
333
+ yield id_, example
334
+ id_ += 1