sabilmakbar committed
Commit
550da22
1 Parent(s): 02367e1

Init Commit

Files changed (50)
  1. .gitattributes +2 -0
  2. README.md +468 -0
  3. dedup_raw_wiki_data.py +414 -0
  4. dedup_raw_wiki_data_sea.sh +61 -0
  5. extract_raw_wiki_data.py +73 -0
  6. extract_raw_wiki_data_sea.sh +37 -0
  7. requirements.txt +6 -0
  8. sea_wiki.py +246 -0
  9. sea_wiki_dedup_data/wiki_ace_20231101_dataset_dedup_cleansed.csv +3 -0
  10. sea_wiki_dedup_data/wiki_ban_20231101_dataset_dedup_cleansed.csv +3 -0
  11. sea_wiki_dedup_data/wiki_bjn_20231101_dataset_dedup_cleansed.csv +3 -0
  12. sea_wiki_dedup_data/wiki_bug_20231101_dataset_dedup_cleansed.csv +3 -0
  13. sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv +3 -0
  14. sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv +3 -0
  15. sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv +3 -0
  16. sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv +3 -0
  17. sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv +3 -0
  18. sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv +3 -0
  19. sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv +3 -0
  20. sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv +3 -0
  21. sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv +3 -0
  22. sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv +3 -0
  23. sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv +3 -0
  24. sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv +3 -0
  25. sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv +3 -0
  26. sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv +3 -0
  27. sea_wiki_dedup_data/wiki_tet_20231101_dataset_dedup_cleansed.csv +3 -0
  28. sea_wiki_dedup_data/wiki_th_20231101_dataset_dedup_cleansed.csv +3 -0
  29. sea_wiki_dedup_data/wiki_vi_20231101_dataset_dedup_cleansed.csv +3 -0
  30. sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv +3 -0
  31. sea_wiki_raw_data/wiki_ban_20231101_raw_dataset.csv +3 -0
  32. sea_wiki_raw_data/wiki_bjn_20231101_raw_dataset.csv +3 -0
  33. sea_wiki_raw_data/wiki_bug_20231101_raw_dataset.csv +3 -0
  34. sea_wiki_raw_data/wiki_gor_20231101_raw_dataset.csv +3 -0
  35. sea_wiki_raw_data/wiki_id_20231101_raw_dataset.csv +3 -0
  36. sea_wiki_raw_data/wiki_jv_20231101_raw_dataset.csv +3 -0
  37. sea_wiki_raw_data/wiki_km_20231101_raw_dataset.csv +3 -0
  38. sea_wiki_raw_data/wiki_lo_20231101_raw_dataset.csv +3 -0
  39. sea_wiki_raw_data/wiki_mad_20231101_raw_dataset.csv +3 -0
  40. sea_wiki_raw_data/wiki_map-bms_20231101_raw_dataset.csv +3 -0
  41. sea_wiki_raw_data/wiki_min_20231101_raw_dataset.csv +3 -0
  42. sea_wiki_raw_data/wiki_mnw_20231101_raw_dataset.csv +3 -0
  43. sea_wiki_raw_data/wiki_ms_20231101_raw_dataset.csv +3 -0
  44. sea_wiki_raw_data/wiki_my_20231101_raw_dataset.csv +3 -0
  45. sea_wiki_raw_data/wiki_nia_20231101_raw_dataset.csv +3 -0
  46. sea_wiki_raw_data/wiki_shn_20231101_raw_dataset.csv +3 -0
  47. sea_wiki_raw_data/wiki_su_20231101_raw_dataset.csv +3 -0
  48. sea_wiki_raw_data/wiki_tet_20231101_raw_dataset.csv +3 -0
  49. sea_wiki_raw_data/wiki_th_20231101_raw_dataset.csv +3 -0
  50. sea_wiki_raw_data/wiki_vi_20231101_raw_dataset.csv +3 -0
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ # Wiki data files CSV
57
+ *.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,471 @@
1
  ---
2
+ annotations_creators:
3
+ - no-annotation
4
+ language_creators:
5
+ - crowdsourced
6
+ language:
7
+ - ace
8
+ - ban
9
+ - bjn
10
+ - bug
11
+ - gor
12
+ - id
13
+ - jv
14
+ - mis
15
+ - min
16
+ - ms
17
+ - nia
18
+ - su
19
+ - tet
20
+ license:
21
+ - cc-by-sa-3.0
22
+ - gfdl
23
+ multilinguality:
24
+ - multilingual
25
+ source_datasets:
26
+ - Wikipedia-HF
27
+ task_categories:
28
+ - text-generation
29
+ - fill-mask
30
+ task_ids:
31
+ - language-modeling
32
+ - masked-language-modeling
33
+ pretty_name: Wikipedia Archive for SEA Languages
34
+ tags:
35
+ - Wikipedia
36
+ - Southeast Asia (SEA)
37
+ - Dialect
38
+ - SEA-related Languages
39
+ - SEA Local Languages
40
+ dataset_info:
41
+ - config_name: seawiki_all
42
+ features:
43
+ - name: url
44
+ dtype: string
45
+ - name: title
46
+ dtype: string
47
+ - name: text
48
+ dtype: string
49
+ splits:
50
+ - name: ace
51
+ num_bytes: 4952102
52
+ num_examples: 13003
53
+ - name: ban
54
+ num_bytes: 18198909
55
+ num_examples: 20987
56
+ - name: bjn
57
+ num_bytes: 6792259
58
+ num_examples: 10519
59
+ - name: bug
60
+ num_bytes: 3298561
61
+ num_examples: 15880
62
+ - name: gor
63
+ num_bytes: 6239133
64
+ num_examples: 15359
65
+ - name: id
66
+ num_bytes: 1118834498
67
+ num_examples: 665622
68
+ - name: jv
69
+ num_bytes: 72101470
70
+ num_examples: 73380
71
+ - name: km
72
+ num_bytes: 103146669
73
+ num_examples: 11994
74
+ - name: lo
75
+ num_bytes: 15240262
76
+ num_examples: 5014
77
+ - name: mad
78
+ num_bytes: 1612542
79
+ num_examples: 1192
80
+ - name: map_bms
81
+ num_bytes: 5221506
82
+ num_examples: 13580
83
+ - name: min
84
+ num_bytes: 116824020
85
+ num_examples: 227143
86
+ - name: mnw
87
+ num_bytes: 47321734
88
+ num_examples: 3296
89
+ - name: ms
90
+ num_bytes: 419662356
91
+ num_examples: 368628
92
+ - name: my
93
+ num_bytes: 313370839
94
+ num_examples: 109310
95
+ - name: nia
96
+ num_bytes: 2153274
97
+ num_examples: 1714
98
+ - name: shn
99
+ num_bytes: 33754296
100
+ num_examples: 13945
101
+ - name: su
102
+ num_bytes: 47516268
103
+ num_examples: 61555
104
+ - name: tet
105
+ num_bytes: 1454499
106
+ num_examples: 1468
107
+ - name: th
108
+ num_bytes: 1012930269
109
+ num_examples: 159719
110
+ - name: vi
111
+ num_bytes: 1603057632
112
+ num_examples: 1288680
113
+ download_size: 4959860254
114
+ dataset_size: 4953683098
115
+ - config_name: seawiki_dedup_all
116
+ features:
117
+ - name: url
118
+ dtype: string
119
+ - name: title
120
+ dtype: string
121
+ - name: text
122
+ dtype: string
123
+ splits:
124
+ - name: ace
125
+ num_bytes: 4944916
126
+ num_examples: 12979
127
+ - name: ban
128
+ num_bytes: 18025267
129
+ num_examples: 20611
130
+ - name: bjn
131
+ num_bytes: 6786207
132
+ num_examples: 10503
133
+ - name: bug
134
+ num_bytes: 2182435
135
+ num_examples: 9969
136
+ - name: gor
137
+ num_bytes: 6217480
138
+ num_examples: 15290
139
+ - name: id
140
+ num_bytes: 1117891512
141
+ num_examples: 662443
142
+ - name: jv
143
+ num_bytes: 71997517
144
+ num_examples: 73080
145
+ - name: km
146
+ num_bytes: 102698901
147
+ num_examples: 11466
148
+ - name: lo
149
+ num_bytes: 14908444
150
+ num_examples: 4897
151
+ - name: mad
152
+ num_bytes: 1612542
153
+ num_examples: 1192
154
+ - name: map_bms
155
+ num_bytes: 5067489
156
+ num_examples: 11839
157
+ - name: min
158
+ num_bytes: 116721269
159
+ num_examples: 225972
160
+ - name: mnw
161
+ num_bytes: 47243333
162
+ num_examples: 3271
163
+ - name: ms
164
+ num_bytes: 414783365
165
+ num_examples: 348045
166
+ - name: my
167
+ num_bytes: 312990457
168
+ num_examples: 108819
169
+ - name: nia
170
+ num_bytes: 2153274
171
+ num_examples: 1714
172
+ - name: shn
173
+ num_bytes: 33616591
174
+ num_examples: 13662
175
+ - name: su
176
+ num_bytes: 47512744
177
+ num_examples: 61529
178
+ - name: tet
179
+ num_bytes: 1452151
180
+ num_examples: 1464
181
+ - name: th
182
+ num_bytes: 1012868861
183
+ num_examples: 159666
184
+ - name: vi
185
+ num_bytes: 1602828123
186
+ num_examples: 1287910
187
+ download_size: 4950689052
188
+ dataset_size: 4944502878
189
+ - config_name: seawiki_with_countries_all
190
+ features:
191
+ - name: url
192
+ dtype: string
193
+ - name: title
194
+ dtype: string
195
+ - name: text
196
+ dtype: string
197
+ splits:
198
+ - name: idn_ace
199
+ num_bytes: 4952102
200
+ num_examples: 13003
201
+ - name: idn_ban
202
+ num_bytes: 18198909
203
+ num_examples: 20987
204
+ - name: idn_bjn
205
+ num_bytes: 6792259
206
+ num_examples: 10519
207
+ - name: idn_bug
208
+ num_bytes: 3298561
209
+ num_examples: 15880
210
+ - name: idn_gor
211
+ num_bytes: 6239133
212
+ num_examples: 15359
213
+ - name: idn_id
214
+ num_bytes: 1118834498
215
+ num_examples: 665622
216
+ - name: idn_jv
217
+ num_bytes: 72101470
218
+ num_examples: 73380
219
+ - name: idn_mad
220
+ num_bytes: 1612542
221
+ num_examples: 1192
222
+ - name: idn_map_bms
223
+ num_bytes: 5221506
224
+ num_examples: 13580
225
+ - name: idn_min
226
+ num_bytes: 116824020
227
+ num_examples: 227143
228
+ - name: idn_ms
229
+ num_bytes: 419662356
230
+ num_examples: 368628
231
+ - name: idn_nia
232
+ num_bytes: 2153274
233
+ num_examples: 1714
234
+ - name: idn_su
235
+ num_bytes: 47516268
236
+ num_examples: 61555
237
+ - name: idn_tet
238
+ num_bytes: 1454499
239
+ num_examples: 1468
240
+ - name: sgp_ms
241
+ num_bytes: 419662356
242
+ num_examples: 368628
243
+ - name: mys_ms
244
+ num_bytes: 419662356
245
+ num_examples: 368628
246
+ - name: brn_ms
247
+ num_bytes: 419662356
248
+ num_examples: 368628
249
+ - name: tha_th
250
+ num_bytes: 1012930269
251
+ num_examples: 159719
252
+ - name: mmr_my
253
+ num_bytes: 313370839
254
+ num_examples: 109310
255
+ - name: mmr_shn
256
+ num_bytes: 33754296
257
+ num_examples: 13945
258
+ - name: mmr_mnw
259
+ num_bytes: 47321734
260
+ num_examples: 3296
261
+ - name: lao_lo
262
+ num_bytes: 15240262
263
+ num_examples: 5014
264
+ - name: vnm_vi
265
+ num_bytes: 1603057632
266
+ num_examples: 1288680
267
+ - name: khm_km
268
+ num_bytes: 103146669
269
+ num_examples: 11994
270
+ - name: tls_tet
271
+ num_bytes: 1454499
272
+ num_examples: 1468
273
+ download_size: 4959860254
274
+ dataset_size: 6214124665
275
+ - config_name: seawiki_with_countries_dedup_all
276
+ features:
277
+ - name: url
278
+ dtype: string
279
+ - name: title
280
+ dtype: string
281
+ - name: text
282
+ dtype: string
283
+ splits:
284
+ - name: idn_ace
285
+ num_bytes: 4944916
286
+ num_examples: 12979
287
+ - name: idn_ban
288
+ num_bytes: 18025267
289
+ num_examples: 20611
290
+ - name: idn_bjn
291
+ num_bytes: 6786207
292
+ num_examples: 10503
293
+ - name: idn_bug
294
+ num_bytes: 2182435
295
+ num_examples: 9969
296
+ - name: idn_gor
297
+ num_bytes: 6217480
298
+ num_examples: 15290
299
+ - name: idn_id
300
+ num_bytes: 1117891512
301
+ num_examples: 662443
302
+ - name: idn_jv
303
+ num_bytes: 71997517
304
+ num_examples: 73080
305
+ - name: idn_mad
306
+ num_bytes: 1612542
307
+ num_examples: 1192
308
+ - name: idn_map_bms
309
+ num_bytes: 5067489
310
+ num_examples: 11839
311
+ - name: idn_min
312
+ num_bytes: 116721269
313
+ num_examples: 225972
314
+ - name: idn_ms
315
+ num_bytes: 414783365
316
+ num_examples: 348045
317
+ - name: idn_nia
318
+ num_bytes: 2153274
319
+ num_examples: 1714
320
+ - name: idn_su
321
+ num_bytes: 47512744
322
+ num_examples: 61529
323
+ - name: idn_tet
324
+ num_bytes: 1452151
325
+ num_examples: 1464
326
+ - name: sgp_ms
327
+ num_bytes: 414783365
328
+ num_examples: 348045
329
+ - name: mys_ms
330
+ num_bytes: 414783365
331
+ num_examples: 348045
332
+ - name: brn_ms
333
+ num_bytes: 414783365
334
+ num_examples: 348045
335
+ - name: tha_th
336
+ num_bytes: 1012868861
337
+ num_examples: 159666
338
+ - name: mmr_my
339
+ num_bytes: 312990457
340
+ num_examples: 108819
341
+ - name: mmr_shn
342
+ num_bytes: 33616591
343
+ num_examples: 13662
344
+ - name: mmr_mnw
345
+ num_bytes: 47243333
346
+ num_examples: 3271
347
+ - name: lao_lo
348
+ num_bytes: 14908444
349
+ num_examples: 4897
350
+ - name: vnm_vi
351
+ num_bytes: 1602828123
352
+ num_examples: 1287910
353
+ - name: khm_km
354
+ num_bytes: 102698901
355
+ num_examples: 11466
356
+ - name: tls_tet
357
+ num_bytes: 1452151
358
+ num_examples: 1464
359
+ download_size: 4950689052
360
+ dataset_size: 6190305124
361
+ ---
362
+
363
+ # **SEA Wikipedia Data Repository**
364
+ ---
365
  license: cc-by-sa-3.0
366
  ---
367
+ Welcome to the SEA Wikipedia Data Repository. The datasets are extracted from [Wikipedia HF](https://huggingface.co/datasets/wikipedia) and processed using the scripts available in this repository for reproducibility purposes.
368
+
369
+ # **FAQs**
370
+ ### Which languages are available in this dataset, and from which countries?
371
+ You may check the following tables to understand the current coverage of this dataset (languages, countries, data size & volume).
372
+
373
+ #### 1. Table of Countries and Their Country Codes
374
+ | Country Code | Country Name | Wiki Info |
375
+ | :---: | :---: | :---: |
376
+ | brn | Brunei | [Wiki Link](https://en.wikipedia.org/wiki/Brunei) |
377
+ | idn | Indonesia | [Wiki Link](https://en.wikipedia.org/wiki/Indonesia) |
378
+ | khm | Cambodia | [Wiki Link](https://en.wikipedia.org/wiki/Cambodia) |
379
+ | lao | Laos | [Wiki Link](https://en.wikipedia.org/wiki/Laos) |
380
+ | mmr | Myanmar | [Wiki Link](https://en.wikipedia.org/wiki/Myanmar) |
381
+ | mys | Malaysia | [Wiki Link](https://en.wikipedia.org/wiki/Malaysia) |
382
+ | sgp | Singapore | [Wiki Link](https://en.wikipedia.org/wiki/Singapore) |
383
+ | tha | Thailand | [Wiki Link](https://en.wikipedia.org/wiki/Thailand) |
384
+ | tls | East Timor | [Wiki Link](https://en.wikipedia.org/wiki/East_Timor) |
385
+ | vnm | Vietnam | [Wiki Link](https://en.wikipedia.org/wiki/Vietnam) |
386
+
387
+ #### 2. Table of Languages and the Countries Where They Are Spoken
388
+ | Lang Code | Lang Name | Country Codes Spoken | Wiki Info | Total Articles | Total Size (bytes) |
389
+ | :---: | :---: | :---: | :--- | ---: | ---: |
390
+ | ace | Acehnese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Acehnese_language) | 12904 | 4867838 |
391
+ | ban | Balinese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Balinese_language) | 19837 | 17366080 |
392
+ | bjn | Banjarese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banjarese_language) | 10437 | 6655378 |
393
+ | bug | Buginese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Buginese_language) | 9793 | 2072609 |
394
+ | gor | Gorontalo | idn | [Wiki Link](https://en.wikipedia.org/wiki/Gorontalo_language) | 14514 | 5989252 |
395
+ | km | Khmer | khm | [Wiki Link](https://en.wikipedia.org/wiki/Khmer_language) | 11994 | 103146669 |
396
+ | id | Indonesian | idn | [Wiki Link](https://en.wikipedia.org/wiki/Indonesian_language) | 654287 | 1100932403 |
397
+ | jv | Javanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Javanese_language) | 72667 | 69774853 |
398
+ | lo | Lao | lao | [Wiki Link](https://en.wikipedia.org/wiki/Lao_language) | 5014 | 15240262 |
399
+ | mad | Madurese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Madurese_language) | 1192 | 1612542 |
400
+ | map_bms | Banyumasan <br />(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11832 | 5060989 |
401
+ | mnw | Mon | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Mon_language) | 3296 | 47321734 |
402
+ | min | Minangkabau | idn | [Wiki Link](https://en.wikipedia.org/wiki/Minangkabau_language) | 225858 | 116376870 |
403
+ | ms | Malay | mys, sgp, brn, idn | [Wiki Link](https://en.wikipedia.org/wiki/Malay_language) | 346186 | 410443550 |
404
+ | my | Burmese | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Burmese_language) | 109310 | 313370839 |
405
+ | nia | Nias | idn | [Wiki Link](https://en.wikipedia.org/wiki/Nias_language) | 1650 | 1938121 |
406
+ | shn | Shan | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Shan_language) | 13945 | 33754296 |
407
+ | su | Sundanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Sundanese_language) | 61494 | 47410439 |
408
+ | tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1465 | 1452716 |
409
+ | th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159719 | 1012930269 |
410
+ | vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1288680 | 1603057632 |
411
+
412
+
413
+ Some other SEA languages that already have a Wiki index at Wikimedia may still be missing from this list. Any PR to add a language is greatly appreciated!
414
+
415
+ ### How do I extract a new Wikipedia dataset of SEA languages?
416
+ You may check the script [_```extract_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data.py) to understand its implementation, or adjust the bash script provided in [_```extract_raw_wiki_data_sea.sh```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data_sea.sh) to extract the data on your own. Please note that this pipeline is extensible to any language of your choice.
417
+
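+ For illustration, here is a minimal sketch of what the extraction script does under the hood; the language code, dump date, and output path below are example values only, not fixed choices:
+ ```
+ from datasets import load_dataset
+
+ # pull one language dump via the Wikipedia HF loader and save it as CSV,
+ # mirroring the logic of extract_raw_wiki_data.py
+ df = load_dataset(
+     "wikipedia",
+     language="ace",
+     date="20231101",
+     beam_runner="DirectRunner",
+     split="train"
+ ).to_pandas()
+ df.to_csv("./wiki_ace_20231101_raw_dataset.csv", index=False)
+ ```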
418
+ ### Where can I check the latest available Wikipedia dumps and their language coverage?
419
+ You may visit the [Wikipedia Dump Index](https://dumps.wikimedia.org/backup-index.html) to check the latest available dumps, and the [Wikipedia Language Coverage](https://meta.wikimedia.org/wiki/List_of_Wikipedias#All_Wikipedias_ordered_by_number_of_articles) list to look up the codes of any languages you want to extract.
420
+
421
+ ### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF?
422
+ The data available here is processed with the following flow:
423
+ 1. The raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text usually placed when no information is available yet or to ask for content contributions), which is generally considered noisy for NLP data.
424
+ 2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication after light pre-processing (i.e. symbols removed, HTML tags stripped, or ASCII chars validated). You may check the [```dedup_raw_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) script to understand its implementation; a simplified sketch of both steps is shown below.
425
+
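+ As a rough illustration, the following pandas sketch approximates both steps (the real script is configurable and, for soft duplicates, keeps the row whose raw text is longest; the file names here are examples):
+ ```
+ import re
+ import pandas as pd
+
+ df = pd.read_csv("wiki_ace_20231101_raw_dataset.csv")
+
+ # step 1 - hard dedup: drop every row whose title or text appears more than once verbatim
+ for col in ["title", "text"]:
+     df = df[~df.duplicated(subset=col, keep=False)]
+
+ # step 2 - soft dedup: compare a lightly normalized form of the title (lowercased, whitespace squeezed)
+ def normalize(s: str) -> str:
+     return re.sub(r"\s+", " ", str(s)).strip().lower()
+
+ df["_norm_title"] = df["title"].map(normalize)
+ df = df.drop_duplicates(subset="_norm_title", keep="first").drop(columns="_norm_title")
+
+ df.to_csv("wiki_ace_20231101_dataset_dedup_cleansed.csv", index=False)
+ ```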
426
+ # Getting Started #
427
+ ### To read the datasets directly ###
428
+ Use one of the following code chunks to load it from the HuggingFace Hub.
429
+ You can pass the config name as the 2nd positional argument, as in the following snippet:
430
+ ```
431
+ from datasets import load_dataset
+
+ dataset = load_dataset(
432
+     "sabilmakbar/sea_wiki",
433
+     "seawiki_dedup_all"  # config name: one of "seawiki_all", "seawiki_dedup_all", "seawiki_with_countries_all", or "seawiki_with_countries_dedup_all" (defaults to "seawiki_dedup_all")
434
+ )
435
+ ```
436
+ Or you can provide both ```lang``` and ```date_stamp``` (or just ```lang```, in which case ```date_stamp``` defaults to the newest available dump):
437
+ ```
438
+ dataset = load_dataset(
439
+     "sabilmakbar/sea_wiki",
440
+     lang="id",  # see the lang table above for complete choices
441
+     date_stamp="20231101"  # currently the only dump version available in this repo
442
+ )
443
+ ```
444
+ Or you can provide a ```country``` param in a similar fashion to the ```lang``` arg (if both ```country``` and ```lang``` are provided, the ```lang``` kwarg takes priority):
445
+ ```
446
+ dataset = load_dataset(
447
+     "sabilmakbar/sea_wiki",
448
+     country="idn",  # see the country table above for complete choices
449
+     date_stamp="20231101"
450
+ )
451
+ ```
452
+
453
+ ### To replicate the whole dataset generation process ###
454
+ 1. Set up a new Python/Conda environment (recommended Python version: 3.9.6 to 3.9.18 or 3.10.0 to 3.10.13) and install the requirements of this codebase from ```requirements.txt``` via ```pip install -r requirements.txt```.
455
+ 2. Activate the chosen Python/Conda environment in which the requirements were installed.
456
+ 3. Force-install ```multiprocess==0.70.15``` via ```pip install multiprocess==0.70.15``` to avoid [this issue](https://github.com/huggingface/datasets/issues/5613#issuecomment-1703169594) (there's no other workaround for now, especially for Python 3.10.x).
457
+ 4. Run this ```sh``` script to extract data from the Wikimedia dump:<br>
458
+ ```sh extract_raw_wiki_data_sea.sh```.
459
+ 5. Run this ```sh``` script for deduplication:<br>
460
+ ```sh dedup_raw_wiki_data_sea.sh```.
461
+
462
+ ## Citation Info:
463
+ ```
464
+ @ONLINE{wikidump,
465
+ author = "Wikimedia Foundation",
466
+ title = "Wikimedia Downloads",
467
+ url = "https://dumps.wikimedia.org"}
468
+ @ONLINE{wikipedia-hf,
469
+ title = "Huggingface Wikipedia Dataset",
470
+ url = "https://huggingface.co/datasets/wikipedia"}
471
+ ```
dedup_raw_wiki_data.py ADDED
@@ -0,0 +1,414 @@
1
+ # %%
2
+ '''
3
+ Script on Cleansing Wikipedia Data that has been extracted from extract_raw_wiki_data.py
4
+ '''
5
+ #core functionality modules
6
+ import os, gc
7
+ import logging
8
+ import argparse
9
+ import warnings
10
+
11
+ from functools import partial
12
+
13
+ #text preprocess modules
14
+ import re
15
+ import urllib
16
+ from xml.etree import ElementTree as ET
17
+
18
+ #dataset related modules
19
+ import numpy as np
20
+ import pandas as pd
21
+
22
+
23
+ ### MODULES DEFINITION ###
24
+ #create custom type-checking of incoming ArgParse
25
+ def argparse_bool_check(value: str):
26
+ #cast str with value like float into actual float
27
+ try:
28
+ value = float(value)
29
+ #can't be parsed as float, keep as it is
30
+ except ValueError:
31
+ pass
32
+
33
+ #cast float-like value (incl int) into str
34
+ if isinstance(value, float) and int(value) == value:
35
+ value = str(int(value))
36
+ #raise ArgumentTypeError if the value isn't in string already
37
+ else:
38
+ if not isinstance(value, str):
39
+ raise argparse.ArgumentTypeError(f"Not the correct value (args: {value})! Expected is cast-able to '1' or '0' or already in string. Please rectify!")
40
+ #check for these combinations of values
41
+ if value.lower() in ("yes", "true", "t", "y", "1"):
42
+ return True
43
+ elif value.lower() in ("no", "false", "f", "n", "0"):
44
+ return False
45
+ else:
46
+ raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!")
47
+
48
+
49
+ def text_processing_args_checker(value: str):
50
+ if value not in ["all", "text", "title", "neither"]:
51
+ raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!")
52
+ else:
53
+ return value
54
+
55
+
56
+ def set_logger():
57
+ # Set up the logger
58
+ logging.basicConfig(
59
+ level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
60
+ format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
61
+ datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
62
+ )
63
+
64
+ # Create a file handler to write logs into a file
65
+ file_handler = logging.FileHandler('app.log')
66
+
67
+ # Set the log level for the file handler
68
+ file_handler.setLevel(logging.INFO)
69
+
70
+ # Create a formatter for the file handler (customize the log format for the file)
71
+ file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
72
+ file_handler.setFormatter(file_formatter)
73
+
74
+ logger = logging.getLogger("Wiki Dataset Generation")
75
+ logger.addHandler(file_handler)
76
+
77
+ return logger
78
+
79
+
80
+ #wrapper fn of text-cleansing
81
+ def text_cleansing_wrapper(fn, exception_class_names = []):
82
+
83
+ #ensure caught exception class names passed to decorator is a list (if provided)
84
+ if not isinstance(exception_class_names, list):
85
+ raise TypeError("Exception Class Name for Wrapper is not a list!")
86
+ #ensure all values of caught exception class name list is a string
87
+ if not all([isinstance(val, str) for val in exception_class_names]):
88
+ raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!")
89
+
90
+ #lowercase all exception class name
91
+ exception_class_names = [val.lower() for val in exception_class_names]
92
+ if len(exception_class_names) == 0:
93
+ warnings.warn("The wrapper receives 0 `exception_class_names` to be warned! Will return the function value with its input!")
94
+
95
+ def text_fn_wrapper(text: str, *args, **kwargs):
96
+ try:
97
+ return fn(text, *args, **kwargs)
98
+ except Exception as e:
99
+ _exc_name = type(e).__name__
100
+ if _exc_name.lower() not in exception_class_names and len(exception_class_names)>0:
101
+ raise Exception(f"Exception Occured of {_exc_name} in {fn.__name__}!") from e
102
+ else:
103
+ _followup_msg = "Returning the input as it is..."
104
+ _text_warn = f"An exception of {_exc_name} occurred in {fn.__name__}! {_followup_msg}"
105
+ warnings.warn(_text_warn)
106
+ return text
107
+
108
+ return text_fn_wrapper
109
+
110
+
111
+ #create html tags cleanser of a given text
112
+ partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"])
113
+ @partial_decorator
114
+ def remove_html_tags(text: str):
115
+ #extracted from "https://stackoverflow.com/a/9662410", w/ additional decorator of error handler
116
+ return (''.join(ET.fromstring(text).itertext())).strip()
117
+
118
+
119
+ #create url decoder of text
120
+ @text_cleansing_wrapper
121
+ def decode_url(text: str):
122
+ # return (urllib.parse.unquote(text)).encode('utf8', errors='ignore').decode().strip()
123
+ return (urllib.parse.unquote(text)).strip()
124
+
125
+ #create encoder check of text
126
+ @text_cleansing_wrapper
127
+ def check_text_by_encoder(text: str, encoder: str="utf8"):
128
+ return text.encode(encoder, errors='ignore').decode().strip()
129
+
130
+ #create excessive whitespace removal of text
131
+ @text_cleansing_wrapper
132
+ def remove_excessive_whitespace(text: str):
133
+ return re.sub("(\s)(\s+)", r"\1", text).strip()
134
+
135
+ #create non-alphanumeric removal of text
136
+ @text_cleansing_wrapper
137
+ def remove_non_alphanumeric(text: str):
138
+ return re.sub("[^a-z0-9\s]", "", text, flags=re.I).strip()
139
+
140
+ # def cleanse_wiki_text(text: str):
141
+ # return remove_html_tags(decode_url_and_remove_non_ascii(text))
142
+
143
+ # def normalize_wiki_title(text: str):
144
+ # return remove_non_alphanumeric(remove_excessive_whitespace(text.lower()))
145
+
146
+
147
+ def _text_normalizer_constructor(
148
+ remove_non_alphanumeric_bool: bool, remove_excessive_whitespace_bool: bool,
149
+ remove_html_tags_bool: bool, decode_url_bool: bool, encoder_check_bool: bool,
150
+ encoder: str="utf8"):
151
+
152
+ _lambda_fn_1 = partial(check_text_by_encoder, encoder=encoder) if encoder_check_bool else lambda x: x
153
+ _lambda_fn_2 = lambda x: remove_non_alphanumeric(_lambda_fn_1(x)) if remove_non_alphanumeric_bool else _lambda_fn_1(x)
154
+ _lambda_fn_3 = lambda x: remove_excessive_whitespace(_lambda_fn_2(x)) if remove_excessive_whitespace_bool else _lambda_fn_2(x)
155
+ _lambda_fn_4 = lambda x: remove_html_tags(_lambda_fn_3(x)) if remove_html_tags_bool else _lambda_fn_3(x)
156
+ _lambda_fn_5 = lambda x: decode_url(_lambda_fn_4(x)) if decode_url_bool else _lambda_fn_4(x)
157
+
158
+ return _lambda_fn_5
159
+
160
+
161
+ def _args_to_text_constructor_fn(**kwargs):
162
+
163
+ def _decode_options(opt: str):
164
+ # return decoded options with format `text_opt`, `title_opt`
165
+ # possible values are ["all", "text", "title", "neither"]
166
+ if opt == "all":
167
+ return True, True
168
+ elif opt == "text":
169
+ return True, False
170
+ elif opt == "title":
171
+ return False, True
172
+ else:
173
+ return False, False
174
+
175
+ kwargs_title, kwargs_text = {}, {}
176
+
177
+ kwargs_title["encoder"] = kwargs["text_encoder_choice_title"]
178
+ kwargs_text["encoder"] = kwargs["text_encoder_choice_text"]
179
+
180
+ for key, val in kwargs.items():
181
+ if key not in [
182
+ "remove_non_alphanumeric_option", "remove_excessive_whitespace_option",
183
+ "remove_html_tags_option", "decode_url_option", "encoder_check_option"]:
184
+ continue
185
+ new_key = "_".join(key.split("_")[:-1]) + "_bool"
186
+ text_opt_val, title_opt_val = _decode_options(val)
187
+ kwargs_text[new_key], kwargs_title[new_key] = text_opt_val, title_opt_val
188
+
189
+ return _text_normalizer_constructor(**kwargs_text), _text_normalizer_constructor(**kwargs_title)
190
+
191
+
192
+ def _text_processing_wrapper(text: str, _fn, mode: str="text"):
193
+ if mode not in ["text", "title"]:
194
+ raise ValueError(f"Provided `mode` isn't either 'text' or 'title'! Received: {mode}")
195
+ return _fn(text.lower()) if mode=="title" else _fn(text)
196
+
197
+
198
+ ### MAIN CODE ###
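+ # Example invocation (mirrors how dedup_raw_wiki_data_sea.sh calls this script; paths are illustrative):
+ # python dedup_raw_wiki_data.py --raw-csv-path ./sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv \
+ #     --drop-hard-dupl True --drop-soft-dupl True --save-dir-path ./sea_wiki_dedup_data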
199
+ if __name__ == "__main__":
200
+ parser = argparse.ArgumentParser()
201
+
202
+ parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data")
203
+
204
+ parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates
205
+ (exact values of data of relevant text fields, Titles & Desc)""",
206
+ default=True, type=argparse_bool_check)
207
+
208
+ parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates
209
+ (duplicates after cleansed and normalized relevant text fields, Titles & Desc)""",
210
+ default=True, type=argparse_bool_check)
211
+
212
+ parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
213
+ to the `dedup_raw_wiki_data.py` script dir""",
214
+ default=os.path.dirname(os.path.abspath(__file__)))
215
+
216
+ ### THE FOLLOWING ARGUMENTS ONLY TEMPORARILY ALTER THE TEXT DATA ONLY FOR SOFT-DEDUP CHECK ###
217
+ ### THE INITIAL TEXT DATA WON'T BE OVERWRITTEN AFTER BEING PREPROCESSED ###
218
+ ### UNLESS YOU ARE SPECIFYING IN ARGS `overwrite-initial-title-data` AND `overwrite-initial-text-data` ###
219
+
220
+ ### ARGS TO OVERWRITE INITIAL TEXT DATA WITH PROCESSED ONES ###
221
+ parser.add_argument("--overwrite-initial-title-data", help="""Flag whether to overwrite title
222
+ init data w/ processed data (True) or keep it as it is (False)""",
223
+ default=False, type=argparse_bool_check)
224
+
225
+ parser.add_argument("--overwrite-initial-text-data", help="""Flag whether to overwrite text
226
+ init data w/ processed data (True) or keep it as it is (False)""",
227
+ default=False, type=argparse_bool_check)
228
+
229
+ ### INSTANTIATOR ARGS FOR CONSTRUCTING TEXT PROCESSING FN TO BE APPLIED ###
230
+ parser.add_argument("--remove-non-alphanumeric-option", help="""Identifier which columns to be preprocessed
231
+ using `remove_non_alphanumeric` for soft duplicates detection
232
+ (Choices are "all", "text", "title", "neither")""",
233
+ default="neither", type=text_processing_args_checker)
234
+
235
+ parser.add_argument("--remove-excessive-whitespace-option", help="""Identifier which columns to be preprocessed
236
+ using `remove_excessive_whitespace` for soft duplicates detection
237
+ (Choices are "all", "text", "title", "neither")""",
238
+ default="all", type=text_processing_args_checker)
239
+
240
+ parser.add_argument("--remove-html-tags-option", help="""Identifier which columns to be preprocessed
241
+ using `remove_html_tags` for soft duplicates detection
242
+ (Choices are "all", "text", "title", "neither")""",
243
+ default="all", type=text_processing_args_checker)
244
+
245
+ parser.add_argument("--decode-url-option", help="""Identifier which columns to be preprocessed
246
+ using `decode_url` for soft duplicates detection
247
+ (Choices are "all", "text", "title", "neither")""",
248
+ default="all", type=text_processing_args_checker)
249
+
250
+ ### ARGS TO CHOOSE ENCODER CHECKING AND ITS CONFIG INITIALIZATION ###
251
+ parser.add_argument("--encoder-check-option", help="""Identifier which columns to be preprocessed
252
+ using `check_text_by_encoder` for soft duplicates detection
253
+ (Choices are "all", "text", "title", "neither")""",
254
+ default="all", type=text_processing_args_checker)
255
+
256
+ parser.add_argument("--text-encoder-choice-title", help="""Identifier of title encoder type
257
+ to be applied into `check_text_by_encoder` for soft duplicates detection""",
258
+ default="utf8", type=str)
259
+
260
+ parser.add_argument("--text-encoder-choice-text", help="""Identifier of text encoder type
261
+ to be applied into `check_text_by_encoder` for soft duplicates detection""",
262
+ default="utf8", type=str)
263
+
264
+
265
+ _EXPECTED_COLNAMES = ["id", "url", "title", "text"]
266
+
267
+ logger = set_logger()
268
+ logger.info("Parsing arguments...")
269
+
270
+ args = parser.parse_args()
271
+
272
+ # class dotdict(dict):
273
+ # """dot.notation access to dictionary attributes"""
274
+ # __getattr__ = dict.get
275
+ # __setattr__ = dict.__setitem__
276
+ # __delattr__ = dict.__delitem__
277
+
278
+ # args = dotdict({
279
+ # "raw_csv_path":"",
280
+ # "drop_hard_dupl": True,
281
+ # "drop_soft_dupl": True,
282
+ # "save_dir_path": os.path.dirname(os.path.abspath(__file__)),
283
+ # "overwrite_initial_title_data": False,
284
+ # "overwrite_initial_text_data": False,
285
+ # "remove_non_alphanumeric_option":"neither",
286
+ # "remove_excessive_whitespace_option": "neither",
287
+ # "remove_html_tags_option":"neither",
288
+ # "decode_url_option":"neither",
289
+ # "encoder_check_option":"all",
290
+ # "text_encoder_choice_title":"utf8",
291
+ # "text_encoder_choice_text":"utf8"
292
+ # })
293
+
294
+ _TEXT_PROCESSING_FN, _TITLE_PROCESSING_FN = _args_to_text_constructor_fn(
295
+ remove_non_alphanumeric_option = args.remove_non_alphanumeric_option,
296
+ remove_excessive_whitespace_option = args.remove_excessive_whitespace_option,
297
+ remove_html_tags_option = args.remove_html_tags_option,
298
+ decode_url_option = args.decode_url_option,
299
+ encoder_check_option = args.encoder_check_option,
300
+ text_encoder_choice_title = args.text_encoder_choice_title,
301
+ text_encoder_choice_text = args.text_encoder_choice_text
302
+ )
303
+
304
+ raw_data_path = args.raw_csv_path
305
+ drop_hard_dupl = args.drop_hard_dupl
306
+ drop_soft_dupl = args.drop_soft_dupl
307
+ save_dir = args.save_dir_path
308
+
309
+ overwrite_initial_title_data = args.overwrite_initial_title_data
310
+ overwrite_initial_text_data = args.overwrite_initial_text_data
311
+
312
+
313
+ df = pd.read_csv(raw_data_path)
314
+ if len(set(df.columns).difference(set(_EXPECTED_COLNAMES))) != 0 or len(set(_EXPECTED_COLNAMES).difference(set(df.columns))) != 0:
315
+ raise ValueError(f"The data schema expected, consist of columns: {', '.join(df.columns.to_list())} doesn't match with expected column values of {', '.join(_EXPECTED_COLNAMES)}!")
316
+
317
+ if (not drop_hard_dupl) and (not drop_soft_dupl):
318
+ raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!")
319
+ elif (not drop_hard_dupl):
320
+ warnings.warn("The args of `drop_hard_dupl` isn't turned off! Possibly the data will contain one template value of Wikipedia (usually no contribution text!)")
321
+
322
+ #will save id identifier colname first (popping first list val)
323
+ id_colname = _EXPECTED_COLNAMES.pop(0)
324
+
325
+ # if any of the data has duplicate values from columns checked (url, title, or text),
326
+ # it means the data integrity is questionable
327
+ # i.e. copied from other article or filled with template text
328
+ # hence, we will delete those duplicated datasets
329
+
330
+ #hard duplicate drop (drop all duplicate values that has exact same text on expected unique colnames)
331
+ if drop_hard_dupl:
332
+
333
+ for colname in _EXPECTED_COLNAMES:
334
+ logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...")
335
+ dupl_text_df = df[df.duplicated(subset=colname,keep=False)]
336
+ shape_of_dupl_data = dupl_text_df.shape[0]
337
+
338
+ if shape_of_dupl_data > 0:
339
+ logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
340
+ df.drop_duplicates(subset=colname, keep=False, inplace=True)
341
+
342
+
343
+ #check id/idx of the cleansed data, whether it has duplicate
344
+ # (the duplication of id/idx should came from the very first extraction, not from the cleansing)
345
+
346
+ if df[df.duplicated(subset=id_colname,keep=False)].shape[0] > 0:
347
+ logger.info("Duplicated ID found! Re-assigning ID to the new ones based on `df.reset_index` method!")
348
+ df[id_colname] = df.reset_index().index
349
+
350
+ #soft duplicate drop (drop all except one duplicate values that has exact same text on expected unique colnames)
351
+ #keep the data that has longest value of its raw form
352
+ if drop_soft_dupl:
353
+
354
+ idx_to_keep = set(df.index.to_list())
355
+ #clean from text & title only, url isn't needed for this process
356
+ _EXPECTED_COLNAMES.remove("url")
357
+
358
+ for colname in _EXPECTED_COLNAMES:
359
+ #Construct Text Cleanser Fn for soft-duplicate cleansing
360
+ _PROCESSING_FN = _TEXT_PROCESSING_FN if colname == "text" else _TITLE_PROCESSING_FN
361
+ text_processing_fn = partial(_text_processing_wrapper, _fn=_PROCESSING_FN, mode=colname)
362
+ logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...")
363
+ _df = df.copy(deep=True)
364
+
365
+ #Setting up DF cols as String so it can be text-processed
366
+ _df = _df[[colname]]
367
+ _df[colname] = _df[colname].astype("str")
368
+ logger.info(f"Cleansing the data based on {colname}")
369
+
370
+ #applying text processing
371
+ _df[colname+"_raw_len"] = _df[colname].apply(len)
372
+ _df[colname+"_cleansed"] = _df[colname].apply(lambda row_text: text_processing_fn(text=row_text))
373
+
374
+ #overwrite its text data if set as true
375
+ if overwrite_initial_title_data and colname == "title":
376
+ df[colname] = _df[colname+"_cleansed"]
377
+ elif overwrite_initial_text_data and colname == "text":
378
+ df[colname] = _df[colname+"_cleansed"]
379
+
380
+ #choose the data to keep by "ranking" it according to len of its raw text (greatest to keep)
381
+ logger.info(f"Ranking and grouping the data based on {colname}")
382
+ _df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False)
383
+ shape_of_dupl_data = _df[_df["rk"]>1].shape[0]
384
+
385
+ if shape_of_dupl_data > 0:
386
+ logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped")
387
+ _idx_to_keep = _df[_df["rk"]==1].index.to_list()
388
+ if len(_idx_to_keep)+shape_of_dupl_data != df.shape[0]:
389
+ raise AssertionError("Mismatch of data number!")
390
+ idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep))
391
+ else:
392
+ logger.info(f"No soft-duplicate found in colname {colname}. Continuing")
393
+
394
+ del _df
395
+ gc.collect()
396
+
397
+ logger.info(f"The final data kept is {len(idx_to_keep)} from {df.shape[0]}")
398
+ df = df.loc[list(idx_to_keep),:]
399
+
400
+ logger.info("Saving dataset cleansed form...")
401
+ #input path splitted by ("/") for the last entry should return filename
402
+ #whereas the filename splitted by (".") except the last value should return the filename w/o ".csv" extension
403
+
404
+ _override_suffix_identifier = ""
405
+ if overwrite_initial_title_data or overwrite_initial_text_data:
406
+ _override_suffix_identifier = "_overwritten"
407
+ if overwrite_initial_text_data:
408
+ _override_suffix_identifier = "_text"+_override_suffix_identifier
409
+ if overwrite_initial_title_data:
410
+ _override_suffix_identifier = "_title"+_override_suffix_identifier
411
+
412
+ _save_file_name = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + "_dedup_cleansed" + _override_suffix_identifier + ".csv"
413
+ _save_file_name = _save_file_name.replace("_raw", "")
414
+ df.to_csv(f"{save_dir}/{_save_file_name}", index=False)
dedup_raw_wiki_data_sea.sh ADDED
@@ -0,0 +1,61 @@
1
+ #!/bin/bash
2
+
3
+ # all available lang codes in SEA local-languages or linguistically-related to following countries in SEA:
4
+ # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
5
+ # Singapore: "ms" (Malay)
6
+ # Malaysia: "ms" (Malay)
7
+ # Brunei: "ms" (Malay)
8
+ # Thailand: "th" (Thai)
9
+ # Myanmar: "my" (Burmese), "shn" (Shan), "mnw" (Mon)
10
+ # Laos: "lo" (Lao)
11
+ # Vietnam: "vi" (Vietnamese)
12
+ # Cambodia: "km" (Khmer)
13
+ # East Timor: "tet" (Tetum)
14
+
15
+ #params of executions
16
+ folder_dir_to_save=./sea_wiki_dedup_data
17
+ input_folder_to_be_dedup=./sea_wiki_raw_data
18
+
19
+ drop_hard_dupl=True
20
+ drop_soft_dupl=True
21
+
22
+
23
+ # main executions
24
+
25
+ # src: https://stackoverflow.com/a/18887210 (to list all files under a dir)
26
+ shopt -s nullglob
27
+ file_name_array=($input_folder_to_be_dedup/*)
28
+ shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later
29
+ file_name_array="${file_name_array}"
30
+
31
+ if [ ${#file_name_array[@]} == 0 ]; then
32
+ echo "No files found under directory $input_folder_to_be_dedup" >&2
33
+ fi
34
+
35
+ if [ ! -d $folder_dir_to_save ];
36
+ then
37
+ echo "Dir $folder_dir_to_save not exists! Creating the dir..."
38
+ mkdir $folder_dir_to_save
39
+ fi
40
+
41
+ echo "The params hard-dedup drop is set as $drop_hard_dupl"
42
+ echo "The params soft-dedup drop is set as $drop_soft_dupl"
43
+
44
+ for val in ${!file_name_array[@]}; do
45
+ csv_path=${file_name_array[$val]}
46
+
47
+ if [[ ${csv_path} != *".csv" ]]; then
48
+ echo "The extracted file name isn't a CSV! Skipping! Received $csv_path"
49
+ continue
50
+ fi
51
+
52
+ echo "Executing Dedup on iteration no "$((val+1))" of total ${#file_name_array[@]} for input data $csv_path"
53
+ #see the script bcs there are more args than this command is using
54
+ python dedup_raw_wiki_data.py \
55
+ --raw-csv-path $csv_path \
56
+ --drop-hard-dupl $drop_hard_dupl \
57
+ --drop-soft-dupl $drop_soft_dupl \
58
+ --save-dir-path $folder_dir_to_save
59
+ echo "Done Execution"
60
+ done
61
+ echo "Done Dedup Process"
extract_raw_wiki_data.py ADDED
@@ -0,0 +1,73 @@
1
+ '''
2
+ Script for extracting the Wikipedia data dumped to https://dumps.wikimedia.org/
3
+ More info can be read on https://huggingface.co/datasets/wikipedia
4
+ -------------------
5
+ Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html
6
+ Also check here to see language meta from its code: https://meta.wikimedia.org/wiki/List_of_Wikipedias
7
+ '''
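+ # Example invocation (mirrors how extract_raw_wiki_data_sea.sh calls this script; values are illustrative):
+ # python extract_raw_wiki_data.py --lang-id ace --date-ver 20231101 --save-dir-path ./sea_wiki_raw_data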
8
+
9
+ import os, gc
10
+ import logging
11
+ import argparse
12
+
13
+ import pandas as pd
14
+ from datasets import load_dataset
15
+
16
+
17
+ def set_logger():
18
+ # Set up the logger
19
+ logging.basicConfig(
20
+ level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
21
+ format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format
22
+ datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format
23
+ )
24
+
25
+ # Create a file handler to write logs into a file
26
+ file_handler = logging.FileHandler('app.log')
27
+
28
+ # Set the log level for the file handler
29
+ file_handler.setLevel(logging.INFO)
30
+
31
+ # Create a formatter for the file handler (customize the log format for the file)
32
+ file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
33
+ file_handler.setFormatter(file_formatter)
34
+
35
+ logger = logging.getLogger("Wiki Dataset Generation")
36
+ logger.addHandler(file_handler)
37
+
38
+ return logger
39
+
40
+
41
+ #only executed if called directly
42
+ if __name__ == "__main__":
43
+ parser = argparse.ArgumentParser()
44
+
45
+ parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract")
46
+
47
+ parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract")
48
+
49
+ parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data
50
+ to the `extract_raw_wiki_data.py` script dir""",
51
+ default=os.path.dirname(os.path.abspath(__file__)))
52
+
53
+ args = parser.parse_args()
54
+
55
+
56
+ dset_name = "wikipedia"
57
+
58
+ logger = set_logger()
59
+ logger.info("Parsing arguments...")
60
+
61
+ lang_id = args.lang_id
62
+ date_ver = args.date_ver
63
+ save_dir = args.save_dir_path
64
+
65
+ logger.info("Loading the dataset from Wikipedia...")
66
+ df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner', split="train").to_pandas()
67
+ logger.info("Loading done!")
68
+ logger.info(f"#Data collected: {df.shape[0]}")
69
+ logger.info("Saving dataset raw form...")
70
+ df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False)
71
+
72
+ del df
73
+ gc.collect()
extract_raw_wiki_data_sea.sh ADDED
@@ -0,0 +1,37 @@
1
+ #!/bin/bash
2
+
3
+ # all available lang codes in SEA local-languages or linguistically-related to following countries in SEA:
4
+ # Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum)
5
+ # Singapore: "ms" (Malay)
6
+ # Malaysia: "ms" (Malay)
7
+ # Brunei: "ms" (Malay)
8
+ # Thailand: "th" (Thai)
9
+ # Myanmar: "my" (Burmese), "shn" (Shan), "mnw" (Mon)
10
+ # Laos: "lo" (Lao)
11
+ # Vietnam: "vi" (Vietnamese)
12
+ # Cambodia: "km" (Khmer)
13
+ # East Timor: "tet" (Tetum)
14
+
15
+ #params of executions
16
+ date_ver=20231101
17
+ folder_dir_to_save=./sea_wiki_raw_data
18
+ lang_list=(ace ban bjn bug gor id km lo jv mad map-bms my min mnw ms nia su shn tet th vi)
19
+
20
+
21
+ #main executions
22
+
23
+ if [ ! -d $folder_dir_to_save ]; then
24
+ echo "Dir $folder_dir_to_save not exists! Creating the dir..."
25
+ mkdir $folder_dir_to_save
26
+ fi
27
+
28
+ for val in ${!lang_list[@]}; do
29
+ lang=${lang_list[$val]}
30
+ echo "Executing Extractor on iteration no $((val+1)) of total ${#lang_list[@]} for language $lang and date version of $date_ver"
31
+ python extract_raw_wiki_data.py \
32
+ --lang-id $lang \
33
+ --date-ver $date_ver \
34
+ --save-dir-path $folder_dir_to_save
35
+ echo "Done Execution"
36
+ done
37
+ echo "Done Extraction Process"
requirements.txt ADDED
@@ -0,0 +1,6 @@
1
+ datasets==2.14.6
2
+ pandas==2.1.0
3
+ fsspec==2023.9.1
4
+ apache-beam==2.50.0
5
+ dill~=0.3.1.0
6
+ numpy==1.24.4
sea_wiki.py ADDED
@@ -0,0 +1,246 @@
1
+ """The Southeast Asia Lan Wiki Loader"""
2
+
3
+ import os
4
+ import re
5
+
6
+ from functools import reduce
7
+
8
+ import numpy as np
9
+ import pandas as pd
10
+
11
+ import datasets
12
+
13
+
14
+ _CITATIONS = """\
15
+ @ONLINE{wikidump,
16
+ author = "Wikimedia Foundation",
17
+ title = "Wikimedia Downloads",
18
+ url = "https://dumps.wikimedia.org"}
19
+
20
+ @ONLINE{wikipedia-hf,
21
+ title = "Huggingface Wikipedia Dataset",
22
+ url = "https://huggingface.co/datasets/wikipedia"}"""
23
+
24
+ _REPO_URL = "https://huggingface.co/datasets/sabilmakbar/sea_wiki"
25
+
26
+ _LICENSE = (
27
+ "This work is licensed under the Creative Commons Attribution-ShareAlike "
28
+ "3.0 Unported License. To view a copy of this license, visit "
29
+ "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
30
+ "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
31
+ )
32
+
33
+
34
+ _SEA_WIKI_RAW_DESCRIPTION = """\
35
+ The Southeast Asia Wikipedia Data Repository contains Wikipedia data from Wikipedia HF, covering
36
+ all available national and local languages across Southeast Asia, some of which
37
+ are considered low-resource or extremely low-resource languages"""
38
+
39
+ _SEA_WIKI_DEDUP_DESCRIPTION = """\
40
+ This is a derivative of South East Asia Wikipedia Data Repository which is already pre-processed
41
+ by identifying and dropping duplicates to prevent boilerplate texts occuring in dataset"""
42
+
43
+ _AVAILABLE_DUMP_VERSION_DATE = ["20231101"]
44
+
45
+ # map from alpha-3 country codes to ISO-639 3 lang codes
46
+ # alpha-3 codes: https://www.iban.com/country-codes
47
+ # ISO-639 codes: https://iso639-3.sil.org/code_tables/639/data
48
+ _COUNTRY_TO_LANG_MAPPER = {
49
+ "idn": ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "mad", "map-bms", "min", "ms", "nia", "su", "tet"],
50
+ "sgp": ["ms"],
51
+ "mys": ["ms"],
52
+ "brn": ["ms"],
53
+ "tha": ["th"],
54
+ "mmr": ["my", "shn", "mnw"],
55
+ "lao": ["lo"],
56
+ "vnm": ["vi"],
57
+ "khm": ["km"],
58
+ "tls": ["tet"]}
59
+
60
+ _AVAILABLE_DUMP_LANGUAGES = reduce(np.union1d, list(_COUNTRY_TO_LANG_MAPPER.values()))
61
+
62
+ _LATEST_DUMP_VERSION_DATE = sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1]
63
+
64
+
65
+ def _construct_dset_url_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
66
+ _mode_to_folder_mapper = {"dedup": "sea_wiki_dedup_data", "raw": "sea_wiki_raw_data"}
67
+ _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv", "raw": "raw_dataset.csv"}
68
+
69
+ return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
70
+
71
+
72
+ class SEAWikiConfig(datasets.BuilderConfig):
73
+ """BuilderConfig for SEAWiki."""
74
+
75
+ def __init__(self, description: str=None, features: list=['url', 'title', 'text'],
76
+ data_url: str=None, date_stamp: str=_LATEST_DUMP_VERSION_DATE, country: str=None,
77
+ lang: str=None, mode = "dedup", **kwargs):
78
+ """BuilderConfig for SEAWiki.
79
+
80
+ Args:
81
+ description: `string`, description of dataset
82
+ features: `list[string]`, list of the features that will appear in the
83
+ feature dict. Should not include "label" if it's a supervised task.
84
+ data_url: `string`, url to download the data.
85
+ date_stamp: `string`, wikidump date_stamp for data available in repo.
86
+ lang: `string`, language to be loaded.
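+ country: `string`, alpha-3 country code; loads every language mapped to that country (ignored when `lang` is given).
+ mode: `string`, either "dedup" or "raw", selecting which pre-processed data folder to read.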
87
+ **kwargs: keyword arguments forwarded to super.
88
+ """
89
+ # validate configs
90
+ if mode not in ["dedup", "raw"]:
91
+ raise ValueError(f"Error occured! Expected values are 'dedup' or 'raw' for arg `mode`, received {mode}!")
92
+
93
+ if ((lang is None and country is None) or date_stamp is None) and data_url is None:
94
+ raise ValueError("Expected `data_url` is provided or both `date_stamp` and `lang` or `country` are provided!")
95
+
96
+ _mode_to_desc_mapper = {"dedup": _SEA_WIKI_DEDUP_DESCRIPTION, "raw": _SEA_WIKI_RAW_DESCRIPTION}
97
+
98
+ if date_stamp is not None and date_stamp not in _AVAILABLE_DUMP_VERSION_DATE:
99
+ raise ValueError("Provided `date_stamp` dataset versioning doesn't match! Please re-check")
100
+
101
+ if lang is not None and lang not in _AVAILABLE_DUMP_LANGUAGES:
102
+ raise ValueError("Provided `lang` doesn't match! Please re-check")
103
+
104
+ if country is not None and country not in _COUNTRY_TO_LANG_MAPPER.keys() and lang is None:
105
+ raise ValueError("Provided `country` doesn't match! Please re-check")
106
+
107
+ super(SEAWikiConfig, self).__init__(**kwargs)
108
+ self.features = features
+ # keep lang & country accessible for split generation (read via `self.config.lang` / `self.config.country`)
+ self.lang = lang
+ self.country = country
109
+
110
+ # prioritize kwargs data_url
111
+ if data_url is not None:
112
+ self.data_url = data_url
113
+ # prioritize lang provided over country
114
+ elif lang is not None:
115
+ self.data_url = _construct_dset_url_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode)
116
+ # if only country provided, create dict of langs
117
+ elif country is not None:
118
+ self.data_url = {lang: _construct_dset_url_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode) for lang in _COUNTRY_TO_LANG_MAPPER[country]}
119
+
120
+ # auto-construct desc if not provided
121
+ if description is None:
122
+ self.description = _mode_to_desc_mapper[mode] + "\n" + f"Extracted from file path {self.data_url}"
123
+
124
+ #define citations & info URL internally in config class
125
+ self.citation = _CITATIONS
126
+ self.url = _REPO_URL
127
+
128
+
129
+ class SEAWiki(datasets.GeneratorBasedBuilder):
130
+ """The SEAWiki Dataset."""
131
+
132
+ # if name isn't provided, will create a dataset of all languages
133
+ DEFAULT_CONFIG_NAME = "seawiki_dedup_all"
134
+ BUILDER_CONFIG_CLASS = SEAWikiConfig
135
+
136
+ # construct data-url with countries a list of spoken langs as value
137
+ _newest_data_raw_all_langs = [_construct_dset_url_from_dset_version_and_lang(
138
+ date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="raw") for lang in _AVAILABLE_DUMP_LANGUAGES]
139
+ _newest_data_dedup_all_langs = [_construct_dset_url_from_dset_version_and_lang(
140
+ date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="dedup") for lang in _AVAILABLE_DUMP_LANGUAGES]
141
+
142
+ # construct data-url with countries as key-dict, being country code as key and list of spoken langs as value
143
+ _newest_data_raw_with_countries_all_langs = {
144
+ country: [_construct_dset_url_from_dset_version_and_lang(date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="raw") for lang in lang_list]
145
+ for country, lang_list in _COUNTRY_TO_LANG_MAPPER.items()}
146
+ _newest_data_dedup_with_countries_all_langs = {
147
+ country: [_construct_dset_url_from_dset_version_and_lang(date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="dedup") for lang in lang_list]
148
+ for country, lang_list in _COUNTRY_TO_LANG_MAPPER.items()}
149
+
150
+ BUILDER_CONFIGS = [
151
+ SEAWikiConfig(
152
+ name="seawiki_all",
153
+ description=_SEA_WIKI_RAW_DESCRIPTION,
154
+ data_url=_newest_data_raw_all_langs
155
+ ),
156
+ SEAWikiConfig(
157
+ name="seawiki_dedup_all",
158
+ description=_SEA_WIKI_DEDUP_DESCRIPTION,
159
+ data_url=_newest_data_dedup_all_langs
160
+ ),
161
+ SEAWikiConfig(
162
+ name="seawiki_with_countries_all",
163
+ description=_SEA_WIKI_RAW_DESCRIPTION,
164
+ data_url=_newest_data_raw_with_countries_all_langs
165
+ ),
166
+ SEAWikiConfig(
167
+ name="seawiki_with_countries_dedup_all",
168
+ description=_SEA_WIKI_DEDUP_DESCRIPTION,
169
+ data_url=_newest_data_dedup_with_countries_all_langs
170
+ ),
171
+ ]
172
+
173
+
174
+     def _info(self):
+         features = {feature: datasets.Value("string") for feature in self.config.features}
+
+         return datasets.DatasetInfo(
+             description=self.config.description,
+             features=datasets.Features(features),
+             homepage=self.config.url,
+             citation=self.config.citation,
+             license=_LICENSE)
+
+
+     @staticmethod
+     def _get_lang_name_from_data_url(data_url: str):
+         # the lang code occurs after "wiki_" and before the date versioning (an 8-digit date)
+         _list_folder_sep = data_url.split("/")[-1].split("_")
+         _min_pos = min([pos for pos, data in enumerate(_list_folder_sep) if bool(re.search(r"\d{8}", data))])
+         return re.sub(r"[^\w\.]", "_", "_".join(_list_folder_sep[1:_min_pos]))
+
+
+     def _split_generators(self, dl_manager):
+
+         # handle configs "seawiki_all" and "seawiki_dedup_all", plus custom configs where only a country is provided (take all langs spoken in that country)
+         if self.config.name in ("seawiki_all", "seawiki_dedup_all") or (self.config.country is not None and self.config.lang is None):
+             file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
+             dl_dir = dl_manager.download_and_extract(file_dict)
+
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split(split_name),
+                     gen_kwargs={
+                         "data_file": file_name
+                     }
+                 )
+                 for split_name, file_name in dl_dir.items()]
+
+         # handle configs "seawiki_with_countries_all" and "seawiki_with_countries_dedup_all"
+         elif self.config.name in ("seawiki_with_countries_all", "seawiki_with_countries_dedup_all"):
+             file_dict = {}
+
+             for country, file_list in self.config.data_url.items():
+                 for file in file_list:
+                     file_dict[country + "_" + self._get_lang_name_from_data_url(file)] = file
+
+             dl_dir = dl_manager.download_and_extract(file_dict)
+
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split(split_name),
+                     gen_kwargs={
+                         "data_file": file_name
+                     }
+                 )
+                 for split_name, file_name in dl_dir.items()]
+
+         # handle custom configs where a lang is provided
+         elif self.config.lang is not None:
+             dl_dir = dl_manager.download_and_extract(self.config.data_url)
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "data_file": dl_dir
+                     },
+                 )
+             ]
+
+
+     def _generate_examples(self, data_file):
+         pd_df = pd.read_csv(data_file)
+         for _, row in pd_df.iterrows():
+             example = {feature: row[feature] for feature in self.config.features}
+             idx = row["id"]
+             yield idx, example
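
For reference, a minimal usage sketch of the configs defined above, assuming the loading script is saved locally as sea_wiki.py and run from the repository root; extra keyword arguments passed to `load_dataset` are forwarded to `SEAWikiConfig`. The `lang` value below follows the dump files shipped in this commit, while the `country` placeholder is left unfilled and must be one of the keys of `_COUNTRY_TO_LANG_MAPPER`:

    from datasets import load_dataset

    # default config "seawiki_dedup_all": one split per available language
    dset = load_dataset("sea_wiki.py")

    # custom single-language config: the Indonesian dump exposed as a single TRAIN split
    # (a `mode` kwarg can also be passed, using the same modes as above: "raw" or "dedup")
    dset_id = load_dataset("sea_wiki.py", lang="id")

    # custom country config: one split per language spoken in the chosen country
    # dset_country = load_dataset("sea_wiki.py", country="<country-code>")
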
sea_wiki_dedup_data/wiki_ace_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f52ce2ae7c8d340a30014bd036bc0806f5782f5a0856f80ffe3bf80f71b33152
+ size 4938934
sea_wiki_dedup_data/wiki_ban_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:325006efe32de9ee07f718cb187094a358e9a50577a4945470471c798bfa4d2b
+ size 18034158
sea_wiki_dedup_data/wiki_bjn_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a0914f929502fa632e764dadeddf49969b45ccf35d9933743bd4e9e2b16ea0f
+ size 6791315
sea_wiki_dedup_data/wiki_bug_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc2a1d85b4306eb2d0cf74db4ca2d997b0ea20e2f4016167aeb66394ac5b9b59
+ size 2172844
sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0607ee607dd6c00c5eb6729efc0c38a21ea9e10da39005e34373959e179e7707
+ size 6222508
sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49f61d796d89cb4f8877a80494a4b26ee379bb3602de96dfc455d8cdbd8661fd
+ size 1120126829
sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f36dba2f186c2a710cac07918a218ca74006d8940fc8fd1955f4122725efd43
+ size 72052487
sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0f043cd2dddb22a3076abe0af5805b6b8130678aa6650bbdb158f91cb6e1b30
+ size 102709279
sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47d4132398f9002803a88454ad4318a9f2f41c11281a9f085c23255810beccd6
+ size 14905688
sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:517777f9ec0da20ce5ccf1f70cca6d2a8192452745afefc6288ff360dad4ee7c
+ size 1610155
sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49bee130293507159198f83422c654d5e0000e6e20d27ed5f5afeb378a663967
+ size 5076335
sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6debf5f9204262a3bcdcb37966873f877158882c435a3b5cae55a01ad1418a3f
+ size 116663617
sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daf4ec4e087fcf46c2ebf71a3db8e11b8bdbfbbe0eacd9e5efc6d9f9c5b6b6d2
+ size 47243726
sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c1aca444ed1a161d30069d3106f604597cc9e3e48267b223d2ef3cf7b52fa7c
+ size 415339805
sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffd9ad7c34a340d4ae62820dccede45d515dc69b145340f4dc5485f01d83745f
+ size 312976625
sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3895f612ef2366104e669eac8acd5ec13646ab4dd388a9372422bbcdfbbe45d6
+ size 2151317
sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05d1589795708541a95366f18528a47c5ac95f4e287291a19d5589f03183cf8f
+ size 33599756
sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32b758f801e232d271882a619b7107237b15964dc467b16a5867493cfdb1b655
+ size 47525184
sea_wiki_dedup_data/wiki_tet_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:550083c7c657441c29cf8de3a8dc6dcf9506bc472c0000d15b88af7bbd855699
+ size 1450499
sea_wiki_dedup_data/wiki_th_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:793f1b801eecdcc9d4b1bd00a384957de08b916f491bf62b1c73e5792e3bcfa9
+ size 1013480563
sea_wiki_dedup_data/wiki_vi_20231101_dataset_dedup_cleansed.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ce2151076f483b2885e580d2d0b392753a68443eca21776ce8eb5373b785499
+ size 1605617428
sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3e723d9930ae552692b1c45c5463acfb4632c71ed3736e1e49c0f21a0c5086e
+ size 4946116
sea_wiki_raw_data/wiki_ban_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8b51380f75333cf170c29383bab07d24800a7cd19491a094f7f78ccb018f128
+ size 18207081
sea_wiki_raw_data/wiki_bjn_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5e1b9f81f122388fcc774b1f70f63bfe331aeae367c60ed1437811fb2f87538
+ size 6797366
sea_wiki_raw_data/wiki_bug_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a4f9e90c1e589096a213c808f8a3067046c3d56f538bd16f48108d9d2f513a4
+ size 3280997
sea_wiki_raw_data/wiki_gor_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ebaa34881179ed7ded2f6c7640c984473086dfece48f769d61a9da7503e8a57
+ size 6244189
sea_wiki_raw_data/wiki_id_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77a4ec9e94f097b1d40eabe435231cea329d97cf17eb77dd3f84a2582f3497eb
+ size 1121070688
sea_wiki_raw_data/wiki_jv_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58dfa79e01e4518bc9af414daf98836072ad57d5f863e825e540e000e0d94826
+ size 72156567
sea_wiki_raw_data/wiki_km_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a42cd60f4790892c6e3b3a807901f1fdcbcf8ddbe42108e6686bbcca89f71ec
+ size 103156370
sea_wiki_raw_data/wiki_lo_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63ccd7122594e6367faf454a3ea838bb9341bf82774eecec878573a2bd8cf547
+ size 15237378
sea_wiki_raw_data/wiki_mad_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:517777f9ec0da20ce5ccf1f70cca6d2a8192452745afefc6288ff360dad4ee7c
+ size 1610155
sea_wiki_raw_data/wiki_map-bms_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1ecc56766b3297f6b19946c856898c0a1bfe7f2f472245d5e27001840560729
+ size 5227494
sea_wiki_raw_data/wiki_min_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c817fad237578a7ef319b82c35b1e6925699c28310f590e631aaa9f51477978c
+ size 116764626
sea_wiki_raw_data/wiki_mnw_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9f20ff42eeedfe51b6e7e6f73e16feb30afc185e2e0e95459b2efd9053a84c9
+ size 47322074
sea_wiki_raw_data/wiki_ms_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e21c0d9a01340b911bcd7396fd48202e0ccf98a9e18fef91a07590aa39896ba6
+ size 420222523
sea_wiki_raw_data/wiki_my_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bb0ec1fc163fbafc31e0c8c8618bc1c23740a010e201c16468cfa67ebf4cc2e
+ size 313356770
sea_wiki_raw_data/wiki_nia_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3895f612ef2366104e669eac8acd5ec13646ab4dd388a9372422bbcdfbbe45d6
+ size 2151317
sea_wiki_raw_data/wiki_shn_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc6e89ac9b6d938d9454b06be1aaf58d0d382e6f2793139791ec0ebfdf5354ba
+ size 33737143
sea_wiki_raw_data/wiki_su_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19379716ae3995ac5ad7e9528e5a9ccd1e5fcfbe01b80e66356317fe8ca79b03
+ size 47528683
sea_wiki_raw_data/wiki_tet_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dde3d9f8f978adb7c4c26bbcd252a53832fe721217e614561e0e9f8459ee8308
+ size 1452853
sea_wiki_raw_data/wiki_th_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6be292a3229a6581e05fa56149fccc493fa2a08b21615f4d3098467779f0020c
+ size 1013541968
sea_wiki_raw_data/wiki_vi_20231101_raw_dataset.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03b866efb43b4605fec03716ac71aa5d60612956f6fb2d3f283224c6d7f317aa
+ size 1605847896