sabilmakbar commited on
Commit
444ec85
1 Parent(s): dae695e

Update dset loader and README

Browse files
Files changed (2) hide show
  1. README.md +53 -52
  2. sea_wiki.py +18 -5
README.md CHANGED
@@ -3,6 +3,36 @@ annotations_creators:
3
  - no-annotation
4
  language_creators:
5
  - crowdsourced
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  license:
7
  - cc-by-sa-4.0
8
  multilinguality:
@@ -12,36 +42,6 @@ source_datasets:
12
  task_categories:
13
  - text-generation
14
  - fill-mask
15
- language:
16
- - ace
17
- - ban
18
- - bcl
19
- - bjn
20
- - bug
21
- - cbk
22
- - ceb
23
- - gor
24
- - id
25
- - ilo
26
- - jv
27
- - km
28
- - lo
29
- - mad
30
- - min
31
- - mnw
32
- - ms
33
- - my
34
- - nia
35
- - pag
36
- - pam
37
- - shn
38
- - su
39
- - ta
40
- - th
41
- - tl
42
- - tet
43
- - vi
44
- - war
45
  task_ids:
46
  - language-modeling
47
  - masked-language-modeling
@@ -82,7 +82,7 @@ dataset_info:
82
  num_bytes: 2033238
83
  num_examples: 3285
84
  - name: ceb
85
- num_bytes: 4572804909
86
  num_examples: 6302896
87
  - name: gor
88
  num_bytes: 6239133
@@ -148,13 +148,13 @@ dataset_info:
148
  num_bytes: 85356818
149
  num_examples: 45341
150
  - name: vi
151
- num_bytes: 1603057632
152
  num_examples: 1288680
153
  - name: war
154
  num_bytes: 454304567
155
  num_examples: 1266394
156
- download_size: 10940051715
157
- dataset_size: 10923905689
158
  - config_name: seawiki_dedup_all
159
  features:
160
  - name: url
@@ -183,7 +183,7 @@ dataset_info:
183
  num_bytes: 1579651
184
  num_examples: 2242
185
  - name: ceb
186
- num_bytes: 4346511152
187
  num_examples: 5815254
188
  - name: gor
189
  num_bytes: 6217480
@@ -249,13 +249,13 @@ dataset_info:
249
  num_bytes: 85286023
250
  num_examples: 45121
251
  - name: vi
252
- num_bytes: 1602828123
253
- num_examples: 1287910
254
  - name: war
255
  num_bytes: 454266479
256
  num_examples: 1266204
257
- download_size: 10701952694
258
- dataset_size: 10686874347
259
  - config_name: seawiki_with_countries_all
260
  features:
261
  - name: url
@@ -353,7 +353,7 @@ dataset_info:
353
  num_bytes: 1370162
354
  num_examples: 2665
355
  - name: phl_ceb
356
- num_bytes: 4572804909
357
  num_examples: 6302896
358
  - name: sgp_ms
359
  num_bytes: 419662356
@@ -374,10 +374,10 @@ dataset_info:
374
  num_bytes: 1454499
375
  num_examples: 1468
376
  - name: vnm_vi
377
- num_bytes: 1603057632
378
  num_examples: 1288680
379
- download_size: 10940051715
380
- dataset_size: 13074580032
381
  - config_name: seawiki_with_countries_dedup_all
382
  features:
383
  - name: url
@@ -475,7 +475,7 @@ dataset_info:
475
  num_bytes: 764869
476
  num_examples: 1108
477
  - name: phl_ceb
478
- num_bytes: 4346511152
479
  num_examples: 5815254
480
  - name: sgp_ms
481
  num_bytes: 414783365
@@ -496,10 +496,10 @@ dataset_info:
496
  num_bytes: 1452151
497
  num_examples: 1464
498
  - name: vnm_vi
499
- num_bytes: 1602828123
500
- num_examples: 1287910
501
- download_size: 10701952694
502
- dataset_size: 12822597856
503
  ---
504
 
505
  # **SEA Wikipedia Data Repository**
@@ -582,7 +582,7 @@ You may check the following tables to understand the current coverage of this da
582
  | tgl | tl | Tagalog | phl | [Wiki Link](https://en.wikipedia.org/wiki/Tagalog_language) | 45121 | 81.34 |
583
  | tha | th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159666 | 965.95 |
584
  | tet | tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1464 | 1.38 |
585
- | vie | vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1287910 | 1,528.58 |
586
  | war | war | Waray | phl | [Wiki Link](https://en.wikipedia.org/wiki/Waray_language) | 1266204 | 433.22 |
587
  | (dialect) | map_bms | Banyumasan <br>(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11839 | 4.83 |
588
 
@@ -590,7 +590,7 @@ You may check the following tables to understand the current coverage of this da
590
  #### 3. Table of Token Statistics for Covered Languages
591
  The token statistics are generated using ```tiktoken``` with the encoder for GPT-4.
592
 
593
- | Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |
594
  | :---: | ---: | ---: | ---: | ---: | :--- |
595
  | ace | 1,370,829 | 105.61899992295247 | 3 | 9,659 | [38.0, 52.0, 54.0, 69.0, 76.0, 84.0, 90.0, 123.0, 126.0] |
596
  | ban | 5,924,610 | 287.44893503469024 | 5 | 24,364 | [97.0, 144.0, 165.0, 187.0, 209.0, 245.0, 276.0, 315.0, 421.0] |
@@ -620,7 +620,7 @@ The token statistics is generated using ```tiktoken``` using encoder for GPT-4.
620
  | tet | 487,016 | 332.6612021857924 | 4 | 24,287 | [30.3, 47.0, 66.9, 101.0, 164.0, 177.0, 187.0, 248.6, 604.4] |
621
  | th | 330,964,733 | 2,072.8566695476807 | 1 | 289,150 | [231.0, 390.0, 546.0, 727.0, 969.0, 1276.0, 1741.0, 2533.0, 4361.0] |
622
  | tl | 27,789,730 | 615.8934864032269 | 7 | 60,728 | [73.0, 116.0, 161.0, 214.0, 281.0, 360.0, 465.0, 666.0, 1136.0] |
623
- | vi | 546,481,258 | 424.3163404275143 | 3 | 246,463 | [46.0, 64.0, 71.0, 80.0, 86.0, 92.0, 120.0, 240.0, 824.0] |
624
  | war | 117,438,315 | 92.74833676090108 | 1 | 25,689 | [60.0, 77.0, 81.0, 84.0, 87.0, 90.0, 94.0, 99.0, 110.0] |
625
 
626
  Some other SEA languages that already have a Wiki index at Wikimedia might be missing from this list. Any language-update PR is greatly appreciated!
@@ -628,7 +628,8 @@ Some other languages in SEA that are already exists its Wiki Index at Wikimedia
628
  ### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF?
629
  The data available here are processed with the following flow:
630
  1. The raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text usually used for unavailable information or for requesting content contributions to that article), which is usually deemed noisy for NLP data.
631
- 2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication (duplication of text that has been pre-processed, i.e., symbols removed, HTML tags stripped, or ASCII/UTF-8 chars validated). You may check this [ ```dedup_raw_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) script to understand its implementation.
 
632
 
633
  ### How do I extract new Wikipedia Dataset of SEA languages?
634
  Please refer to the corresponding Github Repo for more detailed info [SEA Wiki Github Source Code](https://github.com/sabilmakbar/sea_wiki)
 
3
  - no-annotation
4
  language_creators:
5
  - crowdsourced
6
+ language:
7
+ - ace
8
+ - ban
9
+ - bcl
10
+ - bjn
11
+ - bug
12
+ - cbk
13
+ - ceb
14
+ - gor
15
+ - id
16
+ - ilo
17
+ - jv
18
+ - km
19
+ - lo
20
+ - mad
21
+ - min
22
+ - mnw
23
+ - ms
24
+ - my
25
+ - nia
26
+ - pag
27
+ - pam
28
+ - shn
29
+ - su
30
+ - ta
31
+ - th
32
+ - tl
33
+ - tet
34
+ - vi
35
+ - war
36
  license:
37
  - cc-by-sa-4.0
38
  multilinguality:
 
42
  task_categories:
43
  - text-generation
44
  - fill-mask
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  task_ids:
46
  - language-modeling
47
  - masked-language-modeling
 
82
  num_bytes: 2033238
83
  num_examples: 3285
84
  - name: ceb
85
+ num_bytes: 4572804910
86
  num_examples: 6302896
87
  - name: gor
88
  num_bytes: 6239133
 
148
  num_bytes: 85356818
149
  num_examples: 45341
150
  - name: vi
151
+ num_bytes: 1603057633
152
  num_examples: 1288680
153
  - name: war
154
  num_bytes: 454304567
155
  num_examples: 1266394
156
+ download_size: 1829748651
157
+ dataset_size: 10923905691
158
  - config_name: seawiki_dedup_all
159
  features:
160
  - name: url
 
183
  num_bytes: 1579651
184
  num_examples: 2242
185
  - name: ceb
186
+ num_bytes: 4346511153
187
  num_examples: 5815254
188
  - name: gor
189
  num_bytes: 6217480
 
249
  num_bytes: 85286023
250
  num_examples: 45121
251
  - name: vi
252
+ num_bytes: 1602830022
253
+ num_examples: 1287912
254
  - name: war
255
  num_bytes: 454266479
256
  num_examples: 1266204
257
+ download_size: 1811459996
258
+ dataset_size: 10686876247
259
  - config_name: seawiki_with_countries_all
260
  features:
261
  - name: url
 
353
  num_bytes: 1370162
354
  num_examples: 2665
355
  - name: phl_ceb
356
+ num_bytes: 4572804910
357
  num_examples: 6302896
358
  - name: sgp_ms
359
  num_bytes: 419662356
 
374
  num_bytes: 1454499
375
  num_examples: 1468
376
  - name: vnm_vi
377
+ num_bytes: 1603057633
378
  num_examples: 1288680
379
+ download_size: 1829748651
380
+ dataset_size: 13074580034
381
  - config_name: seawiki_with_countries_dedup_all
382
  features:
383
  - name: url
 
475
  num_bytes: 764869
476
  num_examples: 1108
477
  - name: phl_ceb
478
+ num_bytes: 4346511153
479
  num_examples: 5815254
480
  - name: sgp_ms
481
  num_bytes: 414783365
 
496
  num_bytes: 1452151
497
  num_examples: 1464
498
  - name: vnm_vi
499
+ num_bytes: 1602830022
500
+ num_examples: 1287912
501
+ download_size: 1811459996
502
+ dataset_size: 12822599756
503
  ---
504
 
505
  # **SEA Wikipedia Data Repository**
 
582
  | tgl | tl | Tagalog | phl | [Wiki Link](https://en.wikipedia.org/wiki/Tagalog_language) | 45121 | 81.34 |
583
  | tha | th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159666 | 965.95 |
584
  | tet | tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1464 | 1.38 |
585
+ | vie | vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1287912 | 1,528.58 |
586
  | war | war | Waray | phl | [Wiki Link](https://en.wikipedia.org/wiki/Waray_language) | 1266204 | 433.22 |
587
  | (dialect) | map_bms | Banyumasan <br>(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11839 | 4.83 |
588
 
 
590
  #### 3. Table of Token Statistics for Covered Languages
591
  The token statistics are generated using ```tiktoken``` with the encoder for GPT-4.
592
 
593
+ | Dataset Lang Code | Total Token | Avg Token per Article | Min Token | Max Token | Token Deciles List |
594
  | :---: | ---: | ---: | ---: | ---: | :--- |
595
  | ace | 1,370,829 | 105.61899992295247 | 3 | 9,659 | [38.0, 52.0, 54.0, 69.0, 76.0, 84.0, 90.0, 123.0, 126.0] |
596
  | ban | 5,924,610 | 287.44893503469024 | 5 | 24,364 | [97.0, 144.0, 165.0, 187.0, 209.0, 245.0, 276.0, 315.0, 421.0] |
 
620
  | tet | 487,016 | 332.6612021857924 | 4 | 24,287 | [30.3, 47.0, 66.9, 101.0, 164.0, 177.0, 187.0, 248.6, 604.4] |
621
  | th | 330,964,733 | 2,072.8566695476807 | 1 | 289,150 | [231.0, 390.0, 546.0, 727.0, 969.0, 1276.0, 1741.0, 2533.0, 4361.0] |
622
  | tl | 27,789,730 | 615.8934864032269 | 7 | 60,728 | [73.0, 116.0, 161.0, 214.0, 281.0, 360.0, 465.0, 666.0, 1136.0] |
623
+ | vi | 546,481,913 | 424.3161900813099 | 3 | 246,463 | [46.0, 64.0, 71.0, 80.0, 86.0, 92.0, 120.0, 240.0, 824.0] |
624
  | war | 117,438,315 | 92.74833676090108 | 1 | 25,689 | [60.0, 77.0, 81.0, 84.0, 87.0, 90.0, 94.0, 99.0, 110.0] |
625
 
626
  Some other SEA languages that already have a Wiki index at Wikimedia might be missing from this list. Any language-update PR is greatly appreciated!
 
628
  ### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF?
629
  The data available here are processed with the following flow:
630
  1. The raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text usually used for unavailable information or for requesting content contributions to that article), which is usually deemed noisy for NLP data.
631
+ 2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication (duplication of text that has been pre-processed, i.e., symbols removed, HTML tags stripped, or ASCII/UTF-8 chars validated).
632
+ The source code can be found in this GitHub repo: [SEA Wiki GitHub Source Code](https://github.com/sabilmakbar/sea_wiki)
633
 
634
  ### How do I extract new Wikipedia Dataset of SEA languages?
635
  Please refer to the corresponding Github Repo for more detailed info [SEA Wiki Github Source Code](https://github.com/sabilmakbar/sea_wiki)
sea_wiki.py CHANGED
@@ -66,11 +66,24 @@ _LATEST_DUMP_VERSION_DATE = sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1]
66
 
67
  def _construct_dset_url_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
68
  _mode_to_folder_mapper = {"dedup": "sea_wiki_dedup_data", "raw": "sea_wiki_raw_data"}
69
- _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv", "raw": "raw_dataset.csv"}
70
 
71
  return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
72
 
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  class SEAWikiConfig(datasets.BuilderConfig):
75
  """BuilderConfig for SEAWiki."""
76
 
@@ -199,7 +212,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
199
  # handle cases of config "seawiki_all", "seawiki_dedup_all", and custom config where only country is provided (take all langs in a country)
200
  if self.config.name in ("seawiki_all", "seawiki_dedup_all") or (self.config.country is not None and self.config.lang is None):
201
  file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
202
- dl_dir = dl_manager.download_and_extract(file_dict)
203
 
204
  return [
205
  datasets.SplitGenerator(
@@ -218,7 +231,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
218
  for file in file_list:
219
  file_dict[country + "_" + self._get_lang_name_from_data_url(file)] = file
220
 
221
- dl_dir = dl_manager.download_and_extract(file_dict)
222
 
223
  return [
224
  datasets.SplitGenerator(
@@ -231,7 +244,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
231
 
232
  # handle custom config where only country is provided
233
  elif self.config.lang is not None:
234
- dl_dir = dl_manager.download_and_extract(self.config.data_url)
235
  return [
236
  datasets.SplitGenerator(
237
  name=datasets.Split.TRAIN,
@@ -243,7 +256,7 @@ class SEAWiki(datasets.GeneratorBasedBuilder):
243
 
244
 
245
  def _generate_examples(self, data_file):
246
- pd_df = pd.read_csv(data_file)
247
  for _, row in pd_df.iterrows():
248
  example = {feature: row[feature] for feature in self.config.features}
249
  idx = row["id"]
 
66
 
67
  def _construct_dset_url_from_dset_version_and_lang(date_ver: str, lang: str, mode: str):
68
  _mode_to_folder_mapper = {"dedup": "sea_wiki_dedup_data", "raw": "sea_wiki_raw_data"}
69
+ _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv.gz", "raw": "raw_dataset.csv.gz"}
70
 
71
  return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}")
72
 
73
 
74
+ def read_csv_ignore_some_nulls(path: str, null_list_data: list=None, *args, **kwargs):
75
+ '''
76
+ Wrapper of `pd.read_csv` fn that ignores some of null data
77
+ '''
78
+ #values of pd._libs.parsers.STR_NA_VALUES: {'', '<NA>', 'NaN', 'N/A', 'null', '1.#QNAN', 'None', '#NA', 'nan', '-NaN', '#N/A N/A', '-1.#QNAN', 'NA', '-1.#IND', 'n/a', 'NULL', '-nan', '1.#IND', '#N/A'}
79
+ _unconsidered_for_null_list = ['NA', 'NULL', 'null', 'nan', 'null', 'NaN', 'None', 'N/A']
80
+ if null_list_data is not None:
81
+ _unconsidered_for_null_list.extend(null_list_data)
82
+
83
+ values_to_considered_missing_data = [val for val in pd._libs.parsers.STR_NA_VALUES if val not in _unconsidered_for_null_list]
84
+ return pd.read_csv(path, keep_default_na=False, na_values=values_to_considered_missing_data, *args, **kwargs)
85
+
86
+
87
  class SEAWikiConfig(datasets.BuilderConfig):
88
  """BuilderConfig for SEAWiki."""
89
 
 
212
  # handle cases of config "seawiki_all", "seawiki_dedup_all", and custom config where only country is provided (take all langs in a country)
213
  if self.config.name in ("seawiki_all", "seawiki_dedup_all") or (self.config.country is not None and self.config.lang is None):
214
  file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
215
+ dl_dir = dl_manager.download(file_dict)
216
 
217
  return [
218
  datasets.SplitGenerator(
 
231
  for file in file_list:
232
  file_dict[country + "_" + self._get_lang_name_from_data_url(file)] = file
233
 
234
+ dl_dir = dl_manager.download(file_dict)
235
 
236
  return [
237
  datasets.SplitGenerator(
 
244
 
245
  # handle custom config where only country is provided
246
  elif self.config.lang is not None:
247
+ dl_dir = dl_manager.download(self.config.data_url)
248
  return [
249
  datasets.SplitGenerator(
250
  name=datasets.Split.TRAIN,
 
256
 
257
 
258
  def _generate_examples(self, data_file):
259
+ pd_df = read_csv_ignore_some_nulls(data_file, compression='gzip')
260
  for _, row in pd_df.iterrows():
261
  example = {feature: row[feature] for feature in self.config.features}
262
  idx = row["id"]