diff --git a/.gitattributes b/.gitattributes index 28df5f900b358436f0267334b3e3e9af33f917ba..225859ecdc475a0e46d69d27e289fa73fffea696 100644 --- a/.gitattributes +++ b/.gitattributes @@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.jpg filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text *.webp filter=lfs diff=lfs merge=lfs -text +# Wiki data files CSV +*.csv filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index 08b5ae64808be50560252b43730aaf971826b342..47440dbe7e46f5652e05cc74e8ea3036aded4d7a 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,471 @@ --- +annotations_creators: +- no-annotation +language_creators: +- crowdsourced +language: +- ace +- ban +- bjn +- bug +- gor +- id +- jv +- mis +- min +- ms +- nia +- su +- tet +license: +- cc-by-sa-3.0 +- gfdl +multilinguality: +- multilingual +source_datasets: +- Wikipedia-HF +task_categories: +- text-generation +- fill-mask +task_ids: +- language-modeling +- masked-language-modeling +pretty_name: Wikipedia Archive for SEA Languages +tags: +- Wikipedia +- Southeast Asia (SEA) +- Dialect +- SEA-related Languages +- SEA Local Languages +dataset_info: +- config_name: seawiki_all + features: + - name: url + dtype: string + - name: title + dtype: string + - name: text + dtype: string + splits: + - name: ace + num_bytes: 4952102 + num_examples: 13003 + - name: ban + num_bytes: 18198909 + num_examples: 20987 + - name: bjn + num_bytes: 6792259 + num_examples: 10519 + - name: bug + num_bytes: 3298561 + num_examples: 15880 + - name: gor + num_bytes: 6239133 + num_examples: 15359 + - name: id + num_bytes: 1118834498 + num_examples: 665622 + - name: jv + num_bytes: 72101470 + num_examples: 73380 + - name: km + num_bytes: 103146669 + num_examples: 11994 + - name: lo + num_bytes: 15240262 + num_examples: 5014 + - name: mad + num_bytes: 1612542 + num_examples: 1192 + - name: map_bms + num_bytes: 5221506 + num_examples: 13580 + - name: min + num_bytes: 116824020 + num_examples: 227143 + - name: mnw + num_bytes: 47321734 + num_examples: 3296 + - name: ms + num_bytes: 419662356 + num_examples: 368628 + - name: my + num_bytes: 313370839 + num_examples: 109310 + - name: nia + num_bytes: 2153274 + num_examples: 1714 + - name: shn + num_bytes: 33754296 + num_examples: 13945 + - name: su + num_bytes: 47516268 + num_examples: 61555 + - name: tet + num_bytes: 1454499 + num_examples: 1468 + - name: th + num_bytes: 1012930269 + num_examples: 159719 + - name: vi + num_bytes: 1603057632 + num_examples: 1288680 + download_size: 4959860254 + dataset_size: 4953683098 +- config_name: seawiki_dedup_all + features: + - name: url + dtype: string + - name: title + dtype: string + - name: text + dtype: string + splits: + - name: ace + num_bytes: 4944916 + num_examples: 12979 + - name: ban + num_bytes: 18025267 + num_examples: 20611 + - name: bjn + num_bytes: 6786207 + num_examples: 10503 + - name: bug + num_bytes: 2182435 + num_examples: 9969 + - name: gor + num_bytes: 6217480 + num_examples: 15290 + - name: id + num_bytes: 1117891512 + num_examples: 662443 + - name: jv + num_bytes: 71997517 + num_examples: 73080 + - name: km + num_bytes: 102698901 + num_examples: 11466 + - name: lo + num_bytes: 14908444 + num_examples: 4897 + - name: mad + num_bytes: 1612542 + num_examples: 1192 + - name: map_bms + num_bytes: 5067489 + num_examples: 11839 + - name: min + num_bytes: 116721269 + num_examples: 225972 + - name: mnw + num_bytes: 47243333 + num_examples: 3271 + - name: ms + num_bytes: 414783365 + num_examples: 
348045 + - name: my + num_bytes: 312990457 + num_examples: 108819 + - name: nia + num_bytes: 2153274 + num_examples: 1714 + - name: shn + num_bytes: 33616591 + num_examples: 13662 + - name: su + num_bytes: 47512744 + num_examples: 61529 + - name: tet + num_bytes: 1452151 + num_examples: 1464 + - name: th + num_bytes: 1012868861 + num_examples: 159666 + - name: vi + num_bytes: 1602828123 + num_examples: 1287910 + download_size: 4950689052 + dataset_size: 4944502878 +- config_name: seawiki_with_countries_all + features: + - name: url + dtype: string + - name: title + dtype: string + - name: text + dtype: string + splits: + - name: idn_ace + num_bytes: 4952102 + num_examples: 13003 + - name: idn_ban + num_bytes: 18198909 + num_examples: 20987 + - name: idn_bjn + num_bytes: 6792259 + num_examples: 10519 + - name: idn_bug + num_bytes: 3298561 + num_examples: 15880 + - name: idn_gor + num_bytes: 6239133 + num_examples: 15359 + - name: idn_id + num_bytes: 1118834498 + num_examples: 665622 + - name: idn_jv + num_bytes: 72101470 + num_examples: 73380 + - name: idn_mad + num_bytes: 1612542 + num_examples: 1192 + - name: idn_map_bms + num_bytes: 5221506 + num_examples: 13580 + - name: idn_min + num_bytes: 116824020 + num_examples: 227143 + - name: idn_ms + num_bytes: 419662356 + num_examples: 368628 + - name: idn_nia + num_bytes: 2153274 + num_examples: 1714 + - name: idn_su + num_bytes: 47516268 + num_examples: 61555 + - name: idn_tet + num_bytes: 1454499 + num_examples: 1468 + - name: sgp_ms + num_bytes: 419662356 + num_examples: 368628 + - name: mys_ms + num_bytes: 419662356 + num_examples: 368628 + - name: brn_ms + num_bytes: 419662356 + num_examples: 368628 + - name: tha_th + num_bytes: 1012930269 + num_examples: 159719 + - name: mmr_my + num_bytes: 313370839 + num_examples: 109310 + - name: mmr_shn + num_bytes: 33754296 + num_examples: 13945 + - name: mmr_mnw + num_bytes: 47321734 + num_examples: 3296 + - name: lao_lo + num_bytes: 15240262 + num_examples: 5014 + - name: vnm_vi + num_bytes: 1603057632 + num_examples: 1288680 + - name: khm_km + num_bytes: 103146669 + num_examples: 11994 + - name: tls_tet + num_bytes: 1454499 + num_examples: 1468 + download_size: 4959860254 + dataset_size: 6214124665 +- config_name: seawiki_with_countries_dedup_all + features: + - name: url + dtype: string + - name: title + dtype: string + - name: text + dtype: string + splits: + - name: idn_ace + num_bytes: 4944916 + num_examples: 12979 + - name: idn_ban + num_bytes: 18025267 + num_examples: 20611 + - name: idn_bjn + num_bytes: 6786207 + num_examples: 10503 + - name: idn_bug + num_bytes: 2182435 + num_examples: 9969 + - name: idn_gor + num_bytes: 6217480 + num_examples: 15290 + - name: idn_id + num_bytes: 1117891512 + num_examples: 662443 + - name: idn_jv + num_bytes: 71997517 + num_examples: 73080 + - name: idn_mad + num_bytes: 1612542 + num_examples: 1192 + - name: idn_map_bms + num_bytes: 5067489 + num_examples: 11839 + - name: idn_min + num_bytes: 116721269 + num_examples: 225972 + - name: idn_ms + num_bytes: 414783365 + num_examples: 348045 + - name: idn_nia + num_bytes: 2153274 + num_examples: 1714 + - name: idn_su + num_bytes: 47512744 + num_examples: 61529 + - name: idn_tet + num_bytes: 1452151 + num_examples: 1464 + - name: sgp_ms + num_bytes: 414783365 + num_examples: 348045 + - name: mys_ms + num_bytes: 414783365 + num_examples: 348045 + - name: brn_ms + num_bytes: 414783365 + num_examples: 348045 + - name: tha_th + num_bytes: 1012868861 + num_examples: 159666 + - name: mmr_my + num_bytes: 312990457 + 
num_examples: 108819 + - name: mmr_shn + num_bytes: 33616591 + num_examples: 13662 + - name: mmr_mnw + num_bytes: 47243333 + num_examples: 3271 + - name: lao_lo + num_bytes: 14908444 + num_examples: 4897 + - name: vnm_vi + num_bytes: 1602828123 + num_examples: 1287910 + - name: khm_km + num_bytes: 102698901 + num_examples: 11466 + - name: tls_tet + num_bytes: 1452151 + num_examples: 1464 + download_size: 4950689052 + dataset_size: 6190305124 +--- + +# **SEA Wikipedia Data Repository** +Welcome to the SEA Wikipedia Data Repository. The datasets are extracted from [Wikipedia HF](https://huggingface.co/datasets/wikipedia) and processed using the scripts available in this repository for reproducibility purposes. + +# **FAQs** +### What are the available languages provided in this dataset, and from which countries? +You may check the following tables to understand the current coverage of this dataset (languages, countries, data size & volume). + +#### 1. Table of Countries and their Country Codes +| Country Code | Country Name | Wiki Info | +| :---: | :---: | :---: | +| brn | Brunei | [Wiki Link](https://en.wikipedia.org/wiki/Brunei) | +| idn | Indonesia | [Wiki Link](https://en.wikipedia.org/wiki/Indonesia) | +| khm | Cambodia | [Wiki Link](https://en.wikipedia.org/wiki/Cambodia) | +| lao | Laos | [Wiki Link](https://en.wikipedia.org/wiki/Laos) | +| mmr | Myanmar | [Wiki Link](https://en.wikipedia.org/wiki/Myanmar) | +| mys | Malaysia | [Wiki Link](https://en.wikipedia.org/wiki/Malaysia) | +| sgp | Singapore | [Wiki Link](https://en.wikipedia.org/wiki/Singapore) | +| tha | Thailand | [Wiki Link](https://en.wikipedia.org/wiki/Thailand) | +| tls | East Timor | [Wiki Link](https://en.wikipedia.org/wiki/East_Timor) | +| vnm | Vietnam | [Wiki Link](https://en.wikipedia.org/wiki/Vietnam) | + +#### 2. Table of Languages and the Countries of their Speakers +| Lang Code | Lang Name | Country Codes Spoken | Wiki Info | Total Data | Total Size (bytes) | +| :---: | :---: | :---: | :--- | ---: | ---: | +| ace | Acehnese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Acehnese_language) | 12904 | 4867838 | +| ban | Balinese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Balinese_language) | 19837 | 17366080 | +| bjn | Banjarese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banjarese_language) | 10437 | 6655378 | +| bug | Buginese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Buginese_language) | 9793 | 2072609 | +| gor | Gorontalo | idn | [Wiki Link](https://en.wikipedia.org/wiki/Gorontalo_language) | 14514 | 5989252 | +| km | Khmer | khm | [Wiki Link](https://en.wikipedia.org/wiki/Khmer_language) | 11994 | 103146669 | +| id | Indonesian | idn | [Wiki Link](https://en.wikipedia.org/wiki/Indonesian_language) | 654287 | 1100932403 | +| jv | Javanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Javanese_language) | 72667 | 69774853 | +| lo | Lao | lao | [Wiki Link](https://en.wikipedia.org/wiki/Lao_language) | 5014 | 15240262 | +| mad | Madurese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Madurese_language) | 1192 | 1612542 | +| map_bms | Banyumasan
(Dialect of Javanese) | idn | [Wiki Link](https://en.wikipedia.org/wiki/Banyumasan_dialect) | 11832 | 5060989 | +| mnw | Mon | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Mon_language) | 3296 | 47321734 | +| min | Minangkabau | idn | [Wiki Link](https://en.wikipedia.org/wiki/Minangkabau_language) | 225858 | 116376870 | +| ms | Malay | mys, sgp, brn, idn | [Wiki Link](https://en.wikipedia.org/wiki/Malay_language) | 346186 | 410443550 | +| my | Burmese | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Burmese_language) | 109310 | 313370839 | +| nia | Nias | idn | [Wiki Link](https://en.wikipedia.org/wiki/Nias_language) | 1650 | 1938121 | +| shn | Shan | mmr | [Wiki Link](https://en.wikipedia.org/wiki/Shan_language) | 13945 | 33754296 | +| su | Sundanese | idn | [Wiki Link](https://en.wikipedia.org/wiki/Sundanese_language) | 61494 | 47410439 | +| tet | Tetum | tls, idn | [Wiki Link](https://en.wikipedia.org/wiki/Tetum_language) | 1465 | 1452716 | +| th | Thai | tha | [Wiki Link](https://en.wikipedia.org/wiki/Thai_language) | 159719 | 1012930269 | +| vi | Vietnamese | vnm | [Wiki Link](https://en.wikipedia.org/wiki/Vietnamese_language) | 1288680 | 1603057632 | + + +Some other SEA languages that already have a Wiki Index at Wikimedia might be missing from this list. Any language-update PR is greatly appreciated! + +### How do I extract a new Wikipedia dataset of SEA languages? +You may check the script [_```extract_raw_wiki_data.py```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data.py) to understand its implementation, or you can adjust the bash script provided in [_```extract_raw_wiki_data_sea.sh```_](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/extract_raw_wiki_data_sea.sh) to extract it on your own. Please note that this extraction is extensible to any language of your choice. + +### Where can I check the latest available Wikipedia dumps and their language coverage? +You may visit the [Wikipedia Dump Index](https://dumps.wikimedia.org/backup-index.html) to check the latest available data, and the [Wikipedia Language Coverage](https://meta.wikimedia.org/wiki/List_of_Wikipedias#All_Wikipedias_ordered_by_number_of_articles) list to map language codes onto the languages you want to extract. + +### How is the data preprocessed? What makes it different from loading it directly from Wikipedia HF? +The data available here is processed with the following flow: +1. The raw data is deduplicated on ```title``` and ```text``` (the text content of a given article) to remove articles containing boilerplate text (template text usually placed in articles with no available information yet, or asking for content contributions), which is typically deemed noisy for NLP data. +2. Furthermore, the ```title``` and ```text``` data are checked for string-matching duplication after light pre-processing (i.e. symbols removed, HTML tags stripped, and character encoding validated). You may check the [```dedup_raw_wiki_data.py```](https://huggingface.co/datasets/sabilmakbar/sea_wiki/blob/main/dedup_raw_wiki_data.py) script to understand its implementation; a simplified sketch of this flow is shown below.
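+ +For illustration, here is a minimal pandas sketch of that two-stage dedup flow. This is a simplification rather than the actual script: ```dedup_raw_wiki_data.py``` builds its text normalizer from its CLI args, the ```normalize``` helper below is a hypothetical stand-in, and the input file is just one of the raw CSVs shipped in this repo. +``` +import pandas as pd + +df = pd.read_csv("sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv") + +# (1) hard dedup: drop every row whose title or text appears more than once verbatim +for col in ["title", "text"]: + df = df.drop_duplicates(subset=col, keep=False) + +# (2) soft dedup: group rows sharing the same normalized text, +# then keep only the longest raw variant of each group +def normalize(s): + # stand-in for the script's configurable pipeline (whitespace squeeze, encoding check, etc.) + return " ".join(str(s).split()).encode("utf8", errors="ignore").decode() + +for col in ["title", "text"]: + s = df[col].astype(str) + ranks = s.str.len().groupby(s.map(normalize)).rank(method="min", ascending=False) + df = df[ranks == 1] +```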
+ +# Getting Started # +### To read the datasets directly ### +Use one of the following code chunks to load the dataset from the HuggingFace Hub. +You can pass the config name as the 2nd argument, as in the following script: +``` +from datasets import load_dataset + +dataset = load_dataset( + "sabilmakbar/sea_wiki", + "seawiki_dedup_all" # config name; one of "seawiki_all", "seawiki_dedup_all", "seawiki_with_countries_all", or "seawiki_with_countries_dedup_all"; defaults to "seawiki_dedup_all" +) +``` +Or you can provide both ```lang``` and ```date_stamp``` (or just ```lang```, in which case ```date_stamp``` defaults to the newest available dump): +``` +dataset = load_dataset( + "sabilmakbar/sea_wiki", + lang = "id", # see README for complete lang choices + date_stamp="20230901" +) +``` +Or you can provide a ```country``` param in a similar fashion to the ```lang``` arg (providing both ```country``` and ```lang``` will prioritize the ```lang``` kwarg): +``` +dataset = load_dataset( + "sabilmakbar/sea_wiki", + country = "idn", # see README for complete country choices + date_stamp="20230901" +) +``` + +### To replicate the whole dataset generation process ### +1. Set up a new Python/Conda environment (recommended Python version: 3.9.6 to 3.9.18 or 3.10.0 to 3.10.13) and install the requirements of this codebase via ```pip install -r requirements.txt```. +2. Activate the chosen Python/Conda environment in which the requirements were installed. +3. Force-install ```multiprocess==0.70.15``` by using ```pip install multiprocess==0.70.15``` to avoid [this issue](https://github.com/huggingface/datasets/issues/5613#issuecomment-1703169594) (there's no other workaround for now, especially for Python 3.10.x). +4. Run this ```sh``` script for extraction from the Wikimedia dump: + ```sh extract_raw_wiki_data_sea.sh```. +5. Run this ```sh``` script for deduplication: + ```sh dedup_raw_wiki_data_sea.sh```. + +## Citation Info: +``` +@ONLINE{wikidump, + author = "Wikimedia Foundation", + title = "Wikimedia Downloads", + url = "https://dumps.wikimedia.org"} +@ONLINE{wikipedia-hf, + title = "Huggingface Wikipedia Dataset", + url = "https://huggingface.co/datasets/wikipedia"} +``` diff --git a/dedup_raw_wiki_data.py b/dedup_raw_wiki_data.py new file mode 100644 index 0000000000000000000000000000000000000000..bf5d5ceb3a015c67d21e0a55aa1b36d9aff72888 --- /dev/null +++ b/dedup_raw_wiki_data.py @@ -0,0 +1,414 @@ +# %% +''' +Script for cleansing the Wikipedia data that has been extracted by extract_raw_wiki_data.py +''' +#core functionality modules +import os, gc +import logging +import argparse +import warnings + +from functools import partial + +#text preprocess modules +import re +import urllib +from xml.etree import ElementTree as ET + +#dataset related modules +import numpy as np +import pandas as pd + + +### MODULES DEFINITION ### +#create custom type-checking of incoming ArgParse +def argparse_bool_check(value: str): + #cast str with value like float into actual float + try: + value = float(value) + #can't be parsed as float, keep as it is + except ValueError: + pass + + #cast float-like value (incl int) into str + if isinstance(value, float) and int(value) == value: + value = str(int(value)) + #raise ArgumentTypeError if the value isn't in string already + else: + if not isinstance(value, str): + raise argparse.ArgumentTypeError(f"Not the correct value (args: {value})! Expected a value castable to '1' or '0', or already a string.
Please rectify!") + #check for these combinations of values + if value.lower() in ("yes", "true", "t", "y", "1"): + return True + elif value.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!") + + +def text_processing_args_checker(value: str): + if value not in ["all", "text", "title", "neither"]: + raise argparse.ArgumentTypeError(f"Value Error! Not the correct value (args: {value})! Please rectify!") + else: + return value + + +def set_logger(): + # Set up the logger + logging.basicConfig( + level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) + format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format + datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format + ) + + # Create a file handler to write logs into a file + file_handler = logging.FileHandler('app.log') + + # Set the log level for the file handler + file_handler.setLevel(logging.INFO) + + # Create a formatter for the file handler (customize the log format for the file) + file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') + file_handler.setFormatter(file_formatter) + + logger = logging.getLogger("Wiki Dataset Generation") + logger.addHandler(file_handler) + + return logger + + +#wrapper fn of text-cleansing +def text_cleansing_wrapper(fn, exception_class_names = []): + + #ensure caught exception class names passed to decorator is a list (if provided) + if not isinstance(exception_class_names, list): + raise TypeError("Exception Class Name for Wrapper is not a list!") + #ensure all values of caught exception class name list is a string + if not all([isinstance(val, str) for val in exception_class_names]): + raise ValueError("Found an element of Exception Class Name for Wrapper that is not a string!") + + #lowercase all exception class name + exception_class_names = [val.lower() for val in exception_class_names] + if len(exception_class_names) == 0: + warnings.warn("The wrapper receives 0 `exception_class_names` to be warned! Will return the function value with its input!") + + def text_fn_wrapper(text: str, *args, **kwargs): + try: + return fn(text, *args, **kwargs) + except Exception as e: + _exc_name = type(e).__name__ + if _exc_name.lower() not in exception_class_names and len(exception_class_names)>0: + raise Exception(f"Exception Occured of {_exc_name} in {fn.__name__}!") from e + else: + _followup_msg = "Returning the input as it is..." + _text_warn = f"An exception of {_exc_name} occured in {fn.__name__}! 
{_followup_msg}" + warnings.warn(_text_warn) + return text + + return text_fn_wrapper + + +#create html tags cleanser of a given text +partial_decorator = partial(text_cleansing_wrapper, exception_class_names=["parseerror"]) +@partial_decorator +def remove_html_tags(text: str): + #extracted from "https://stackoverflow.com/a/9662410", w/ additional decorator of error handler + return (''.join(ET.fromstring(text).itertext())).strip() + + +#create url decoder of text +@text_cleansing_wrapper +def decode_url(text: str): + # return (urllib.parse.unquote(text)).encode('utf8', errors='ignore').decode().strip() + return (urllib.parse.unquote(text)).strip() + +#create encoder check of text +@text_cleansing_wrapper +def check_text_by_encoder(text: str, encoder: str="utf8"): + return text.encode(encoder, errors='ignore').decode().strip() + +#create excessive whitespace removal of text +@text_cleansing_wrapper +def remove_excessive_whitespace(text: str): + return re.sub("(\s)(\s+)", r"\1", text).strip() + +#create non-alphanumeric removal of text +@text_cleansing_wrapper +def remove_non_alphanumeric(text: str): + return re.sub("[^a-z0-9\s]", "", text, flags=re.I).strip() + +# def cleanse_wiki_text(text: str): +# return remove_html_tags(decode_url_and_remove_non_ascii(text)) + +# def normalize_wiki_title(text: str): +# return remove_non_alphanumeric(remove_excessive_whitespace(text.lower())) + + +def _text_normalizer_constructor( + remove_non_alphanumeric_bool: bool, remove_excessive_whitespace_bool: bool, + remove_html_tags_bool: bool, decode_url_bool: bool, encoder_check_bool: bool, + encoder: str="utf8"): + + _lambda_fn_1 = partial(check_text_by_encoder, encoder=encoder) if encoder_check_bool else lambda x: x + _lambda_fn_2 = lambda x: remove_non_alphanumeric(_lambda_fn_1(x)) if remove_non_alphanumeric_bool else _lambda_fn_1(x) + _lambda_fn_3 = lambda x: remove_excessive_whitespace(_lambda_fn_2(x)) if remove_excessive_whitespace_bool else _lambda_fn_2(x) + _lambda_fn_4 = lambda x: remove_html_tags(_lambda_fn_3(x)) if remove_html_tags_bool else _lambda_fn_3(x) + _lambda_fn_5 = lambda x: decode_url(_lambda_fn_4(x)) if decode_url_bool else _lambda_fn_4(x) + + return _lambda_fn_5 + + +def _args_to_text_constructor_fn(**kwargs): + + def _decode_options(opt: str): + # return decoded options with format `text_opt`, `title_opt` + # possible values are ["all", "text", "title", "neither"] + if opt == "all": + return True, True + elif opt == "text": + return True, False + elif opt == "title": + return False, True + else: + return False, False + + kwargs_title, kwargs_text = {}, {} + + kwargs_title["encoder"] = kwargs["text_encoder_choice_title"] + kwargs_text["encoder"] = kwargs["text_encoder_choice_text"] + + for key, val in kwargs.items(): + if key not in [ + "remove_non_alphanumeric_option", "remove_excessive_whitespace_option", + "remove_html_tags_option", "decode_url_option", "encoder_check_option"]: + continue + new_key = "_".join(key.split("_")[:-1]) + "_bool" + text_opt_val, title_opt_val = _decode_options(val) + kwargs_text[new_key], kwargs_title[new_key] = text_opt_val, title_opt_val + + return _text_normalizer_constructor(**kwargs_text), _text_normalizer_constructor(**kwargs_title) + + +def _text_processing_wrapper(text: str, _fn, mode: str="text"): + if mode not in ["text", "title"]: + raise ValueError(f"Provided `mode` isn't either 'text' or 'title'! 
Received: {mode}") + return _fn(text.lower()) if mode=="title" else _fn(text) + + +### MAIN CODE ### +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("--raw-csv-path", help="Relative location of csv file containing raw Wikipedia data") + + parser.add_argument("--drop-hard-dupl", help="""Flag whether to drop hard duplicates + (exact values of data of relevant text fields, Titles & Desc)""", + default=True, type=argparse_bool_check) + + parser.add_argument("--drop-soft-dupl", help="""Flag whether to drop soft duplicates + (duplicates after cleansed and normalized relevant text fields, Titles & Desc)""", + default=True, type=argparse_bool_check) + + parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data + to the `dedup_raw_wiki_data.py` script dir""", + default=os.path.dirname(os.path.abspath(__file__))) + + ### THE FOLLOWING ARGUMENTS ONLY TEMPORARILY ALTER THE TEXT DATA ONLY FOR SOFT-DEDUP CHECK ### + ### THE INITIAL TEXT DATA WON'T BE OVERWRITTEN AFTER BEING PREPROCESSED ### + ### UNLESS YOU ARE SPECIFYING IN ARGS `overwrite-initial-title-data` AND `overwrite-initial-text-data` ### + + ### ARGS TO OVERWRITTE INITIAL TEXT DATA WITH PROCESSED ONES ### + parser.add_argument("--overwrite-initial-title-data", help="""Flag whether to overwrite title + init data w/ processed data (True) or keep it as it is (False)""", + default=False, type=argparse_bool_check) + + parser.add_argument("--overwrite-initial-text-data", help="""Flag whether to overwrite text + init data w/ processed data (True) or keep it as it is (False)""", + default=False, type=argparse_bool_check) + + ### INSTANTIATOR ARGS FOR CONSTRUCTING TEXT PROCESSING FN TO BE APPLIED ### + parser.add_argument("--remove-non-alphanumeric-option", help="""Identifier which columns to be preprocessed + using `remove_non_alphanumeric` for soft duplicates detection + (Choices are "all", "text", "title", "neither")""", + default="neither", type=text_processing_args_checker) + + parser.add_argument("--remove-excessive-whitespace-option", help="""Identifier which columns to be preprocessed + using `remove_excessive_whitespace` for soft duplicates detection + (Choices are "all", "text", "title", "neither")""", + default="all", type=text_processing_args_checker) + + parser.add_argument("--remove-html-tags-option", help="""Identifier which columns to be preprocessed + using `remove_html_tags` for soft duplicates detection + (Choices are "all", "text", "title", "neither")""", + default="all", type=text_processing_args_checker) + + parser.add_argument("--decode-url-option", help="""Identifier which columns to be preprocessed + using `decode_url` for soft duplicates detection + (Choices are "all", "text", "title", "neither")""", + default="all", type=text_processing_args_checker) + + ### ARGS TO CHOOSE ENCODER CHECKING AND ITS CONFIG INITIALIZATION ### + parser.add_argument("--encoder-check-option", help="""Identifier which columns to be preprocessed + using `check_text_by_encoder` for soft duplicates detection + (Choices are "all", "text", "title", "neither")""", + default="all", type=text_processing_args_checker) + + parser.add_argument("--text-encoder-choice-title", help="""Identifier of title encoder type + to be applied into `check_text_by_encoder` for soft duplicates detection""", + default="utf8", type=str) + + parser.add_argument("--text-encoder-choice-text", help="""Identifier of text encoder type + to be applied into `check_text_by_encoder` for soft duplicates 
detection""", + default="utf8", type=str) + + + _EXPECTED_COLNAMES = ["id", "url", "title", "text"] + + logger = set_logger() + logger.info("Parsing arguments...") + + args = parser.parse_args() + + # class dotdict(dict): + # """dot.notation access to dictionary attributes""" + # __getattr__ = dict.get + # __setattr__ = dict.__setitem__ + # __delattr__ = dict.__delitem__ + + # args = dotdict({ + # "raw_csv_path":"", + # "drop_hard_dupl": True, + # "drop_soft_dupl": True, + # "save_dir_path": os.path.dirname(os.path.abspath(__file__)), + # "overwrite_initial_title_data": False, + # "overwrite_initial_text_data": False, + # "remove_non_alphanumeric_option":"neither", + # "remove_excessive_whitespace_option": "neither", + # "remove_html_tags_option":"neither", + # "decode_url_option":"neither", + # "encoder_check_option":"all", + # "text_encoder_choice_title":"utf8", + # "text_encoder_choice_text":"utf8" + # }) + + _TEXT_PROCESSING_FN, _TITLE_PROCESSING_FN = _args_to_text_constructor_fn( + remove_non_alphanumeric_option = args.remove_non_alphanumeric_option, + remove_excessive_whitespace_option = args.remove_excessive_whitespace_option, + remove_html_tags_option = args.remove_html_tags_option, + decode_url_option = args.text_encoder_choice_title, + encoder_check_option = args.encoder_check_option, + text_encoder_choice_title = args.text_encoder_choice_title, + text_encoder_choice_text = args.text_encoder_choice_text + ) + + raw_data_path = args.raw_csv_path + drop_hard_dupl = args.drop_hard_dupl + drop_soft_dupl = args.drop_soft_dupl + save_dir = args.save_dir_path + + overwrite_initial_title_data = args.overwrite_initial_title_data + overwrite_initial_text_data = args.overwrite_initial_text_data + + + df = pd.read_csv(raw_data_path) + if len(set(df.columns).difference(set(_EXPECTED_COLNAMES))) != 0 or len(set(_EXPECTED_COLNAMES).difference(set(df.columns))) != 0: + raise ValueError(f"The data schema expected, consist of columns: {', '.join(df.columns.to_list())} doesn't match with expected column values of {', '.join(_EXPECTED_COLNAMES)}!") + + if (not drop_hard_dupl) and (not drop_soft_dupl): + raise AssertionError("The script won't run with both `drop-hard-dupl` and `drop-soft-dupl` args turned off!") + elif (not drop_hard_dupl): + warnings.warn("The args of `drop_hard_dupl` isn't turned off! Possibly the data will contain one template value of Wikipedia (usually no contribution text!)") + + #will save id identifier colname first (popping first list val) + id_colname = _EXPECTED_COLNAMES.pop(0) + + # if any of the data has duplicate values from columns checked (url, title, or text), + # it means the data integrity is questionable + # i.e. copied from other article or filled with template text + # hence, we will delete those duplicated datasets + + #hard duplicate drop (drop all duplicate values that has exact same text on expected unique colnames) + if drop_hard_dupl: + + for colname in _EXPECTED_COLNAMES: + logger.info(f"Checking data integrity on column {colname} on removing hard-duplicate(s)...") + dupl_text_df = df[df.duplicated(subset=colname,keep=False)] + shape_of_dupl_data = dupl_text_df.shape[0] + + if shape_of_dupl_data > 0: + logger.info(f"Found {shape_of_dupl_data} data duplicated! 
Will be dropped") + df.drop_duplicates(subset=colname, keep=False, inplace=True) + + + #check id/idx of the cleansed data, whether it has duplicate + # (the duplication of id/idx should came from the very first extraction, not from the cleansing) + + if df[df.duplicated(subset=id_colname,keep=False)].shape[0] > 0: + logger.info("Duplicated ID found! Re-assigning ID to the new ones based on `df.reset_index` method!") + df[id_colname] = df.reset_index().index + + #soft duplicate drop (drop all except one duplicate values that has exact same text on expected unique colnames) + #keep the data that has longest value of its raw form + if drop_soft_dupl: + + idx_to_keep = set(df.index.to_list()) + #clean from text & title only, url isn't needed for this process + _EXPECTED_COLNAMES.remove("url") + + for colname in _EXPECTED_COLNAMES: + #Construct Text Cleanser Fn for soft-duplicate cleansing + _PROCESSING_FN = _TEXT_PROCESSING_FN if colname == "text" else _TITLE_PROCESSING_FN + text_processing_fn = partial(_text_processing_wrapper, _fn=_PROCESSING_FN, mode=colname) + logger.info(f"Checking data integrity on column {colname} on removing soft-duplicate(s)...") + _df = df.copy(deep=True) + + #Setting up DF cols as String so it can be text-processed + _df = _df[[colname]] + _df[colname] = _df[colname].astype("str") + logger.info(f"Cleansing the data based on {colname}") + + #applying text processing + _df[colname+"_raw_len"] = _df[colname].apply(len) + _df[colname+"_cleansed"] = _df[colname].apply(lambda row_text: text_processing_fn(text=row_text)) + + #overwrite its text data if set as true + if overwrite_initial_title_data and colname == "title": + df[colname] = _df[colname+"_cleansed"] + elif overwrite_initial_text_data and colname == "text": + df[colname] = _df[colname+"_cleansed"] + + #choose the data to keep by "ranking" it according to len of its raw text (greatest to keep) + logger.info(f"Ranking and grouping the data based on {colname}") + _df["rk"] = _df.groupby(colname+"_cleansed")[colname+"_raw_len"].rank(method="min", ascending=False) + shape_of_dupl_data = _df[_df["rk"]>1].shape[0] + + if shape_of_dupl_data > 0: + logger.info(f"Found {shape_of_dupl_data} data duplicated! Will be dropped") + _idx_to_keep = _df[_df["rk"]==1].index.to_list() + if len(_idx_to_keep)+shape_of_dupl_data != df.shape[0]: + raise AssertionError("Mismatch of data number!") + idx_to_keep = idx_to_keep.intersection(set(_idx_to_keep)) + else: + logger.info(f"No soft-duplicate found in colname {colname}. 
Continuing") + + del _df + gc.collect() + + logger.info(f"The final data kept is {len(idx_to_keep)} from {df.shape[0]}") + df = df.loc[list(idx_to_keep),:] + + logger.info("Saving dataset cleansed form...") + #input path splitted by ("/") for the last entry should return filename + #whereas the filename splitted by (".") except the last value should return the filename w/o ".csv" extension + + _override_suffix_identifier = "" + if overwrite_initial_title_data or overwrite_initial_text_data: + _override_suffix_identifier = "_overwritten" + if overwrite_initial_text_data: + _override_suffix_identifier = "_text"+_override_suffix_identifier + if overwrite_initial_title_data: + _override_suffix_identifier = "_title"+_override_suffix_identifier + + _save_file_name = ".".join(raw_data_path.split("/")[-1].split(".")[:-1]) + "_dedup_cleansed" + _override_suffix_identifier + ".csv" + _save_file_name = _save_file_name.replace("_raw", "") + df.to_csv(f"{save_dir}/{_save_file_name}", index=False) diff --git a/dedup_raw_wiki_data_sea.sh b/dedup_raw_wiki_data_sea.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca91312f51748ed16960bae2011ea036f516eb05 --- /dev/null +++ b/dedup_raw_wiki_data_sea.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# all available lang codes in SEA local-languages or linguistically-related to following countries in SEA: +# Indonesia: "ace" (Acehnese), "ban" (Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum) +# Singapore: "ms" (Malay) +# Malaysia: "ms" (Malay) +# Brunei: "ms" (Malay) +# Thailand: "th" (Thai) +# Myanmar: "my" (Burmese), "shn" (Shan), "mnw" (Mon) +# Laos: "lo" (Lao) +# Vietnam: "vi" (Vietnamese) +# Cambodia: "km" (Khmer) +# East Timor: "tet" (Tetum) + +#params of executions +folder_dir_to_save=./sea_wiki_dedup_data +input_folder_to_be_dedup=./sea_wiki_raw_data + +drop_hard_dupl=True +drop_soft_dupl=True + + +# main executions + +# src: https://stackoverflow.com/a/18887210 (to list all files under a dir) +shopt -s nullglob +file_name_array=($input_folder_to_be_dedup/*) +shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later +file_name_array="${file_name_array}" + +if [ ${#file_name_array[@]} == 0 ]; then + echo "No files found under directory $input_folder_to_be_dedup" >&2 +fi + +if [ ! -d $folder_dir_to_save ]; +then + echo "Dir $folder_dir_to_save not exists! Creating the dir..." + mkdir $folder_dir_to_save +fi + +echo "The params hard-dedup drop is set as $drop_hard_dupl" +echo "The params soft-dedup drop is set as $drop_soft_dupl" + +for val in ${!file_name_array[@]}; do + csv_path=${file_name_array[$val]} + + if [[ ${csv_path} != *".csv" ]]; then + echo "The extracted file name isn't a CSV! Skipping! 
Received $csv_path" + continue + fi + + echo "Executing Dedup on iteration no "$((val+1))" of total ${#file_name_array[@]} for input data $csv_path" + #see the script bcs there are more args than this command is using + python dedup_raw_wiki_data.py \ + --raw-csv-path $csv_path \ + --drop-hard-dupl $drop_hard_dupl \ + --drop-soft-dupl $drop_soft_dupl \ + --save-dir-path $folder_dir_to_save + echo "Done Execution" +done +echo "Done Dedup Process" diff --git a/extract_raw_wiki_data.py b/extract_raw_wiki_data.py new file mode 100644 index 0000000000000000000000000000000000000000..e337067b1dd43ec0a6b5e07d1b6ad9961b49081e --- /dev/null +++ b/extract_raw_wiki_data.py @@ -0,0 +1,73 @@ +''' +Script on Generating Wikipedia Data that are dumped into https://dumps.wikimedia.org/ +More info can be read on https://huggingface.co/datasets/wikipedia +------------------- +Check here to see available indexed data: https://dumps.wikimedia.org/backup-index.html +Also check here to see language meta from its code: https://meta.wikimedia.org/wiki/List_of_Wikipedias +''' + +import os, gc +import logging +import argparse + +import pandas as pd +from datasets import load_dataset + + +def set_logger(): + # Set up the logger + logging.basicConfig( + level=logging.INFO, # Set the desired logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) + format='%(asctime)s [%(levelname)s]: %(message)s', # Customize the log message format + datefmt='%Y-%m-%d %H:%M:%S' # Customize the date/time format + ) + + # Create a file handler to write logs into a file + file_handler = logging.FileHandler('app.log') + + # Set the log level for the file handler + file_handler.setLevel(logging.INFO) + + # Create a formatter for the file handler (customize the log format for the file) + file_formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') + file_handler.setFormatter(file_formatter) + + logger = logging.getLogger("Wiki Dataset Generation") + logger.addHandler(file_handler) + + return logger + + +#only executed if called directly +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("--lang-id", help="Lang ID from Wikipedia Data to extract") + + parser.add_argument("--date-ver", help="Date of Wikipedia Data (YYYYMMDD) generation to extract") + + parser.add_argument("--save-dir-path", help="""Relative dir path of saved Wikipedia CSV data + to the `extract_raw_wiki_data.py` script dir""", + default=os.path.dirname(os.path.abspath(__file__))) + + args = parser.parse_args() + + + dset_name = "wikipedia" + + logger = set_logger() + logger.info("Parsing arguments...") + + lang_id = args.lang_id + date_ver = args.date_ver + save_dir = args.save_dir_path + + logger.info("Loading the dataset from Wikipedia...") + df = load_dataset(dset_name, language=lang_id, date=date_ver, beam_runner='DirectRunner', split="train").to_pandas() + logger.info("Loading done!") + logger.info(f"#Data collected: {df.shape[0]}") + logger.info("Saving dataset raw form...") + df.to_csv(f"{save_dir}/wiki_{lang_id}_{date_ver}_raw_dataset.csv", index=False) + + del df + gc.collect() diff --git a/extract_raw_wiki_data_sea.sh b/extract_raw_wiki_data_sea.sh new file mode 100644 index 0000000000000000000000000000000000000000..b7574118384bdcd7b0cc80841a09268127844acc --- /dev/null +++ b/extract_raw_wiki_data_sea.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# all available lang codes in SEA local-languages or linguistically-related to following countries in SEA: +# Indonesia: "ace" (Acehnese), "ban" 
(Balinese), "bjn" (Banjarese), "bug" (Buginese), "gor" (Gorontalo), "id" (Indonesian), "jv" (Javanese), "mad" (Madurese), "map-bms" (Banyumasan, Dialect of Javanese), "min" (Minangkabau), "ms" (Malay), "nia" (Nias), "su" (Sundanese), "tet" (Tetum) +# Singapore: "ms" (Malay) +# Malaysia: "ms" (Malay) +# Brunei: "ms" (Malay) +# Thailand: "th" (Thai) +# Myanmar: "my" (Burmese), "shn" (Shan), "mnw" (Mon) +# Laos: "lo" (Lao) +# Vietnam: "vi" (Vietnamese) +# Cambodia: "km" (Khmer) +# East Timor: "tet" (Tetum) + +#params of executions +date_ver=20231101 +folder_dir_to_save=./sea_wiki_raw_data +lang_list=(ace ban bjn bug gor id km lo jv mad map-bms my min mnw ms nia su shn tet th vi) + + +#main executions + +if [ ! -d $folder_dir_to_save ]; then + echo "Dir $folder_dir_to_save not exists! Creating the dir..." + mkdir $folder_dir_to_save +fi + +for val in ${!lang_list[@]}; do + lang=${lang_list[$val]} + echo "Executing Extractor on iteration no $((val+1)) of total ${#lang_list[@]} for language $lang and date version of $date_ver" + python extract_raw_wiki_data.py \ + --lang-id $lang \ + --date-ver $date_ver \ + --save-dir-path $folder_dir_to_save + echo "Done Execution" +done +echo "Done Extraction Process" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..51dd3540a5754bb4fdf369af1d5c480284894e67 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +datasets==2.14.6 +pandas==2.1.0 +fsspec==2023.9.1 +apache-beam==2.50.0 +dill~=0.3.1.0 +numpy==1.24.4 diff --git a/sea_wiki.py b/sea_wiki.py new file mode 100644 index 0000000000000000000000000000000000000000..eea47a90eba7ecce15c1795344e7f6394d678be0 --- /dev/null +++ b/sea_wiki.py @@ -0,0 +1,246 @@ +"""The Southeast Asia Lan Wiki Loader""" + +import os +import re + +from functools import reduce + +import numpy as np +import pandas as pd + +import datasets + + +_CITATIONS = """\ +@ONLINE{wikidump, + author = "Wikimedia Foundation", + title = "Wikimedia Downloads", + url = "https://dumps.wikimedia.org"} + +@ONLINE{wikipedia-hf, + title = "Huggingface Wikipedia Dataset", + url = "https://huggingface.co/datasets/wikipedia"}""" + +_REPO_URL = "https://huggingface.co/datasets/sabilmakbar/sea_wiki" + +_LICENSE = ( + "This work is licensed under the Creative Commons Attribution-ShareAlike " + "3.0 Unported License. To view a copy of this license, visit " + "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to " + "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA." 
+) + + +_SEA_WIKI_RAW_DESCRIPTION = """\ +The Southeast Asia Wikipedia Data Repository contains Wikipedia data from Wikipedia HF, focusing +on extraction of all available languages and local languages across Southeast Asia, some of which +are considered low-resource or extremely low-resource languages""" + +_SEA_WIKI_DEDUP_DESCRIPTION = """\ +This is a derivative of the Southeast Asia Wikipedia Data Repository which is already pre-processed +by identifying and dropping duplicates to prevent boilerplate texts from occurring in the dataset""" + +_AVAILABLE_DUMP_VERSION_DATE = ["20231101"] + +# map from alpha-3 country codes to Wikipedia lang codes +# alpha-3 codes: https://www.iban.com/country-codes +# ISO-639 codes: https://iso639-3.sil.org/code_tables/639/data +_COUNTRY_TO_LANG_MAPPER = { + "idn": ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "mad", "map-bms", "min", "ms", "nia", "su", "tet"], + "sgp": ["ms"], + "mys": ["ms"], + "brn": ["ms"], + "tha": ["th"], + "mmr": ["my", "shn", "mnw"], + "lao": ["lo"], + "vnm": ["vi"], + "khm": ["km"], + "tls": ["tet"]} + +_AVAILABLE_DUMP_LANGUAGES = reduce(np.union1d, list(_COUNTRY_TO_LANG_MAPPER.values())) + +_LATEST_DUMP_VERSION_DATE = sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1] + + +def _construct_dset_url_from_dset_version_and_lang(date_ver: str, lang: str, mode: str): + _mode_to_folder_mapper = {"dedup": "sea_wiki_dedup_data", "raw": "sea_wiki_raw_data"} + _mode_to_file_suffix_mapper = {"dedup": "dataset_dedup_cleansed.csv", "raw": "raw_dataset.csv"} + + return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}") + + +class SEAWikiConfig(datasets.BuilderConfig): + """BuilderConfig for SEAWiki.""" + + def __init__(self, description: str=None, features: list=['url', 'title', 'text'], + data_url: str=None, date_stamp: str=_LATEST_DUMP_VERSION_DATE, country: str=None, + lang: str=None, mode = "dedup", **kwargs): + """BuilderConfig for SEAWiki. + + Args: + description: `string`, description of dataset + features: `list[string]`, list of the features that will appear in the + feature dict. Should not include "label" if it's supervised. + data_url: `string`, url to download the data. + date_stamp: `string`, wikidump date_stamp for data available in repo. + country: `string`, alpha-3 country code whose languages will be loaded (used when `lang` isn't provided). + lang: `string`, language to be loaded. + mode: `string`, which dataset variant to load, either "dedup" or "raw". + **kwargs: keyword arguments forwarded to super. + """ + # validate configs + if mode not in ["dedup", "raw"]: + raise ValueError(f"Error occurred! Expected values are 'dedup' or 'raw' for arg `mode`, received {mode}!") + + if ((lang is None and country is None) or date_stamp is None) and data_url is None: + raise ValueError("Expected either `data_url` to be provided, or both `date_stamp` and one of `lang` or `country`!") + + _mode_to_desc_mapper = {"dedup": _SEA_WIKI_DEDUP_DESCRIPTION, "raw": _SEA_WIKI_RAW_DESCRIPTION} + + if date_stamp is not None and date_stamp not in _AVAILABLE_DUMP_VERSION_DATE: + raise ValueError("Provided `date_stamp` dataset versioning doesn't match! Please re-check") + + if lang is not None and lang not in _AVAILABLE_DUMP_LANGUAGES: + raise ValueError("Provided `lang` doesn't match! Please re-check") + + if country is not None and country not in _COUNTRY_TO_LANG_MAPPER.keys() and lang is None: + raise ValueError("Provided `country` doesn't match!
Please re-check") + + super(SEAWikiConfig, self).__init__(**kwargs) + self.features = features + + # prioritize kwargs data_url + if data_url is not None: + self.data_url = data_url + # prioritize lang provided over country + elif lang is not None: + self.data_url = _construct_dset_url_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode) + # if only country provided, create dict of langs + elif country is not None: + self.data_url = {lang: _construct_dset_url_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode) for lang in _COUNTRY_TO_LANG_MAPPER[country]} + + # auto-construct desc if not provided + if description is None: + self.description = _mode_to_desc_mapper[mode] + "\n" + f"Extracted from file path {self.data_url}" + + #define citations & info URL internally in config class + self.citation = _CITATIONS + self.url = _REPO_URL + + +class SEAWiki(datasets.GeneratorBasedBuilder): + """The SEAWiki Dataset.""" + + # if name isn't provided, will create a dataset of all languages + DEFAULT_CONFIG_NAME = "seawiki_dedup_all" + BUILDER_CONFIG_CLASS = SEAWikiConfig + + # construct data-url with countries a list of spoken langs as value + _newest_data_raw_all_langs = [_construct_dset_url_from_dset_version_and_lang( + date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="raw") for lang in _AVAILABLE_DUMP_LANGUAGES] + _newest_data_dedup_all_langs = [_construct_dset_url_from_dset_version_and_lang( + date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="dedup") for lang in _AVAILABLE_DUMP_LANGUAGES] + + # construct data-url with countries as key-dict, being country code as key and list of spoken langs as value + _newest_data_raw_with_countries_all_langs = { + country: [_construct_dset_url_from_dset_version_and_lang(date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="raw") for lang in lang_list] + for country, lang_list in _COUNTRY_TO_LANG_MAPPER.items()} + _newest_data_dedup_with_countries_all_langs = { + country: [_construct_dset_url_from_dset_version_and_lang(date_ver=_LATEST_DUMP_VERSION_DATE, lang=lang, mode="dedup") for lang in lang_list] + for country, lang_list in _COUNTRY_TO_LANG_MAPPER.items()} + + BUILDER_CONFIGS = [ + SEAWikiConfig( + name="seawiki_all", + description=_SEA_WIKI_RAW_DESCRIPTION, + data_url=_newest_data_raw_all_langs + ), + SEAWikiConfig( + name="seawiki_dedup_all", + description=_SEA_WIKI_DEDUP_DESCRIPTION, + data_url=_newest_data_dedup_all_langs + ), + SEAWikiConfig( + name="seawiki_with_countries_all", + description=_SEA_WIKI_RAW_DESCRIPTION, + data_url=_newest_data_raw_with_countries_all_langs + ), + SEAWikiConfig( + name="seawiki_with_countries_dedup_all", + description=_SEA_WIKI_DEDUP_DESCRIPTION, + data_url=_newest_data_dedup_with_countries_all_langs + ), + ] + + + def _info(self): + features = {feature: datasets.Value("string") for feature in self.config.features} + + return datasets.DatasetInfo( + description = self.config.description, + features = datasets.Features(features), + homepage = self.config.url, + citation = self.config.citation, + license=_LICENSE) + + + @staticmethod + def _get_lang_name_from_data_url(data_url: str): + # lang code occurred after "wiki_" and before date versioning (using 8len date) + _list_folder_sep = data_url.split("/")[-1].split("_") + _min_pos = min([pos for pos, data in enumerate(_list_folder_sep) if bool(re.search("\d{8}", data))]) + return re.sub("[^\w\.]", "_", "_".join(_list_folder_sep[1:_min_pos])) + + + def _split_generators(self, dl_manager): + + # handle cases of config "seawiki_all", 
"seawiki_dedup_all", and custom config where only country is provided (take all langs in a country) + if self.config.name in ("seawiki_all", "seawiki_dedup_all") or (self.config.country is not None and self.config.lang is None): + file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url} + dl_dir = dl_manager.download_and_extract(file_dict) + + return [ + datasets.SplitGenerator( + name=datasets.Split(split_name), + gen_kwargs={ + "data_file": file_name + } + ) + for split_name, file_name in dl_dir.items()] + + # handle cases of config "seawiki_with_countries_all", "seawiki_with_countries_dedup_all" + elif self.config.name in ("seawiki_with_countries_all", "seawiki_with_countries_dedup_all"): + file_dict = {} + + for country, file_list in self.config.data_url.items(): + for file in file_list: + file_dict[country + "_" + self._get_lang_name_from_data_url(file)] = file + + dl_dir = dl_manager.download_and_extract(file_dict) + + return [ + datasets.SplitGenerator( + name=datasets.Split(split_name), + gen_kwargs={ + "data_file": file_name + } + ) + for split_name, file_name in dl_dir.items()] + + # handle custom config where only country is provided + elif self.config.lang is not None: + dl_dir = dl_manager.download_and_extract(self.config.data_url) + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "data_file": dl_dir + }, + ) + ] + + + def _generate_examples(self, data_file): + pd_df = pd.read_csv(data_file) + for _, row in pd_df.iterrows(): + example = {feature: row[feature] for feature in self.config.features} + idx = row["id"] + yield idx, example diff --git a/sea_wiki_dedup_data/wiki_ace_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_ace_20231101_dataset_dedup_cleansed.csv new file mode 100644 index 0000000000000000000000000000000000000000..4e285a607f57dac0a7505b6d044d40d3389f6b5a --- /dev/null +++ b/sea_wiki_dedup_data/wiki_ace_20231101_dataset_dedup_cleansed.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f52ce2ae7c8d340a30014bd036bc0806f5782f5a0856f80ffe3bf80f71b33152 +size 4938934 diff --git a/sea_wiki_dedup_data/wiki_ban_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_ban_20231101_dataset_dedup_cleansed.csv new file mode 100644 index 0000000000000000000000000000000000000000..4d1da29999ea3fc9c76226df7592e81cd3c7ae36 --- /dev/null +++ b/sea_wiki_dedup_data/wiki_ban_20231101_dataset_dedup_cleansed.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:325006efe32de9ee07f718cb187094a358e9a50577a4945470471c798bfa4d2b +size 18034158 diff --git a/sea_wiki_dedup_data/wiki_bjn_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_bjn_20231101_dataset_dedup_cleansed.csv new file mode 100644 index 0000000000000000000000000000000000000000..d3a0cce80670bfc6cc1fb8ffb0f1213b27d54f65 --- /dev/null +++ b/sea_wiki_dedup_data/wiki_bjn_20231101_dataset_dedup_cleansed.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a0914f929502fa632e764dadeddf49969b45ccf35d9933743bd4e9e2b16ea0f +size 6791315 diff --git a/sea_wiki_dedup_data/wiki_bug_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_bug_20231101_dataset_dedup_cleansed.csv new file mode 100644 index 0000000000000000000000000000000000000000..adb5c3813af3b74e78ea49c5979be96abb27e24e --- /dev/null +++ b/sea_wiki_dedup_data/wiki_bug_20231101_dataset_dedup_cleansed.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:cc2a1d85b4306eb2d0cf74db4ca2d997b0ea20e2f4016167aeb66394ac5b9b59
+size 2172844
diff --git a/sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..edb973e678e16a05fcf49585b0db3479d14300c0
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0607ee607dd6c00c5eb6729efc0c38a21ea9e10da39005e34373959e179e7707
+size 6222508
diff --git a/sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..2f16cffaa7197f6ffcb75b66be3ca7a4a9247bf7
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_id_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49f61d796d89cb4f8877a80494a4b26ee379bb3602de96dfc455d8cdbd8661fd
+size 1120126829
diff --git a/sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..2840f802a336af78e55276f90041000f6595f7c5
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_jv_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f36dba2f186c2a710cac07918a218ca74006d8940fc8fd1955f4122725efd43
+size 72052487
diff --git a/sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..53cbd4c82f8bb88c6c1f10a0a9889e37ca741a4e
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_km_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0f043cd2dddb22a3076abe0af5805b6b8130678aa6650bbdb158f91cb6e1b30
+size 102709279
diff --git a/sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..1cc16fa420551d967ed83011e0adcb8d06f2f3d7
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_lo_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47d4132398f9002803a88454ad4318a9f2f41c11281a9f085c23255810beccd6
+size 14905688
diff --git a/sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..959fca5072a39d1ea27d561ef2d5b5a0581d6f03
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_mad_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:517777f9ec0da20ce5ccf1f70cca6d2a8192452745afefc6288ff360dad4ee7c
+size 1610155
diff --git a/sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a3729f78c0e75892fa2a4529eb1f7b01cb82199e
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_map-bms_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49bee130293507159198f83422c654d5e0000e6e20d27ed5f5afeb378a663967
+size 5076335
diff --git a/sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..2868d54553d2be93a36b0d4c5ef3b5654c40cd1b
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_min_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6debf5f9204262a3bcdcb37966873f877158882c435a3b5cae55a01ad1418a3f
+size 116663617
diff --git a/sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..687ad7e5be23708db57a90c40c939593f2c629e3
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_mnw_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:daf4ec4e087fcf46c2ebf71a3db8e11b8bdbfbbe0eacd9e5efc6d9f9c5b6b6d2
+size 47243726
diff --git a/sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..c5d1c0e48c41c294624325069d039781b8789cf1
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_ms_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c1aca444ed1a161d30069d3106f604597cc9e3e48267b223d2ef3cf7b52fa7c
+size 415339805
diff --git a/sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..296ad36756e2434310a36c58c029135b07566eb4
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_my_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffd9ad7c34a340d4ae62820dccede45d515dc69b145340f4dc5485f01d83745f
+size 312976625
diff --git a/sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..55030510e60817aeb36a455141ad1d870df8b5ab
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_nia_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3895f612ef2366104e669eac8acd5ec13646ab4dd388a9372422bbcdfbbe45d6
+size 2151317
diff --git a/sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..c4b2db50be354299429ed92f6a1b152145c39538
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_shn_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05d1589795708541a95366f18528a47c5ac95f4e287291a19d5589f03183cf8f
+size 33599756
diff --git a/sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..b8dc13a037c0bc69ce0b07ec2f331e5b5678fd34
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_su_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32b758f801e232d271882a619b7107237b15964dc467b16a5867493cfdb1b655
+size 47525184
diff --git a/sea_wiki_dedup_data/wiki_tet_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_tet_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..13702c672629ceb0fdd91b7cc7863387fcec160d
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_tet_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:550083c7c657441c29cf8de3a8dc6dcf9506bc472c0000d15b88af7bbd855699
+size 1450499
diff --git a/sea_wiki_dedup_data/wiki_th_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_th_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..4cfac38ba486e3b9ad619eb428833c168dbe5060
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_th_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:793f1b801eecdcc9d4b1bd00a384957de08b916f491bf62b1c73e5792e3bcfa9
+size 1013480563
diff --git a/sea_wiki_dedup_data/wiki_vi_20231101_dataset_dedup_cleansed.csv b/sea_wiki_dedup_data/wiki_vi_20231101_dataset_dedup_cleansed.csv
new file mode 100644
index 0000000000000000000000000000000000000000..69ec8c4d1b2b90d597f7f61fcda5386e52cd057a
--- /dev/null
+++ b/sea_wiki_dedup_data/wiki_vi_20231101_dataset_dedup_cleansed.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ce2151076f483b2885e580d2d0b392753a68443eca21776ce8eb5373b785499
+size 1605617428
diff --git a/sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..720d6d65eb5976d6bbc7dd491eb7b5d89523b80b
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_ace_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3e723d9930ae552692b1c45c5463acfb4632c71ed3736e1e49c0f21a0c5086e
+size 4946116
diff --git a/sea_wiki_raw_data/wiki_ban_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_ban_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..fe103bd2b72e5ffa1fedfbc5c036924a9e48c8cd
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_ban_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8b51380f75333cf170c29383bab07d24800a7cd19491a094f7f78ccb018f128
+size 18207081
diff --git a/sea_wiki_raw_data/wiki_bjn_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_bjn_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a2452c6025b97967a7d067e3532a6114a2ae999a
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_bjn_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5e1b9f81f122388fcc774b1f70f63bfe331aeae367c60ed1437811fb2f87538
+size 6797366
diff --git a/sea_wiki_raw_data/wiki_bug_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_bug_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..937a993dc9428f37c3da461171dc38b781c439d4
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_bug_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a4f9e90c1e589096a213c808f8a3067046c3d56f538bd16f48108d9d2f513a4
+size 3280997
diff --git a/sea_wiki_raw_data/wiki_gor_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_gor_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..3c9ef104110624d257e01b75f91f05efa398d555
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_gor_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ebaa34881179ed7ded2f6c7640c984473086dfece48f769d61a9da7503e8a57
+size 6244189
diff --git a/sea_wiki_raw_data/wiki_id_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_id_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..4f4387329489f220b93b326a478a31a8ced1f23e
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_id_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77a4ec9e94f097b1d40eabe435231cea329d97cf17eb77dd3f84a2582f3497eb
+size 1121070688
diff --git a/sea_wiki_raw_data/wiki_jv_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_jv_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..38e7144a6fc626d6007c066b055fba33db45317f
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_jv_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58dfa79e01e4518bc9af414daf98836072ad57d5f863e825e540e000e0d94826
+size 72156567
diff --git a/sea_wiki_raw_data/wiki_km_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_km_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..d7b78b23d3e9e321536e14a7cc0a3f9d9ece9107
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_km_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a42cd60f4790892c6e3b3a807901f1fdcbcf8ddbe42108e6686bbcca89f71ec
+size 103156370
diff --git a/sea_wiki_raw_data/wiki_lo_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_lo_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..bfee316eb6387e76647a9d8c129b107c68e0b50f
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_lo_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63ccd7122594e6367faf454a3ea838bb9341bf82774eecec878573a2bd8cf547
+size 15237378
diff --git a/sea_wiki_raw_data/wiki_mad_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_mad_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..959fca5072a39d1ea27d561ef2d5b5a0581d6f03
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_mad_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:517777f9ec0da20ce5ccf1f70cca6d2a8192452745afefc6288ff360dad4ee7c
+size 1610155
diff --git a/sea_wiki_raw_data/wiki_map-bms_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_map-bms_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..0486f386d18dac2f28eedff3ef641ee793259800
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_map-bms_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1ecc56766b3297f6b19946c856898c0a1bfe7f2f472245d5e27001840560729
+size 5227494
diff --git a/sea_wiki_raw_data/wiki_min_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_min_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ce4fb3a6ae431752cf082ab2a6e08ed3006a6c9a
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_min_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c817fad237578a7ef319b82c35b1e6925699c28310f590e631aaa9f51477978c
+size 116764626
diff --git a/sea_wiki_raw_data/wiki_mnw_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_mnw_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..cf26911674a54453d70cfb5fd9a13fa7f8537d2e
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_mnw_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9f20ff42eeedfe51b6e7e6f73e16feb30afc185e2e0e95459b2efd9053a84c9
+size 47322074
diff --git a/sea_wiki_raw_data/wiki_ms_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_ms_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..f265d3879381281f06c9bb4b47cab46daedfa1a8
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_ms_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e21c0d9a01340b911bcd7396fd48202e0ccf98a9e18fef91a07590aa39896ba6
+size 420222523
diff --git a/sea_wiki_raw_data/wiki_my_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_my_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..67e50adc3a166e6ef80ef267823b943af8d776ec
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_my_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bb0ec1fc163fbafc31e0c8c8618bc1c23740a010e201c16468cfa67ebf4cc2e
+size 313356770
diff --git a/sea_wiki_raw_data/wiki_nia_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_nia_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..55030510e60817aeb36a455141ad1d870df8b5ab
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_nia_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3895f612ef2366104e669eac8acd5ec13646ab4dd388a9372422bbcdfbbe45d6
+size 2151317
diff --git a/sea_wiki_raw_data/wiki_shn_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_shn_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..6ffbcd125182e4c5bc32de8cae4c183b7da8158a
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_shn_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc6e89ac9b6d938d9454b06be1aaf58d0d382e6f2793139791ec0ebfdf5354ba
+size 33737143
diff --git a/sea_wiki_raw_data/wiki_su_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_su_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..edfbe231aa0d6cf1da0fd18ab6394cac21599396
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_su_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19379716ae3995ac5ad7e9528e5a9ccd1e5fcfbe01b80e66356317fe8ca79b03
+size 47528683
diff --git a/sea_wiki_raw_data/wiki_tet_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_tet_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..3a7651b0d3cb9d9e5524790907256d6aff04e997
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_tet_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde3d9f8f978adb7c4c26bbcd252a53832fe721217e614561e0e9f8459ee8308
+size 1452853
diff --git a/sea_wiki_raw_data/wiki_th_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_th_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..2064a805c35844b61b77fb9b9d5f46650d254951
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_th_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6be292a3229a6581e05fa56149fccc493fa2a08b21615f4d3098467779f0020c
+size 1013541968
diff --git a/sea_wiki_raw_data/wiki_vi_20231101_raw_dataset.csv b/sea_wiki_raw_data/wiki_vi_20231101_raw_dataset.csv
new file mode 100644
index 0000000000000000000000000000000000000000..8f2463024ea184be9ea239442abceb08c6756a82
--- /dev/null
+++ b/sea_wiki_raw_data/wiki_vi_20231101_raw_dataset.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b866efb43b4605fec03716ac71aa5d60612956f6fb2d3f283224c6d7f317aa
+size 1605847896
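Note: every file added above is tracked by Git LFS (per the `*.csv` rule in `.gitattributes`), so the committed blob is a three-line pointer (`version`, `oid`, `size`) rather than the CSV payload; the actual data is fetched with `git lfs pull`. Below is a minimal illustrative sketch, not part of this repo, that checks a downloaded CSV against the sha256 and byte size recorded in its pointer; the helper names are hypothetical, and the example reuses the `wiki_gor` dedup pointer values from the diff above.

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text):
    """Parse the three 'key value' lines of a Git LFS pointer into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # expects keys: 'version', 'oid', 'size'

def verify_lfs_object(pointer_text, csv_path):
    """Verify a materialized CSV (e.g. after `git lfs pull`) against its pointer."""
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the 'sha256:' prefix
    expected_size = int(fields["size"])
    data = Path(csv_path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Pointer contents taken verbatim from the wiki_gor dedup hunk above.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:0607ee607dd6c00c5eb6729efc0c38a21ea9e10da39005e34373959e179e7707\n"
    "size 6222508\n"
)
print(verify_lfs_object(
    pointer,
    "sea_wiki_dedup_data/wiki_gor_20231101_dataset_dedup_cleansed.csv"))
```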