import glob
import os

from datasets import load_dataset

_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
# ERROR_LANGS = ['et', 'ga']  # 20221101: ga hung at sample 36715/57711; for et, no data was downloaded
ERROR_LANGS = []  # 20221120: no errors this time
FINAL_LANGS = [l for l in _LANGUAGES if l not in ERROR_LANGS]
date = "20221120"  # snapshots are released on the 1st and 20th of each month (YYYYMMDD); edit this to generate newer data

base_dir = 'data'
date_dir = os.path.join(base_dir, date)
os.makedirs(date_dir, exist_ok=True)

max_file_size = 4  # maximum uncompressed size per output file, in GB
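# Output layout: data/<date>/<lang>_<shard>.jsonl, compressed to .jsonl.xz at the end.
# Languages whose uncompressed dump exceeds max_file_size are split into multiple shards.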


def process_language(lang):
    print(f'Processing language {lang}...')
    # note: streaming does not work for this dataset, so load the full split
    dataset = load_dataset("olm/wikipedia", language=lang, date=date, split='train')
    size_in_gb = dataset.size_in_bytes / 1e9
    print(f'Found {size_in_gb:.2f} GB of data ({len(dataset)} documents) for language {lang}...')
    if size_in_gb > max_file_size:
        num_shards = int(size_in_gb / max_file_size) + 1
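        # e.g. 9.3 GB with a 4 GB cap: int(9.3 / 4) + 1 = 3 shards of ~3.1 GB each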
        for shard in range(num_shards):
            dataset.shard(num_shards, shard).to_json(f'{date_dir}/{lang}_{shard}.jsonl', lines=True)
    else:
        dataset.to_json(f'{date_dir}/{lang}_0.jsonl', lines=True)


if __name__ == '__main__':
    # Run with:
    #   export PYTHONPATH=. && python prepare_wikipedias.py | tee prepare_wikipedias.log
    # processing the languages in parallel does not work, so run them sequentially
    for lang in FINAL_LANGS:
        process_language(lang)

    # Compress datasets
    print(f"Compressing datasets at {date_dir}")
    # compress at the end, once all downloads are done, since xz itself uses multithreading
    for path in glob.glob(os.path.join(date_dir, '*.jsonl')):
        os.system(f'xz -zkf -T0 {path}')  # -T0 uses all available cores
        # os.remove(path)  # optionally remove the uncompressed file to save space
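
# Reading a compressed shard back (a sketch, assuming pandas is available):
#   import pandas as pd
#   df = pd.read_json('data/20221120/en_0.jsonl.xz', lines=True, compression='xz')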