import json

import datasets

_CITATION = '''
'''

# Number of .jsonl.gz shard files available per language subset.
languages2filesize = {
    'ar': 32,
    'de': 150,
    'en': 352,
    'es': 102,
    'fr': 134,
    'hi': 5,
    'it': 83,
    'ja': 47,
    'ko': 13,
    'simple': 5,
    'zh': 23,
}

_DESCRIPTION = 'Loading script for the Cohere/wikipedia-22-12 Wikipedia dataset.'

# Download URLs of the gzipped JSON Lines shards, grouped by language.
_DATASET_URLS = {
    lang: [
        f'https://huggingface.co/datasets/Cohere/wikipedia-22-12/resolve/main/{lang}/{str(i).zfill(3)}.jsonl.gz'
        for i in range(n)
    ]
    for lang, n in languages2filesize.items()
}
class WikiCorpus(datasets.GeneratorBasedBuilder):
    # One BuilderConfig per language; the config name selects the language subset.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=lang,
            description=f'Wiki dataset in language {lang}.',
        )
        for lang in languages2filesize
    ]

    def _info(self):
        # Schema of a single record: one Wikipedia paragraph plus article metadata.
        features = datasets.Features({
            'id': datasets.Value('int32'),
            'title': datasets.Value('string'),
            'text': datasets.Value('string'),
            'url': datasets.Value('string'),
            'wiki_id': datasets.Value('string'),
            'views': datasets.Value('float32'),
            'paragraph_id': datasets.Value('int32'),
            'langs': datasets.Value('int32'),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage='https://www.cohere.ai',
            license='',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang = self.config.name
        # Download every shard for the selected language; download_and_extract
        # also decompresses the gzipped files.
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[lang])

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'filepaths': downloaded_files,
                },
            ),
        ]
        return splits

    def _generate_examples(self, filepaths):
        # Each extracted shard is JSON Lines: one record per line, keyed by 'id'.
        for filepath in filepaths:
            with open(filepath, encoding='utf-8') as f:
                for line in f:
                    data = json.loads(line)
                    yield data['id'], data
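

# A minimal usage sketch, assuming this file is run or saved locally as a
# loading script; the chosen 'simple' config and the print are illustrative,
# not prescribed by the script itself.
if __name__ == '__main__':
    from datasets import load_dataset

    # Load the small Simple-English subset through this loading script.
    ds = load_dataset(__file__, 'simple', split='train')
    print(ds[0])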