import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
Polish Wikipedia corpus, converted from the original Parquet dump to JSONL.
"""

_URLs = {
    "corpus": "corpus.jsonl.gz",
}
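
# Each line of corpus.jsonl.gz is expected to hold one JSON object with the
# fields declared in _info(); the values below are illustrative, not real data:
#
#     {"id": "1", "url": "https://pl.wikipedia.org/wiki/...", "title": "...", "text": "..."}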


class PLWikipedia(datasets.GeneratorBasedBuilder):
    """Builder for the Polish Wikipedia JSONL corpus."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="corpus",
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        # download_and_extract decompresses corpus.jsonl.gz and returns the
        # local path of the extracted JSONL file.
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        return [
            datasets.SplitGenerator(
                name=self.config.name,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        # Stream the JSONL file line by line rather than materializing it all
        # in memory with readlines(); each line is one article record.
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
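
# A minimal usage sketch (assumption: this script is saved as "plwikipedia.py"
# next to corpus.jsonl.gz; that file name is illustrative, not part of the repo):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("plwikipedia.py", "corpus")
#     print(ds["corpus"][0]["title"])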