"""EU Wikipedias"""
import json
import datasets
from huggingface_hub.file_download import hf_hub_url
# prefer the standard-library lzma; fall back to pylzma on builds without it
try:
    import lzma as xz
except ImportError:
    import pylzma as xz
datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@ONLINE {wikidump,
author = {Wikimedia Foundation},
title = {Wikimedia Downloads},
url = {https://dumps.wikimedia.org}
}
"""
_DESCRIPTION = """\
Wikipedia dataset containing cleaned articles in the 24 official EU languages.
The dataset is built from the Wikipedia dump
(https://dumps.wikimedia.org/) with one configuration per language and date. Each example
contains the content of one full Wikipedia article, cleaned to strip
markup and unwanted sections (references, etc.).
"""
_LICENSE = (
"This work is licensed under the Creative Commons Attribution-ShareAlike "
"3.0 Unported License. To view a copy of this license, visit "
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
_URL = "https://huggingface.co/datasets/joelito/EU_Wikipedias"
_LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr",
"hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
_DATES = ["20221120"]  # more dates can be added in the future via prepare_wikipedias.py
# IMPORTANT: increase this once larger dumps are available (English has 11 shards in the 20221120 dump)
_HIGHEST_NUMBER_OF_SHARDS = 11


class EUWikipediasConfig(datasets.BuilderConfig):
"""BuilderConfig for EUWikipedias."""
def __init__(self, date=None, language=None, **kwargs):
"""BuilderConfig for EUWikipedias.
Args:
language: string, the language code for the Wikipedia dump to use:
One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hr,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all
date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
available dates can be found at https://dumps.wikimedia.org/enwiki/.
**kwargs: keyword arguments forwarded to super.
"""
if date not in _DATES:
raise ValueError(f"date must be one of {_DATES} but was `{date}`")
        if language not in _LANGUAGES + ["all"]:
            raise ValueError(f"language must be one of {_LANGUAGES + ['all']} but was `{language}`")
super().__init__(
name=f"{date}.{language}",
description=f"Wikipedia dataset for {language}, parsed from {date} dump.",
**kwargs,
)
self.date = date
self.language = language
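

# Illustrative note (assumed usage, not executed by this script):
# EUWikipediasConfig(date="20221120", language="de") yields the config named
# "20221120.de"; language="all" bundles every EU language into a single config.
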
class EUWikipedias(datasets.GeneratorBasedBuilder):
"""EUWikipedias: A dataset of Wikipedias in the EU languages"""
BUILDER_CONFIG_CLASS = EUWikipediasConfig
    BUILDER_CONFIGS = [EUWikipediasConfig(date=date, language=language)
                       for language in _LANGUAGES + ["all"]
                       for date in _DATES]

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"language": datasets.Value("string"),
"id": datasets.Value("string"),
"url": datasets.Value("string"),
"title": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
supervised_keys=None, # No default supervised_keys.
homepage=_URL,
citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
def download_url(dataset, file_name):
url = hf_hub_url(repo_id=dataset, filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")
return dl_manager.download(url)
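        # shard files live under data/{date}/{language}_{shard}.jsonl.xz in the dataset
        # repo, e.g. data/20221120/de_0.jsonl.xz for the first German shard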
        data_infos = []
        # "all" expands to every EU language; otherwise only the requested language is loaded
        languages = _LANGUAGES if self.config.language == "all" else [self.config.language]
for language in languages:
info = {"language": language}
            # probe shard files in order until one is missing
            for shard in range(_HIGHEST_NUMBER_OF_SHARDS):
                try:
                    info["filepath"] = download_url("joelito/EU_Wikipedias", f"{self.config.date}/{language}_{shard}")
                    data_infos.append(info.copy())
                except Exception:
                    break  # the first missing shard marks the end for this language
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_infos": data_infos})]

    def _generate_examples(self, data_infos):
"""This function returns the examples in the raw (text) form by iterating on all the files."""
id_ = 0
for data_info in data_infos:
logger.info("Generating examples from = %s", data_info["filepath"])
            try:
                # open the raw file in binary mode and stream-decompress it as UTF-8 text
                with xz.open(open(data_info["filepath"], "rb"), "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:  # skip empty lines
example = json.loads(line)
if example is not None and isinstance(example, dict):
yield id_, {
"language": data_info["language"], # add the language
**example,
}
id_ += 1
except Exception:
logger.exception("Error while processing file %s", data_info["filepath"])
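

# A minimal usage sketch (illustrative, not part of the original loader): stream
# one language config with the `datasets` library. The config name follows the
# `{date}.{language}` scheme defined above.
if __name__ == "__main__":
    dataset = datasets.load_dataset("joelito/EU_Wikipedias", "20221120.de", split="train", streaming=True)
    for example in dataset.take(3):  # IterableDataset.take yields only the first n examples
        print(example["language"], example["title"])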