"""EusCrawl dataset."""
import json
import datasets
_DESCRIPTION = """\
EusCrawl (http://www.ixa.eus/euscrawl/) is a high-quality corpus for
Basque comprising 12.5 million documents and 423 million tokens,
totalling 2.1 GiB of uncompressed text. EusCrawl was built using
ad-hoc scrapers to extract text from 33 Basque websites with
high-quality content, resulting in cleaner text compared to
general-purpose approaches.
We do not claim ownership of any document in the corpus. All documents
we collected were published under a Creative Commons license in their
original website, and the specific variant can be found in the
"license" field of each document. Should you consider
that our data contains material that is owned by you and you would not
like to be reproduced here, please contact Aitor Soroa at
a.soroa@ehu.eus.
For more details about the corpus, refer to our paper "Artetxe M.,
Aldabe I., Agerri R., Perez-de-Viñaspre O, Soroa A. (2022). Does
Corpus Quality Really Matter for Low-Resource Languages?"
https://arxiv.org/abs/2203.08111
If you use our corpus or models for academic research, please cite the paper in question:
@misc{artetxe2022euscrawl,
  title={Does corpus quality really matter for low-resource languages?},
  author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and
          Olatz Perez-de-Viñaspre and Aitor Soroa},
  year={2022},
  eprint={2203.08111},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
For questions please contact Aitor Soroa at a.soroa@ehu.eus.
"""
_HOMEPAGE_URL = "https://ixa.ehu.eus/euscrawl/"
_CITATION = """\
@misc{artetxe2022euscrawl,
  title={Does corpus quality really matter for low-resource languages?},
  author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and
          Olatz Perez-de-Viñaspre and Aitor Soroa},
  year={2022},
  eprint={2203.08111},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
"""
# Location of the free-license release and the JSONL file inside the archive.
_URL = "http://ixa.ehu.eus/euscrawl/files/euscrawl-v1-free-jsonl.tar.bz2"
_FILEPATH = "euscrawl-v1-free-jsonl/euscrawl-v1.free.jsonl"
# Fields yielded for every example; missing fields default to None.
KEYS = [
"plain_text",
"title",
"opening",
"text",
"extra",
"license",
"source",
"url",
"author",
"type",
"lang",
"heading",
"category",
"tags",
"id",
"revid",
"year",
"month",
"day",
"hour",
"minute",
"second",
]
class EusCrawl(datasets.GeneratorBasedBuilder):
    """Builder for the EusCrawl v1 free-license corpus."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"plain_text": datasets.Value("string"),
"title": datasets.Value("string"),
"opening": datasets.Value("string"),
"text": datasets.Value("string"),
"extra": datasets.Sequence(
{
"title": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
"license": datasets.Value("string"),
"source": datasets.Value("string"),
"url": datasets.Value("string"),
"author": datasets.Value("string"),
"type": datasets.Value("string"),
"lang": datasets.Value("string"),
"heading": datasets.Value("string"),
"category": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(datasets.Value("string")),
"id": datasets.Value("int32"),
"revid": datasets.Value("int32"),
"year": datasets.Value("int32"),
"month": datasets.Value("int32"),
"day": datasets.Value("int32"),
"hour": datasets.Value("int32"),
"minute": datasets.Value("int32"),
"second": datasets.Value("int32"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
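    # For illustration only: a record shaped like the features above. All
    # field values here are invented for this sketch, not taken from the
    # corpus.
    #
    #   {
    #       "plain_text": "Izenburua\nSarrera\nLehen paragrafoa",
    #       "title": "Izenburua",
    #       "opening": "Sarrera",
    #       "text": "Lehen paragrafoa",
    #       "extra": [{"title": "Atala", "text": "Atalaren testua"}],
    #       ...
    #   }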
    def _split_generators(self, dl_manager):
        # Download the tar.bz2 once and iterate over its members lazily;
        # iter_archive also works in streaming mode.
        path = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepaths": dl_manager.iter_archive(path)},
)
]
    def _generate_examples(self, filepaths):
        for filepath, file in filepaths:
            if filepath == _FILEPATH:
                for idx, line in enumerate(file):
                    data = json.loads(line)
                    # Combine title, opening, body text and every extra
                    # section into plain_text, one stripped non-empty line
                    # per row. `or ""` guards against explicit JSON nulls.
                    plain_text_lines = []
                    plain_text_lines += (data.get("title") or "").splitlines()
                    plain_text_lines += (data.get("opening") or "").splitlines()
                    plain_text_lines += (data.get("text") or "").splitlines()
                    plain_text_lines += [
                        line
                        for extra in data.get("extra", [])
                        for line in extra["title"].splitlines()
                        + extra["text"].splitlines()
                    ]
                    plain_text_lines = [
                        line.strip() for line in plain_text_lines if line.strip()
                    ]
                    data["plain_text"] = "\n".join(plain_text_lines)
                    # Default to None if a field is missing.
                    yield idx, {key: data.get(key, None) for key in KEYS}
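

if __name__ == "__main__":
    # A minimal usage sketch, not part of the loader itself. It assumes a
    # `datasets` version that still supports loading from a local script,
    # plus network access to the download URL. Streaming avoids unpacking
    # the full 2.1 GiB corpus up front.
    ds = datasets.load_dataset(__file__, split="train", streaming=True)
    for example in ds.take(1):
        print(example["title"])
        print(example["plain_text"][:200])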