"""EusCrawl dataset."""
import json
import datasets
_DESCRIPTION = """\
EusCrawl (http://www.ixa.eus/euscrawl/) is a high-quality corpus for
Basque comprising 12.5 million documents and 423 million tokens,
totalling 2.1 GiB of uncompressed text. EusCrawl was built using
ad-hoc scrapers to extract text from 33 Basque websites with
high-quality content, resulting in cleaner text compared to general
purpose approaches.
We do not claim ownership of any document in the corpus. All documents
we collected were published under a Creative Commons license in their
original website, and the specific variant can be found in the
"license" field of each document. Should you consider
that our data contains material that is owned by you and you would not
like to be reproduced here, please contact Aitor Soroa at
a.soroa@ehu.eus.
For more details about the corpus, refer to our paper "Artetxe M.,
Aldabe I., Agerri R., Perez-de-Viñaspre O., Soroa A. (2022). Does
Corpus Quality Really Matter for Low-Resource Languages?"
https://arxiv.org/abs/2203.08111
If you use our corpus or models for academic research, please cite the paper in question:
@misc{artetxe2022euscrawl,
title={Does corpus quality really matter for low-resource languages?},
author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and Olatz Perez-de-Viñaspre and Aitor Soroa},
year={2022},
eprint={2203.08111},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
For questions please contact Aitor Soroa at a.soroa@ehu.eus.
"""
_HOMEPAGE_URL = "https://ixa.ehu.eus/euscrawl/"
_CITATION = """\
@misc{artetxe2022euscrawl,
title={Does corpus quality really matter for low-resource languages?},
author={Mikel Artetxe and Itziar Aldabe and Rodrigo Agerri and
Olatz Perez-de-Viñaspre and Aitor Soroa},
year={2022},
eprint={2203.08111},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_URL = "http://ixa.ehu.eus/euscrawl/files/euscrawl-v1-free-jsonl.tar.bz2"
_FILEPATH = "euscrawl-v1-free-jsonl/euscrawl-v1.free.jsonl"
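# Top-level JSON fields copied into each generated example; any field that is
# missing from a document defaults to None in _generate_examples().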
KEYS = [
"plain_text",
"title",
"opening",
"text",
"extra",
"license",
"source",
"url",
"author",
"type",
"lang",
"heading",
"category",
"tags",
"id",
"revid",
"year",
"month",
"day",
"hour",
"minute",
"second",
]
class EusCrawl(datasets.GeneratorBasedBuilder):
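    """Builder for the free EusCrawl v1 release (one JSONL file inside a tar.bz2 archive)."""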
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"plain_text": datasets.Value("string"),
"title": datasets.Value("string"),
"opening": datasets.Value("string"),
"text": datasets.Value("string"),
"extra": datasets.Sequence(
{
"title": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
"license": datasets.Value("string"),
"source": datasets.Value("string"),
"url": datasets.Value("string"),
"author": datasets.Value("string"),
"type": datasets.Value("string"),
"lang": datasets.Value("string"),
"heading": datasets.Value("string"),
"category": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(datasets.Value("string")),
"id": datasets.Value("int32"),
"revid": datasets.Value("int32"),
"year": datasets.Value("int32"),
"month": datasets.Value("int32"),
"day": datasets.Value("int32"),
"hour": datasets.Value("int32"),
"minute": datasets.Value("int32"),
"second": datasets.Value("int32"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Download the tar.bz2 archive and iterate over its members without
        # extracting them to disk.
        path = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepaths": dl_manager.iter_archive(path)},
)
]
    def _generate_examples(self, filepaths):
        for filepath, file in filepaths:
            # The archive contains a single JSONL file; every line is one document.
            if filepath == _FILEPATH:
for id, line in enumerate(file):
data = json.loads(line)
                    # Build a plain-text rendering of the document by
                    # concatenating the title, opening, body text and any
                    # "extra" sections.
                    plain_text_lines = []
                    plain_text_lines += data.get("title", "").splitlines()
                    plain_text_lines += data.get("opening", "").splitlines()
                    plain_text_lines += data.get("text", "").splitlines()
plain_text_lines += [
line
for extra in data.get("extra", [])
for line in extra["title"].splitlines()
+ extra["text"].splitlines()
]
plain_text_lines = [
line.strip() for line in plain_text_lines if line.strip()
]
data["plain_text"] = "\n".join(plain_text_lines)
                    # default to None if a field is missing
                    yield id, {key: data.get(key, None) for key in KEYS}
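

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself. It assumes this file
    # is used as a local dataset script via `datasets.load_dataset`; depending
    # on the installed `datasets` version, `trust_remote_code=True` may also be
    # required. Streaming avoids extracting the whole archive before the first
    # example becomes available.
    ds = datasets.load_dataset(__file__, split="train", streaming=True)
    for example in ds:
        print(example["title"], example["url"])
        break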