"""Catalan Government Crawling.""" |
|
|
|
import os |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\
@inproceedings{armengol-estape-etal-2021-multilingual,
    title = "Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan",
    author = "Armengol-Estap{\'e}, Jordi and
      Carrino, Casimiro Pio and
      Rodriguez-Penagos, Carlos and
      de Gibert Bonet, Ona and
      Armentano-Oller, Carme and
      Gonzalez-Agirre, Aitor and
      Melero, Maite and
      Villegas, Marta",
    booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-acl.437",
    doi = "10.18653/v1/2021.findings-acl.437",
    pages = "4933--4946",
    eprint = {2107.07903},
    archivePrefix = {arXiv},
    primaryClass = {cs.CL}
}
"""

_DESCRIPTION = """\
The Catalan Government Crawling Corpus is a 39-million-token Catalan web corpus obtained by crawling the .gencat domain and its subdomains, which belong to the Catalan Government, during September and October 2020. It consists of 39,117,909 tokens, 1,565,433 sentences and 71,043 documents. Documents are separated by single new lines. It is a subcorpus of the Catalan Textual Corpus.
"""

_HOMEPAGE = "https://zenodo.org/record/5511667"

_LICENSE = "Creative Commons CC0 1.0 Universal"

_URL = "https://zenodo.org/record/5511667/files/catalan_government_crawling.zip?download=1"


class CatalanGovernmentCrawling(datasets.GeneratorBasedBuilder):
    """Catalan Government Crawling."""

    VERSION = datasets.Version("1.0.1")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the Zenodo archive; the corpus text file lives inside the extracted folder.
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "catalan_government_crawling", "corpus", "catalan_government_crawling.txt"
                    ),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            text = ""
            for id_, line in enumerate(f):
                if line == "\n":
                    # A blank line marks the end of a document: yield the buffered text.
                    yield id_, {"text": text.strip()}
                    text = ""
                else:
                    text += line
            # Yield the final document if the file does not end with a blank line.
            if text.strip():
                yield id_ + 1, {"text": text.strip()}
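

# A minimal usage sketch, not part of the original loading script. It assumes this
# file is saved locally and loaded by path with the `datasets` library; depending on
# the installed version, `trust_remote_code=True` may also be required.
if __name__ == "__main__":
    from datasets import load_dataset

    # Point load_dataset at this script; only the "train" split is defined above.
    dataset = load_dataset(__file__, split="train")
    print(dataset)  # number of documents and the "text" feature
    print(dataset[0]["text"][:200])  # first 200 characters of the first document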