|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Carolina Corpus""" |
|
|
|
import os
from typing import Optional

import datasets
from lxml import etree
|
|
|
|
|
# Project landing page for the Carolina corpus.
_HOMEPAGE = "https://sites.usp.br/corpuscarolina/"

# Short description surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """
Carolina is an Open Corpus for Linguistics and Artificial Intelligence with a
robust volume of texts of varied typology in contemporary Brazilian Portuguese
(1970-2021).
"""

# BibTeX citation for the corpus. Raw string so the `\url{...}` macro is not
# interpreted as a Python escape sequence.
_CITATION = r"""
@misc{corpusCarolinaV1.1,
    title={
        Carolina:
            The Open Corpus for Linguistics and Artificial Intelligence},
    author={
        Finger, Marcelo and
        Paixão de Sousa, Maria Clara and
        Namiuti, Cristiane and
        Martins do Monte, Vanessa and
        Costa, Aline Silva and
        Serras, Felipe Ribas and
        Sturzeneker, Mariana Lourenço and
        Guets, Raquel de Paula and
        Mesquita, Renata Morais and
        Mello, Guilherme Lamartine de and
        Crespo, Maria Clara Ramos Morales and
        Rocha, Maria Lina de Souza Jeannine and
        Brasil, Patrícia and
        Silva, Mariana Marques da and
        Palma, Mayara Feliciano},
    howpublished={\url{https://sites.usp.br/corpuscarolina/corpus}},
    year={2022},
    note={Version 1.1 (Ada)},
}
"""

# License notice: the corpus aggregates texts under multiple licenses; only
# the Carolina headers themselves are CC BY-NC-SA 4.0.
_LICENSE = """
The Open Corpus for Linguistics and Artificial Intelligence (Carolina) was
compiled for academic purposes, namely linguistic and computational analysis.
It is composed of texts assembled in various digital repositories, whose
licenses are multiple and therefore should be observed when making use of the
corpus. The Carolina headers are licensed under Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International."
"""
|
|
|
|
|
def _taxonomies(): |
|
"""Creates a map between taxonomy code and name |
|
|
|
Returns |
|
------- |
|
dict |
|
The dictionary of codes and names. |
|
""" |
|
return dict( |
|
dat="datasets and other corpora", |
|
jud="judicial branch", |
|
leg="legislative branch", |
|
pub="public domain works", |
|
soc="social media", |
|
uni="university_domains", |
|
wik="wikis", |
|
) |
|
|
|
|
|
# Corpus release version (Version 1.1 "Ada").
_VERSION = "1.1.0"
# URL template for a taxonomy folder. NOTE(review): this is a relative path —
# presumably resolved against the dataset repository by the download manager;
# verify against the hosting layout.
_CORPUS_URL = "corpus/{taxonomy}/"
# Each taxonomy folder ships a sha256 manifest listing its files.
_CHECKSUM_FNAME = _CORPUS_URL + "checksum.sha256"
|
|
|
|
|
class CarolinaConfig(datasets.BuilderConfig):
    """Carolina Configuration."""

    def __init__(self, taxonomy: Optional[str] = None, **kwargs):
        """BuilderConfig for Carolina.

        Parameters
        ----------
        taxonomy : str, optional
            The taxonomy code (3 letters). The code defines the taxonomy
            to download. If `None`, all taxonomies will be downloaded.
        **kwargs
            Arguments passed to super.

        Raises
        ------
        ValueError
            If `taxonomy` is neither "all" nor a known taxonomy code.
        """
        # Normalize the default: no taxonomy means the whole corpus.
        if taxonomy is None:
            taxonomy = "all"
        elif taxonomy != "all" and taxonomy not in _taxonomies():
            raise ValueError(f"Invalid taxonomy: {taxonomy}")

        # Derive the config name/description from the chosen taxonomy.
        description = "Carolina corpus."
        if taxonomy == "all":
            name = "carolina"
            description += " Using all taxonomies."
        else:
            # Config name is the taxonomy's full name, not its code.
            name = _taxonomies()[taxonomy]
            description += f" Using taxonomy {taxonomy}"

        super(CarolinaConfig, self).__init__(
            name=name, description=description, **kwargs)

        # Keep the code around for _split_generators to select folders.
        self.taxonomy = taxonomy
        self.version = datasets.Version(_VERSION)
|
|
|
|
|
class Carolina(datasets.GeneratorBasedBuilder):
    """Carolina Downloader and Builder."""

    BUILDER_CONFIG_CLASS = CarolinaConfig

    def _info(self):
        """Return dataset metadata.

        Each example carries the raw TEI header XML (`meta`) and the
        document body text (`text`), both as plain strings.
        """
        features = datasets.Features({
            "meta": datasets.Value("string"),
            "text": datasets.Value("string")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            features=features,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        """Download the per-taxonomy manifests, then the XML zip archives.

        Returns a single "corpus" split whose `gen_kwargs` carry the
        sorted list of extracted XML file paths.
        """
        if self.config.taxonomy == "all":
            taxonomies = _taxonomies().values()
        else:
            taxonomies = [_taxonomies()[self.config.taxonomy]]

        zip_urls = dict()
        for taxonomy in taxonomies:
            # Each taxonomy folder publishes a sha256 manifest of its files.
            checksum_path = _CHECKSUM_FNAME.format(taxonomy=taxonomy)
            checksum_path = dl_manager.download(checksum_path)

            tax_url = _CORPUS_URL.format(taxonomy=taxonomy)

            with open(checksum_path, encoding="utf-8") as cfile:
                for line in cfile:
                    # Manifest line format: "<sha256> <path>".
                    fname = line.split()[1]
                    if fname.endswith(".xml.zip"):
                        zip_url = tax_url + fname
                        fname = os.path.split(fname)[1]
                        fname = fname[:-4]  # strip ".zip" -> xml file name
                        zip_urls[fname] = zip_url

        # download_and_extract maps each key to its extraction directory;
        # the XML inside keeps the key's file name.
        extracted = dl_manager.download_and_extract(zip_urls)
        xml_files = [os.path.join(v, k) for k, v in extracted.items()]
        xml_files = sorted(xml_files)

        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"filepaths": xml_files}
            )
        ]

    def _generate_examples(self, filepaths):
        """Stream TEI documents from each XML file, yielding (key, example).

        Uses `etree.iterparse` so huge files need not be loaded whole;
        parsed elements are explicitly freed after use (see below).
        """
        TEI_NS = "{http://www.tei-c.org/ns/1.0}"
        parser_params = dict(
            huge_tree=True,
            encoding="utf-8",
            tag=f"{TEI_NS}TEI"
        )

        _key = 0
        for path in filepaths:

            for _, tei in etree.iterparse(path, **parser_params):
                header = tei.find(f"{TEI_NS}teiHeader")

                example = {
                    "meta": etree.tostring(
                        header, encoding="utf-8").decode("utf-8"),
                    "text": tei.find(f".//{TEI_NS}body/{TEI_NS}p").text
                }
                yield _key, example
                _key += 1

                # Free the consumed subtree. Without this, iterparse keeps
                # every parsed <TEI> element in memory, so the whole file is
                # eventually held at once — defeating the point of streaming
                # huge corpus files.
                tei.clear()
                while tei.getprevious() is not None:
                    del tei.getparent()[0]
|
|