import gzip
import json
import textwrap
from typing import Dict, Optional

import datasets

logger = datasets.logging.get_logger(__name__)

_CC_NEWS_PT_KWARGS = dict(
    name="cc_news_pt",
    description=textwrap.dedent(
        """\
        CC-News-PT is a curation of news articles from CommonCrawl News in the Portuguese language.
        CommonCrawl News is a dataset containing news articles from news sites all over the world.
        The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/.
        This version of the dataset is the Portuguese subset from
        https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual"""
    ),
    data_urls=[
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2016/pt.jsonl.gz",
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2017/pt.jsonl.gz",
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2018/pt.jsonl.gz",
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2019/pt.jsonl.gz",
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2020/pt.jsonl.gz",
        "https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2021/pt.jsonl.gz",
    ],
    citation=textwrap.dedent(
        """\
        @misc{Acerola2023,
          author = {Garcia, E.A.S.},
          title = {Acerola Corpus: Towards Better Portuguese Language Models},
          year = {2023},
          doi = {10.57967/hf/0814}
        }"""
    ),
    url="https://huggingface.co/datasets/eduagarcia/cc_news_pt",
)


class AcerolaConfig(datasets.BuilderConfig):
    """BuilderConfig for Acerola."""

    def __init__(
        self,
        data_urls: Dict[str, str],
        citation: str,
        url: str,
        file_type: Optional[str] = None,  # file type of the source data (csv, tsv, jsonl)
        **kwargs,
    ):
        """BuilderConfig for Acerola.

        Args:
            data_urls: URLs of the gzip-compressed JSONL shards to download.
            citation: BibTeX citation for the dataset.
            url: homepage URL of the dataset.
            file_type: file type of the source data (csv, tsv, jsonl).
            **kwargs: keyword arguments forwarded to super.
        """
        super(AcerolaConfig, self).__init__(version=datasets.Version("1.0.3", ""), **kwargs)
        self.data_urls = data_urls
        self.citation = citation
        self.url = url
        self.file_type = file_type


def _get_ccnews_features(config: AcerolaConfig) -> datasets.Features:
    # All fields are stored as plain strings, including the date fields.
    return datasets.Features(
        {
            "title": datasets.Value("string"),
            "text": datasets.Value("string"),
            "authors": datasets.Value("string"),
            "domain": datasets.Value("string"),
            "date": datasets.Value("string"),
            "description": datasets.Value("string"),
            "url": datasets.Value("string"),
            "image_url": datasets.Value("string"),
            "date_download": datasets.Value("string"),
        }
    )


class Acerola(datasets.GeneratorBasedBuilder):
    """CC-News-PT dataset builder."""

    BUILDER_CONFIGS = [AcerolaConfig(**_CC_NEWS_PT_KWARGS)]

    def _info(self) -> datasets.DatasetInfo:
        features = _get_ccnews_features(self.config)
        return datasets.DatasetInfo(
            description=self.config.description,
            homepage=self.config.url,
            citation=self.config.citation,
            supervised_keys=None,
            features=features,
        )

    def _split_generators(self, dl_manager):
        data_urls = self.config.data_urls.copy()
        filepaths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}
            ),
        ]

    def _generate_examples(self, filepaths):
        # Examples are numbered sequentially across all downloaded shards.
        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        article = json.loads(line)
                        # Map the raw article fields onto the dataset features,
                        # replacing missing values with empty strings.
                        yield id_, {
                            "title": article["title"].strip() if article["title"] is not None else "",
                            "text": article["maintext"].strip() if article["maintext"] is not None else "",
                            "authors": "; ".join(a.strip() for a in article["authors"]) if len(article["authors"]) > 0 else "",
                            "domain": article["source_domain"].strip() if article["source_domain"] is not None else "",
                            "date": article["date_publish"].strip() if article["date_publish"] is not None else "",
                            "description": article["description"].strip() if article["description"] is not None else "",
                            "url": article["url"].strip() if article["url"] is not None else "",
                            "image_url": article["image_url"].strip() if article["image_url"] is not None else "",
                            "date_download": article["date_download"].strip() if article["date_download"] is not None else "",
                        }
                        id_ += 1