import gzip
import json
import textwrap
from typing import List, Optional

import datasets


logger = datasets.logging.get_logger(__name__)

_CC_NEWS_PT_KWARGS = dict(
    name="cc_news_pt",
    description=textwrap.dedent(
        """\
        CC-News-PT is a curation of news articles from CommonCrawl News in the Portuguese language.
        CommonCrawl News is a dataset containing news articles from news sites all over the world.
        The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/.
        This version of the dataset is the Portuguese subset of
        https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual"""
    ),
    data_urls=[
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2016/pt.jsonl.gz',
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2017/pt.jsonl.gz',
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2018/pt.jsonl.gz',
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2019/pt.jsonl.gz',
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2020/pt.jsonl.gz',
        'https://huggingface.co/datasets/CloverSearch/cc-news-mutlilingual/resolve/main/2021/pt.jsonl.gz',
    ],
    citation=textwrap.dedent(
        """\
        @misc{Acerola2023,
            author     = {Garcia, E.A.S.},
            title      = {Acerola Corpus: Towards Better Portuguese Language Models},
            year       = {2023},
            doi        = {10.57967/hf/0814}
        }"""
    ),
    url="https://huggingface.co/datasets/eduagarcia/cc_news_pt",
)


class AcerolaConfig(datasets.BuilderConfig):
    """BuilderConfig for Acerola."""

    def __init__(
        self,
        data_urls: List[str],
        citation: str,
        url: str,
        file_type: Optional[str] = None,  # format of the raw files ("csv", "tsv" or "jsonl")
        **kwargs
    ):
        """BuilderConfig for Acerola.

        Args:
            data_urls: list of URLs to download the raw data files from.
            citation: BibTeX citation for the dataset.
            url: homepage of the dataset.
            file_type: format of the raw data files ("csv", "tsv" or "jsonl").
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.3", ""), **kwargs)
        self.data_urls = data_urls
        self.citation = citation
        self.url = url
        self.file_type = file_type


def _get_ccnews_features(config: AcerolaConfig) -> datasets.Features:
    # Every CC-News field is exposed as a plain string; `config` is unused
    # here but kept so feature helpers share a common signature.
    return datasets.Features(
        {
            "title": datasets.Value("string"),
            "text": datasets.Value("string"),
            "authors": datasets.Value("string"),
            "domain": datasets.Value("string"),
            "date": datasets.Value("string"),
            "description": datasets.Value("string"),
            "url": datasets.Value("string"),
            "image_url": datasets.Value("string"),
            "date_download": datasets.Value("string")
        }
    )
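
# For reference, each line of the source .jsonl.gz files is expected to be a
# JSON object carrying at least the fields read in `_generate_examples` below.
# The field names look like news-please crawler output (an assumption; the
# values shown are illustrative placeholders, not real data):
#
#   {"title": "...", "maintext": "...", "authors": ["..."],
#    "source_domain": "...", "date_publish": "...", "description": "...",
#    "url": "...", "image_url": "...", "date_download": "..."}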

class Acerola(datasets.GeneratorBasedBuilder):
    """Acerola dataset builder; currently exposes only the CC-News-PT config."""

    BUILDER_CONFIGS = [
        AcerolaConfig(
            **_CC_NEWS_PT_KWARGS
        )
    ]

    def _info(self) -> datasets.DatasetInfo:
        features = _get_ccnews_features(self.config)

        return datasets.DatasetInfo(
            description=self.config.description,
            homepage=self.config.url,
            citation=self.config.citation,
            supervised_keys=None,
            features=features
        )

    def _split_generators(self, dl_manager):
        data_urls = self.config.data_urls.copy()
        filepaths = dl_manager.download(data_urls)

        # The corpus ships unsplit, so all files feed a single train split.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}),
        ]

    def _generate_examples(self, filepaths):
        def _clean(value: Optional[str]) -> str:
            # Normalize absent or null fields to an empty string.
            return value.strip() if value is not None else ""

        id_ = 0
        for filepath in filepaths:
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if not line.strip():
                        continue  # skip blank lines rather than crash json.loads
                    article = json.loads(line)
                    yield id_, {
                        "title": _clean(article["title"]),
                        "text": _clean(article["maintext"]),
                        "authors": "; ".join(a.strip() for a in article["authors"]) if article["authors"] else "",
                        "domain": _clean(article["source_domain"]),
                        "date": _clean(article["date_publish"]),
                        "description": _clean(article["description"]),
                        "url": _clean(article["url"]),
                        "image_url": _clean(article["image_url"]),
                        "date_download": _clean(article["date_download"]),
                    }
                    id_ += 1
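

# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming this script is saved locally as "acerola.py"
# (a hypothetical path -- point load_dataset at wherever the file lives).
# Downloading all six yearly dumps takes a while, so treat this as a smoke
# test rather than part of the loader.
if __name__ == "__main__":
    dataset = datasets.load_dataset("acerola.py", "cc_news_pt", split="train")
    print(dataset[0]["domain"], "-", dataset[0]["title"])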