# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OrangeSum dataset"""


import datasets


_CITATION = """\
@inproceedings{kamal-eddine-etal-2021-barthez,
    title = "{BART}hez: a Skilled Pretrained {F}rench Sequence-to-Sequence Model",
    author = "Kamal Eddine, Moussa  and
      Tixier, Antoine  and
      Vazirgiannis, Michalis",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.740",
    pages = "9369--9390",
}
"""

_DESCRIPTION = """\
The OrangeSum dataset was inspired by the XSum dataset. It was created by scraping the "Orange Actu" website: https://actu.orange.fr/. Orange S.A. is a large French multinational telecommunications corporation, with 266M customers worldwide. Scraped pages cover almost a decade from Feb 2011 to Sep 2020. They belong to five main categories: France, world, politics, automotive, and society. The society category is itself divided into 8 subcategories: health, environment, people, culture, media, high-tech, unusual ("insolite" in French), and miscellaneous.

Each article featured a single-sentence title as well as a very brief abstract, both professionally written by the author of the article. These two fields were extracted from each page, thus creating two summarization tasks: OrangeSum Title and OrangeSum Abstract.
"""

_URL_DATA = {
    "abstract": "https://raw.githubusercontent.com/Tixierae/OrangeSum/main/data/docs/splits/abstract.tgz",
    "title": "https://raw.githubusercontent.com/Tixierae/OrangeSum/main/data/docs/splits/title.tgz",
}


class OrangeSum(datasets.GeneratorBasedBuilder):
    """OrangeSum: a french abstractive summarization dataset"""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="abstract", description="Abstracts used as summaries", version=VERSION
        ),
        datasets.BuilderConfig(
            name="title", description="Titles used as summaries", version=VERSION
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            ),
            supervised_keys=("input", "target"),
            homepage="https://github.com/Tixierae/OrangeSum/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive = dl_manager.download(_URL_DATA[self.config.name])
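        # As read back in _generate_examples below, each .tgz archive holds
        # one `<config>/<split>.source` and one `<config>/<split>.target`
        # member per split, with one document (resp. one summary) per line.
        # Two independent `iter_archive` iterators over the same archive are
        # passed down so that the source and target members can be located
        # in separate passes.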

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "source_files": dl_manager.iter_archive(archive),
                    "target_files": dl_manager.iter_archive(archive),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "source_files": dl_manager.iter_archive(archive),
                    "target_files": dl_manager.iter_archive(archive),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "source_files": dl_manager.iter_archive(archive),
                    "target_files": dl_manager.iter_archive(archive),
                    "split": "valid",
                },
            ),
        ]

    def _generate_examples(self, source_files, target_files, split):
        """Yields examples."""
        expected_source_path = f"{self.config.name}/{split}.source"
        expected_target_path = f"{self.config.name}/{split}.target"
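        # Scan the archive twice: the outer loop locates the *.source member,
        # the inner loop the matching *.target member; the two files are then
        # consumed line by line in lockstep, yielding one example per line.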
        for source_path, f_source in source_files:
            if source_path == expected_source_path:
                for target_path, f_target in target_files:
                    if target_path == expected_target_path:
                        for idx, (document, summary) in enumerate(
                            zip(f_source, f_target)
                        ):
                            yield idx, {
                                "input": document.decode("utf-8"),
                                "target": summary.decode("utf-8"),
                                "references": [summary.decode("utf-8")],
                                "gem_id": f"OrangeSum_{self.config.name}-{split}-{idx}",
                            }
                        break
                break
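

# Usage sketch (assumption, not part of the original script): with this file
# saved locally as `orangesum.py`, the dataset can be loaded through the
# `datasets` library. The `trust_remote_code` flag is only accepted (and
# required) by recent `datasets` versions; older versions load local scripts
# without it.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("orangesum.py", "abstract", trust_remote_code=True)
    example = ds["train"][0]
    print(example["gem_id"])       # e.g. "OrangeSum_abstract-train-0"
    print(example["input"][:200])  # start of the source article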