holylovenia committed
Commit
cdd2cc0
1 Parent(s): a6f886d

Upload lr_sum.py with huggingface_hub
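For context, a minimal sketch of the kind of upload this commit message describes, using the huggingface_hub client. The repository id and file paths below are assumptions for illustration, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="lr_sum.py",   # local file to push (assumed path)
    path_in_repo="lr_sum.py",      # destination path in the repo
    repo_id="SEACrowd/lr_sum",     # assumed repository id
    repo_type="dataset",
    commit_message="Upload lr_sum.py with huggingface_hub",
)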

Files changed (1)
  1. lr_sum.py +166 -0
lr_sum.py ADDED
@@ -0,0 +1,166 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Dict, List, Tuple
+
+import datasets
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """
+@inproceedings{palen-michel-lignos-2023-lr,
+    author = {Palen-Michel, Chester and Lignos, Constantine},
+    title = {LR-Sum: Summarization for Less-Resourced Languages},
+    booktitle = {Findings of the Association for Computational Linguistics: ACL 2023},
+    year = {2023},
+    publisher = {Association for Computational Linguistics},
+    address = {Toronto, Canada},
+    doi = {10.18653/v1/2023.findings-acl.427},
+    pages = {6829--6844},
+}
+"""
+
+_LOCAL = False
+_LANGUAGES = ["ind", "khm", "lao", "mya", "tha", "vie"]
+
+_DATASETNAME = "lr_sum"
+_DESCRIPTION = """
+LR-Sum is a news abstractive summarization dataset focused on low-resource languages. It contains human-written summaries
+for 39 languages and the data is based on the Multilingual Open Text corpus
+(ultimately derived from the Voice of America website).
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/bltlab/lr-sum"
+_LICENSE = Licenses.CC_BY_4_0.value
+_URL = "https://huggingface.co/datasets/bltlab/lr-sum"
+
+_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]
+_SOURCE_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
+
+
+class LRSumDataset(datasets.GeneratorBasedBuilder):
+    """Dataset of article-summary pairs for different low-resource languages."""
+
+    # Config to load individual datasets per language
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_{lang}_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description=f"{_DATASETNAME} source schema for {lang} language",
+            schema="source",
+            subset_id=f"{_DATASETNAME}_{lang}",
+        )
+        for lang in _LANGUAGES
+    ] + [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_{lang}_seacrowd_t2t",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description=f"{_DATASETNAME} SEACrowd schema for {lang} language",
+            schema="seacrowd_t2t",
+            subset_id=f"{_DATASETNAME}_{lang}",
+        )
+        for lang in _LANGUAGES
+    ]
+
+    # Config to load all datasets
+    BUILDER_CONFIGS.extend(
+        [
+            SEACrowdConfig(
+                name=f"{_DATASETNAME}_source",
+                version=datasets.Version(_SOURCE_VERSION),
+                description=f"{_DATASETNAME} source schema for all languages",
+                schema="source",
+                subset_id=_DATASETNAME,
+            ),
+            SEACrowdConfig(
+                name=f"{_DATASETNAME}_seacrowd_t2t",
+                version=datasets.Version(_SEACROWD_VERSION),
+                description=f"{_DATASETNAME} SEACrowd schema for all languages",
+                schema="seacrowd_t2t",
+                subset_id=_DATASETNAME,
+            ),
+        ]
+    )
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "url": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "summary": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            )
+        elif self.config.schema == "seacrowd_t2t":
+            features = schemas.text2text_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        # dl_manager not used since dataloader uses HF 'load_dataset'
+        return [
+            datasets.SplitGenerator(name=split, gen_kwargs={"split": split._name})
+            for split in (
+                datasets.Split.TRAIN,
+                datasets.Split.VALIDATION,
+                datasets.Split.TEST,
+            )
+        ]
+
+    def _load_hf_data_from_remote(self, lang: str, split: str) -> datasets.DatasetDict:
+        """Load dataset from HuggingFace."""
+        hf_remote_ref = "/".join(_URL.split("/")[-2:])
+        return datasets.load_dataset(hf_remote_ref, lang, split=split)
+
+    def _generate_examples(self, split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        lr_sum_datasets = []
+
+        lang = self.config.subset_id.split("_")[-1]
+        if lang in _LANGUAGES:
+            lr_sum_datasets.append(self._load_hf_data_from_remote(lang, split))
+        else:
+            for lang in _LANGUAGES:
+                lr_sum_datasets.append(self._load_hf_data_from_remote(lang, split))
+
+        index = 0
+        for lang_subset in lr_sum_datasets:
+            for row in lang_subset:
+                if self.config.schema == "source":
+                    example = row
+
+                elif self.config.schema == "seacrowd_t2t":
+                    example = {
+                        "id": str(index),
+                        "text_1": row["text"],
+                        "text_2": row["summary"],
+                        "text_1_name": "document",
+                        "text_2_name": "summary",
+                    }
+                yield index, example
+                index += 1
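For reference, a minimal usage sketch of the uploaded dataloader. It assumes the seacrowd package is installed and this script is available locally as lr_sum.py; the config name follows the BUILDER_CONFIGS defined above, and trust_remote_code is only needed for recent versions of the datasets library:

import datasets

# Load the Indonesian subset in the SEACrowd text-to-text schema (illustrative only;
# the local path "lr_sum.py" is an assumption about where the script lives).
lr_sum_ind = datasets.load_dataset(
    "lr_sum.py",
    name="lr_sum_ind_seacrowd_t2t",
    trust_remote_code=True,
)

# "text_1" is the article and "text_2" the human-written summary, per _generate_examples.
print(lr_sum_ind["train"][0]["text_2"])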