holylovenia committed
Commit ae158be
1 Parent(s): 9c8a39a
Upload liputan6.py with huggingface_hub

liputan6.py +208 -0
liputan6.py
ADDED
@@ -0,0 +1,208 @@

import json
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@inproceedings{koto2020liputan6,
  title={Liputan6: A Large-scale Indonesian Dataset for Text Summarization},
  author={Koto, Fajri and Lau, Jey Han and Baldwin, Timothy},
  booktitle={Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing},
  pages={598--608},
  year={2020}
}
"""

_LOCAL = False
_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_DATASETNAME = "liputan6"

_DESCRIPTION = """
A large-scale Indonesian summarization dataset consisting of harvested articles from Liputan6.com, an online news portal, resulting in 215,827 document-summary pairs.
"""

_HOMEPAGE = "https://github.com/fajri91/sum_liputan6"

_LICENSE = "CC-BY-SA 4.0"

_URLS = {
    _DATASETNAME: "https://storage.googleapis.com/babert-pretraining/IndoNLG_finals/downstream_task/downstream_task_datasets.zip",
}

_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"
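
# The downloaded archive appears to bundle multiple IndoNLG downstream tasks;
# this loader reads only the files under IndoNLG_downstream_tasks/liputan6/
# (see the location table in _split_generators).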


class Liputan6(datasets.GeneratorBasedBuilder):
    """A large-scale Indonesian summarization dataset consisting of harvested articles from Liputan6.com, an online news portal, resulting in 215,827 document-summary pairs."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    TYPE_LIST = ["canonical", "xtreme"]
    # One "source" config and one "nusantara_t2t" config per subset type,
    # e.g. liputan6_canonical_source and liputan6_canonical_nusantara_t2t.
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="liputan6_{fold_name}_source".format(fold_name=i),
            version=_SOURCE_VERSION,
            description="liputan6 source schema",
            schema="source",
            subset_id="liputan6_{fold_name}".format(fold_name=i),
        )
        for i in TYPE_LIST
    ] + [
        NusantaraConfig(
            name="liputan6_{fold_name}_nusantara_t2t".format(fold_name=i),
            version=_NUSANTARA_VERSION,
            description="liputan6 Nusantara schema",
            schema="nusantara_t2t",
            subset_id="liputan6_{fold_name}".format(fold_name=i),
        )
        for i in TYPE_LIST
    ]
    DEFAULT_CONFIG_NAME = "liputan6_canonical_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _get_fold_name(self):
        subset_id = self.config.subset_id
        idx_fold = subset_id.index("_")
        file_id = subset_id[(idx_fold + 1):]
        return file_id
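
    # Illustration: with the default config ("liputan6_canonical_source"),
    # subset_id is "liputan6_canonical", so _get_fold_name() returns
    # "canonical", which selects the canonical_{train,test,dev}.json files
    # in _split_generators below.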

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        fold_name = self._get_fold_name()

        urls = _URLS[_DATASETNAME]

        data_dir = Path(dl_manager.download_and_extract(urls))

        location = {
            "train": "IndoNLG_downstream_tasks/liputan6/{fold_name}_train.json",
            "test": "IndoNLG_downstream_tasks/liputan6/{fold_name}_test.json",
            "dev": "IndoNLG_downstream_tasks/liputan6/{fold_name}_dev.json",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, location["train"].format(fold_name=fold_name)),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, location["test"].format(fold_name=fold_name)),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, location["dev"].format(fold_name=fold_name)),
                    "split": "dev",
                },
            ),
        ]
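
    # Note on file formats, inferred from the branching in _generate_examples
    # below: the canonical files and the xtreme test/dev files are loaded as a
    # single JSON document, while xtreme_train.json gets a hand-rolled
    # record-by-record parse.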

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        if self.config.schema == "source":
            if "xtreme_train.json" in str(filepath):
                # xtreme_train.json does not parse as one JSON document, so
                # the raw text is split on "{" and each fragment is re-wrapped
                # and decoded on its own; this assumes no nested braces inside
                # a record.
                with open(filepath) as f:
                    lines = f.read().split("{")
                    LEN = len(lines)
                    for i, line in enumerate(lines):
                        if 0 < i < LEN - 1:
                            idx = line.index("}")
                            line = "{" + line[:idx + 1]
                            each_data = json.loads(line)
                            ex = {
                                "id": each_data["id"],
                                "document": each_data["text"],
                                "summary": each_data["label"],
                            }
                            yield each_data["id"], ex
            else:
                with open(filepath) as f:
                    data = json.load(f)
                    for i, each_data in enumerate(data):
                        ex = {
                            "id": each_data["id"],
                            "document": each_data["text"],
                            "summary": each_data["label"],
                        }
                        yield each_data["id"], ex

        elif self.config.schema == "nusantara_t2t":
            if "xtreme_train.json" in str(filepath):
                with open(filepath) as f:
                    lines = f.read().split("{")
                    LEN = len(lines)
                    for i, line in enumerate(lines):
                        if 0 < i < LEN - 1:
                            idx = line.index("}")
                            line = "{" + line[:idx + 1]
                            each_data = json.loads(line)
                            ex = {
                                "id": each_data["id"],
                                "text_1": each_data["text"],
                                "text_2": each_data["label"],
                                "text_1_name": "document",
                                "text_2_name": "summary",
                            }
                            yield each_data["id"], ex
            else:
                with open(filepath) as f:
                    data = json.load(f)
                    for i, each_data in enumerate(data):
                        ex = {
                            "id": each_data["id"],
                            "text_1": each_data["text"],
                            "text_2": each_data["label"],
                            "text_1_name": "document",
                            "text_2_name": "summary",
                        }
                        yield each_data["id"], ex
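
For reference, a minimal sketch of how the uploaded loader might be exercised once the script is on disk. The config names come from BUILDER_CONFIGS above; the local path and the trust_remote_code flag are assumptions about the caller's environment, and the nusacrowd package must be importable for the script's own imports to resolve.

import datasets

# Assumption: the script sits in the working directory. Any name built in
# BUILDER_CONFIGS is valid, e.g. "liputan6_canonical_source" or
# "liputan6_xtreme_nusantara_t2t"; newer releases of the datasets library
# may additionally require trust_remote_code=True for script-based loaders.
liputan6 = datasets.load_dataset(
    "liputan6.py",
    name="liputan6_canonical_source",
    trust_remote_code=True,
)

# Inspect one document-summary pair from the training split, using the
# source-schema fields declared in _info().
example = liputan6["train"][0]
print(example["id"])
print(example["document"][:200])
print(example["summary"])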