holylovenia committed on
Commit
85a3272
1 Parent(s): 9a90284

Upload balita_nlp.py with huggingface_hub

Files changed (1)
  1. balita_nlp.py +229 -0
balita_nlp.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{bunagtransformer,
  author = {Bunag, Kenrick Lance T and Esquivel, Rosanna A},
  title = {Transformer-Based Conditional Language Models to Generate Filipino News Articles},
  year = {2023},
  publisher = {IEOM Society International},
  url = {https://ieomsociety.org/proceedings/2023manila/595.pdf},
  booktitle = {Proceedings of the International Conference on Industrial Engineering and Operations Management},
  pages = {2231--2237},
  numpages = {7},
  location = {Manila, Philippines},
}
"""

_DATASETNAME = "balita_nlp"

_DESCRIPTION = """\
BalitaNLP is a dataset for image-conditional language generation and text-conditional image generation. It consists of over 300k Filipino news
articles and images gathered from Filipino news outlets. News articles are categorized into five possible classes: News, Sports, Entertainment,
Crime, and Other. Articles whose image files are missing are excluded from the SEACrowd `imtext` schema:
- `train` split (262480 articles kept): 18923 of the original 281403 articles (~6.72%) had missing images
- `test` split (32821 articles kept): 2356 of the original 35177 articles (~6.70%) had missing images
- `validation` split (32806 articles kept): 2369 of the original 35175 articles (~6.73%) had missing images
"""
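
# Note: the split sizes in _DESCRIPTION apply to the `seacrowd_imtext` schema only.
# The source schema keeps every article and leaves `img_path` empty when no image
# file is found (see `_generate_examples` below).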

_HOMEPAGE = "https://github.com/KenrickLance/BalitaNLP-Dataset"

_LANGUAGES = ["fil"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    "text": "https://storage.googleapis.com/public-kenricklancebunag/BalitaNLP/2022/BalitaNLP-Dataset.zip",
    "images": {
        "part1": "https://storage.googleapis.com/public-kenricklancebunag/BalitaNLP/2022/BalitaNLP-images_1.zip",
        "part2": "https://storage.googleapis.com/public-kenricklancebunag/BalitaNLP/2022/BalitaNLP-images_2.zip",
        "part3": "https://storage.googleapis.com/public-kenricklancebunag/BalitaNLP/2022/BalitaNLP-images_3.zip",
        "part4": "https://storage.googleapis.com/public-kenricklancebunag/BalitaNLP/2022/BalitaNLP-images_4.zip",
    },
}

_SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class BalitaNLPDataset(datasets.GeneratorBasedBuilder):
    """
    BalitaNLP is an image-text dataset from https://github.com/KenrickLance/BalitaNLP-Dataset.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_imtext",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_imtext",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
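
    # Note: the `name` argument of `datasets.load_dataset` selects one of the two
    # configs above, i.e. "balita_nlp_source" or "balita_nlp_seacrowd_imtext".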

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "body": datasets.Sequence(datasets.Value("string")),
                    "title": datasets.Value("string"),
                    "website": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "author": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "img_url": datasets.Value("string"),
                    "img_path": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_imtext":
            features = schemas.image_text_features()
            features["metadata"] = {
                "context": datasets.Value("string"),
                "author": datasets.Value("string"),
                "category": datasets.Value("string"),
                "date": datasets.Value("string"),
                "img_url": datasets.Value("string"),
                "url": datasets.Value("string"),
                "website": datasets.Value("string"),
            }
        else:
            raise ValueError(f"Invalid schema: '{self.config.schema}'")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
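
    # Illustrative shape of a source-schema record (field values invented for the example):
    #     {
    #         "body": ["Unang talata ng balita...", "Ikalawang talata..."],
    #         "title": "Halimbawang pamagat",
    #         "category": "News",
    #         "img_path": "",  # empty string when no image file was found
    #         ...
    #     }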

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """
        Returns SplitGenerators.
        """

        text_path = dl_manager.download_and_extract(_URLS["text"])
        img_paths = dl_manager.download_and_extract(list(_URLS["images"].values()))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "text_path": os.path.join(text_path, "train.json"),
                    "img_paths": img_paths,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "text_path": os.path.join(text_path, "test.json"),
                    "img_paths": img_paths,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "text_path": os.path.join(text_path, "validation.json"),
                    "img_paths": img_paths,
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, text_path: Path, img_paths: List[str], split: str) -> Tuple[int, Dict]:
        """
        Yields examples as (key, example) tuples.
        """
        text_data = pd.read_json(text_path)
        data = text_data.to_records()

        for idx, row in enumerate(data):

            # Search the extracted image archives for this article's image file.
            # Each archive extracts into a "partN" subdirectory matching its URL key.
            img_path = ""
            for idx_subpath, img_subpath in enumerate(img_paths):
                candidate_filepath = os.path.join(img_subpath, "part" + str(idx_subpath + 1), row["img_path"])
                if os.path.isfile(candidate_filepath):
                    img_path = candidate_filepath
                    break

            if self.config.schema == "source":
                x = {
                    "body": row["body"],
                    "title": row["title"],
                    "website": row["website"],
                    "category": row["category"],
                    "date": row["date"],
                    "author": row["author"],
                    "url": row["url"],
                    "img_url": row["img_url"],
                    "img_path": img_path,
                }
                yield idx, x

            elif self.config.schema == "seacrowd_imtext":

                # Skip examples whose image file is missing from all archives.
                if img_path == "":
                    continue

                x = {
                    "id": idx,
                    "image_paths": [img_path],
                    "texts": row["title"],
                    "metadata": {
                        "context": row["body"],
                        "author": row["author"],
                        "category": row["category"],
                        "date": row["date"],
                        "img_url": row["img_url"],
                        "url": row["url"],
                        "website": row["website"],
                    },
                }
                yield idx, x

            else:
                raise ValueError(f"Invalid schema: '{self.config.schema}'")
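
For reference, a minimal usage sketch (an illustration, not part of the committed script; it assumes the file is saved locally as balita_nlp.py and that the seacrowd helper package is importable):

    import datasets

    # Source schema: every article is kept; "img_path" may be empty.
    source_ds = datasets.load_dataset("balita_nlp.py", name="balita_nlp_source")

    # SEACrowd imtext schema: articles whose image file is missing are skipped.
    # (Recent versions of `datasets` may also require trust_remote_code=True.)
    imtext_ds = datasets.load_dataset("balita_nlp.py", name="balita_nlp_seacrowd_imtext")
    print(imtext_ds["train"][0]["texts"])  # the article title paired with its image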