holylovenia commited on
Commit
a506668
1 Parent(s): be22107

Upload bloom_captioning.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. bloom_captioning.py +248 -0
bloom_captioning.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
SEA Crowd Data Loader for Bloom Captioning.
"""
from typing import Dict, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks

# BibTeX citation for the upstream Bloom Library paper (EMNLP 2022).
_CITATION = r"""
@inproceedings{leong-etal-2022-bloom,
    title = "Bloom Library: Multimodal Datasets in 300+ Languages for a Variety of Downstream Tasks",
    author = "Leong, Colin and
      Nemecek, Joshua and
      Mansdorfer, Jacob and
      Filighera, Anna and
      Owodunni, Abraham and
      Whitenack, Daniel",
    editor = "Goldberg, Yoav and
      Kozareva, Zornitsa and
      Zhang, Yue",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.590",
    doi = "10.18653/v1/2022.emnlp-main.590",
    pages = "8608--8621",
}
"""

logger = datasets.logging.get_logger(__name__)

# this config is created for SEACrowd Dataloader
# Maps ISO 639-3 language code -> human-readable language name for the
# SEA-relevant subset of Bloom Captioning; one builder config is generated
# per key (see construct_configs_on_langs below).
_LANG_CONFIG = {
    "abc": "Ambala Ayta",
    "ahk": "Akha",
    "bfn": "Bunak",
    "bjn": "Banjar",
    "bkx": "Baikeno",
    "brb": "Brao",
    "brv": "Western Bru",
    "bya": "Batak",
    "bzi": "Bisu",
    "ceb": "Cebuano",
    "cgc": "Kagayanen",
    "cmo": "Central Mnong",
    "ddg": "Fataluku",
    "dmg": "Upper Kinabatangan",
    "dnw": "Western Dani",
    "dtp": "Kadazan Dusun",
    "dtr": "Lotud",
    "enc": "En",
    "fil": "Filipino",
    "gal": "Galolen",
    "hil": "Hiligaynon",
    "hre": "Hre",
    "hro": "Haroi",
    "idt": "Idaté",
    "ilo": "Ilocano",
    "ind": "Indonesian",
    "jra": "Jarai",
    "kak": "Kalanguya",
    "khb": "Lü",
    "khm": "Khmer",
    "kqr": "Kimaragang",
    "krr": "Krung",
    "ksw": "S’gaw Karen",
    "lhu": "Lahu",
    "llg": "Lole",
    "lsi": "Lacid",
    "lwl": "Eastern Lawa",
    "mdr": "Mandar",
    "mgm": "Mambae",
    "mhx": "Lhao Vo",
    "mkz": "Makasae",
    "mnw": "Mon",
    "mqj": "Mamasa",
    "mry": "Mandaya",
    "msb": "Masbatenyo",
    "mya": "Burmese",
    "nod": "Northern Thai",
    "nst": "Tangshang Naga",
    "nxa": "Nauete",
    "nxl": "South Nuaulu",
    "pag": "Pangasinan",
    "pce": "Ruching Palaung",
    "pdu": "Kayan",
    "pea": "Peranakan Indonesian",
    "pmf": "Pamona",
    "sea": "Semai",
    "sgd": "Surigaonon",
    "shn": "Shan",
    "sml": "Central Sama",
    "snl": "Sangil",
    "tdt": "Tetun Dili",
    "tet": "Tetun",
    "tha": "Thai",
    "tkd": "Tukudede",
    "tnt": "Tontemboan",
    "tom": "Tombulu",
    "tpu": "Tampuan",
    "vie": "Vietnamese",
    "war": "Waray-Waray",
    "wms": "Wambon",
    "wnk": "Wanukaka",
    "xmm": "Manado Malay",
    "yet": "Yetfa",
    "zlm": "Malay",
}

# _LOCAL=False: data is fetched remotely from the HF Hub, not from local files.
_LOCAL = False
_LANGUAGES = list(_LANG_CONFIG.keys())


_DATASETNAME = "bloom_captioning"
_DESCRIPTION = r"""
This is a Bloom Library dataset developed for the image captioning task.
It covers 74 languages indigenous to SEA overall, amounting to total data of 21K.
This dataset belongs to a CC license, where its datapoints has specific license attached to it.
Before using this dataloader, please accept the acknowledgement at https://huggingface.co/datasets/sil-ai/bloom-captioning and use huggingface-cli login for authentication.
"""

_HOMEPAGE = "https://huggingface.co/datasets/sil-ai/bloom-captioning"
_LICENSE = Licenses.CC.value

# Remote dataset location on the HF Hub; _HF_REMOTE_REF is the "org/name"
# repo id derived from the last two URL path segments.
_URL = "https://huggingface.co/datasets/sil-ai/bloom-captioning"
_HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])

_SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
_SOURCE_VERSION = "0.1.0"
_SEACROWD_VERSION = "2024.06.20"

# Lower-cased schema names (e.g. "imtext") used as config-name suffixes,
# one per supported task.
CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]
141
def construct_configs_on_langs(languages: list = None) -> List[SEACrowdConfig]:
    """
    Construct a list of SEACrowdConfig objects for the given languages.

    input:
        languages (list, default None): The `languages` parameter is a list that specifies the languages for which the
        configurations need to be constructed. If no languages are provided (value=None), the first value in language config
        will be used.
    output:
        a list of `SEACrowdConfig` objects based on instantiated init variables
    """
    # Fix: the docstring promised a fallback for languages=None, but the old
    # implementation iterated None and crashed; default to the first
    # configured language as documented.
    if not languages:
        languages = [_LANGUAGES[0]]

    # set output var
    config_list = []

    # construct zipped arg for config instantiation
    tasks_and_config_suffix_pairs = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))

    # implement source schema (one config per language)
    version, config_name_prefix = _SOURCE_VERSION, "source"
    config_list += [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}",
            version=datasets.Version(version),
            description=f"{_DATASETNAME} {config_name_prefix} schema for language code {_LANG}",
            schema=f"{config_name_prefix}",
            subset_id=_LANG,
        )
        for _LANG in languages
    ]

    # implement SEACrowd schema (one config per task x language pair)
    version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
    for task_obj, config_name_suffix in tasks_and_config_suffix_pairs:
        config_list += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}_{config_name_suffix}",
                version=datasets.Version(version),
                description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
                schema=f"{config_name_prefix}_{config_name_suffix}",
                subset_id=_LANG,
            )
            for _LANG in languages
        ]
    return config_list
187
+
188
+
189
class BloomCaptioningDataset(datasets.GeneratorBasedBuilder):
    """Bloom Captioning dataset, subsetted from https://huggingface.co/datasets/sil-ai/bloom-captioning"""

    # one "source" config plus one "seacrowd_imtext" config per language
    BUILDER_CONFIGS = construct_configs_on_langs(_LANGUAGES)

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo for the active config's schema."""
        schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")

        if schema_name == "source":
            # source schema mirrors the upstream columns; all are strings
            # except the per-story image index.
            string_columns = (
                "image_id",
                "image_url",
                "caption",
                "story_id",
                "album_id",
                "license",
                "original_bloom_language_tag",
            )
            column_spec = {colname: datasets.Value("string") for colname in string_columns}
            column_spec["index_in_story"] = datasets.Value("uint16")
            features = datasets.Features(column_spec)
        elif schema_name == "seacrowd_imtext":
            # shared SEACrowd image-text schema
            features = schemas.image_text_features()
        else:
            raise ValueError(f"Received unexpected config schema of {schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Load the remote HF dataset and emit one generator per non-empty split."""
        remote_splits = datasets.load_dataset(_HF_REMOTE_REF, self.config.subset_id)

        generators = []
        for split_name, split_dset in remote_splits.items():
            if split_dset.num_rows > 0:
                generators.append(datasets.SplitGenerator(name=datasets.Split(split_name), gen_kwargs={"hf_dset": split_dset}))
        return generators

    def _generate_examples(self, hf_dset) -> Tuple[int, Dict]:
        """Yield (key, example) pairs shaped according to the active schema."""
        schema_name = self.config.schema

        # the running index serves as the primary key since no `id` column
        # exists in the upstream dataset
        for row_idx, record in enumerate(hf_dset):
            if schema_name == "source":
                yield row_idx, {colname: record[colname] for colname in self.info.features}
            elif schema_name == "seacrowd_imtext":
                yield row_idx, {
                    "id": row_idx,
                    "image_paths": [record["image_url"]],
                    "texts": record["caption"],
                    "metadata": {"context": "", "labels": []},
                }
            else:
                raise ValueError(f"Received unexpected config schema of {schema_name}!")