holylovenia committed on
Commit
fd0a1f0
1 Parent(s): 7cc225c

Upload tydiqa_id.py with huggingface_hub

Files changed (1)
  1. tydiqa_id.py +186 -0
tydiqa_id.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from pathlib import Path
from typing import List

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@article{clark-etal-2020-tydi,
    title = "{T}y{D}i {QA}: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages",
    author = "Clark, Jonathan H. and
      Choi, Eunsol and
      Collins, Michael and
      Garrette, Dan and
      Kwiatkowski, Tom and
      Nikolaev, Vitaly and
      Palomaki, Jennimaria",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "8",
    year = "2020",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2020.tacl-1.30",
    doi = "10.1162/tacl_a_00317",
    pages = "454--470",
}

@inproceedings{cahyawijaya-etal-2021-indonlg,
    title = "{I}ndo{NLG}: Benchmark and Resources for Evaluating {I}ndonesian Natural Language Generation",
    author = "Cahyawijaya, Samuel and
      Winata, Genta Indra and
      Wilie, Bryan and
      Vincentio, Karissa and
      Li, Xiaohong and
      Kuncoro, Adhiguna and
      Ruder, Sebastian and
      Lim, Zhi Yuan and
      Bahar, Syafri and
      Khodra, Masayu and
      Purwarianti, Ayu and
      Fung, Pascale",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.699",
    doi = "10.18653/v1/2021.emnlp-main.699",
    pages = "8875--8898",
}
"""

_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False

_DATASETNAME = "tydiqa_id"

_DESCRIPTION = """\
The TyDiQA dataset is collected from Wikipedia articles with human-annotated question-answer pairs covering 11 languages.
The question-answer pairs are collected for each language without using translation services.
IndoNLG uses the Indonesian data from the secondary Gold passage task of the original TyDiQA dataset and
randomly splits off 15% of the training data to use as the test set.
"""

_HOMEPAGE = "https://github.com/IndoNLP/indonlg"

_LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"

# For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
# In most cases the URLs will be the same for the source and nusantara configs.
# However, if you need to access different files for each config, you can have multiple entries in this dict.
# This can be an arbitrarily nested dict/list of URLs (see the `_split_generators` method below).
_URLS = {
    _DATASETNAME: "https://storage.googleapis.com/babert-pretraining/IndoNLG_finals/downstream_task/downstream_task_datasets.zip"
}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"


class TyDiQAIdDataset(datasets.GeneratorBasedBuilder):
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="tydiqa_id_source",
            version=SOURCE_VERSION,
            description="TyDiQA Id source schema",
            schema="source",
            subset_id="tydiqa_id",
        ),
        NusantaraConfig(
            name="tydiqa_id_nusantara_qa",
            version=NUSANTARA_VERSION,
            description="TyDiQA Id Nusantara schema",
            schema="nusantara_qa",
            subset_id="tydiqa_id",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tydiqa_id_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_qa":
            features = schemas.qa_features
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        url = _URLS[_DATASETNAME]
        base_path = Path(dl_manager.download_and_extract(url))
        train_data_path = base_path / "IndoNLG_downstream_tasks" / "question_answering" / "train_preprocess.json"
        valid_data_path = base_path / "IndoNLG_downstream_tasks" / "question_answering" / "valid_preprocess.json"
        test_data_path = base_path / "IndoNLG_downstream_tasks" / "question_answering" / "test_preprocess.json"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_data_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": valid_data_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_data_path},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        # Each preprocessed file holds a JSON list of examples with
        # "id", "context", "question", and "label" (answer text) fields.
        with open(filepath, "r", encoding="utf-8") as file:
            examples = json.load(file)

        if self.config.schema == "source":
            for example in examples:
                yield example["id"], example
        elif self.config.schema == "nusantara_qa":
            # Map the source fields onto the shared Nusantara QA schema.
            for example in examples:
                yield example["id"], {
                    "id": example["id"],
                    "question_id": example["id"],
                    "document_id": example["id"],
                    "question": example["question"],
                    "type": "abstractive",
                    "choices": [],
                    "context": example["context"],
                    "answer": [example["label"]],
                }
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
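
For reference, a minimal usage sketch follows. It assumes a `datasets` release that still supports script-based loading and that the `nusacrowd` package (which provides `NusantaraConfig` and the shared QA schema) is installed; the config names come from `BUILDER_CONFIGS` above.

# Usage sketch (assumptions: the installed `datasets` version still supports
# loading local scripts, and `nusacrowd` is importable).
from datasets import load_dataset

# Source schema: raw id/context/question/label fields.
tydiqa_source = load_dataset("tydiqa_id.py", name="tydiqa_id_source")

# Nusantara QA schema: fields normalized to schemas.qa_features.
tydiqa_qa = load_dataset("tydiqa_id.py", name="tydiqa_id_nusantara_qa")

# Each config exposes the train/validation/test splits built in _split_generators.
print(tydiqa_source["train"][0])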