holylovenia committed
Commit 70c9b35
1 Parent(s): b67153a

Upload indoqa.py with huggingface_hub

Files changed (1)
  indoqa.py +152 -0
indoqa.py ADDED
@@ -0,0 +1,152 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @misc{IndoQA,
+   author = {{Jakarta Artificial Intelligence Research}},
+   title = {IndoQA: Building Indonesian QA dataset},
+   year = {2023},
+   url = {https://huggingface.co/datasets/jakartaresearch/indoqa}
+ }
+ """
+
+ _DATASETNAME = "indoqa"
+
+ _DESCRIPTION = """\
+ IndoQA is a monolingual question-answering dataset for the Indonesian language (ind).
+ It comprises 4,413 examples with a 3:1 split between the training and validation sets.
+ Each example consists of a context paragraph along with an associated question-answer pair.
+ """
+
+ _HOMEPAGE = "https://jakartaresearch.com/"
+ _LICENSE = Licenses.CC_BY_ND_4_0.value
+
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://drive.google.com/uc?id=1ND893H5x2gaPRRMJVajQ4hgqpopHoD0u",
+         "validation": "https://drive.google.com/uc?id=1mq_foV72riXb1KVBirJzTFZEe7oa8f4f",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class IndoQADataset(datasets.GeneratorBasedBuilder):
+     """IndoQA: a monolingual Indonesian question-answering dataset comprising 4,413 QA pairs, each with a context paragraph."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_qa",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_qa",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "category": datasets.Value("string"),
+                     "span_start": datasets.Value("int32"),
+                     "span_end": datasets.Value("int32"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_qa":
+             # Extend the common SEACrowd QA schema with the answer-span offsets.
+             features = schemas.qa_features
+             features["meta"]["span_start"] = datasets.Value("int32")
+             features["meta"]["span_end"] = datasets.Value("int32")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_paths = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_paths["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_paths["validation"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # Each split is a single JSON file: a list of records with the keys
+         # id, question, answer, context, category, span_start, and span_end.
+         with open(filepath, "r", encoding="utf-8") as file:
+             examples = json.load(file)
+
+         if self.config.schema == "source":
+             for key, example in enumerate(examples):
+                 yield key, example
+
+         elif self.config.schema == "seacrowd_qa":
+             for key, example in enumerate(examples):
+                 yield key, {
+                     "id": str(example["id"]),
+                     "question_id": example["id"],
+                     "document_id": "",
+                     "question": example["question"],
+                     "type": example["category"],
+                     "choices": [],  # no multiple-choice options in this dataset
+                     "context": example["context"],
+                     "answer": [example["answer"]],
+                     "meta": {
+                         "span_start": example["span_start"],
+                         "span_end": example["span_end"],
+                     },
+                 }
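
For reference, a minimal usage sketch for the uploaded script, assuming a local copy of indoqa.py, the seacrowd package installed, and a datasets release that still supports script-based loaders (script loading was removed in datasets 3.x); the file path is illustrative:

from datasets import load_dataset

# Source schema: the raw fields (id, question, answer, context, category, span_start, span_end).
indoqa_source = load_dataset("indoqa.py", name="indoqa_source")

# SEACrowd QA schema: normalized QA fields, with the answer-span offsets kept under "meta".
indoqa_qa = load_dataset("indoqa.py", name="indoqa_seacrowd_qa")

print(indoqa_source["train"][0]["question"])
print(indoqa_qa["validation"][0]["meta"])  # e.g. {"span_start": ..., "span_end": ...}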