Datasets:

Languages:
Indonesian
ArXiv:
License:
holylovenia committed on
Commit
07c7499
1 Parent(s): f83d6d7

Upload qasina.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. qasina.py +173 -0
qasina.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import json
18
+ from pathlib import Path
19
+ from typing import Dict, List, Tuple
20
+
21
+ import datasets
22
+
23
+ from seacrowd.utils import schemas
24
+ from seacrowd.utils.configs import SEACrowdConfig
25
+ from seacrowd.utils.constants import Licenses, Tasks
26
+
# --- Dataset metadata -------------------------------------------------------

# BibTeX entry for the dataset paper (arXiv:2310.08102).
_CITATION = """\
@misc{rizqullah2023qasina,
title={QASiNa: Religious Domain Question Answering using Sirah Nabawiyah},
author={Muhammad Razif Rizqullah and Ayu Purwarianti and Alham Fikri Aji},
year={2023},
eprint={2310.08102},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DATASETNAME = "qasina"

_DESCRIPTION = """\
Question Answering Sirah Nabawiyah Dataset (QASiNa) is Extractive \
QA Dataset which build to perform QA task in Sirah Nabawiyah domain.
"""

_HOMEPAGE = "https://github.com/rizquuula/QASiNa"

# ISO 639-3 code(s) for the dataset language(s): Indonesian.
_LANGUAGES = ["ind"]

_LICENSE = Licenses.MIT.value

# The data is fetched from a public URL, not a local path.
_LOCAL = False

# Single JSON file hosted in the upstream GitHub repository.
_URLS = {
    _DATASETNAME: "https://github.com/rizquuula/QASiNa/raw/main/QASiNa.json",
}

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]

# Version strings consumed by the builder configs below.
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

class QasinaDataset(datasets.GeneratorBasedBuilder):
    """Question Answering Sirah Nabawiyah Dataset (QASiNa).

    An Indonesian extractive question-answering dataset in the Sirah
    Nabawiyah (prophetic biography) domain. Exposes two schemas:
    ``source`` (one example per context, with nested QA annotations) and
    ``seacrowd_qa`` (one example per question/answer pair).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "qa"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features for the selected schema."""
        if self.config.schema == "source":
            # Mirrors the raw JSON layout: one record per context, carrying
            # a sequence of question/answer annotations.
            features = datasets.Features(
                {
                    "context_id": datasets.Value("int32"),
                    "context": datasets.Value("string"),
                    "question_answers": datasets.Sequence(
                        {
                            "type": datasets.Value("string"),
                            "question": datasets.Value("string"),
                            "answer": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                            "question_id": datasets.Value("int32"),
                        }
                    ),
                    "context_length": datasets.Value("int32"),
                    "context_title": datasets.Value("string"),
                }
            )

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.qa.features
            # Extra per-example fields the generic QA schema does not carry.
            features["meta"] = {
                "context_title": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
                "context_length": datasets.Value("int32"),
                "type": datasets.Value("string"),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the single upstream JSON file.

        The dataset ships only a train split.
        """
        urls = _URLS[_DATASETNAME]
        filepath = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs in the configured schema.

        Source schema: one example per context, nested QA pairs intact.
        SEACrowd QA schema: one flattened example per question/answer pair.
        """
        # Fix: explicit encoding so JSON parsing does not depend on the
        # platform's default locale encoding.
        with open(filepath, encoding="utf-8") as file:
            dataset = json.load(file)

        if self.config.schema == "source":
            for i, line in enumerate(dataset):
                yield i, {
                    "context_id": line["context_id"],
                    "context": line["context"],
                    "question_answers": [
                        {
                            "type": subline["type"],
                            "question": subline["question"],
                            "answer": subline["answer"],
                            "answer_start": subline["answer_start"],
                            "question_id": subline["question_id"],
                        }
                        for subline in line["question_answers"]
                    ],
                    "context_length": line["context_length"],
                    "context_title": line["context_title"],
                }

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for line in dataset:
                for question_answer in line["question_answers"]:
                    # Renamed from `id` to avoid shadowing the builtin;
                    # question_id serves as the unique example key.
                    qa_id = question_answer["question_id"]

                    yield qa_id, {
                        "id": qa_id,
                        "question_id": question_answer["question_id"],
                        "document_id": line["context_id"],
                        "question": question_answer["question"],
                        "type": "extractive",
                        "choices": [],
                        "context": line["context"],
                        "answer": [question_answer["answer"]],
                        "meta": {
                            "context_title": line["context_title"],
                            "answer_start": question_answer["answer_start"],
                            "context_length": line["context_length"],
                            "type": question_answer["type"],
                        },
                    }