holylovenia committed on
Commit
58dcdcb
1 Parent(s): 10239b9

Upload tydiqa_id_nli.py with huggingface_hub

Files changed (1)
  1. tydiqa_id_nli.py +211 -0
tydiqa_id_nli.py ADDED
@@ -0,0 +1,211 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The TyDIQA_ID-NLI dataset is derived from the TyDIQA_ID \
+ question answering dataset, utilizing named \
+ entity recognition (NER), chunking tags, \
+ regex, and embedding similarity techniques \
+ to determine its contradiction sets. \
+ Collected through this process, \
+ the dataset comprises various columns beyond \
+ premise, hypothesis, and label, including \
+ properties aligned with NER and chunking tags. \
+ This dataset is designed to facilitate Natural \
+ Language Inference (NLI) tasks and contains \
+ information extracted from diverse sources \
+ to provide comprehensive coverage. Each data \
+ instance encapsulates premise, hypothesis, label, \
+ and additional properties pertinent to NLI evaluation.
+ """
+ import csv
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks, Licenses
+
+ # The workshop submission is on 18 April. I will update this _CITATION placeholder then.
+ _CITATION = """\
+ @article{,
+     author = {},
+     title = {},
+     journal = {},
+     volume = {},
+     year = {},
+     url = {},
+     doi = {},
+     biburl = {},
+     bibsource = {}
+ }
+ """
+
+ _DATASETNAME = "tydiqa_id_nli"
+
+ _DESCRIPTION = """
+ The TyDIQA_ID-NLI dataset is derived from the TyDIQA_ID \
+ question answering dataset, utilizing named \
+ entity recognition (NER), chunking tags, \
+ regex, and embedding similarity techniques \
+ to determine its contradiction sets. \
+ Collected through this process, \
+ the dataset comprises various columns beyond \
+ premise, hypothesis, and label, including \
+ properties aligned with NER and chunking tags. \
+ This dataset is designed to facilitate Natural \
+ Language Inference (NLI) tasks and contains \
+ information extracted from diverse sources \
+ to provide comprehensive coverage. Each data \
+ instance encapsulates premise, hypothesis, label, \
+ and additional properties pertinent to NLI evaluation.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/muhammadravi251001/tydiqaid-nli"
+
+ _LANGUAGES = ["ind"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "train": "https://huggingface.co/datasets/muhammadravi251001/tydiqaid-nli/resolve/main/tydi-qa-id_nli_train_df.csv?download=true",
+     "val": "https://huggingface.co/datasets/muhammadravi251001/tydiqaid-nli/raw/main/tydi-qa-id_nli_val_df.csv",
+     "test": "https://huggingface.co/datasets/muhammadravi251001/tydiqaid-nli/raw/main/tydi-qa-id_nli_test_df.csv",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class TyDIQAIDNLIDataset(datasets.GeneratorBasedBuilder):
+     """
+     The TyDIQA_ID-NLI dataset is derived from the TyDIQA_ID \
+     question answering dataset, utilizing named \
+     entity recognition (NER), chunking tags, \
+     regex, and embedding similarity techniques \
+     to determine its contradiction sets. \
+     Collected through this process, \
+     the dataset comprises various columns beyond \
+     premise, hypothesis, and label, including \
+     properties aligned with NER and chunking tags. \
+     This dataset is designed to facilitate Natural \
+     Language Inference (NLI) tasks and contains \
+     information extracted from diverse sources \
+     to provide comprehensive coverage. Each data \
+     instance encapsulates premise, hypothesis, label, \
+     and additional properties pertinent to NLI evaluation.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_pairs",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_pairs",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+     labels = ["entailment", "neutral", "contradiction"]
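+     # Note: the label strings stored in the CSV files are assumed to match these class
+     # names exactly, since ClassLabel features encode the raw strings read from the CSVs.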
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "premise": datasets.Value("string"),
+                     "hypothesis": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=self.labels),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_pairs":
+             features = schemas.pairs_features(self.labels)
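+             # The SEACrowd "pairs" schema is expected to provide id, text_1, text_2 and
+             # label fields, matching what _generate_examples yields for this config.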
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         train_path = dl_manager.download_and_extract(_URLS["train"])
+         val_path = dl_manager.download_and_extract(_URLS["val"])
+         test_path = dl_manager.download_and_extract(_URLS["test"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": train_path,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": test_path,
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": val_path,
+                     "split": "val",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             with open(filepath, encoding="utf-8") as csv_file:
+                 csv_reader = csv.DictReader(csv_file)
+                 for idx, row in enumerate(csv_reader):
+                     yield idx, {"premise": row["premise"], "hypothesis": row["hypothesis"], "label": row["label"]}
+
+         elif self.config.schema == "seacrowd_pairs":
+             with open(filepath, encoding="utf-8") as csv_file:
+                 csv_reader = csv.DictReader(csv_file)
+                 for idx, row in enumerate(csv_reader):
+                     yield idx, {"id": str(idx), "text_1": row["premise"], "text_2": row["hypothesis"], "label": row["label"]}
+
+
+ # This template is based on the following template from the datasets package:
+ # https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
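
For reference, a minimal usage sketch for the uploaded script (assumptions: the `seacrowd` package is installed, the script sits in the working directory, and the config/split names follow the definitions above; depending on the installed `datasets` version, `trust_remote_code=True` may also need to be passed to `load_dataset`):

    import datasets

    # Source schema: premise / hypothesis / label columns
    source = datasets.load_dataset("tydiqa_id_nli.py", name="tydiqa_id_nli_source", split="train")

    # SEACrowd pairs schema: id / text_1 / text_2 / label columns
    pairs = datasets.load_dataset("tydiqa_id_nli.py", name="tydiqa_id_nli_seacrowd_pairs", split="validation")

    print(source[0]["premise"], source[0]["label"])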