holylovenia committed
Commit 18dffa7
1 Parent(s): f3efbd1

Upload idk_mrc_nli.py with huggingface_hub

Files changed (1):
  idk_mrc_nli.py  +213 -0
idk_mrc_nli.py ADDED
@@ -0,0 +1,213 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The IDKMRC-NLI dataset is derived from the IDK-MRC question answering
dataset, utilizing named entity recognition (NER), chunking tags, regex,
and embedding similarity techniques to determine its contradiction sets.
Collected through this process, the dataset comprises various columns
beyond premise, hypothesis, and label, including properties aligned with
NER and chunking tags. The dataset is designed to facilitate Natural
Language Inference (NLI) tasks and contains information extracted from
diverse sources to provide comprehensive coverage. Each data instance
encapsulates a premise, a hypothesis, a label, and additional properties
pertinent to NLI evaluation.
"""
import csv
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# The workshop submission is on 18 April; _CITATION will be updated then.
_CITATION = """\
@article{,
  author = {},
  title = {},
  journal = {},
  volume = {},
  year = {},
  url = {},
  doi = {},
  biburl = {},
  bibsource = {}
}
"""

_DATASETNAME = "idk_mrc_nli"

_DESCRIPTION = """
The IDKMRC-NLI dataset is derived from the IDK-MRC question answering
dataset, utilizing named entity recognition (NER), chunking tags, regex,
and embedding similarity techniques to determine its contradiction sets.
Collected through this process, the dataset comprises various columns
beyond premise, hypothesis, and label, including properties aligned with
NER and chunking tags. The dataset is designed to facilitate Natural
Language Inference (NLI) tasks and contains information extracted from
diverse sources to provide comprehensive coverage. Each data instance
encapsulates a premise, a hypothesis, a label, and additional properties
pertinent to NLI evaluation.
"""

_HOMEPAGE = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli"

_LANGUAGES = ["ind"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

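# Direct links to the per-split CSV files hosted on the dataset's Hugging Face repo.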
_URLS = {
    "train": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/resolve/main/idk-mrc_nli_train_df.csv?download=true",
    "val": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_val_df.csv",
    "test": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_test_df.csv",
}

_SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

class IDKMRCNLIDataset(datasets.GeneratorBasedBuilder):
    """
    The IDKMRC-NLI dataset is derived from the IDK-MRC question answering
    dataset, utilizing named entity recognition (NER), chunking tags, regex,
    and embedding similarity techniques to determine its contradiction sets.
    Collected through this process, the dataset comprises various columns
    beyond premise, hypothesis, and label, including properties aligned with
    NER and chunking tags. The dataset is designed to facilitate Natural
    Language Inference (NLI) tasks and contains information extracted from
    diverse sources to provide comprehensive coverage. Each data instance
    encapsulates a premise, a hypothesis, a label, and additional properties
    pertinent to NLI evaluation.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_pairs",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_pairs",
            subset_id=f"{_DATASETNAME}",
        ),
    ]
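
    # Either schema can be selected by its config name, e.g. "idk_mrc_nli_source"
    # or "idk_mrc_nli_seacrowd_pairs"; the source schema is the default.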
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
    labels = ["entailment", "neutral", "contradiction"]

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=self.labels),
                }
            )

        elif self.config.schema == "seacrowd_pairs":
            features = schemas.pairs_features(self.labels)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        train_path = dl_manager.download_and_extract(_URLS["train"])
        val_path = dl_manager.download_and_extract(_URLS["val"])
        test_path = dl_manager.download_and_extract(_URLS["test"])
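        # download_and_extract caches each remote CSV locally and returns the
        # local file path, which is passed to _generate_examples via gen_kwargs.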

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": val_path,
                    "split": "val",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        if self.config.schema == "source":
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.DictReader(csv_file)
                for idx, row in enumerate(csv_reader):
                    yield idx, {"premise": row["premise"], "hypothesis": row["hypothesis"], "label": row["label"]}

        elif self.config.schema == "seacrowd_pairs":
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.DictReader(csv_file)
                for idx, row in enumerate(csv_reader):
                    yield idx, {"id": str(idx), "text_1": row["premise"], "text_2": row["hypothesis"], "label": row["label"]}


# This script is based on the following template from the datasets package:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
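
For reference, a minimal usage sketch, assuming the script is saved locally as idk_mrc_nli.py, the seacrowd package is importable, and a datasets version recent enough to require trust_remote_code for script-based datasets:

import datasets

# Source schema: premise / hypothesis / label.
source = datasets.load_dataset("idk_mrc_nli.py", name="idk_mrc_nli_source", trust_remote_code=True)
print(source["train"][0])

# SEACrowd pairs schema: id / text_1 / text_2 / label.
pairs = datasets.load_dataset("idk_mrc_nli.py", name="idk_mrc_nli_seacrowd_pairs", trust_remote_code=True)
print(pairs["validation"][0])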