Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
yuvalr committed on
Commit
34ead2b
·
1 Parent(s): 43d0d37

Upload mnli.py

Browse files
Files changed (1) hide show
  1. mnli.py +230 -0
mnli.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """The General Language Understanding Evaluation (GLUE) benchmark."""
18
+
19
+
20
+ import csv
21
+ import os
22
+ import textwrap
23
+ import json
24
+
25
+ import numpy as np
26
+
27
+ import datasets
28
+
29
+
30
+ _GLUE_CITATION = """\
31
+ @inproceedings{wang2019glue,
32
+ title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
33
+ author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
34
+ note={In the Proceedings of ICLR.},
35
+ year={2019}
36
+ }
37
+ """
38
+
39
+ _GLUE_DESCRIPTION = """\
40
+ GLUE, the General Language Understanding Evaluation benchmark
41
+ (https://gluebenchmark.com/) is a collection of resources for training,
42
+ evaluating, and analyzing natural language understanding systems.
43
+ """
44
+
45
+ _MNLI_BASE_KWARGS = dict(
46
+ text_features={
47
+ "premise": "sentence1",
48
+ "hypothesis": "sentence2",
49
+ },
50
+ label_classes=["entailment", "neutral", "contradiction"],
51
+ label_column="label",
52
+ data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
53
+ data_dir="MNLI",
54
+ citation=textwrap.dedent(
55
+ """\
56
+ @InProceedings{N18-1101,
57
+ author = "Williams, Adina
58
+ and Nangia, Nikita
59
+ and Bowman, Samuel",
60
+ title = "A Broad-Coverage Challenge Corpus for
61
+ Sentence Understanding through Inference",
62
+ booktitle = "Proceedings of the 2018 Conference of
63
+ the North American Chapter of the
64
+ Association for Computational Linguistics:
65
+ Human Language Technologies, Volume 1 (Long
66
+ Papers)",
67
+ year = "2018",
68
+ publisher = "Association for Computational Linguistics",
69
+ pages = "1112--1122",
70
+ location = "New Orleans, Louisiana",
71
+ url = "http://aclweb.org/anthology/N18-1101"
72
+ }
73
+ @article{bowman2015large,
74
+ title={A large annotated corpus for learning natural language inference},
75
+ author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
76
+ journal={arXiv preprint arXiv:1508.05326},
77
+ year={2015}
78
+ }"""
79
+ ),
80
+ url="http://www.nyu.edu/projects/bowman/multinli/",
81
+ )
82
+
83
+
84
class GlueConfig(datasets.BuilderConfig):
    """BuilderConfig for GLUE-style datasets.

    Bundles everything a single GLUE task needs: which columns hold the
    text, which column holds the label, where to download the data from,
    and how to cite it.
    """

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """Construct a GlueConfig.

        Args:
            text_features: `dict[string, string]` mapping each text feature
                name to the corresponding column name in the source file.
            label_column: `string`, column in the source file that holds the label.
            data_url: `string`, URL of the zip archive to download.
            data_dir: `string`, folder inside the downloaded zip that holds the data files.
            citation: `string`, citation for this data set.
            url: `string`, homepage with information about this data set.
            label_classes: `list[string]` of class names when the label is
                categorical; when omitted the label is exposed as
                `datasets.Value('float32')`.
            process_label: `Function[string, any]` converting a raw label value
                into the form the label feature expects (identity by default).
            **kwargs: forwarded to `datasets.BuilderConfig`.
        """
        # All GLUE configs share the same fixed dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        # Where the data lives and how to fetch it.
        self.data_url = data_url
        self.data_dir = data_dir
        # How to read examples out of the raw files.
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.process_label = process_label
        # Provenance / attribution.
        self.citation = citation
        self.url = url
126
+
127
+
128
class Glue(datasets.GeneratorBasedBuilder):
    """Bias-amplified MNLI splits, exposed through the GLUE builder machinery.

    Each config ("minority_examples" / "partial_input") provides biased and
    anti-biased variants of the MNLI train and validation splits, stored as
    JSON-lines files under a folder named after the config.
    """

    BUILDER_CONFIGS = [
        GlueConfig(
            name=bias_amplified_splits_type,
            description=textwrap.dedent(
                """\
            The Multi-Genre Natural Language Inference Corpus is a crowdsourced
            collection of sentence pairs with textual entailment annotations. Given a premise sentence
            and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
            (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
            gathered from ten different sources, including transcribed speech, fiction, and government reports.
            We use the standard test set, for which we obtained private labels from the authors, and evaluate
            on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
            the SNLI corpus as 550k examples of auxiliary training data."""
            ),
            **_MNLI_BASE_KWARGS,
        )
        for bias_amplified_splits_type in ["minority_examples", "partial_input"]
    ]

    # Every config ships the same six splits: biased/anti-biased variants of
    # train, validation_matched and validation_mismatched.
    _SPLIT_NAMES = [
        "train.biased",
        "train.anti-biased",
        "validation_matched.biased",
        "validation_matched.anti-biased",
        "validation_mismatched.biased",
        "validation_mismatched.anti-biased",
    ]

    def _info(self):
        """Build the DatasetInfo: text features, label (class or float), and idx."""
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            # No class names configured: expose the label as a raw float.
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_GLUE_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _GLUE_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one JSON-lines file per split from <config_name>/<split_name>.jsonl."""
        # One generator per split name; the repeated boilerplate of the
        # original hand-written list is folded into a single comprehension.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, split_name + ".jsonl")),
                },
            )
            for split_name in self._SPLIT_NAMES
        ]

    def _generate_examples(self, filepath):
        """Generate examples from a JSON-lines file.

        Args:
            filepath: path to a .jsonl file with one example object per line.

        Yields:
            (idx, example) pairs where example contains "idx", "premise",
            "hypothesis" and "label".
        """
        process_label = self.config.process_label

        # Use a context manager so the file handle is closed even if a line
        # fails to parse (the original leaked the descriptor).
        with open(filepath, "rb") as f:
            for line in f:
                line = line.strip().decode("utf-8")
                if not line:
                    # Skip blank lines instead of crashing in json.loads.
                    continue
                item = json.loads(line)
                example = {
                    "idx": item["idx"],
                    "premise": item["premise"],
                    "hypothesis": item["hypothesis"],
                }
                if self.config.label_column in item:
                    example["label"] = process_label(item[self.config.label_column])
                else:
                    # Unlabeled example (e.g. a test split): sentinel label -1.
                    example["label"] = process_label(-1)

                yield example["idx"], example