trminhnam20082002 committed
Commit 7672994
1 Parent(s): c7528b3
.DS_Store ADDED
Binary file (6.15 kB).
 
VieGLUE.py ADDED
@@ -0,0 +1,581 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """VieGLUE: a Vietnamese version of the GLUE benchmark tasks."""
+
+ import json
+ import os
+ import textwrap
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+ ######################
+ #### DESCRIPTIONS ####
+ ######################
+ _DESCRIPTION = """\
+ """
+
+ ###################
+ #### CITATIONS ####
+ ###################
+ _CITATION = """\
+ """
+
+
+ #######################
+ #### DOWNLOAD URLs ####
+ #######################
+ _DOWNLOAD_URL = {
+     "ax": {
+         "test": [os.path.join("data", "ax", "test.tar.gz")],
+     },
+     "cola": {
+         "train": [os.path.join("data", "cola", "train.tar.gz")],
+         "test": [os.path.join("data", "cola", "test.tar.gz")],
+         "validation": [os.path.join("data", "cola", "validation.tar.gz")],
+     },
+     "mnli": {
+         "train": [os.path.join("data", "mnli", "train.tar.gz")],
+         # The repository ships data/mnli/test_matched.tar.gz (there is no
+         # data/mnli/test.tar.gz), so the matched test split must point there.
+         "test_matched": [os.path.join("data", "mnli", "test_matched.tar.gz")],
+         "validation_matched": [
+             os.path.join("data", "mnli", "validation_matched.tar.gz")
+         ],
+         "test_mismatched": [os.path.join("data", "mnli", "test_mismatched.tar.gz")],
+         "validation_mismatched": [
+             os.path.join("data", "mnli", "validation_mismatched.tar.gz")
+         ],
+     },
+     "mrpc": {
+         "train": [os.path.join("data", "mrpc", "train.tar.gz")],
+         "test": [os.path.join("data", "mrpc", "test.tar.gz")],
+         "validation": [os.path.join("data", "mrpc", "validation.tar.gz")],
+     },
+     "qnli": {
+         "train": [os.path.join("data", "qnli", "train.tar.gz")],
+         "test": [os.path.join("data", "qnli", "test.tar.gz")],
+         "validation": [os.path.join("data", "qnli", "validation.tar.gz")],
+     },
+     "qqp": {
+         "train": [os.path.join("data", "qqp", "train.tar.gz")],
+         "test": [os.path.join("data", "qqp", "test.tar.gz")],
+         "validation": [os.path.join("data", "qqp", "validation.tar.gz")],
+     },
+     "rte": {
+         "train": [os.path.join("data", "rte", "train.tar.gz")],
+         "test": [os.path.join("data", "rte", "test.tar.gz")],
+         "validation": [os.path.join("data", "rte", "validation.tar.gz")],
+     },
+     "sst2": {
+         "train": [os.path.join("data", "sst2", "train.tar.gz")],
+         "test": [os.path.join("data", "sst2", "test.tar.gz")],
+         "validation": [os.path.join("data", "sst2", "validation.tar.gz")],
+     },
+     "stsb": {
+         "train": [os.path.join("data", "stsb", "train.tar.gz")],
+         "test": [os.path.join("data", "stsb", "test.tar.gz")],
+         "validation": [os.path.join("data", "stsb", "validation.tar.gz")],
+     },
+     "wnli": {
+         "train": [os.path.join("data", "wnli", "train.tar.gz")],
+         "test": [os.path.join("data", "wnli", "test.tar.gz")],
+         "validation": [os.path.join("data", "wnli", "validation.tar.gz")],
+     },
+ }
+
+ SUBSET_KWARGS = {
+     "ax": {
+         "name": "ax",
+         "text_features": ["premise", "hypothesis"],
+         "label_classes": ["entailment", "neutral", "contradiction"],
+         "label_column": "",
+         "citation": "",
+         "description": textwrap.dedent(
+             """\
+             A manually-curated evaluation dataset for fine-grained analysis of
+             system performance on a broad range of linguistic phenomena. This
+             dataset evaluates sentence understanding through Natural Language
+             Inference (NLI) problems. Use a model trained on MultiNLI to produce
+             predictions for this dataset."""
+         ),
+     },
+     "cola": {
+         "name": "cola",
+         "text_features": ["sentence"],
+         "label_classes": ["unacceptable", "acceptable"],
+         "label_column": "is_acceptable",
+         "citation": textwrap.dedent(
+             """\
+             @article{warstadt2018neural,
+               title={Neural Network Acceptability Judgments},
+               author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
+               journal={arXiv preprint arXiv:1805.12471},
+               year={2018}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Corpus of Linguistic Acceptability consists of English
+             acceptability judgments drawn from books and journal articles on
+             linguistic theory. Each example is a sequence of words annotated
+             with whether it is a grammatical English sentence."""
+         ),
+     },
+     "mnli": {
+         "name": "mnli",
+         "text_features": ["premise", "hypothesis"],
+         "label_classes": ["entailment", "neutral", "contradiction"],
+         "label_column": "gold_label",
+         "citation": textwrap.dedent(
+             """\
+             @InProceedings{N18-1101,
+               author    = "Williams, Adina and Nangia, Nikita and Bowman, Samuel",
+               title     = "A Broad-Coverage Challenge Corpus for
+                            Sentence Understanding through Inference",
+               booktitle = "Proceedings of the 2018 Conference of the North American
+                            Chapter of the Association for Computational Linguistics:
+                            Human Language Technologies, Volume 1 (Long Papers)",
+               year      = "2018",
+               publisher = "Association for Computational Linguistics",
+               pages     = "1112--1122",
+               location  = "New Orleans, Louisiana",
+               url       = "http://aclweb.org/anthology/N18-1101"
+             }
+             @article{bowman2015large,
+               title={A large annotated corpus for learning natural language inference},
+               author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
+               journal={arXiv preprint arXiv:1508.05326},
+               year={2015}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Multi-Genre Natural Language Inference Corpus is a crowdsourced
+             collection of sentence pairs with textual entailment annotations. Given a premise sentence
+             and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
+             (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
+             gathered from ten different sources, including transcribed speech, fiction, and government reports.
+             We use the standard test set, for which we obtained private labels from the authors, and evaluate
+             on both the matched (in-domain) and mismatched (cross-domain) sections. We also use and recommend
+             the SNLI corpus as 550k examples of auxiliary training data."""
+         ),
+     },
+     "mrpc": {
+         "name": "mrpc",
+         "text_features": ["sentence1", "sentence2"],
+         "label_classes": ["not_equivalent", "equivalent"],
+         "label_column": "Quality",
+         "citation": textwrap.dedent(
+             """\
+             @inproceedings{dolan2005automatically,
+               title={Automatically constructing a corpus of sentential paraphrases},
+               author={Dolan, William B and Brockett, Chris},
+               booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
+               year={2005}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
+             sentence pairs automatically extracted from online news sources, with human annotations
+             for whether the sentences in the pair are semantically equivalent."""
+         ),  # pylint: disable=line-too-long
+     },
+     "qnli": {
+         "name": "qnli",
+         "text_features": ["question", "sentence"],
+         "label_classes": ["entailment", "not_entailment"],
+         "label_column": "label",
+         "citation": textwrap.dedent(
+             """\
+             @article{rajpurkar2016squad,
+               title={SQuAD: 100,000+ questions for machine comprehension of text},
+               author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
+               journal={arXiv preprint arXiv:1606.05250},
+               year={2016}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Stanford Question Answering Dataset is a question-answering
+             dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
+             from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
+             convert the task into sentence pair classification by forming a pair between each question and each
+             sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
+             question and the context sentence. The task is to determine whether the context sentence contains
+             the answer to the question. This modified version of the original task removes the requirement that
+             the model select the exact answer, but also removes the simplifying assumptions that the answer
+             is always present in the input and that lexical overlap is a reliable cue."""
+         ),  # pylint: disable=line-too-long
+     },
+     "qqp": {
+         "name": "qqp",
+         "text_features": ["question1", "question2"],
+         "label_classes": ["not_duplicate", "duplicate"],
+         "label_column": "is_duplicate",
+         "citation": textwrap.dedent(
+             """\
+             @online{WinNT,
+               author  = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
+               title   = {First Quora Dataset Release: Question Pairs},
+               year    = {2017},
+               url     = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
+               urldate = {2019-04-03}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Quora Question Pairs dataset is a collection of question pairs from the
+             community question-answering website Quora. The task is to determine whether a
+             pair of questions are semantically equivalent."""
+         ),
+     },
+     "rte": {
+         "name": "rte",
+         "text_features": ["sentence1", "sentence2"],
+         "label_classes": ["entailment", "not_entailment"],
+         "label_column": "label",
+         "citation": textwrap.dedent(
+             """\
+             @inproceedings{dagan2005pascal,
+               title={The PASCAL recognising textual entailment challenge},
+               author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
+               booktitle={Machine Learning Challenges Workshop},
+               pages={177--190},
+               year={2005},
+               organization={Springer}
+             }
+             @inproceedings{bar2006second,
+               title={The second PASCAL recognising textual entailment challenge},
+               author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
+               booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
+               volume={6},
+               number={1},
+               pages={6--4},
+               year={2006},
+               organization={Venice}
+             }
+             @inproceedings{giampiccolo2007third,
+               title={The third PASCAL recognizing textual entailment challenge},
+               author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
+               booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
+               pages={1--9},
+               year={2007},
+               organization={Association for Computational Linguistics}
+             }
+             @inproceedings{bentivogli2009fifth,
+               title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
+               author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
+               booktitle={TAC},
+               year={2009}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
+             entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
+             et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
+             constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
+             for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
+         ),  # pylint: disable=line-too-long
+     },
+     "sst2": {
+         "name": "sst2",
+         "text_features": ["sentence"],
+         "label_classes": ["negative", "positive"],
+         "label_column": "label",
+         "citation": textwrap.dedent(
+             """\
+             @inproceedings{socher2013recursive,
+               title={Recursive deep models for semantic compositionality over a sentiment treebank},
+               author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
+               booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
+               pages={1631--1642},
+               year={2013}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Stanford Sentiment Treebank consists of sentences from movie reviews and
+             human annotations of their sentiment. The task is to predict the sentiment of a
+             given sentence. We use the two-way (positive/negative) class split, and use only
+             sentence-level labels."""
+         ),
+     },
+     "stsb": {
+         "name": "stsb",
+         "text_features": ["sentence1", "sentence2"],
+         "label_classes": None,
+         "label_column": "score",
+         "citation": textwrap.dedent(
+             """\
+             @inproceedings{cer2017semeval,
+               title={SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation},
+               author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
+               booktitle={Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)},
+               pages={1--14},
+               year={2017}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
+             sentence pairs drawn from news headlines, video and image captions, and natural language
+             inference data. Each pair is human-annotated with a similarity score from 1 to 5. We
+             convert this to a binary classification task by labeling examples with a similarity score
+             >= 4.5 as entailment and < 4.5 as not entailment."""
+         ),
+         "process_label": float,
+     },
+     "wnli": {
+         "name": "wnli",
+         "text_features": ["sentence1", "sentence2"],
+         "label_classes": ["not_entailment", "entailment"],
+         "label_column": "label",
+         "citation": textwrap.dedent(
+             """\
+             @inproceedings{levesque2012winograd,
+               title={The Winograd schema challenge},
+               author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
+               booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
+               year={2012}
+             }"""
+         ),
+         "description": textwrap.dedent(
+             """\
+             The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
+             in which a system must read a sentence with a pronoun and select the referent of that pronoun from
+             a list of choices. The examples are manually constructed to foil simple statistical methods: Each
+             one is contingent on contextual information provided by a single word or phrase in the sentence.
+             To convert the problem into sentence pair classification, we construct sentence pairs by replacing
+             the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
+             pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
+             new examples derived from fiction books that was shared privately by the authors of the original
+             corpus. While the included training set is balanced between two classes, the test set is imbalanced
+             between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
+             hypotheses are sometimes shared between training and development examples, so if a model memorizes the
+             training examples, it will predict the wrong label on the corresponding development set
+             example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
+             between a model's score on this task and its score on the unconverted original task. We
+             call this converted dataset WNLI (Winograd NLI)."""
+         ),
+     },
+ }
+
+
+ _VERSION = datasets.Version("1.0.0", "")
+
+
+ class VieGLUEConfig(datasets.BuilderConfig):
+     """BuilderConfig for VieGLUE."""
+
+     def __init__(
+         self,
+         text_features,
+         label_column="",
+         data_url="",
+         data_dir="",
+         citation="",
+         url="",
+         label_classes=None,
+         process_label=lambda x: x,
+         **kwargs,
+     ):
+         """BuilderConfig for VieGLUE.
+
+         Args:
+           text_features: `list[string]`, names of the text fields of each example
+           label_column: `string`, name of the column corresponding to the label
+           data_url: `string`, url to download the archive from
+           data_dir: `string`, path to the folder containing the data files in the
+             downloaded archive
+           citation: `string`, citation for the data set
+           url: `string`, url for information about the data set
+           label_classes: `list[string]`, the list of classes if the label is
+             categorical. If not provided, the label will be of type
+             `datasets.Value('float32')`.
+           process_label: `Function[string, any]`, function taking in the raw value
+             of the label and processing it to the form required by the label feature
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(version=_VERSION, **kwargs)
+         self.text_features = text_features
+         self.label_column = label_column
+         self.label_classes = label_classes
+         self.data_url = data_url
+         self.data_dir = data_dir
+         self.citation = citation
+         self.url = url
+         self.process_label = process_label
+
+
+ class VieGLUE(datasets.GeneratorBasedBuilder):
+     """Dataset builder for the VieGLUE benchmark subsets."""
+
+     VERSION = _VERSION
+     DEFAULT_CONFIG_NAME = "mnli"
+
+     BUILDER_CONFIGS = [VieGLUEConfig(**config) for config in SUBSET_KWARGS.values()]
+
+     def _info(self):
+         features = {f: datasets.Value("string") for f in self.config.text_features}
+         if self.config.label_classes:
+             features["label"] = datasets.features.ClassLabel(
+                 names=self.config.label_classes
+             )
+         else:
+             features["label"] = datasets.Value("float32")
+         features["idx"] = datasets.Value("int32")
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(features),
+             homepage=self.config.url,
+             citation=self.config.citation + "\n" + _CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         _SPLIT_MAPPING = {
+             "train": datasets.Split.TRAIN,
+             "training": datasets.Split.TRAIN,
+             "test": datasets.Split.TEST,
+             "testing": datasets.Split.TEST,
+             "val": datasets.Split.VALIDATION,
+             "validation": datasets.Split.VALIDATION,
+             "valid": datasets.Split.VALIDATION,
+             "dev": datasets.Split.VALIDATION,
+         }
+
+         name = self.config.name
+         download_url = _DOWNLOAD_URL[name]
+         filepath = dl_manager.download_and_extract(download_url)
+
+         return_datasets = []
+         for split in download_url:
+             return_datasets.append(
+                 datasets.SplitGenerator(
+                     # mnli uses split names such as "test_matched" that are not
+                     # in the mapping, so fall back to the raw split name.
+                     name=_SPLIT_MAPPING.get(split, split),
+                     gen_kwargs={
+                         "files": filepath[split],
+                         "urls": download_url[split],
+                         "stage": split,
+                     },
+                 )
+             )
+
+         return return_datasets
+
+     def _generate_examples(self, files, urls, stage):
+         # `files` holds the directories produced by extracting each archive.
+         id_ = 0
+         if not isinstance(files, list):
+             files = [files]
+         for path, url in zip(files, urls):
+             logger.info("Loading files extracted from %s...", url)
+             for file in os.listdir(path):
+                 # Skip macOS resource-fork entries packaged inside the archives.
+                 if file.startswith("._"):
+                     continue
+                 file_path = os.path.join(path, file)
+                 if not os.path.isfile(file_path):
+                     continue
+                 with open(file_path, encoding="utf-8") as f:
+                     all_samples = json.load(f)
+                 for sample in all_samples:
+                     # GeneratorBasedBuilder expects (key, example) pairs.
+                     yield id_, sample
+                     id_ += 1
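
Once the loading script above is in place, each subset can be loaded through the standard `datasets` API. A minimal usage sketch, assuming the script is run from the repository root (the config name "cola" is just one of the SUBSET_KWARGS keys, and recent `datasets` releases also require trust_remote_code for script-based datasets):

    import datasets

    # Build the CoLA subset from the local loading script.
    cola = datasets.load_dataset("VieGLUE.py", name="cola", trust_remote_code=True)
    print(cola["train"][0])  # e.g. {"sentence": ..., "label": ..., "idx": ...}
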
data/ax/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad60289d3dbd04204e9369cf7af380775c6c1d1aa6bcc14d9b7a8526c9a805e5
+ size 35767
data/cola/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca81585b0f058cbbf127e55c0e3ff502e5e3be8994e6bf7aa772499efec115da
+ size 23941
data/cola/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e26435b2297444d6f5e6849da66e0c9bf6dd3966337f4ebc6605e1c5b169b1f
+ size 157789
data/cola/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c5199213012f68aac23490c14a7d9c9e1808792a42b132373ba0e6289e33af5
+ size 24119
data/mnli/test_matched.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a50fafcfcec9cd51e48f40e86ad97254abd764d4dbf3f4df2fe43f488bd2715
+ size 804164
data/mnli/test_mismatched.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c1bf1e6724b4fe1b822fe6ffd4d56a02d6fc5c2265e2fed58c5dcdb5de6dc5c
+ size 819403
data/mnli/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03497c968e0a15eb9abd10fc224c7627d7d0e84ee6993a1cc0cdd21cf18925c7
+ size 33001627
data/mnli/validation_matched.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bf263094bb5f66a9568c6afdff15515315f607196702a6864f9b60029d34f6f
+ size 804245
data/mnli/validation_mismatched.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:616502498babe0591ba75c4ea02b186fa3deccc00239f05df956cefe44d94586
+ size 820579
data/mrpc/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0db537bbbd376c82c18550018f0d41ab2c8fbc431e3057131f0d7b6b3504f1fc
+ size 171292
data/mrpc/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:246d993ce4af74b2df0e7e54dcbf5cd9a9f208ac8de7f91d888b2000abb319c0
+ size 361063
data/mrpc/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c931958f8bd19f69cd8da866386ed1225d1d55b7cb933ff32a7e57b6d66ae08
+ size 41857
data/qnli/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dfe805e449e91c2d093b3503eec34562a321d8bad5dc2aa38e6ab572b2f0cd4
+ size 597166
data/qnli/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f1facfddab2bbc4670b108a24b25294964cd1abeb01471b551f7c3e20776f55
+ size 11431368
data/qnli/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9d4bb8e051090ced96da554b273c155144aec634b1d40e73b3af3e0becaa139
+ size 594909
data/qqp/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:232f73288356bb2d766683a7504263ba8efa21b096a323765add6123264c84ec
+ size 23005123
data/qqp/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea7ff74be1650e194538927508fc90fe068042a5ce7e857066c8a43889e9fac3
+ size 20740924
data/qqp/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b42948ee05e21cafb7b57b5e996d6492ffca8268d756ab4d60b7309c5bea7b9a
+ size 2304652
data/rte/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af1647edca0c8045fdc08d705bb6c1d7708e629fbb9b6d63e339dec8d4b78af6
+ size 405248
data/rte/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:552666ce1f67d38e1cc422996d8f6b60f56e5cdd51b9e2b08dabf374a6d46343
+ size 383012
data/rte/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4caebc6328fca32f8883dbfb2f1216da92af723128bb7bd9f7eb5c51ab444e71
+ size 43009
data/sst2/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faee083265c078aacec25003ab2b15501815b4237d5d807783281c8636bded2f
+ size 100475
data/sst2/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0337a4e2a83fe63347e1e96a6bdd610196d5f14a6ae91ddadfea442fe3a82575
+ size 2115797
data/sst2/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a3868bdd3800aec03c66e4e67e4e28000c44a3689a8a666246d79c2f1ab127a
+ size 49046
data/stsb/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:436f05584be34561d757d91fc59f946f1c98c9a42671a07344c28a8c2e8fea04
+ size 64739
data/stsb/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43f289d1e1c568e132b94c8e418eda8d20534554c9540e12dd1516501716952e
+ size 300955
data/stsb/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7d4550ecbf2c0b296e006a34af2a5a32a09bca91c5118bca1932a97cee9a6a8
+ size 92249
data/wnli/test.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7ec2715e39e26a1fd8c4eebf6cbdd21cd98c72ce6d58b19a7b81acccff07aa5
+ size 6675
data/wnli/train.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:239d2c9debc388ba54c468fb8e853cfd1db3c2e07eebe0a9ab0aeb98b06f21a1
+ size 33044
data/wnli/validation.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5bc9b1a794f081384f1c539ca0f769dabaf21c3a4f624df19fbda2ea53e7a04
+ size 5267
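
Each `data/*.tar.gz` entry above is a Git LFS pointer: the repository tracks only the `oid sha256:...` digest and the `size` in bytes, while the archive itself lives in LFS storage. A minimal sketch for checking that a fetched archive matches its pointer (the path and expected digest below are taken from the data/ax/test.tar.gz entry):

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        # Stream the file in 1 MiB chunks so large archives fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    expected = "ad60289d3dbd04204e9369cf7af380775c6c1d1aa6bcc14d9b7a8526c9a805e5"
    assert sha256_of("data/ax/test.tar.gz") == expected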