albertvillanova (HF staff) committed
Commit: ea7dceb
Parent(s): c234b36

Add qnli data files

README.md CHANGED
@@ -208,17 +208,17 @@ dataset_info:
   - name: idx
     dtype: int32
   splits:
-  - name: test
-    num_bytes: 1376516
-    num_examples: 5463
   - name: train
-    num_bytes: 25677924
+    num_bytes: 25612443
     num_examples: 104743
   - name: validation
-    num_bytes: 1371727
+    num_bytes: 1368304
     num_examples: 5463
-  download_size: 10627589
-  dataset_size: 28426167
+  - name: test
+    num_bytes: 1373093
+    num_examples: 5463
+  download_size: 19278324
+  dataset_size: 28353840
 - config_name: qqp
   features:
   - name: question1
@@ -384,6 +384,14 @@ configs:
     path: mrpc/validation-*
   - split: test
     path: mrpc/test-*
+- config_name: qnli
+  data_files:
+  - split: train
+    path: qnli/train-*
+  - split: validation
+    path: qnli/validation-*
+  - split: test
+    path: qnli/test-*
 - config_name: qqp
   data_files:
   - split: train
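Note: the new `configs` entry above is what lets the Hub serve the qnli config directly from the Parquet shards added in this commit. A minimal sketch of how it is consumed with the `datasets` library, assuming the repo id `nyu-mll/glue` (substitute whichever repository this commit actually lands in):

from datasets import load_dataset

# The data_files globs (qnli/train-*, qnli/validation-*, qnli/test-*) resolve to the
# Parquet shards added below, so no Python loading script is needed.
qnli = load_dataset("nyu-mll/glue", "qnli")   # repo id is an assumption

print(qnli)               # DatasetDict with train / validation / test splits
print(qnli["train"][0])   # {'question': ..., 'sentence': ..., 'label': ..., 'idx': ...}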
dataset_infos.json CHANGED
@@ -473,39 +473,32 @@
   },
   "qnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "https://rajpurkar.github.io/SQuAD-explorer/",
     "license": "",
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "entailment",
           "not_entailment"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
     "builder_name": "glue",
+    "dataset_name": "glue",
     "config_name": "qnli",
     "version": {
       "version_str": "1.0.0",
@@ -515,35 +508,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 1376516,
-        "num_examples": 5463,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 25677924,
+        "num_bytes": 25612443,
         "num_examples": 104743,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 1371727,
+        "num_bytes": 1368304,
         "num_examples": 5463,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {
-        "num_bytes": 10627589,
-        "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 1373093,
+        "num_examples": 5463,
+        "dataset_name": null
       }
     },
-    "download_size": 10627589,
-    "post_processing_size": null,
-    "dataset_size": 28426167,
-    "size_in_bytes": 39053756
+    "download_size": 19278324,
+    "dataset_size": 28353840,
+    "size_in_bytes": 47632164
   },
   "rte": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
qnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f39520cd07925c9784e4a7f1f7aed8f17f136039b8498f7ad07c7bf13d65ba83
+size 877345
qnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebc7cb70a5bbde0b0336c3d51f31bb4df4673e908e8874b090b52169b1365c6c
+size 17528917
qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e69311b81dc65589286091d9905a27617a90436dd215c7a59832fa8f4f336169
+size 872062
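Note: the three files added above are Git LFS pointers; the actual Parquet payloads sit behind them. A sketch for pulling one shard and reading it directly, assuming the repo id `nyu-mll/glue` and that `pandas` plus `pyarrow` are installed:

import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="nyu-mll/glue",                               # assumed repo id
    filename="qnli/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(len(df))             # 5463 rows expected, per the split metadata above
print(list(df.columns))    # ['question', 'sentence', 'label', 'idx'], per the features block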