parquet-converter committed
Commit: b2f6e26 · Parent: 23318bb

Update parquet files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. README.md +0 -699
  2. all_languages/xnli-test.parquet +3 -0
  3. all_languages/xnli-train-00000-of-00004.parquet +3 -0
  4. all_languages/xnli-train-00001-of-00004.parquet +3 -0
  5. all_languages/xnli-train-00002-of-00004.parquet +3 -0
  6. all_languages/xnli-train-00003-of-00004.parquet +3 -0
  7. all_languages/xnli-validation.parquet +3 -0
  8. ar/xnli-test.parquet +3 -0
  9. ar/xnli-train.parquet +3 -0
  10. ar/xnli-validation.parquet +3 -0
  11. bg/xnli-test.parquet +3 -0
  12. bg/xnli-train.parquet +3 -0
  13. bg/xnli-validation.parquet +3 -0
  14. dataset_infos.json +0 -1
  15. de/xnli-test.parquet +3 -0
  16. de/xnli-train.parquet +3 -0
  17. de/xnli-validation.parquet +3 -0
  18. el/xnli-test.parquet +3 -0
  19. el/xnli-train.parquet +3 -0
  20. el/xnli-validation.parquet +3 -0
  21. en/xnli-test.parquet +3 -0
  22. en/xnli-train.parquet +3 -0
  23. en/xnli-validation.parquet +3 -0
  24. es/xnli-test.parquet +3 -0
  25. es/xnli-train.parquet +3 -0
  26. es/xnli-validation.parquet +3 -0
  27. fr/xnli-test.parquet +3 -0
  28. fr/xnli-train.parquet +3 -0
  29. fr/xnli-validation.parquet +3 -0
  30. hi/xnli-test.parquet +3 -0
  31. hi/xnli-train.parquet +3 -0
  32. hi/xnli-validation.parquet +3 -0
  33. ru/xnli-test.parquet +3 -0
  34. ru/xnli-train.parquet +3 -0
  35. ru/xnli-validation.parquet +3 -0
  36. sw/xnli-test.parquet +3 -0
  37. sw/xnli-train.parquet +3 -0
  38. sw/xnli-validation.parquet +3 -0
  39. th/xnli-test.parquet +3 -0
  40. th/xnli-train.parquet +3 -0
  41. th/xnli-validation.parquet +3 -0
  42. tr/xnli-test.parquet +3 -0
  43. tr/xnli-train.parquet +3 -0
  44. tr/xnli-validation.parquet +3 -0
  45. ur/xnli-test.parquet +3 -0
  46. ur/xnli-train.parquet +3 -0
  47. ur/xnli-validation.parquet +3 -0
  48. vi/xnli-test.parquet +3 -0
  49. vi/xnli-train.parquet +3 -0
  50. vi/xnli-validation.parquet +3 -0
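For orientation, the converted layout stores one Parquet file per split inside each config directory, with the large `all_languages` train split sharded into four files. Below is a minimal sketch of reading one config straight from these Parquet files with the `datasets` library; the relative paths assume a local checkout of this repo and are illustrative, not part of the commit itself.

```python
from datasets import load_dataset

# Illustrative paths into a local checkout of this dataset repo.
data_files = {
    "train": "en/xnli-train.parquet",
    "validation": "en/xnli-validation.parquet",
    "test": "en/xnli-test.parquet",
}
# A sharded split can be globbed instead, e.g. "all_languages/xnli-train-*.parquet".

# The generic "parquet" builder reads the files as-is; labels come back as the
# integers 0/1/2 that are stored in the Parquet columns.
xnli_en = load_dataset("parquet", data_files=data_files)

print(xnli_en)                   # DatasetDict with train/validation/test splits
print(xnli_en["validation"][0])  # {'premise': ..., 'hypothesis': ..., 'label': ...}
```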
README.md DELETED
@@ -1,699 +0,0 @@
1
- ---
2
- language:
3
- - en
4
- paperswithcode_id: xnli
5
- pretty_name: Cross-lingual Natural Language Inference
6
- dataset_info:
7
- - config_name: ar
8
- features:
9
- - name: premise
10
- dtype: string
11
- - name: hypothesis
12
- dtype: string
13
- - name: label
14
- dtype:
15
- class_label:
16
- names:
17
- 0: entailment
18
- 1: neutral
19
- 2: contradiction
20
- splits:
21
- - name: train
22
- num_bytes: 107399934
23
- num_examples: 392702
24
- - name: test
25
- num_bytes: 1294561
26
- num_examples: 5010
27
- - name: validation
28
- num_bytes: 633009
29
- num_examples: 2490
30
- download_size: 483963712
31
- dataset_size: 109327504
32
- - config_name: bg
33
- features:
34
- - name: premise
35
- dtype: string
36
- - name: hypothesis
37
- dtype: string
38
- - name: label
39
- dtype:
40
- class_label:
41
- names:
42
- 0: entailment
43
- 1: neutral
44
- 2: contradiction
45
- splits:
46
- - name: train
47
- num_bytes: 125973545
48
- num_examples: 392702
49
- - name: test
50
- num_bytes: 1573042
51
- num_examples: 5010
52
- - name: validation
53
- num_bytes: 774069
54
- num_examples: 2490
55
- download_size: 483963712
56
- dataset_size: 128320656
57
- - config_name: de
58
- features:
59
- - name: premise
60
- dtype: string
61
- - name: hypothesis
62
- dtype: string
63
- - name: label
64
- dtype:
65
- class_label:
66
- names:
67
- 0: entailment
68
- 1: neutral
69
- 2: contradiction
70
- splits:
71
- - name: train
72
- num_bytes: 84684460
73
- num_examples: 392702
74
- - name: test
75
- num_bytes: 996496
76
- num_examples: 5010
77
- - name: validation
78
- num_bytes: 494612
79
- num_examples: 2490
80
- download_size: 483963712
81
- dataset_size: 86175568
82
- - config_name: el
83
- features:
84
- - name: premise
85
- dtype: string
86
- - name: hypothesis
87
- dtype: string
88
- - name: label
89
- dtype:
90
- class_label:
91
- names:
92
- 0: entailment
93
- 1: neutral
94
- 2: contradiction
95
- splits:
96
- - name: train
97
- num_bytes: 139753678
98
- num_examples: 392702
99
- - name: test
100
- num_bytes: 1704793
101
- num_examples: 5010
102
- - name: validation
103
- num_bytes: 841234
104
- num_examples: 2490
105
- download_size: 483963712
106
- dataset_size: 142299705
107
- - config_name: en
108
- features:
109
- - name: premise
110
- dtype: string
111
- - name: hypothesis
112
- dtype: string
113
- - name: label
114
- dtype:
115
- class_label:
116
- names:
117
- 0: entailment
118
- 1: neutral
119
- 2: contradiction
120
- splits:
121
- - name: train
122
- num_bytes: 74444346
123
- num_examples: 392702
124
- - name: test
125
- num_bytes: 875142
126
- num_examples: 5010
127
- - name: validation
128
- num_bytes: 433471
129
- num_examples: 2490
130
- download_size: 483963712
131
- dataset_size: 75752959
132
- - config_name: es
133
- features:
134
- - name: premise
135
- dtype: string
136
- - name: hypothesis
137
- dtype: string
138
- - name: label
139
- dtype:
140
- class_label:
141
- names:
142
- 0: entailment
143
- 1: neutral
144
- 2: contradiction
145
- splits:
146
- - name: train
147
- num_bytes: 81383604
148
- num_examples: 392702
149
- - name: test
150
- num_bytes: 969821
151
- num_examples: 5010
152
- - name: validation
153
- num_bytes: 478430
154
- num_examples: 2490
155
- download_size: 483963712
156
- dataset_size: 82831855
157
- - config_name: fr
158
- features:
159
- - name: premise
160
- dtype: string
161
- - name: hypothesis
162
- dtype: string
163
- - name: label
164
- dtype:
165
- class_label:
166
- names:
167
- 0: entailment
168
- 1: neutral
169
- 2: contradiction
170
- splits:
171
- - name: train
172
- num_bytes: 85809099
173
- num_examples: 392702
174
- - name: test
175
- num_bytes: 1029247
176
- num_examples: 5010
177
- - name: validation
178
- num_bytes: 510112
179
- num_examples: 2490
180
- download_size: 483963712
181
- dataset_size: 87348458
182
- - config_name: hi
183
- features:
184
- - name: premise
185
- dtype: string
186
- - name: hypothesis
187
- dtype: string
188
- - name: label
189
- dtype:
190
- class_label:
191
- names:
192
- 0: entailment
193
- 1: neutral
194
- 2: contradiction
195
- splits:
196
- - name: train
197
- num_bytes: 170594284
198
- num_examples: 392702
199
- - name: test
200
- num_bytes: 2073081
201
- num_examples: 5010
202
- - name: validation
203
- num_bytes: 1023923
204
- num_examples: 2490
205
- download_size: 483963712
206
- dataset_size: 173691288
207
- - config_name: ru
208
- features:
209
- - name: premise
210
- dtype: string
211
- - name: hypothesis
212
- dtype: string
213
- - name: label
214
- dtype:
215
- class_label:
216
- names:
217
- 0: entailment
218
- 1: neutral
219
- 2: contradiction
220
- splits:
221
- - name: train
222
- num_bytes: 129859935
223
- num_examples: 392702
224
- - name: test
225
- num_bytes: 1603474
226
- num_examples: 5010
227
- - name: validation
228
- num_bytes: 786450
229
- num_examples: 2490
230
- download_size: 483963712
231
- dataset_size: 132249859
232
- - config_name: sw
233
- features:
234
- - name: premise
235
- dtype: string
236
- - name: hypothesis
237
- dtype: string
238
- - name: label
239
- dtype:
240
- class_label:
241
- names:
242
- 0: entailment
243
- 1: neutral
244
- 2: contradiction
245
- splits:
246
- - name: train
247
- num_bytes: 69286045
248
- num_examples: 392702
249
- - name: test
250
- num_bytes: 871659
251
- num_examples: 5010
252
- - name: validation
253
- num_bytes: 429858
254
- num_examples: 2490
255
- download_size: 483963712
256
- dataset_size: 70587562
257
- - config_name: th
258
- features:
259
- - name: premise
260
- dtype: string
261
- - name: hypothesis
262
- dtype: string
263
- - name: label
264
- dtype:
265
- class_label:
266
- names:
267
- 0: entailment
268
- 1: neutral
269
- 2: contradiction
270
- splits:
271
- - name: train
272
- num_bytes: 176063212
273
- num_examples: 392702
274
- - name: test
275
- num_bytes: 2147023
276
- num_examples: 5010
277
- - name: validation
278
- num_bytes: 1061168
279
- num_examples: 2490
280
- download_size: 483963712
281
- dataset_size: 179271403
282
- - config_name: tr
283
- features:
284
- - name: premise
285
- dtype: string
286
- - name: hypothesis
287
- dtype: string
288
- - name: label
289
- dtype:
290
- class_label:
291
- names:
292
- 0: entailment
293
- 1: neutral
294
- 2: contradiction
295
- splits:
296
- - name: train
297
- num_bytes: 71637460
298
- num_examples: 392702
299
- - name: test
300
- num_bytes: 934942
301
- num_examples: 5010
302
- - name: validation
303
- num_bytes: 459316
304
- num_examples: 2490
305
- download_size: 483963712
306
- dataset_size: 73031718
307
- - config_name: ur
308
- features:
309
- - name: premise
310
- dtype: string
311
- - name: hypothesis
312
- dtype: string
313
- - name: label
314
- dtype:
315
- class_label:
316
- names:
317
- 0: entailment
318
- 1: neutral
319
- 2: contradiction
320
- splits:
321
- - name: train
322
- num_bytes: 96441806
323
- num_examples: 392702
324
- - name: test
325
- num_bytes: 1416249
326
- num_examples: 5010
327
- - name: validation
328
- num_bytes: 699960
329
- num_examples: 2490
330
- download_size: 483963712
331
- dataset_size: 98558015
332
- - config_name: vi
333
- features:
334
- - name: premise
335
- dtype: string
336
- - name: hypothesis
337
- dtype: string
338
- - name: label
339
- dtype:
340
- class_label:
341
- names:
342
- 0: entailment
343
- 1: neutral
344
- 2: contradiction
345
- splits:
346
- - name: train
347
- num_bytes: 101417750
348
- num_examples: 392702
349
- - name: test
350
- num_bytes: 1190225
351
- num_examples: 5010
352
- - name: validation
353
- num_bytes: 590688
354
- num_examples: 2490
355
- download_size: 483963712
356
- dataset_size: 103198663
357
- - config_name: zh
358
- features:
359
- - name: premise
360
- dtype: string
361
- - name: hypothesis
362
- dtype: string
363
- - name: label
364
- dtype:
365
- class_label:
366
- names:
367
- 0: entailment
368
- 1: neutral
369
- 2: contradiction
370
- splits:
371
- - name: train
372
- num_bytes: 72225161
373
- num_examples: 392702
374
- - name: test
375
- num_bytes: 777937
376
- num_examples: 5010
377
- - name: validation
378
- num_bytes: 384859
379
- num_examples: 2490
380
- download_size: 483963712
381
- dataset_size: 73387957
382
- - config_name: all_languages
383
- features:
384
- - name: premise
385
- dtype:
386
- translation:
387
- languages:
388
- - ar
389
- - bg
390
- - de
391
- - el
392
- - en
393
- - es
394
- - fr
395
- - hi
396
- - ru
397
- - sw
398
- - th
399
- - tr
400
- - ur
401
- - vi
402
- - zh
403
- - name: hypothesis
404
- dtype:
405
- translation_variable_languages:
406
- languages:
407
- - ar
408
- - bg
409
- - de
410
- - el
411
- - en
412
- - es
413
- - fr
414
- - hi
415
- - ru
416
- - sw
417
- - th
418
- - tr
419
- - ur
420
- - vi
421
- - zh
422
- num_languages: 15
423
- - name: label
424
- dtype:
425
- class_label:
426
- names:
427
- 0: entailment
428
- 1: neutral
429
- 2: contradiction
430
- splits:
431
- - name: train
432
- num_bytes: 1581474731
433
- num_examples: 392702
434
- - name: test
435
- num_bytes: 19387508
436
- num_examples: 5010
437
- - name: validation
438
- num_bytes: 9566255
439
- num_examples: 2490
440
- download_size: 483963712
441
- dataset_size: 1610428494
442
- ---
443
-
444
- # Dataset Card for "xnli"
445
-
446
- ## Table of Contents
447
- - [Dataset Description](#dataset-description)
448
- - [Dataset Summary](#dataset-summary)
449
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
450
- - [Languages](#languages)
451
- - [Dataset Structure](#dataset-structure)
452
- - [Data Instances](#data-instances)
453
- - [Data Fields](#data-fields)
454
- - [Data Splits](#data-splits)
455
- - [Dataset Creation](#dataset-creation)
456
- - [Curation Rationale](#curation-rationale)
457
- - [Source Data](#source-data)
458
- - [Annotations](#annotations)
459
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
460
- - [Considerations for Using the Data](#considerations-for-using-the-data)
461
- - [Social Impact of Dataset](#social-impact-of-dataset)
462
- - [Discussion of Biases](#discussion-of-biases)
463
- - [Other Known Limitations](#other-known-limitations)
464
- - [Additional Information](#additional-information)
465
- - [Dataset Curators](#dataset-curators)
466
- - [Licensing Information](#licensing-information)
467
- - [Citation Information](#citation-information)
468
- - [Contributions](#contributions)
469
-
470
- ## Dataset Description
471
-
472
- - **Homepage:** [https://www.nyu.edu/projects/bowman/xnli/](https://www.nyu.edu/projects/bowman/xnli/)
473
- - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
474
- - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
475
- - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
476
- - **Size of downloaded dataset files:** 7384.70 MB
477
- - **Size of the generated dataset:** 3076.99 MB
478
- - **Total amount of disk used:** 10461.69 MB
479
-
480
- ### Dataset Summary
481
-
482
- XNLI is a subset of a few thousand examples from MNLI which has been translated
483
- into 14 different languages (some low-ish resource). As with MNLI, the goal is
484
- to predict textual entailment (does sentence A imply/contradict/neither sentence
485
- B) and is a classification task (given two sentences, predict one of three
486
- labels).
487
-
488
- ### Supported Tasks and Leaderboards
489
-
490
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
491
-
492
- ### Languages
493
-
494
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
495
-
496
- ## Dataset Structure
497
-
498
- ### Data Instances
499
-
500
- #### all_languages
501
-
502
- - **Size of downloaded dataset files:** 461.54 MB
503
- - **Size of the generated dataset:** 1535.82 MB
504
- - **Total amount of disk used:** 1997.37 MB
505
-
506
- An example of 'train' looks as follows.
507
- ```
508
- This example was too long and was cropped:
509
-
510
- {
511
- "hypothesis": "{\"language\": [\"ar\", \"bg\", \"de\", \"el\", \"en\", \"es\", \"fr\", \"hi\", \"ru\", \"sw\", \"th\", \"tr\", \"ur\", \"vi\", \"zh\"], \"translation\": [\"احد اع...",
512
- "label": 0,
513
- "premise": "{\"ar\": \"واحدة من رقابنا ستقوم بتنفيذ تعليماتك كلها بكل دقة\", \"bg\": \"един от нашите номера ще ви даде инструкции .\", \"de\": \"Eine ..."
514
- }
515
- ```
516
-
517
- #### ar
518
-
519
- - **Size of downloaded dataset files:** 461.54 MB
520
- - **Size of the generated dataset:** 104.26 MB
521
- - **Total amount of disk used:** 565.81 MB
522
-
523
- An example of 'validation' looks as follows.
524
- ```
525
- {
526
- "hypothesis": "اتصل بأمه حالما أوصلته حافلة المدرسية.",
527
- "label": 1,
528
- "premise": "وقال، ماما، لقد عدت للمنزل."
529
- }
530
- ```
531
-
532
- #### bg
533
-
534
- - **Size of downloaded dataset files:** 461.54 MB
535
- - **Size of the generated dataset:** 122.38 MB
536
- - **Total amount of disk used:** 583.92 MB
537
-
538
- An example of 'train' looks as follows.
539
- ```
540
- This example was too long and was cropped:
541
-
542
- {
543
- "hypothesis": "\"губиш нещата на следното ниво , ако хората си припомнят .\"...",
544
- "label": 0,
545
- "premise": "\"по време на сезона и предполагам , че на твоето ниво ще ги загубиш на следващото ниво , ако те решат да си припомнят отбора на ..."
546
- }
547
- ```
548
-
549
- #### de
550
-
551
- - **Size of downloaded dataset files:** 461.54 MB
552
- - **Size of the generated dataset:** 82.18 MB
553
- - **Total amount of disk used:** 543.73 MB
554
-
555
- An example of 'train' looks as follows.
556
- ```
557
- This example was too long and was cropped:
558
-
559
- {
560
- "hypothesis": "Man verliert die Dinge auf die folgende Ebene , wenn sich die Leute erinnern .",
561
- "label": 0,
562
- "premise": "\"Du weißt , während der Saison und ich schätze , auf deiner Ebene verlierst du sie auf die nächste Ebene , wenn sie sich entschl..."
563
- }
564
- ```
565
-
566
- #### el
567
-
568
- - **Size of downloaded dataset files:** 461.54 MB
569
- - **Size of the generated dataset:** 135.71 MB
570
- - **Total amount of disk used:** 597.25 MB
571
-
572
- An example of 'validation' looks as follows.
573
- ```
574
- This example was too long and was cropped:
575
-
576
- {
577
- "hypothesis": "\"Τηλεφώνησε στη μαμά του μόλις το σχολικό λεωφορείο τον άφησε.\"...",
578
- "label": 1,
579
- "premise": "Και είπε, Μαμά, έφτασα στο σπίτι."
580
- }
581
- ```
582
-
583
- ### Data Fields
584
-
585
- The data fields are the same among all splits.
586
-
587
- #### all_languages
588
- - `premise`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
589
- - `hypothesis`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
590
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
591
-
592
- #### ar
593
- - `premise`: a `string` feature.
594
- - `hypothesis`: a `string` feature.
595
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
596
-
597
- #### bg
598
- - `premise`: a `string` feature.
599
- - `hypothesis`: a `string` feature.
600
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
601
-
602
- #### de
603
- - `premise`: a `string` feature.
604
- - `hypothesis`: a `string` feature.
605
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
606
-
607
- #### el
608
- - `premise`: a `string` feature.
609
- - `hypothesis`: a `string` feature.
610
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
611
-
612
- ### Data Splits
613
-
614
- | name |train |validation|test|
615
- |-------------|-----:|---------:|---:|
616
- |all_languages|392702| 2490|5010|
617
- |ar |392702| 2490|5010|
618
- |bg |392702| 2490|5010|
619
- |de |392702| 2490|5010|
620
- |el |392702| 2490|5010|
621
-
622
- ## Dataset Creation
623
-
624
- ### Curation Rationale
625
-
626
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
627
-
628
- ### Source Data
629
-
630
- #### Initial Data Collection and Normalization
631
-
632
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
633
-
634
- #### Who are the source language producers?
635
-
636
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
637
-
638
- ### Annotations
639
-
640
- #### Annotation process
641
-
642
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
643
-
644
- #### Who are the annotators?
645
-
646
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
647
-
648
- ### Personal and Sensitive Information
649
-
650
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
651
-
652
- ## Considerations for Using the Data
653
-
654
- ### Social Impact of Dataset
655
-
656
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
657
-
658
- ### Discussion of Biases
659
-
660
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
661
-
662
- ### Other Known Limitations
663
-
664
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
665
-
666
- ## Additional Information
667
-
668
- ### Dataset Curators
669
-
670
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
671
-
672
- ### Licensing Information
673
-
674
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
675
-
676
- ### Citation Information
677
-
678
- ```
679
- @InProceedings{conneau2018xnli,
680
- author = {Conneau, Alexis
681
- and Rinott, Ruty
682
- and Lample, Guillaume
683
- and Williams, Adina
684
- and Bowman, Samuel R.
685
- and Schwenk, Holger
686
- and Stoyanov, Veselin},
687
- title = {XNLI: Evaluating Cross-lingual Sentence Representations},
688
- booktitle = {Proceedings of the 2018 Conference on Empirical Methods
689
- in Natural Language Processing},
690
- year = {2018},
691
- publisher = {Association for Computational Linguistics},
692
- location = {Brussels, Belgium},
693
- }
694
- ```
695
-
696
-
697
- ### Contributions
698
-
699
- Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
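With the card removed in this commit, one detail worth keeping at hand is its label encoding: every config stores `label` as a ClassLabel with 0 = entailment, 1 = neutral and 2 = contradiction, while the Parquet files themselves only hold the integers. A small reference sketch of that mapping using the `datasets` feature type:

```python
from datasets import ClassLabel

# Label names documented in the deleted card, in index order.
xnli_label = ClassLabel(names=["entailment", "neutral", "contradiction"])

print(xnli_label.int2str(0))          # entailment
print(xnli_label.str2int("neutral"))  # 1
```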
all_languages/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f613b5cd81d042c4bb195097aa49f192edbad7e75fba2a5ec655e1a7183e02d
+ size 6769721
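Each ADDED Parquet entry in this commit is a Git LFS pointer like the one above: the repository itself only records a `version` line, a `sha256` oid and a byte `size`, while the actual Parquet bytes live in LFS storage. The sketch below shows one way to check a downloaded blob against its pointer; the file names are hypothetical and only the pointer format shown above is assumed.

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Return True if blob_path matches the sha256 oid and size in the LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Hypothetical usage: the raw pointer text vs. the resolved Parquet file.
# print(matches_lfs_pointer("xnli-test.parquet.pointer", "xnli-test.parquet"))
```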
all_languages/xnli-train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4f6fd95d0c1f77cf01415b6b28a610662b6607353ce8f9c8558aa04290e095f
+ size 302881429
all_languages/xnli-train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c307a98b5ccdcba5d23ae914e861b7b0065371cdf24d7d6376ed3c1dc76bb9f1
+ size 303637918
all_languages/xnli-train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f92129356eda90eebf26af2c40b9e6415930aaf5e4dc04ed4a7e2d3f41e9f403
+ size 301736501
all_languages/xnli-train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35ed01db7d892d0581094e2be11438266154bd2581d401b967480289b7be682e
+ size 45411019
all_languages/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:847585d8b3212f24404e796e849ab35632a4298fa3178e03fcd9434f0710af5f
+ size 3392502
ar/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfa53cd45deb1b09bec29cd68c32680cfd7ec6dd74010c0af5158d706e71077c
+ size 391979
ar/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a25e67f6f94f0e5d4a9cb6244af6087d3e4f7900641617a43e048ad782edf08a
+ size 58630164
ar/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b08cc4729f9f1b3cc27d0c1b5ffb3d24cd169b6c2add1e285779f7e8324bfbc4
+ size 193756
bg/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39f664e5f923c40c96bdbe9c2043e641dc9959e8e3b6b332bfea8420fbf4a329
+ size 447340
bg/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c21f1a13e2e0d3ee812e94f11375cf0c18fd163f923689832f27a30e1eba690
+ size 65447047
bg/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3840a7713ee478d480608a72665a2f1f0de14e41e814c1c82a6e14ca804e68a
+ size 223488
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"ar": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ar", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107399934, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1294561, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 633009, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 109327504, "size_in_bytes": 593291216}, "bg": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "bg", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 125973545, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1573042, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 774069, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 128320656, "size_in_bytes": 612284368}, "de": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "de", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 84684460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 996496, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 494612, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 86175568, "size_in_bytes": 570139280}, "el": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "el", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 139753678, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1704793, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 841234, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 142299705, "size_in_bytes": 626263417}, "en": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "en", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74444346, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 875142, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 433471, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 75752959, "size_in_bytes": 559716671}, "es": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "es", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 81383604, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 969821, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 478430, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 82831855, "size_in_bytes": 566795567}, "fr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "fr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 85809099, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1029247, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 510112, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 87348458, "size_in_bytes": 571312170}, "hi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "hi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 170594284, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2073081, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1023923, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 173691288, "size_in_bytes": 657655000}, "ru": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ru", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 129859935, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1603474, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 786450, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 132249859, "size_in_bytes": 616213571}, "sw": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "sw", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69286045, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 871659, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 429858, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 70587562, "size_in_bytes": 554551274}, "th": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "th", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 176063212, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2147023, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1061168, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 179271403, "size_in_bytes": 663235115}, "tr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "tr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 71637460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 934942, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 459316, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73031718, "size_in_bytes": 556995430}, "ur": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ur", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 96441806, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1416249, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 699960, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 98558015, "size_in_bytes": 582521727}, "vi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "vi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101417750, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1190225, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 590688, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 103198663, "size_in_bytes": 587162375}, "zh": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "zh", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 72225161, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 777937, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 384859, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73387957, "size_in_bytes": 557351669}, "all_languages": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "id": null, "_type": "Translation"}, "hypothesis": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "num_languages": 15, "id": null, "_type": "TranslationVariableLanguages"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "all_languages", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1581474731, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 19387508, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 9566255, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 1610428494, "size_in_bytes": 2094392206}}
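For reference, the schema recorded in dataset_infos.json above is exactly what the per-language parquet files added below contain: a string premise, a string hypothesis, and a three-class label (0 = entailment, 1 = neutral, 2 = contradiction). A minimal sketch of inspecting one split locally with pandas, assuming the repository has been cloned with git-lfs so the parquet payloads are materialized (the file path is taken from this commit's layout; pandas and pyarrow are assumed to be installed):

import pandas as pd

# Read one per-language validation split added in this commit and check that
# it matches the features block above (premise, hypothesis, label).
df = pd.read_parquet("de/xnli-validation.parquet")

print(df.columns.tolist())  # expected: ['premise', 'hypothesis', 'label']
print(len(df))              # expected: 2490 validation examples
print(df.head(3))           # labels stored as class indices per the ClassLabel above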
 
 
de/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8e56533a55a993c12b5add76e51975f3712c3ad7a54095ae71209f0bcc41086
+ size 356131
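Each ADDED entry in this commit is a Git LFS pointer (version, oid, size) rather than the parquet payload itself. As a sketch of fetching the real file programmatically, one could use huggingface_hub; the repo_id "xnli" and repo_type "dataset" below are assumptions about this repository, not values taken from the diff:

from huggingface_hub import hf_hub_download

# Resolve the pointer above to the actual parquet file in a local cache.
path = hf_hub_download(
    repo_id="xnli",                   # assumed dataset repository id
    repo_type="dataset",
    filename="de/xnli-test.parquet",  # path as added in this commit
)
print(path)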
de/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a2ed04c2093c573012b7a33e236ee11fccc612bc3fb96e574bf38b5e1e8cabf
+ size 55436760
de/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:752e1a2eec82c487473ce83eee9254e4e4fd0833ef4b776968078b03c635d053
+ size 180989
el/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ebed26a2c2e92c551b50acc728848dd47fbba982d5986c20a56b8d44dabc8da
+ size 489528
el/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:738d8c9bcb751fa0795640144fece6119b6b9701d86189f0988a112016389154
+ size 73814853
el/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffc308bf2e9329c11a0bcce8b1d502f97eb8c14abfb86d11f67c6a35d7fe1b2e
+ size 246863
en/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18748a9dc5ff5544b3b5499aa62041b9f74e2d57f67611f9876c948476c5c755
+ size 308236
en/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce50be7207ad3e3405c29767ca657870e48e54c4aee86b5d372ac41c737ce7e7
+ size 50161922
en/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a043157dffe4030b86a6963c7e5b7c8ff96c23e47dad7f4d88d07f9e4b2a77d8
+ size 157206
es/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:256788de66603248a2b2660011aba35dd97f847d0b739c0220d00274593d5403
+ size 341924
es/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:799af741f795cb8020f6c20430c89af01ce62409de85423c13549e619b8c4dc8
+ size 53162138
es/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29272da47443f80fbc87e6417ccc2e372b2b8d3bb4504fe26795f9c36d368643
+ size 173092
fr/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dfddfd8273187c14b68c1b3b5fa352acf77fd71ad630118a7e2484da238ff82
+ size 360232
fr/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:518f94f0d2c2aae5dc766ffbc278423dc94c5d22d8e797590131352b735024e5
+ size 55425052
fr/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4130ecb91284d9754d88eb460c77ab9a6fe27493b9c1ca3ba592e889f24c1a02
+ size 183393
hi/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:047aa8dc9eb0ff8a8d2ee20437682f966e23a6e1f822e52b814eec230a81e8fc
+ size 492624
hi/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42d790c3473ec63e750c86b05810065f4c2da8d21a929bab6270fbf03a6e7a22
+ size 70167281
hi/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37c8ce1cf5825a2bed7737aa7b6bf927e6d819068de35b79f118e007b74a5889
+ size 248640
ru/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8872a035081a70d5ed76b4da7159cf92682a4a2e29bcb080cd916e9d5b05d670
+ size 477351
ru/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7944c40e76b2d643e852f365a354ded1efff827404f8dbb14d14d6dc6f27de6e
+ size 69986523
ru/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a23c1873ffb17779492c76a73d3a0a0139f33dbaa2404b83da9d56a5883626b
+ size 238729
sw/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61e78abdd7358d648fed51e2c568a3a54716e2cea8ac7be06a25721fa424a3f8
+ size 312254
sw/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbed4a892f86cdef3bcc932c151ea5d2c70c21004d4c752e6f1eb933ebbeb497
+ size 45093825
sw/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2243ed7749ed460dd71f72eb267dc61cf6507082aad2f8057110f6ba82f1c6bc
+ size 158070
th/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea66cf0b17797b706a83f811eb01e2997eb0b97310b331d4cf27e6f288811c60
+ size 503401
th/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5957400b9888b4abd2c22aab5bf5145a98c228ebbd447a4529d995809f6ebbba
+ size 76466352
th/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69e10090e0b5cfcdee8f9ff8976f198a125029ecd3565cdf8c2ec0ca3b5b054a
+ size 252289
tr/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f04079c7e23d9dbb6fa11da888c770648961204ac1bb7d5b0f8d7dd89b183c6a
+ size 338132
tr/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13ac9bb4698604a73039c4c183a0c340ab842130b5a3316b295e5588f390e380
+ size 47999787
tr/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b061dbff5a513d5287577cafc3c7dc158da9ee01895ba056a9dbf4d661d6b533
+ size 171758
ur/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6609558b167db601b9929acd38f28967890b51902c1ba38e239c4a8487f8c23
+ size 427736
ur/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbd0092be22cdda814c5e7713894259d6f409b8f969aa8440b42f69ab8a11f2a
+ size 46038911
ur/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:222f916a6f951dba8e663f801242405708a3a8261cbce17fd00980d0f9533252
+ size 216135
vi/xnli-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a54771601eeb96b2e23c526869e8e0ee655af6f62dc585a2c2b0494f3727dbc
+ size 364125
vi/xnli-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c87da5103282745d84fcd8388a56905bdcf51d26ca37630445433c3766af443
+ size 57140046
vi/xnli-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a267d0948a84eded722399a02df0cd0d6a6b06ba7af42de4361937a299a71b1d
+ size 185884