diff --git a/MLQA.ar.ar/test-00000-of-00001.parquet b/MLQA.ar.ar/test-00000-of-00001.parquet
deleted file mode 100644
index 4d9968eddab93fecfa4149cec325d18858ae2dc1..0000000000000000000000000000000000000000
--- a/MLQA.ar.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:55aab36681ab7bce085f880c06ce89410c0a704ff7d8bfb8ee1dd50945ce45e1
-size 3691555
diff --git a/MLQA.ar.ar/validation-00000-of-00001.parquet b/MLQA.ar.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 8b68257e186e7ab8f136bef28859097e079d951c..0000000000000000000000000000000000000000
--- a/MLQA.ar.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e539b1a4d3d624f45ad21ec11f2ab391444ff5d9597b374253647163e775da02
-size 356625
diff --git a/MLQA.ar.de/test-00000-of-00001.parquet b/MLQA.ar.de/test-00000-of-00001.parquet
deleted file mode 100644
index 5bacd642f6ce58af27728fa8e93043143673de76..0000000000000000000000000000000000000000
--- a/MLQA.ar.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5b47f3f4c5ce919574c8190f195c2cdd795f75a926a8e515ac44398237702b94
-size 1041913
diff --git a/MLQA.ar.de/validation-00000-of-00001.parquet b/MLQA.ar.de/validation-00000-of-00001.parquet
deleted file mode 100644
index c718849322240e8f724997ab67256072c340bc6a..0000000000000000000000000000000000000000
--- a/MLQA.ar.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b0233a0b95ca9e59357399ccc4c934636ca97b803d8e78d7f9a438085afa405f
-size 150912
diff --git a/MLQA.ar.en/test-00000-of-00001.parquet b/MLQA.ar.en/test-00000-of-00001.parquet
deleted file mode 100644
index 54dd25a4d0d704f5c9ec45553b9942176be9220f..0000000000000000000000000000000000000000
--- a/MLQA.ar.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:33ac946deeb871e58a4584db8f3de470f595ac6eeee69a2d818ccf7260d41860
-size 3645950
diff --git a/MLQA.ar.en/validation-00000-of-00001.parquet b/MLQA.ar.en/validation-00000-of-00001.parquet
deleted file mode 100644
index c697424855bcf848f143f6dc6f09c65f34407258..0000000000000000000000000000000000000000
--- a/MLQA.ar.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:172dc10b9ee867e3f8918e7ca3572025f1869978698ef114a4331e3bee483c48
-size 352058
diff --git a/MLQA.ar.es/test-00000-of-00001.parquet b/MLQA.ar.es/test-00000-of-00001.parquet
deleted file mode 100644
index ae3cf4cdc92f265d29cbd1993cf6973eec7d13f0..0000000000000000000000000000000000000000
--- a/MLQA.ar.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:416d553753ccf73a423400620a68ddca4bb6382cf4d8537dbe26e83594e8e02b
-size 1415633
diff --git a/MLQA.ar.es/validation-00000-of-00001.parquet b/MLQA.ar.es/validation-00000-of-00001.parquet
deleted file mode 100644
index 7f23d9706b39eaf47cc95a11e429d6108bd72b6c..0000000000000000000000000000000000000000
--- a/MLQA.ar.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e6c34bf4bd9c3a196c6119ad56020c0c184cdd4d3c4b530748d52e9e0661c29c
-size 116028
diff --git a/MLQA.ar.hi/test-00000-of-00001.parquet b/MLQA.ar.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 4ba41773b913bd7301a0cd1b01fa963329cc430c..0000000000000000000000000000000000000000
--- a/MLQA.ar.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9795fe5f1786685a92d0ddf0daccd1826922ddccecb31961c14ed9798718fd2d
-size 1243874
diff --git a/MLQA.ar.hi/validation-00000-of-00001.parquet b/MLQA.ar.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index c40fb8de24401c7aed901857aee60a33885c0613..0000000000000000000000000000000000000000
--- a/MLQA.ar.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:089a35052fd9da7eddad16314cce8a2ec1006ced3f91a4dd8f3f5aac0cec5df4
-size 125882
diff --git a/MLQA.ar.vi/test-00000-of-00001.parquet b/MLQA.ar.vi/test-00000-of-00001.parquet
deleted file mode 100644
index 0bbbdf659af6ad49bcf44b507c3c9d550a6d9715..0000000000000000000000000000000000000000
--- a/MLQA.ar.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d625383de3fa311bea8a7e22da5d12eb6a9490d15b6343b4975deca28df1d459
-size 1525806
diff --git a/MLQA.ar.vi/validation-00000-of-00001.parquet b/MLQA.ar.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index f041061542dcca5a54f2c6d1c5fb384f26ef3877..0000000000000000000000000000000000000000
--- a/MLQA.ar.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5b7a1e0ab08009c2fb069dd5ca7fe284ce70e95bb60b24c133e163b816febbda
-size 141432
diff --git a/MLQA.ar.zh/test-00000-of-00001.parquet b/MLQA.ar.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 16d7f7a290396aa4e8fd7c3cde02bd00db1bf8cf..0000000000000000000000000000000000000000
--- a/MLQA.ar.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:445ee0d3b1b13d1e220de308705853e48a2aa9473678ec8402897ab0f4245732
-size 1449631
diff --git a/MLQA.ar.zh/validation-00000-of-00001.parquet b/MLQA.ar.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index 5f37ceb38feffa454c23783956f8152f3d75b667..0000000000000000000000000000000000000000
--- a/MLQA.ar.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6d0fe258bd36ee1bd49469bdee626cd4b19f6c54af33efab4fdb5d329c70ed63
-size 141814
diff --git a/MLQA.de.ar/test-00000-of-00001.parquet b/MLQA.de.ar/test-00000-of-00001.parquet
deleted file mode 100644
index fda6fdc752545e7a89d5647e30fe5d261af28d0d..0000000000000000000000000000000000000000
--- a/MLQA.de.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6d3d5cdc61d846629d71ee176d372306d13eded3ea45ded35a9a60854f19b002
-size 928724
diff --git a/MLQA.de.ar/validation-00000-of-00001.parquet b/MLQA.de.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 27e6a07f160d714ddd00b76954b77854d12b41ae..0000000000000000000000000000000000000000
--- a/MLQA.de.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a302ca6901c272a915c62df2642a14dda03e706826bbae8eb274a9be62e62862
-size 115759
diff --git a/MLQA.de.de/test-00000-of-00001.parquet b/MLQA.de.de/test-00000-of-00001.parquet
deleted file mode 100644
index 9cd26fffefda81ae8b5ba32aa51f7e576cade01a..0000000000000000000000000000000000000000
--- a/MLQA.de.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1ad56a2ef34b5c8f16a37bc0ca09a1224e728ed5dff182c9a87e5765f7b05d61
-size 2512383
diff --git a/MLQA.de.de/validation-00000-of-00001.parquet b/MLQA.de.de/validation-00000-of-00001.parquet
deleted file mode 100644
index 1c2cb2d29ae9a3b43e488a54893d78c57ab6bc2f..0000000000000000000000000000000000000000
--- a/MLQA.de.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d305f59151b017f15c9d0bfd78df5b086773f6b12492e31b1e7cc992e3bf5c25
-size 285667
diff --git a/MLQA.de.en/test-00000-of-00001.parquet b/MLQA.de.en/test-00000-of-00001.parquet
deleted file mode 100644
index d7097fba674078165da90e725f7aadc5c95b4e85..0000000000000000000000000000000000000000
--- a/MLQA.de.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1a7e702f041e91505ea7c2775f46672e5a9dba62b77cf85918b1db0897220aba
-size 2494685
diff --git a/MLQA.de.en/validation-00000-of-00001.parquet b/MLQA.de.en/validation-00000-of-00001.parquet
deleted file mode 100644
index 76a6cb706e4c4aa4be92ff10f0d3faf1a7aff5ea..0000000000000000000000000000000000000000
--- a/MLQA.de.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:89d8955819ce58db5044ff556052e84ab85471bd2bd6e6465946a50497b1434e
-size 283661
diff --git a/MLQA.de.es/test-00000-of-00001.parquet b/MLQA.de.es/test-00000-of-00001.parquet
deleted file mode 100644
index 8a6731ab464456cdd157043adefa87d025936b9f..0000000000000000000000000000000000000000
--- a/MLQA.de.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7ab3b95ae249ab88874c668b23cf8c34d6c3f0afa0b469895d8cb6e67c20da06
-size 1004388
diff --git a/MLQA.de.es/validation-00000-of-00001.parquet b/MLQA.de.es/validation-00000-of-00001.parquet
deleted file mode 100644
index 4af31bdaf6cf3b0a9c45fe5a85273f3c9cf066be..0000000000000000000000000000000000000000
--- a/MLQA.de.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3fa957c3ab595a60bc3c1803fba88ad2bfe74db735c3f50e1b76ed0105ede0f4
-size 114363
diff --git a/MLQA.de.hi/test-00000-of-00001.parquet b/MLQA.de.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 2c15d2a7b20584ab759db40574f1f48007c36e12..0000000000000000000000000000000000000000
--- a/MLQA.de.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1ad971c9e029785f127646e8da90806b14bd689d380ad00fc8584b6cf4cb1726
-size 783332
diff --git a/MLQA.de.hi/validation-00000-of-00001.parquet b/MLQA.de.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index 6746bcb04258908ad1ed2934239e42048e32db60..0000000000000000000000000000000000000000
--- a/MLQA.de.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:22ad67317014d153b34814a714c01d71cf8e7dbb122e294f8f8bef08892ba35b
-size 97320
diff --git a/MLQA.de.vi/test-00000-of-00001.parquet b/MLQA.de.vi/test-00000-of-00001.parquet
deleted file mode 100644
index 8caa7bfd155792bf4c103082453dc79700f14cc2..0000000000000000000000000000000000000000
--- a/MLQA.de.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b0dee937153a378c606879ea28ce99c6081abbe42e8431eb77a9cd3c88dea867
-size 973140
diff --git a/MLQA.de.vi/validation-00000-of-00001.parquet b/MLQA.de.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index c364bf1d49f98057bd916c80a91f23d9324258e3..0000000000000000000000000000000000000000
--- a/MLQA.de.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:34efad0eab0610fe25ddd6378cea79ba973a13b6938baec49bc33cf65c8f1ffe
-size 135023
diff --git a/MLQA.de.zh/test-00000-of-00001.parquet b/MLQA.de.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 9d0d11bae90514889d6817aae234a5ebe0ba38ca..0000000000000000000000000000000000000000
--- a/MLQA.de.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ee7368e29ae3c34a7dfcaa2b5b2b536c682f082adcafa9ab90e92f26ff70cb30
-size 933523
diff --git a/MLQA.de.zh/validation-00000-of-00001.parquet b/MLQA.de.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index 1e92d6b163bbe3bfc321aa7bd1ead1f81bb11356..0000000000000000000000000000000000000000
--- a/MLQA.de.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:580c62808f292b13ff0770822c9f545c25b3e5da876fa38a2c53915c2d3dc3a0
-size 112338
diff --git a/MLQA.en.ar/test-00000-of-00001.parquet b/MLQA.en.ar/test-00000-of-00001.parquet
deleted file mode 100644
index e4fd56cef766cdba30e5947ec9a2bb736c5b2f09..0000000000000000000000000000000000000000
--- a/MLQA.en.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b36df69274b215557c51a6c805741035e80f59b026b6058ddfb6b11a92b133a8
-size 3595715
diff --git a/MLQA.en.ar/validation-00000-of-00001.parquet b/MLQA.en.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 1e1ecaf2b26abab54f96acb014a6afaaebb03e93..0000000000000000000000000000000000000000
--- a/MLQA.en.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3e9577914ddf827b9eefe2ba09f573eb3a45a072151b3908f5c6350896bdc447
-size 343420
diff --git a/MLQA.en.de/test-00000-of-00001.parquet b/MLQA.en.de/test-00000-of-00001.parquet
deleted file mode 100644
index 682ffdffae8dbc9f803e632c84ec4fdc2eb32131..0000000000000000000000000000000000000000
--- a/MLQA.en.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:53cbe6a4c7d6530b0413006b7d3a83692742f6404fd75cc7d2465f65f3207886
-size 2887069
diff --git a/MLQA.en.de/validation-00000-of-00001.parquet b/MLQA.en.de/validation-00000-of-00001.parquet
deleted file mode 100644
index bf0e546f5431234d0459bd6affd8e6804bf9eba1..0000000000000000000000000000000000000000
--- a/MLQA.en.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aff5f8930292ada9fb1c3c8e099435dc340c2072f98b0523ab7c54812b45ec00
-size 336127
diff --git a/MLQA.en.en/test-00000-of-00001.parquet b/MLQA.en.en/test-00000-of-00001.parquet
deleted file mode 100644
index c7b04dcde834252842e910fced2d123aec99e1bf..0000000000000000000000000000000000000000
--- a/MLQA.en.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:89b839001edcd70781b02fce32a575af3bb8637e907d2c6f5017aa4971c46aff
-size 7493961
diff --git a/MLQA.en.en/validation-00000-of-00001.parquet b/MLQA.en.en/validation-00000-of-00001.parquet
deleted file mode 100644
index 0a826269fb340fe1a96decf0d7b7446c12512fc0..0000000000000000000000000000000000000000
--- a/MLQA.en.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6362df8ba5ccb369ad47e5cadbff6407f9b35a3b4c65a17c152d781adb43fd2d
-size 723558
diff --git a/MLQA.en.es/test-00000-of-00001.parquet b/MLQA.en.es/test-00000-of-00001.parquet
deleted file mode 100644
index 4024167867bab94675d0f758a8f304b0d3ff9f56..0000000000000000000000000000000000000000
--- a/MLQA.en.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f52edf417a6ead94b17362db996097208fc628ccd167c917041569ac70731758
-size 3457312
diff --git a/MLQA.en.es/validation-00000-of-00001.parquet b/MLQA.en.es/validation-00000-of-00001.parquet
deleted file mode 100644
index c5d8be241cb326bbc418fccc3cc2c0b1ff97e05f..0000000000000000000000000000000000000000
--- a/MLQA.en.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0252b1be57ec6d9d18704f8b088c0dc43b1bd622e6ccfc53775166418b8fb710
-size 319516
diff --git a/MLQA.en.hi/test-00000-of-00001.parquet b/MLQA.en.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 2946f435093cd221092e33551b4ea24d3d5a66dc..0000000000000000000000000000000000000000
--- a/MLQA.en.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:63c6630eae1ed7467f54eb9885166388f5ac8e63cf557f672914635880351aa2
-size 3194337
diff --git a/MLQA.en.hi/validation-00000-of-00001.parquet b/MLQA.en.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index a12df8cc59246387ec6e79b7a359f96c0d6da7d2..0000000000000000000000000000000000000000
--- a/MLQA.en.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:645ece9c1613137dd9c73dd510ac5bd55d9706d7da7b1cb92cd430cec53f49a1
-size 323003
diff --git a/MLQA.en.vi/test-00000-of-00001.parquet b/MLQA.en.vi/test-00000-of-00001.parquet
deleted file mode 100644
index c8197b682c0bfac835dcab2848474576c6b86024..0000000000000000000000000000000000000000
--- a/MLQA.en.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:558cf72b27aa7f632fcebb9daad5eb53d921e0398419fbbd6bba89b9ea561677
-size 3815403
diff --git a/MLQA.en.vi/validation-00000-of-00001.parquet b/MLQA.en.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index b7e63ee9126c13d28bb18eda7131a27d2eb958e9..0000000000000000000000000000000000000000
--- a/MLQA.en.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:10eedbbc483631b4495113a59348dc900dd447a651e4e32720c6a95c871e48fc
-size 355239
diff --git a/MLQA.en.zh/test-00000-of-00001.parquet b/MLQA.en.zh/test-00000-of-00001.parquet
deleted file mode 100644
index c5f4740a0aef9eb9fb207b21ba3ea80d8a253b7d..0000000000000000000000000000000000000000
--- a/MLQA.en.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e8619b3f0627882f0bb1056d5da0ebaa80e629e43a8f26eff3c2e6aa47950e51
-size 3593935
diff --git a/MLQA.en.zh/validation-00000-of-00001.parquet b/MLQA.en.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index b3e45943bef66348da8ff3200e6e116c5d552903..0000000000000000000000000000000000000000
--- a/MLQA.en.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f4641a1855ff40048661ce0de0dedb365a337700cd46e8818bc3bbcadd626991
-size 335187
diff --git a/MLQA.es.ar/test-00000-of-00001.parquet b/MLQA.es.ar/test-00000-of-00001.parquet
deleted file mode 100644
index e199d8633a9d6dbad499a2e541ff67654230b6d7..0000000000000000000000000000000000000000
--- a/MLQA.es.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d55ffcdcc6e11c57c36578721ea9a73a35905208dc7c15ca3e6757468712e2a8
-size 1016581
diff --git a/MLQA.es.ar/validation-00000-of-00001.parquet b/MLQA.es.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 93a47a147619b77026480b58d94d5962f2c34298..0000000000000000000000000000000000000000
--- a/MLQA.es.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:75e1d0cff0a38dad81a3cbdd7994233fd7e626bf44aa07da59ec34474cc8717d
-size 90854
diff --git a/MLQA.es.de/test-00000-of-00001.parquet b/MLQA.es.de/test-00000-of-00001.parquet
deleted file mode 100644
index f1e781ce569a157285ef85c0cad9eb34f0f1060c..0000000000000000000000000000000000000000
--- a/MLQA.es.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08cbf482417e035c638fb01684d174ee499b433712a79c98acf922bd79b91b87
-size 851003
diff --git a/MLQA.es.de/validation-00000-of-00001.parquet b/MLQA.es.de/validation-00000-of-00001.parquet
deleted file mode 100644
index 5fec51058f4765ea807a770ba4adc93e0a09b585..0000000000000000000000000000000000000000
--- a/MLQA.es.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5990a8efe3fd50ef9debbbd4fe42415b28e7ad33a717f7b668769d39b5f72297
-size 99445
diff --git a/MLQA.es.en/test-00000-of-00001.parquet b/MLQA.es.en/test-00000-of-00001.parquet
deleted file mode 100644
index c568ce1145d5a3c165239c67cfd026820ca83df4..0000000000000000000000000000000000000000
--- a/MLQA.es.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:430f56f61ca1345d0eb405da27d21c0a36ea6e585786f928781ad613ea80d497
-size 2585683
diff --git a/MLQA.es.en/validation-00000-of-00001.parquet b/MLQA.es.en/validation-00000-of-00001.parquet
deleted file mode 100644
index 3848e15928dec12f4012b1121e9123f2e8a333b2..0000000000000000000000000000000000000000
--- a/MLQA.es.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:19dd6f76acee710b0bf3639eb04f0bc936ab67262315e6f67ce97098e946c78a
-size 257196
diff --git a/MLQA.es.es/test-00000-of-00001.parquet b/MLQA.es.es/test-00000-of-00001.parquet
deleted file mode 100644
index 2a6a1ddbfad4465dc963b18cad52a2eff1860c59..0000000000000000000000000000000000000000
--- a/MLQA.es.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bb794409934455464cd978ee6295c67ad41a1e5d0667f6c93393ba3f11683596
-size 2598142
diff --git a/MLQA.es.es/validation-00000-of-00001.parquet b/MLQA.es.es/validation-00000-of-00001.parquet
deleted file mode 100644
index af0ee5f59847a7d09ffe976b02e3fc042f2aa438..0000000000000000000000000000000000000000
--- a/MLQA.es.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:185f3815456dc3cbfc13160e7b4769aae9507f5a42a84527bbc50aa66231591a
-size 258789
diff --git a/MLQA.es.hi/test-00000-of-00001.parquet b/MLQA.es.hi/test-00000-of-00001.parquet
deleted file mode 100644
index fd4a13d4e3eba6f1c7cd4be8aa94025b54ad51bc..0000000000000000000000000000000000000000
--- a/MLQA.es.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:62c732cdee8600a960f4ce9465c22040d9641e9ea33602970da9b95106fbf9ae
-size 844595
diff --git a/MLQA.es.hi/validation-00000-of-00001.parquet b/MLQA.es.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index 594995414a1c068d4f441f8cb7d6538de108ed35..0000000000000000000000000000000000000000
--- a/MLQA.es.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0f55ed4a6330ed3f7488e64d40b7941f2067b29066a5a93d800a6bc425cceeb2
-size 109423
diff --git a/MLQA.es.vi/test-00000-of-00001.parquet b/MLQA.es.vi/test-00000-of-00001.parquet
deleted file mode 100644
index 42991bf2a78a691dd2771d47c9383484c4eabf3c..0000000000000000000000000000000000000000
--- a/MLQA.es.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dd77ec6d71cdd8fd5e73ada9904a0778d70d179fbe1aa744edbb4ab9613bcbd5
-size 1074072
diff --git a/MLQA.es.vi/validation-00000-of-00001.parquet b/MLQA.es.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index 80f4fcb489554a5a975de4566f1a380714802e0a..0000000000000000000000000000000000000000
--- a/MLQA.es.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dd89e3a161f0a5bcbafe96e617662e7e7e44aa751b655450ab942be6566dfda4
-size 113877
diff --git a/MLQA.es.zh/test-00000-of-00001.parquet b/MLQA.es.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 17488046d3cf8495244ab3b170fec3512b93ade6..0000000000000000000000000000000000000000
--- a/MLQA.es.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:64c8d4ae8c685499437e1b2171342b5e8c419fcd7e8541ff067eea0578b8009d
-size 1012081
diff --git a/MLQA.es.zh/validation-00000-of-00001.parquet b/MLQA.es.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index 55b4e0275c4af2098605ed42361c4602bc4c5079..0000000000000000000000000000000000000000
--- a/MLQA.es.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7dcbea12f8fe1b8516e6ce82169841c2b6d26a455336c4f90a6d5d848a06ab0e
-size 88684
diff --git a/MLQA.hi.ar/test-00000-of-00001.parquet b/MLQA.hi.ar/test-00000-of-00001.parquet
deleted file mode 100644
index 59ccc001e0f0507508cf3ed678b7e9a52aeb8476..0000000000000000000000000000000000000000
--- a/MLQA.hi.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ccf08e9f4bc3017be63bc4532f9ba77c91d9664e479bac19a0a47fa88882ad29
-size 1407994
diff --git a/MLQA.hi.ar/validation-00000-of-00001.parquet b/MLQA.hi.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index c09f7709493288198bbed53a91bceea3b9e8dd4a..0000000000000000000000000000000000000000
--- a/MLQA.hi.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f521555a994c4aa9399e2511f77ac55ee4714178c11db7d4c1f87d7cfb7d1080
-size 134774
diff --git a/MLQA.hi.de/test-00000-of-00001.parquet b/MLQA.hi.de/test-00000-of-00001.parquet
deleted file mode 100644
index 0f74aa1708acf00410ce448024d8903b0e751b21..0000000000000000000000000000000000000000
--- a/MLQA.hi.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d3d74e492842b89cfa52feabfdcf3aeeefa57ae79e44dbb4d0fdc4c428bb9a97
-size 1142084
diff --git a/MLQA.hi.de/validation-00000-of-00001.parquet b/MLQA.hi.de/validation-00000-of-00001.parquet
deleted file mode 100644
index d3f4cf74407ab63129b79ad257cbb9f7c71e1e37..0000000000000000000000000000000000000000
--- a/MLQA.hi.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ed53537523b3db17259533508e88f8687a84d913a536a479cb358b85bfe56e8e
-size 115762
diff --git a/MLQA.hi.en/test-00000-of-00001.parquet b/MLQA.hi.en/test-00000-of-00001.parquet
deleted file mode 100644
index c97faba6f80355d60d316424e6f53901322e236a..0000000000000000000000000000000000000000
--- a/MLQA.hi.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e3eea626bef7c83388afed031a78b2d7bf07fc0f1318f5012520fc26f5868c3d
-size 3764636
diff --git a/MLQA.hi.en/validation-00000-of-00001.parquet b/MLQA.hi.en/validation-00000-of-00001.parquet
deleted file mode 100644
index e9903c4cf1723c31b3349cf795ec650b5f957461..0000000000000000000000000000000000000000
--- a/MLQA.hi.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0fc97a91e1d60e53a96a01bb5c396397bdfdd59787e3b32789f0069618690c92
-size 366447
diff --git a/MLQA.hi.es/test-00000-of-00001.parquet b/MLQA.hi.es/test-00000-of-00001.parquet
deleted file mode 100644
index ac7ddc2fdfe8e8c65789cccc3264b24a737bade3..0000000000000000000000000000000000000000
--- a/MLQA.hi.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ea106768303dad732ae6f0e387170be5231183c4c65dba3091a6ce8883777b24
-size 1346708
diff --git a/MLQA.hi.es/validation-00000-of-00001.parquet b/MLQA.hi.es/validation-00000-of-00001.parquet
deleted file mode 100644
index b8a32eeca39040fdaa78e8ceeb62f3a744960047..0000000000000000000000000000000000000000
--- a/MLQA.hi.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fc358019acf758067da975f77fa57fcc29fa2ed70336caec12c2f9f3f2ae9787
-size 146760
diff --git a/MLQA.hi.hi/test-00000-of-00001.parquet b/MLQA.hi.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 617266080e80080b2ab4a5ad54d8a59c63efa567..0000000000000000000000000000000000000000
--- a/MLQA.hi.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8284edfae6b6222224eec8152650b7d491bc9199179309622d8eb3a2f6574d4f
-size 3859758
diff --git a/MLQA.hi.hi/validation-00000-of-00001.parquet b/MLQA.hi.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index 87ba800383b43a1d1f4621cc70f7d003bbbffbf3..0000000000000000000000000000000000000000
--- a/MLQA.hi.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:01be59ea70dbe0cb3e124434c148a25a34d2b867a640106e54de3e05b13d5cc9
-size 376223
diff --git a/MLQA.hi.vi/test-00000-of-00001.parquet b/MLQA.hi.vi/test-00000-of-00001.parquet
deleted file mode 100644
index ce12dc82cfc1f728908322e40199049d7e3939ca..0000000000000000000000000000000000000000
--- a/MLQA.hi.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ebe314a4b29c7e425d7d5439994e9559302e5b197103d143651b6a0548030c91
-size 1552320
diff --git a/MLQA.hi.vi/validation-00000-of-00001.parquet b/MLQA.hi.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index b55250fc753ab3ee7f8b32e385bb66e3dd616292..0000000000000000000000000000000000000000
--- a/MLQA.hi.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:068f0d06f84a221a1a19a96e1043b12a2a69ed5782d5f7138aa2c9fc36f02649
-size 152644
diff --git a/MLQA.hi.zh/test-00000-of-00001.parquet b/MLQA.hi.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 781abea41df456b10a09131914290333e26be9e1..0000000000000000000000000000000000000000
--- a/MLQA.hi.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:57534f445bdcd5b47dd94a497c790bbd38d1ab6c2f52a6c8639f72c3aac2ec08
-size 1479524
diff --git a/MLQA.hi.zh/validation-00000-of-00001.parquet b/MLQA.hi.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index f3984ca4566e355382e3fad02c56d931792e5ad2..0000000000000000000000000000000000000000
--- a/MLQA.hi.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:90e96d4dd882f2c2d2419901dd567965386ae0e59a737a1c2392fea52242a14b
-size 147583
diff --git a/MLQA.vi.ar/test-00000-of-00001.parquet b/MLQA.vi.ar/test-00000-of-00001.parquet
deleted file mode 100644
index 6024820219cf21bf9a16801fc593634c93060371..0000000000000000000000000000000000000000
--- a/MLQA.vi.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e80b641228a15bc70222de1060f6b2c6ce5d430699406c2eddc615833b65af0f
-size 1530634
diff --git a/MLQA.vi.ar/validation-00000-of-00001.parquet b/MLQA.vi.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 9ecb15d9e7d1abd96767812cf101e22d18235d69..0000000000000000000000000000000000000000
--- a/MLQA.vi.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:60564652d4ea087cb601e418a399a4642c8c93b5f1b815403dda3d4453b27ec2
-size 126027
diff --git a/MLQA.vi.de/test-00000-of-00001.parquet b/MLQA.vi.de/test-00000-of-00001.parquet
deleted file mode 100644
index 8d78b7e94dbd0c9259c04ba36b38fd025966fa38..0000000000000000000000000000000000000000
--- a/MLQA.vi.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e43f75808799238577ae625a2abbc1e974439819dcfc74b1a97d76a398f5bf3e
-size 1128325
diff --git a/MLQA.vi.de/validation-00000-of-00001.parquet b/MLQA.vi.de/validation-00000-of-00001.parquet
deleted file mode 100644
index 857f592402f38a607a5eab4a291f71b52b34efa5..0000000000000000000000000000000000000000
--- a/MLQA.vi.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2d370348961c0c9dc4f7524a033d0fa3ba54867321ccf34658e59b9042c1242a
-size 139716
diff --git a/MLQA.vi.en/test-00000-of-00001.parquet b/MLQA.vi.en/test-00000-of-00001.parquet
deleted file mode 100644
index 8d5dab7dc9895ac76c37e73fdc36498e7515b309..0000000000000000000000000000000000000000
--- a/MLQA.vi.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7ed9cf3575802ac86e39d7dd5ade4cc34f117c4ea2a164e9c8a6c926a52477c2
-size 3724141
diff --git a/MLQA.vi.en/validation-00000-of-00001.parquet b/MLQA.vi.en/validation-00000-of-00001.parquet
deleted file mode 100644
index 89dcecb3fd5c9d1ae892d957fd6d1495a20b3ce7..0000000000000000000000000000000000000000
--- a/MLQA.vi.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6a525c15b4b805d7bcd1b3e9757386a0b0c03ff649adae432d36f4b692118bf5
-size 347562
diff --git a/MLQA.vi.es/test-00000-of-00001.parquet b/MLQA.vi.es/test-00000-of-00001.parquet
deleted file mode 100644
index d6276fd0a81f8ac2fcd9d1e603e8ddb417df45c5..0000000000000000000000000000000000000000
--- a/MLQA.vi.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1446e5aae9831919fe49f192c7ce0c0d050be36f995254c6170e0223eb2100ef
-size 1466018
diff --git a/MLQA.vi.es/validation-00000-of-00001.parquet b/MLQA.vi.es/validation-00000-of-00001.parquet
deleted file mode 100644
index b1a61a8dc7587f54b90fd838ae10802d0c97395a..0000000000000000000000000000000000000000
--- a/MLQA.vi.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f092f3a169b007030ffbab908eed4660671f375b031547ab44c7e6c2dfdbba5b
-size 141908
diff --git a/MLQA.vi.hi/test-00000-of-00001.parquet b/MLQA.vi.hi/test-00000-of-00001.parquet
deleted file mode 100644
index f490a38bc59c8c7d9cf44d69b60d82c158b31ef9..0000000000000000000000000000000000000000
--- a/MLQA.vi.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c26d08295d99131f6adb913a3cea029463e03b254c9af631ce23d24adb12f31b
-size 1236419
diff --git a/MLQA.vi.hi/validation-00000-of-00001.parquet b/MLQA.vi.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index 908a61bd388aeb6cd5391d932198776308bd76fd..0000000000000000000000000000000000000000
--- a/MLQA.vi.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c6381364c651d0a7d8434205e3871d6436e6e381142141326b362f7bbf1efab9
-size 129638
diff --git a/MLQA.vi.vi/test-00000-of-00001.parquet b/MLQA.vi.vi/test-00000-of-00001.parquet
deleted file mode 100644
index 91db420f6eb36d234261e414380dd3e9bd3ddf39..0000000000000000000000000000000000000000
--- a/MLQA.vi.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ffbad421497296fc95deb35e2b5a86c211385f4215accbd600a5a5762d9f458c
-size 3755070
diff --git a/MLQA.vi.vi/validation-00000-of-00001.parquet b/MLQA.vi.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index 3f471e957bf9f829afacce8a00e2e8b7f79eb308..0000000000000000000000000000000000000000
--- a/MLQA.vi.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6326f455bbc622bf4dce0aae5843ee5b4a163fe11b9628c04b392d56d8e8bf44
-size 350318
diff --git a/MLQA.vi.zh/test-00000-of-00001.parquet b/MLQA.vi.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 3b62bee881aea1d75cea35f65c3de9f4654bbcc3..0000000000000000000000000000000000000000
--- a/MLQA.vi.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:220d971d9b9078070ecca6b283fe212f0d2cb16e234ea16fe1a0a2c5c7ca5728
-size 1432356
diff --git a/MLQA.vi.zh/validation-00000-of-00001.parquet b/MLQA.vi.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index 4a33d127fc20833891c78d86a7789306e9406e65..0000000000000000000000000000000000000000
--- a/MLQA.vi.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9cb4ca18b9b6d43c9476d96703aa69d9143150891bb73cd956a7c707d86e8d50
-size 138037
diff --git a/MLQA.zh.ar/test-00000-of-00001.parquet b/MLQA.zh.ar/test-00000-of-00001.parquet
deleted file mode 100644
index bfd433294d0c7f1c3a4087728674fb9bd1eb5419..0000000000000000000000000000000000000000
--- a/MLQA.zh.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:47f9318338a0c04970604655a1d50370a9730892e8aa90db33dfdf73474bbfc0
-size 1117059
diff --git a/MLQA.zh.ar/validation-00000-of-00001.parquet b/MLQA.zh.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index 109ba2d6e653e8eee7e27da8ce8b43279a1797b8..0000000000000000000000000000000000000000
--- a/MLQA.zh.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:10bcc7fef1eaca3b2975b13b588fda19f73945661a7bbdd49559d9c9b36a02d8
-size 106804
diff --git a/MLQA.zh.de/test-00000-of-00001.parquet b/MLQA.zh.de/test-00000-of-00001.parquet
deleted file mode 100644
index 025a92039ebf88d49a2d69e27a1005cb242009e1..0000000000000000000000000000000000000000
--- a/MLQA.zh.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d225a70f4bf94f64cd56c7ba4ec45f8d9dea23bc42ced2936b401faf2f861ce3
-size 892855
diff --git a/MLQA.zh.de/validation-00000-of-00001.parquet b/MLQA.zh.de/validation-00000-of-00001.parquet
deleted file mode 100644
index bb4c6b2123febe5e6b3809f381ced63c9e989a6f..0000000000000000000000000000000000000000
--- a/MLQA.zh.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f4346361bb77136912a1d2aa34a10167692365d85753df9def74324bf6e4e847
-size 113974
diff --git a/MLQA.zh.en/test-00000-of-00001.parquet b/MLQA.zh.en/test-00000-of-00001.parquet
deleted file mode 100644
index ceb82c645fa439a0796496be29d3432501e8bf98..0000000000000000000000000000000000000000
--- a/MLQA.zh.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ac5a663be906ed38e676970cce13bf9e8e074e0562e42d6cb35651fa6ae4652c
-size 2830274
diff --git a/MLQA.zh.en/validation-00000-of-00001.parquet b/MLQA.zh.en/validation-00000-of-00001.parquet
deleted file mode 100644
index d672b16166c30b3a04bd1611e4d8234297a360ce..0000000000000000000000000000000000000000
--- a/MLQA.zh.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0fb1d868590f55a779d3e20affb13ba149ae1370047a8fee6aaf86e0cc3fda30
-size 278159
diff --git a/MLQA.zh.es/test-00000-of-00001.parquet b/MLQA.zh.es/test-00000-of-00001.parquet
deleted file mode 100644
index 9f0751b9c68556373d915c807caf8c9a2e684c5d..0000000000000000000000000000000000000000
--- a/MLQA.zh.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a26ed7fa83012415f5647cb02dc860f9bc8580c8f89a57e2fefb2bb4c75219c9
-size 1124032
diff --git a/MLQA.zh.es/validation-00000-of-00001.parquet b/MLQA.zh.es/validation-00000-of-00001.parquet
deleted file mode 100644
index 3b9ea970c415d7cf6c2dcc685ef1cf07fa0fdca6..0000000000000000000000000000000000000000
--- a/MLQA.zh.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b7a94418e3973e9c35f5423b8381c5c4f1dd50601f00a5feb9e92b6aab8890d4
-size 99435
diff --git a/MLQA.zh.hi/test-00000-of-00001.parquet b/MLQA.zh.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 63e67fb028a637e0a9390ad03de55ed3d129b9ce..0000000000000000000000000000000000000000
--- a/MLQA.zh.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:97cfadb5ac87b002b9fac8446c81230643396da83b121219633d9b0500df75a9
-size 933816
diff --git a/MLQA.zh.hi/validation-00000-of-00001.parquet b/MLQA.zh.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index c9d00e3d48ed02fe8fede28637d74a08f94e9244..0000000000000000000000000000000000000000
--- a/MLQA.zh.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:114a3056807115eb62a3d0994bd700a57b2775e6fdbff2adff9a45be9972e842
-size 110783
diff --git a/MLQA.zh.vi/test-00000-of-00001.parquet b/MLQA.zh.vi/test-00000-of-00001.parquet
deleted file mode 100644
index 778d8862dcd57354524260ffee3ce88e99c8b9cd..0000000000000000000000000000000000000000
--- a/MLQA.zh.vi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e445710d9528f970b054f10d3418e3d633b7404e3941daac9372a73bb9695011
-size 1147739
diff --git a/MLQA.zh.vi/validation-00000-of-00001.parquet b/MLQA.zh.vi/validation-00000-of-00001.parquet
deleted file mode 100644
index 98a0c75378e07e9f49935e33b21de882eccc20d1..0000000000000000000000000000000000000000
--- a/MLQA.zh.vi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:07ace75fcd4c7ce8e27f1986086a971f42f1083958a8be87c8e27ec9840abcb9
-size 120474
diff --git a/MLQA.zh.zh/test-00000-of-00001.parquet b/MLQA.zh.zh/test-00000-of-00001.parquet
deleted file mode 100644
index 9e9a8a6e2d3169d61efaa35f0a02a061fde15ff4..0000000000000000000000000000000000000000
--- a/MLQA.zh.zh/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:15f0705f148a85fb0e243a7c4e928b399276fb79eef9a616199c7e1b7103aade
-size 2827582
diff --git a/MLQA.zh.zh/validation-00000-of-00001.parquet b/MLQA.zh.zh/validation-00000-of-00001.parquet
deleted file mode 100644
index bfef129d80fc8b19df4bd01ace230ec67c5f0e46..0000000000000000000000000000000000000000
--- a/MLQA.zh.zh/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:def99a63b04d73bda1c0c2036b751c63642019de3cbdae899d6300b5c0991443
-size 277780
diff --git a/PAN-X.af/test-00000-of-00001.parquet b/PAN-X.af/test-00000-of-00001.parquet
deleted file mode 100644
index b6411430344bf3e6ebedbd7610a382f00cc1be09..0000000000000000000000000000000000000000
--- a/PAN-X.af/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3c8a6fdd776713fb429c06a9c6f48ac91921cfac1a235a6c91f26d2c8953bb74
-size 55758
diff --git a/PAN-X.af/train-00000-of-00001.parquet b/PAN-X.af/train-00000-of-00001.parquet
deleted file mode 100644
index f7999309ae5f7cdce5fc78d06e5a1e046d8b5e72..0000000000000000000000000000000000000000
--- a/PAN-X.af/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:424126fe7ae53b64c789f484fc6be810031a3b9265deef4da95b9b0683154275
-size 277544
diff --git a/PAN-X.af/validation-00000-of-00001.parquet b/PAN-X.af/validation-00000-of-00001.parquet
deleted file mode 100644
index 58d22b13a7e4734bef5a6540fff8f5812b61ec4f..0000000000000000000000000000000000000000
--- a/PAN-X.af/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3552fe2b6741e2ea886a72ccc4807e9d22b14ae027e1d8e90dc42210b7e64bd2
-size 55713
diff --git a/PAN-X.ar/test-00000-of-00001.parquet b/PAN-X.ar/test-00000-of-00001.parquet
deleted file mode 100644
index e46015a1be7514bd7cc0aa772f2c2fd2a26057bc..0000000000000000000000000000000000000000
--- a/PAN-X.ar/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b1a3bce398b4a5cc9751cf6a08ea910dff3a04f100071a67e21959f10f536990
-size 392347
diff --git a/PAN-X.ar/train-00000-of-00001.parquet b/PAN-X.ar/train-00000-of-00001.parquet
deleted file mode 100644
index 26b48bdf43cd33ef950aeb7e1902c547b23e95f8..0000000000000000000000000000000000000000
--- a/PAN-X.ar/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:907011a73451a96abd8233fdd85806f2659687e46baa0f55d4680fbe25599e71
-size 784875
diff --git a/PAN-X.ar/validation-00000-of-00001.parquet b/PAN-X.ar/validation-00000-of-00001.parquet
deleted file mode 100644
index ce1051b6132a83e04db40fae80e3226dfff24337..0000000000000000000000000000000000000000
--- a/PAN-X.ar/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fd577adc60db7e1b016f61a306c20b2fee386ce20a71ccf35133cbc7181a17c1
-size 390248
diff --git a/PAN-X.bg/test-00000-of-00001.parquet b/PAN-X.bg/test-00000-of-00001.parquet
deleted file mode 100644
index 5546083890f3481beff81a43ef57034673b69e30..0000000000000000000000000000000000000000
--- a/PAN-X.bg/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fec511dd63f8f61a0794d4cc78088a8fffd425bf653abeb8bbc3b3c590f4e358
-size 509997
diff --git a/PAN-X.bg/train-00000-of-00001.parquet b/PAN-X.bg/train-00000-of-00001.parquet
deleted file mode 100644
index 22f5b3405769f2b450aceede4c6889c1c9ac46a6..0000000000000000000000000000000000000000
--- a/PAN-X.bg/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fdafcbfad323f8d4aff6a2b0a087cba540a6b05d31ca12b569e83922f9242a46
-size 1011287
diff --git a/PAN-X.bg/validation-00000-of-00001.parquet b/PAN-X.bg/validation-00000-of-00001.parquet
deleted file mode 100644
index 65240e30a698c4b5678f6849e1da394dc303a2ad..0000000000000000000000000000000000000000
--- a/PAN-X.bg/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b7f80511e0eca9f08cfa2d0be02200af47d812928b4d9c36fc4fd4bd7fe63b74
-size 509385
diff --git a/PAN-X.bn/test-00000-of-00001.parquet b/PAN-X.bn/test-00000-of-00001.parquet
deleted file mode 100644
index fcd13c6685ebf22342859ec70467f9be394a2303..0000000000000000000000000000000000000000
--- a/PAN-X.bn/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:06f278bfbc9c85fd50823e38bc1cc0c54f4e8653e3c4d8bbc61a8bab883d2fa0
-size 32107
diff --git a/PAN-X.bn/train-00000-of-00001.parquet b/PAN-X.bn/train-00000-of-00001.parquet
deleted file mode 100644
index c95b0de89b718877515d12ddff20549e05a4d253..0000000000000000000000000000000000000000
--- a/PAN-X.bn/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:30dc398e0953c17aed92502114b2cfba862f87c9aabe9482f69131ec0982ef47
-size 301000
diff --git a/PAN-X.bn/validation-00000-of-00001.parquet b/PAN-X.bn/validation-00000-of-00001.parquet
deleted file mode 100644
index 7f70a18d650e11da9d45efd66f13ecef7bda3d63..0000000000000000000000000000000000000000
--- a/PAN-X.bn/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:33b884c635dfac83eb71f990dc75ee5cb7860dbbf93eddba8bcfe36a6337ac03
-size 30917
diff --git a/PAN-X.de/test-00000-of-00001.parquet b/PAN-X.de/test-00000-of-00001.parquet
deleted file mode 100644
index 7a4f03f18a623acf6844389c6d26ed27ee8788bc..0000000000000000000000000000000000000000
--- a/PAN-X.de/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:20f52cd569e8f534e50304f428cc434bdafcda9a9730e7b573baec4bf2cc5db8
-size 587593
diff --git a/PAN-X.de/train-00000-of-00001.parquet b/PAN-X.de/train-00000-of-00001.parquet
deleted file mode 100644
index 763d5c24bbbeb394e222cea075092e16762225af..0000000000000000000000000000000000000000
--- a/PAN-X.de/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f470ee11f2bca77de55a23a942771390f30963a83f0dd6ec3aa7bef83777609c
-size 1182366
diff --git a/PAN-X.de/validation-00000-of-00001.parquet b/PAN-X.de/validation-00000-of-00001.parquet
deleted file mode 100644
index 4e65e6810af1d3ca19c765becd1812751f11f439..0000000000000000000000000000000000000000
--- a/PAN-X.de/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b024f5a85fbe1ef51f12f2efec63c3fb4c43d44c2378c95bd1415260decb525e
-size 590283
diff --git a/PAN-X.el/test-00000-of-00001.parquet b/PAN-X.el/test-00000-of-00001.parquet
deleted file mode 100644
index 5fecd7828190c6f924f249c17e8552f15462a526..0000000000000000000000000000000000000000
--- a/PAN-X.el/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:385b3d16aeab090571ca198fdee4e5446adcf1880e99e42cf434978b2aa03039
-size 570620
diff --git a/PAN-X.el/train-00000-of-00001.parquet b/PAN-X.el/train-00000-of-00001.parquet
deleted file mode 100644
index f3505c7f563440019ccef7a31897ff4f79f4845c..0000000000000000000000000000000000000000
--- a/PAN-X.el/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:59dfd09c44c2d0893cf09443e6268137a1318644b57f6f5ec220174e7faae157
-size 1132284
diff --git a/PAN-X.el/validation-00000-of-00001.parquet b/PAN-X.el/validation-00000-of-00001.parquet
deleted file mode 100644
index b894eafa663dd021c6b16c2b6baaa391560423f5..0000000000000000000000000000000000000000
--- a/PAN-X.el/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d439a5a2d21ffe340b17233e85b854bc637ad30ff33e93118aeced2eafcfa98a
-size 568822
diff --git a/PAN-X.en/test-00000-of-00001.parquet b/PAN-X.en/test-00000-of-00001.parquet
deleted file mode 100644
index 198b76456dea03e438f7d4f48a148541a80f229d..0000000000000000000000000000000000000000
--- a/PAN-X.en/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:17da78a915f44fed08e84d9bb91b7690170b4de0b086c67c164015a8d3b7347e
-size 472304
diff --git a/PAN-X.en/train-00000-of-00001.parquet b/PAN-X.en/train-00000-of-00001.parquet
deleted file mode 100644
index 3ec97569d32fd17ec1bee4799876a3b7b9f0da91..0000000000000000000000000000000000000000
--- a/PAN-X.en/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d0d71235671b3fe34bc6d800f69c1d1278d77a1f34eb2cf9d4faf3f8d8ad4969
-size 942226
diff --git a/PAN-X.en/validation-00000-of-00001.parquet b/PAN-X.en/validation-00000-of-00001.parquet
deleted file mode 100644
index 94aa53ee71b02ac718352883a64e39760517cb19..0000000000000000000000000000000000000000
--- a/PAN-X.en/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:039e63a079f8cd589970de43a499740550ba0cd6770bfa01c9adc62ce82c43db
-size 471754
diff --git a/PAN-X.es/test-00000-of-00001.parquet b/PAN-X.es/test-00000-of-00001.parquet
deleted file mode 100644
index b13789d2efc3d4d93cd965a05495c7f76cb3127c..0000000000000000000000000000000000000000
--- a/PAN-X.es/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:abb30d18d725bea52083588efb2d1b61b9f0ba15eb1eba2ff1268a8ffc438449
-size 373386
diff --git a/PAN-X.es/train-00000-of-00001.parquet b/PAN-X.es/train-00000-of-00001.parquet
deleted file mode 100644
index 2b8249755b62dad6d38e1210c680748d070b514d..0000000000000000000000000000000000000000
--- a/PAN-X.es/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:35839fff33832595db379b2ad462483a509d5f71178f425cfa4ce85baf8e37e6
-size 744323
diff --git a/PAN-X.es/validation-00000-of-00001.parquet b/PAN-X.es/validation-00000-of-00001.parquet
deleted file mode 100644
index 4ca65547295a3fde3ec2c0e1fff26341dfc4a9fa..0000000000000000000000000000000000000000
--- a/PAN-X.es/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7a2a0f30dd7aef5fe9167164068ff0ae835c8c5eab23f4c6357c16560b886d82
-size 371853
diff --git a/PAN-X.et/test-00000-of-00001.parquet b/PAN-X.et/test-00000-of-00001.parquet
deleted file mode 100644
index 243ed24b54726e3b39cfe96706e4c080ea89da18..0000000000000000000000000000000000000000
--- a/PAN-X.et/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:293df27b589d22f79115fbb06ee112a346895c3a0a77b2c3ee7a9d388121b699
-size 545018
diff --git a/PAN-X.et/train-00000-of-00001.parquet b/PAN-X.et/train-00000-of-00001.parquet
deleted file mode 100644
index 5f8d9c073175ca5b3e023192c08556bfe8ca81cb..0000000000000000000000000000000000000000
--- a/PAN-X.et/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:748301fdfaef6986f3ffd8000ee7232fd370ec793ca48f094a4b410130d4c568
-size 818640
diff --git a/PAN-X.et/validation-00000-of-00001.parquet b/PAN-X.et/validation-00000-of-00001.parquet
deleted file mode 100644
index cb170822520c9b4e29eb6c243bc4793cdccec03c..0000000000000000000000000000000000000000
--- a/PAN-X.et/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:87462cc73385da3a9f0eec06c1975f36b2181b3fd4bbaf7d95143ff3378c71ce
-size 551966
diff --git a/PAN-X.eu/test-00000-of-00001.parquet b/PAN-X.eu/test-00000-of-00001.parquet
deleted file mode 100644
index f71e4e7f760fc6741a5b83bf139e2ddd99be8cd7..0000000000000000000000000000000000000000
--- a/PAN-X.eu/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bc2c7141313d880c6be58fd90101dc54076367b8379a739387903410c20eca82
-size 460626
diff --git a/PAN-X.eu/train-00000-of-00001.parquet b/PAN-X.eu/train-00000-of-00001.parquet
deleted file mode 100644
index 5e921573237a48952c6cd9d99a910734c38477b8..0000000000000000000000000000000000000000
--- a/PAN-X.eu/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1a326f4ff88e8b0c1856834c6405934f42c5a42d04154b4b362bb3cf2c3c9575
-size 466572
diff --git a/PAN-X.eu/validation-00000-of-00001.parquet b/PAN-X.eu/validation-00000-of-00001.parquet
deleted file mode 100644
index cca22a5b58915ae8f9777a811d22c050095abd19..0000000000000000000000000000000000000000
--- a/PAN-X.eu/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bb268dc974df46a511c1a1be8ded862bad306edd788f5ebeb940f2dbd9dd79ab
-size 465981
diff --git a/PAN-X.fa/test-00000-of-00001.parquet b/PAN-X.fa/test-00000-of-00001.parquet
deleted file mode 100644
index 1637fdedb03562d12d93ea77b7dc149fa221132d..0000000000000000000000000000000000000000
--- a/PAN-X.fa/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4ff63042b68835c43ab55e0ad3c9f1684478dec027f2d8c8c98e01cc33a84021
-size 350994
diff --git a/PAN-X.fa/train-00000-of-00001.parquet b/PAN-X.fa/train-00000-of-00001.parquet
deleted file mode 100644
index 481d111230b74d4e00461996a0bdaeb0a5bf576d..0000000000000000000000000000000000000000
--- a/PAN-X.fa/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c1be34f72a8343bfaa614028f4f31644685e18ad9d884678dd8f3d9ed3294b30
-size 694848
diff --git a/PAN-X.fa/validation-00000-of-00001.parquet b/PAN-X.fa/validation-00000-of-00001.parquet
deleted file mode 100644
index bc80499223b1b370cb7e479883d4b6764eade170..0000000000000000000000000000000000000000
--- a/PAN-X.fa/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:38f4ccadc89878c4680f6c77944599100c83a1ff162fe87ea4bacaa63ca2dacb
-size 355366
diff --git a/PAN-X.fi/test-00000-of-00001.parquet b/PAN-X.fi/test-00000-of-00001.parquet
deleted file mode 100644
index 83375e99bcdcbe49d34b776a98fccf17cb83b0b2..0000000000000000000000000000000000000000
--- a/PAN-X.fi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b89cbe011670eec6c70988fecdf0f65a8d659a29985b53bdcaeeaa8ad190adc7
-size 613781
diff --git a/PAN-X.fi/train-00000-of-00001.parquet b/PAN-X.fi/train-00000-of-00001.parquet
deleted file mode 100644
index d5df9626e78a695fb2edfda4b56dc1249cd16186..0000000000000000000000000000000000000000
--- a/PAN-X.fi/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2b03caa41ae6fef9dcc2b59e6b2887b25b8b975489055d6f7c3930bae9ffaad9
-size 1229933
diff --git a/PAN-X.fi/validation-00000-of-00001.parquet b/PAN-X.fi/validation-00000-of-00001.parquet
deleted file mode 100644
index 308b205b7c86d5eb8be9e170b5eed50a008c7fce..0000000000000000000000000000000000000000
--- a/PAN-X.fi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f434efccf3151f942a648af25eb927fed8ebdf2171f6211102bcbb064e6afcc2
-size 615435
diff --git a/PAN-X.fr/test-00000-of-00001.parquet b/PAN-X.fr/test-00000-of-00001.parquet
deleted file mode 100644
index c7ef223d8000079e3d078e06065c77eb9fe43004..0000000000000000000000000000000000000000
--- a/PAN-X.fr/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f83e8e688600f7b4d317372106f0fa7fe19783c8881f93f2d45bd3968fa007f7
-size 422606
diff --git a/PAN-X.fr/train-00000-of-00001.parquet b/PAN-X.fr/train-00000-of-00001.parquet
deleted file mode 100644
index b63ad9666b24824cdd1c476ddac6af28c9e810c9..0000000000000000000000000000000000000000
--- a/PAN-X.fr/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:190d48ffd13da3a02140803839846380475a220daeb5fe419f316df0c7fba061
-size 837437
diff --git a/PAN-X.fr/validation-00000-of-00001.parquet b/PAN-X.fr/validation-00000-of-00001.parquet
deleted file mode 100644
index 9df8bc4d6dd972601350587d2e91a328a08a28ae..0000000000000000000000000000000000000000
--- a/PAN-X.fr/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dbae4ac94d6763b4d43fe49d299bcf1805a5b0c8ab50e56e8e4f61bf66dfe0c4
-size 419240
diff --git a/PAN-X.he/test-00000-of-00001.parquet b/PAN-X.he/test-00000-of-00001.parquet
deleted file mode 100644
index 69e4e2484874f88ad5ff41aba547b0771ffb21b4..0000000000000000000000000000000000000000
--- a/PAN-X.he/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b3c0cbd3e640b3a06763aa173d0cc3d715e4e7e7a76db7043a78086d7671f735
-size 543778
diff --git a/PAN-X.he/train-00000-of-00001.parquet b/PAN-X.he/train-00000-of-00001.parquet
deleted file mode 100644
index 9f2e3263d7c94f6562a202b2abd6e3c2f509ff95..0000000000000000000000000000000000000000
--- a/PAN-X.he/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9b0f7bee7e288a78cc6d49a0d23e1ce23431674839af5b6c019975fe24571a38
-size 1092114
diff --git a/PAN-X.he/validation-00000-of-00001.parquet b/PAN-X.he/validation-00000-of-00001.parquet
deleted file mode 100644
index be156f1d473e69b79cf63795c99789f8be2491e6..0000000000000000000000000000000000000000
--- a/PAN-X.he/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c23640294b7b1ad54be5943bc46cc5f3ef505a38876e4d932a6dc389ba371ebd
-size 550571
diff --git a/PAN-X.hi/test-00000-of-00001.parquet b/PAN-X.hi/test-00000-of-00001.parquet
deleted file mode 100644
index 1aec1887383ec3a8f5b19551c2aaf50a46373bac..0000000000000000000000000000000000000000
--- a/PAN-X.hi/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:287d3f6fe2cc420fe122f8e27b473a88575814a37f3a92e88fadd7ca19eaf2c2
-size 39655
diff --git a/PAN-X.hi/train-00000-of-00001.parquet b/PAN-X.hi/train-00000-of-00001.parquet
deleted file mode 100644
index d31a35b33128a929fd57c92f217da8eb3d9fcaca..0000000000000000000000000000000000000000
--- a/PAN-X.hi/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:147d9ff446e433a374ac4746cd4106a28d319d938329a99d7b682b08517d01e6
-size 187546
diff --git a/PAN-X.hi/validation-00000-of-00001.parquet b/PAN-X.hi/validation-00000-of-00001.parquet
deleted file mode 100644
index 52f7c998a8cb8f6c46706c8a1306a3d3595d3e70..0000000000000000000000000000000000000000
--- a/PAN-X.hi/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:79f0438a3ef8ffd5726667937cd168e284534bd084fb882783689785d53500ee
-size 38885
diff --git a/PAN-X.hu/test-00000-of-00001.parquet b/PAN-X.hu/test-00000-of-00001.parquet
deleted file mode 100644
index 58834865c14faffafc4b83deb9cda525ea617f52..0000000000000000000000000000000000000000
--- a/PAN-X.hu/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8b33b5dd7714418a74380ea244d285b966766ec04f981636021834b7c824b062
-size 603776
diff --git a/PAN-X.hu/train-00000-of-00001.parquet b/PAN-X.hu/train-00000-of-00001.parquet
deleted file mode 100644
index 75497847a1eaf6dee52a7139ae15ae0cac9f78cf..0000000000000000000000000000000000000000
--- a/PAN-X.hu/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1badfb1b1466d5f6f58e52ddf9787cca62154bf1590903b682fd11339ebf6bc8
-size 1208588
diff --git a/PAN-X.hu/validation-00000-of-00001.parquet b/PAN-X.hu/validation-00000-of-00001.parquet
deleted file mode 100644
index 0843942001e13e1af6a52be6c06cee6984318f8e..0000000000000000000000000000000000000000
--- a/PAN-X.hu/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:367243f754a00008c1998eeed2098c2cc9e6a8c298a46ca8dbff68fd40ce847a
-size 587026
diff --git a/PAN-X.id/test-00000-of-00001.parquet b/PAN-X.id/test-00000-of-00001.parquet
deleted file mode 100644
index aae369b682593b8a25dabfd4b3e96acd1541d66a..0000000000000000000000000000000000000000
--- a/PAN-X.id/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d24021bf68247753b5b2b0b03cfcc62b524c0b7edda074a51b9e56fb48d46172
-size 350988
diff --git a/PAN-X.id/train-00000-of-00001.parquet b/PAN-X.id/train-00000-of-00001.parquet
deleted file mode 100644
index 62235d84462ced91e7433c0586e71a241daee493..0000000000000000000000000000000000000000
--- a/PAN-X.id/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:171d2fed6be2f0f409e9b58c98ec9ad387d91521437818a123921b3299a5288c
-size 707917
diff --git a/PAN-X.id/validation-00000-of-00001.parquet b/PAN-X.id/validation-00000-of-00001.parquet
deleted file mode 100644
index bc78aa193b95230b4669596c6fd57947dc430548..0000000000000000000000000000000000000000
--- a/PAN-X.id/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c31ca18447a85f1b729ef05d85a8814d4f6079ff455e03f4c4509f9ec5cada44
-size 353144
diff --git a/PAN-X.it/test-00000-of-00001.parquet b/PAN-X.it/test-00000-of-00001.parquet
deleted file mode 100644
index 8dccb79959ca280422499dd4111813f1421da00e..0000000000000000000000000000000000000000
--- a/PAN-X.it/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3688bee161ef412cfbcbe4be47bb2fe6c1b3230ce1a50f3887aaffc4f0147f21
-size 463925
diff --git a/PAN-X.it/train-00000-of-00001.parquet b/PAN-X.it/train-00000-of-00001.parquet
deleted file mode 100644
index 77dfc898deeba21432e8ae9e40afb2088f5bf5ad..0000000000000000000000000000000000000000
--- a/PAN-X.it/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2c6b5c7335b6d21eb51d915f5e222170b9f99fc9a8f28a02df15737542b541e1
-size 932490
diff --git a/PAN-X.it/validation-00000-of-00001.parquet b/PAN-X.it/validation-00000-of-00001.parquet
deleted file mode 100644
index 4112501b539b49855c464802fe1b22493c388276..0000000000000000000000000000000000000000
--- a/PAN-X.it/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e17434120a756c2f78381d875aa45d666083d9b1c2919fdf7dd8576012f709ca
-size 459383
diff --git a/PAN-X.ja/test-00000-of-00001.parquet b/PAN-X.ja/test-00000-of-00001.parquet
deleted file mode 100644
index b50557da2e446a0d938f12c7a8e2040037994344..0000000000000000000000000000000000000000
--- a/PAN-X.ja/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4bd73e3f01e90bdd93d61b6747265692a353578685d9868aaa963e327927db3d
-size 623605
diff --git a/PAN-X.ja/train-00000-of-00001.parquet b/PAN-X.ja/train-00000-of-00001.parquet
deleted file mode 100644
index 9c619d6ba9a28b819c52a49c113f3fc895565f6b..0000000000000000000000000000000000000000
--- a/PAN-X.ja/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4aba0e5d40fba402f4874250eebf054f0b46787a7a4aa1baa16baef28df668d8
-size 1227653
diff --git a/PAN-X.ja/validation-00000-of-00001.parquet b/PAN-X.ja/validation-00000-of-00001.parquet
deleted file mode 100644
index 845487dc46ff304a95838fcfb14d37aa6ea1acdb..0000000000000000000000000000000000000000
--- a/PAN-X.ja/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ca7131822d4c96b6ccaa21ccc37fbe43086dadc465dd41b5bbd102197db59237
-size 614416
diff --git a/PAN-X.jv/test-00000-of-00001.parquet b/PAN-X.jv/test-00000-of-00001.parquet
deleted file mode 100644
index e4b56a65f68d7686bf7cf473b70f3fea117b42b7..0000000000000000000000000000000000000000
--- a/PAN-X.jv/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f6b539c683886868c921aab828b463e4958ae4c20a7aebc98d0998beeb5be6cc
-size 7038
diff --git a/PAN-X.jv/train-00000-of-00001.parquet b/PAN-X.jv/train-00000-of-00001.parquet
deleted file mode 100644
index 6078e03112b20e8dc8e55055c2a8312850356245..0000000000000000000000000000000000000000
--- a/PAN-X.jv/train-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:62234839065c124f4cbea0b6a9b8a33c23719c0314eb74bee496922a1b6cbd92
-size 6972
diff --git a/PAN-X.jv/validation-00000-of-00001.parquet b/PAN-X.jv/validation-00000-of-00001.parquet
deleted file mode 100644
index d36370b817a0526aec7fb02daa793d2d6602e8b8..0000000000000000000000000000000000000000
--- a/PAN-X.jv/validation-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:da47092c6604d0773af5d92e486e38503ebdf410186d3c7df1b18cce5da473fc
-size 6465
diff --git a/PAN-X.ka/test-00000-of-00001.parquet b/PAN-X.ka/test-00000-of-00001.parquet
deleted file mode 100644
index f858ba93781b51c8a6c39fd9df7d8e9258ddc2ff..0000000000000000000000000000000000000000
--- a/PAN-X.ka/test-00000-of-00001.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:594feed012eee05c517d8a65f8f2e7e7a16944dae03b5c76f12d3a6d1eafe1a8
-size 608427
diff --git a/PAN-X.ka/train-00000-of-00001.parquet b/PAN-X.ka/train-00000-of-00001.parquet
deleted file mode 100644
index 9cecea359c1f73a23f571651d04676d240b58ae9..0000000000000000000000000000000000000000
---
a/PAN-X.ka/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c0afaffb04ee82831d101f5d09d4f775387f68ab5443da71a38166648e59a0cf -size 602124 diff --git a/PAN-X.ka/validation-00000-of-00001.parquet b/PAN-X.ka/validation-00000-of-00001.parquet deleted file mode 100644 index 2ec7052938a51959ea9c6e68f9920e7674e83a60..0000000000000000000000000000000000000000 --- a/PAN-X.ka/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:84c6d8f843d32f61d5f588b068b0ea796ddcbb6e7962a03baf990d5a5bdd12e1 -size 606729 diff --git a/PAN-X.kk/test-00000-of-00001.parquet b/PAN-X.kk/test-00000-of-00001.parquet deleted file mode 100644 index 9d1f97b046574c75432cb560880ceb33a01e2b1f..0000000000000000000000000000000000000000 --- a/PAN-X.kk/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:279b900496aa448d09e420d8b2133f0186f0708c5ffc3568421cb1847f50845a -size 52443 diff --git a/PAN-X.kk/train-00000-of-00001.parquet b/PAN-X.kk/train-00000-of-00001.parquet deleted file mode 100644 index 2bc5a19e9861d066a30d4fa07781831126221307..0000000000000000000000000000000000000000 --- a/PAN-X.kk/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e8bc34bf576c68d16c36f195ce80b17bb9d55248260ee170775577a3da9e0fff -size 53210 diff --git a/PAN-X.kk/validation-00000-of-00001.parquet b/PAN-X.kk/validation-00000-of-00001.parquet deleted file mode 100644 index 534948af893cd3a30cae0147f3a17d8a9ee7ac37..0000000000000000000000000000000000000000 --- a/PAN-X.kk/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7fa297415d81390fac943e39b8d0bdcaf0213af20a8df9bb41f3fd50414c2134 -size 54901 diff --git a/PAN-X.ko/test-00000-of-00001.parquet b/PAN-X.ko/test-00000-of-00001.parquet deleted file mode 100644 index 38f943323759ea0a7762cb6e25bf658fb501a3d8..0000000000000000000000000000000000000000 --- a/PAN-X.ko/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f9dd521abfc761d559d6ce87d9accfc98eeaef065b861c2076e39ac453a4ab6a -size 636536 diff --git a/PAN-X.ko/train-00000-of-00001.parquet b/PAN-X.ko/train-00000-of-00001.parquet deleted file mode 100644 index aa7c45ae9d21a06c7cd3bb66c63cf9d34f48d2e1..0000000000000000000000000000000000000000 --- a/PAN-X.ko/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:48b58461f58d16df2fd7345e15f5b3f664dac77f2360bbc838a73d21c32d3ece -size 1270409 diff --git a/PAN-X.ko/validation-00000-of-00001.parquet b/PAN-X.ko/validation-00000-of-00001.parquet deleted file mode 100644 index c64ef47c00a97d5ac06336fb63a786592f3f0ba8..0000000000000000000000000000000000000000 --- a/PAN-X.ko/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f9617283bee49ee8203e3894c56f2f4b2c45f30a14c46b1734a00c1e84b6fa97 -size 632646 diff --git a/PAN-X.ml/test-00000-of-00001.parquet b/PAN-X.ml/test-00000-of-00001.parquet deleted file mode 100644 index f90db68cc88ee4025b2197adb43091eb02a73a87..0000000000000000000000000000000000000000 --- a/PAN-X.ml/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:259d33e063c11c632413a87b8cfd1db4a1762d33ddacf7e7dec384629607f194 -size 69944 diff --git 
a/PAN-X.ml/train-00000-of-00001.parquet b/PAN-X.ml/train-00000-of-00001.parquet deleted file mode 100644 index 4e33f0e6729f03ba56641345a999aea2061db0ab..0000000000000000000000000000000000000000 --- a/PAN-X.ml/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ec2284764082c8a92a0e1e1c512f503c68ec77c65f2ba4ae3bcc3dada6963ffe -size 708592 diff --git a/PAN-X.ml/validation-00000-of-00001.parquet b/PAN-X.ml/validation-00000-of-00001.parquet deleted file mode 100644 index 1f77132012fbd2603816e0329d90e617067d2316..0000000000000000000000000000000000000000 --- a/PAN-X.ml/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:791b3c7ee1309211a25c7b20c420d413a5651c98dcec574d3ee8056a095d4f07 -size 74419 diff --git a/PAN-X.mr/test-00000-of-00001.parquet b/PAN-X.mr/test-00000-of-00001.parquet deleted file mode 100644 index dd0dff326ba2937b13ba55644a094d1286503a65..0000000000000000000000000000000000000000 --- a/PAN-X.mr/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d08721e34894c38df817147b4e042b28f2fa7bc3b695ed59071efcfb8b5aec86 -size 50653 diff --git a/PAN-X.mr/train-00000-of-00001.parquet b/PAN-X.mr/train-00000-of-00001.parquet deleted file mode 100644 index 5e650130d85b40fc23a4f81d53d2fc11a1726c71..0000000000000000000000000000000000000000 --- a/PAN-X.mr/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2e2ee262a97c24c5d2979ef7a929606e2a2c88e5f8798f3ec432ac9e003b8448 -size 247039 diff --git a/PAN-X.mr/validation-00000-of-00001.parquet b/PAN-X.mr/validation-00000-of-00001.parquet deleted file mode 100644 index 09f65be3e0be3b90e7f01db879acaa96aeb15b37..0000000000000000000000000000000000000000 --- a/PAN-X.mr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ed72dec51103bfe49125e8663160ff4b508218b50277156280d6033e09f3e22a -size 49523 diff --git a/PAN-X.ms/test-00000-of-00001.parquet b/PAN-X.ms/test-00000-of-00001.parquet deleted file mode 100644 index 8602bf891a0c2aca73ab2823254601227ed35b9d..0000000000000000000000000000000000000000 --- a/PAN-X.ms/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:04b2e793b1adc937096e6588ad73d302cf64caf54dc827d19cab389732fc4b02 -size 33926 diff --git a/PAN-X.ms/train-00000-of-00001.parquet b/PAN-X.ms/train-00000-of-00001.parquet deleted file mode 100644 index a44491eb7162d541bf6f29ec918c3da8a6c176e3..0000000000000000000000000000000000000000 --- a/PAN-X.ms/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8e17be141d8b48927cc3d30cbe10aa2c460b224f23192bb3731417f71969b75d -size 641173 diff --git a/PAN-X.ms/validation-00000-of-00001.parquet b/PAN-X.ms/validation-00000-of-00001.parquet deleted file mode 100644 index 86067c9a8dd31cef47184ff55c0ca5671f2edb6f..0000000000000000000000000000000000000000 --- a/PAN-X.ms/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:00f4e18e809d05029f4ed66caaf58bc30fa6b3fc561d4a84e1245e2946ae7590 -size 33696 diff --git a/PAN-X.my/test-00000-of-00001.parquet b/PAN-X.my/test-00000-of-00001.parquet deleted file mode 100644 index 9c9ed5277cde1c2f6c09011fdbdd308ccca92af4..0000000000000000000000000000000000000000 --- 
a/PAN-X.my/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a48dec37884204563b60c56ddf6bce07f836e676608ddc4daba90aa3bb12e96e -size 13462 diff --git a/PAN-X.my/train-00000-of-00001.parquet b/PAN-X.my/train-00000-of-00001.parquet deleted file mode 100644 index 06d8d2c85c49ad2a1209d5de38ec2ddd010443eb..0000000000000000000000000000000000000000 --- a/PAN-X.my/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9b94f4c75248c457ba4300641924b5f77d59c472dd0447acca0977b6f6dab386 -size 11434 diff --git a/PAN-X.my/validation-00000-of-00001.parquet b/PAN-X.my/validation-00000-of-00001.parquet deleted file mode 100644 index 950f88eaca3cd88bedbac02835e866e7567c2552..0000000000000000000000000000000000000000 --- a/PAN-X.my/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f7deed3441eba73b5db9bb443434b273dce7d52676f53c715364ad3a8e24298e -size 14112 diff --git a/PAN-X.nl/test-00000-of-00001.parquet b/PAN-X.nl/test-00000-of-00001.parquet deleted file mode 100644 index 40f38512f89bf22c5b606db50e9d0ba5933cbaa7..0000000000000000000000000000000000000000 --- a/PAN-X.nl/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a3ebd7bcd4534e6fc4950c67f9dc5601cd0e37d9200be03a2609f59ee5a5582d -size 486049 diff --git a/PAN-X.nl/train-00000-of-00001.parquet b/PAN-X.nl/train-00000-of-00001.parquet deleted file mode 100644 index 8b42d2ebfc85945b49df061da7b58e1d15d47bb1..0000000000000000000000000000000000000000 --- a/PAN-X.nl/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e8cc84ae6c9c68120da41b5705c2e647457e92c05ecb168dab122b465f8b45a2 -size 975292 diff --git a/PAN-X.nl/validation-00000-of-00001.parquet b/PAN-X.nl/validation-00000-of-00001.parquet deleted file mode 100644 index fc3a61e09c88773dafe0205d7f28a81ed3b9a2c0..0000000000000000000000000000000000000000 --- a/PAN-X.nl/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e3ff48b3914426d3fd8f4b133806eb65887a07f49300f713015ce1adcb96b44 -size 482552 diff --git a/PAN-X.pt/test-00000-of-00001.parquet b/PAN-X.pt/test-00000-of-00001.parquet deleted file mode 100644 index a30bbe55575f5c618644a0aa951cb868a51e0209..0000000000000000000000000000000000000000 --- a/PAN-X.pt/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:905cede984c3036fe5bfe49933fd69fcdafe9dbaa6210ba1683a520ca867cfe2 -size 382688 diff --git a/PAN-X.pt/train-00000-of-00001.parquet b/PAN-X.pt/train-00000-of-00001.parquet deleted file mode 100644 index ac23d36e32c9d2183a25a0f0c5468c8d66f77ec9..0000000000000000000000000000000000000000 --- a/PAN-X.pt/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d59b03998b327ed5c45e9bd122f50bfaf2d1a307592e0686327d0a435d9bb09b -size 769819 diff --git a/PAN-X.pt/validation-00000-of-00001.parquet b/PAN-X.pt/validation-00000-of-00001.parquet deleted file mode 100644 index 68f6581e8a08573877f1b36869b483b08c2920b0..0000000000000000000000000000000000000000 --- a/PAN-X.pt/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:62d7816b5a8db98829d5363457ca3d4970a00f9a7fa21bbe2975c8554d72d263 -size 387971 diff --git 
a/PAN-X.ru/test-00000-of-00001.parquet b/PAN-X.ru/test-00000-of-00001.parquet deleted file mode 100644 index 0072320cddc1c2b1f50e205f938513700b59306a..0000000000000000000000000000000000000000 --- a/PAN-X.ru/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c593110d67595af7c0f3947ce256a872528b68eff5a3498000825a469b9ca23c -size 535832 diff --git a/PAN-X.ru/train-00000-of-00001.parquet b/PAN-X.ru/train-00000-of-00001.parquet deleted file mode 100644 index 89836c125848cf5f2f2b8bf2bb093002f5b23940..0000000000000000000000000000000000000000 --- a/PAN-X.ru/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3b0785d0c50b23ef6f0f196acf560f2fa494d4be9f077ab159c93dd74afeeff8 -size 1061852 diff --git a/PAN-X.ru/validation-00000-of-00001.parquet b/PAN-X.ru/validation-00000-of-00001.parquet deleted file mode 100644 index 40e72ef2be583592dff318af8a929c4bb1fb8898..0000000000000000000000000000000000000000 --- a/PAN-X.ru/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:22d64120725479bb83dd823af60cd76b04f3105682680f96279a07df49833dee -size 530046 diff --git a/PAN-X.sw/test-00000-of-00001.parquet b/PAN-X.sw/test-00000-of-00001.parquet deleted file mode 100644 index ffa0af605209b15634185fc9722188bf7a3994c8..0000000000000000000000000000000000000000 --- a/PAN-X.sw/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4d065ef202c4829c5461f0ec5b578f2888cd5bf892198376bd5b46e0e83c3f17 -size 29517 diff --git a/PAN-X.sw/train-00000-of-00001.parquet b/PAN-X.sw/train-00000-of-00001.parquet deleted file mode 100644 index 25ab8e90770f3a913f842616043d911413423228..0000000000000000000000000000000000000000 --- a/PAN-X.sw/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:58abff200f56080295dd5f4dbb020289ff3c89c4ff1df3bc35fc4e239219079d -size 28918 diff --git a/PAN-X.sw/validation-00000-of-00001.parquet b/PAN-X.sw/validation-00000-of-00001.parquet deleted file mode 100644 index a55bd0d24b8930c77e9fdba8dbfefe334e2efd01..0000000000000000000000000000000000000000 --- a/PAN-X.sw/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:23fed1523465f79535766f8151751f1abcfc295463202dc79e3ddb6acb9004ff -size 29000 diff --git a/PAN-X.ta/test-00000-of-00001.parquet b/PAN-X.ta/test-00000-of-00001.parquet deleted file mode 100644 index b4488a42d0ad2b4d513490b07206acaf684cf537..0000000000000000000000000000000000000000 --- a/PAN-X.ta/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5dd244c1439e040efed1169bccbd05f809c5984aac90c6526ab317992481d070 -size 62691 diff --git a/PAN-X.ta/train-00000-of-00001.parquet b/PAN-X.ta/train-00000-of-00001.parquet deleted file mode 100644 index fba3a6c55a09207de1c98916700aed65303c2fed..0000000000000000000000000000000000000000 --- a/PAN-X.ta/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fb678fafdea9e2d7d7228cc967302481bb8ac602be15f8f4147dfdf831afe2e5 -size 918747 diff --git a/PAN-X.ta/validation-00000-of-00001.parquet b/PAN-X.ta/validation-00000-of-00001.parquet deleted file mode 100644 index f611d4b90f117431e24524b56cbc2f6687f23672..0000000000000000000000000000000000000000 --- 
a/PAN-X.ta/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:97fd64f7ed2da66bac341d3da2e0880f8a9647664be3a9e2c75ab94a2a52e6c5 -size 63291 diff --git a/PAN-X.te/test-00000-of-00001.parquet b/PAN-X.te/test-00000-of-00001.parquet deleted file mode 100644 index 63140dba403834fa0054daf559e223a5f52bc391..0000000000000000000000000000000000000000 --- a/PAN-X.te/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:84169ce9e0f2eed2ce24e347f4d1c4c71bc09bb5d9ab17e18a3c997855f83de7 -size 67021 diff --git a/PAN-X.te/train-00000-of-00001.parquet b/PAN-X.te/train-00000-of-00001.parquet deleted file mode 100644 index 26176cf8d5ff5e2b39784ed082e57b58e28a33a6..0000000000000000000000000000000000000000 --- a/PAN-X.te/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9b103b972e36986977ff5977e5f3e466cf753ec3f9f5554d6bd086a12869b019 -size 67686 diff --git a/PAN-X.te/validation-00000-of-00001.parquet b/PAN-X.te/validation-00000-of-00001.parquet deleted file mode 100644 index ba9f1d35d1c2b9fe431775d76cb7f7c7e674dc8c..0000000000000000000000000000000000000000 --- a/PAN-X.te/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d80c4cc536a9b46104a5500a74abef6451cd5e12f9318134acc617e2b1fc28bd -size 65809 diff --git a/PAN-X.th/test-00000-of-00001.parquet b/PAN-X.th/test-00000-of-00001.parquet deleted file mode 100644 index 436846521b14317aa23eab66d18a89ec41e5ff46..0000000000000000000000000000000000000000 --- a/PAN-X.th/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:622658fc9ee88e5600d4a8a1ef87ce8230ce004dbcc14852720bc97fe2612b1b -size 645355 diff --git a/PAN-X.th/train-00000-of-00001.parquet b/PAN-X.th/train-00000-of-00001.parquet deleted file mode 100644 index aa60066e3937cc7f9cc105bdd996ea96a382aff9..0000000000000000000000000000000000000000 --- a/PAN-X.th/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4865ff5bafae072fe3566857a969c1b577fe78025a86d506a7c0199a2aa9d880 -size 1290075 diff --git a/PAN-X.th/validation-00000-of-00001.parquet b/PAN-X.th/validation-00000-of-00001.parquet deleted file mode 100644 index faea09a9227ced4108396e1f41901ef631a7fab9..0000000000000000000000000000000000000000 --- a/PAN-X.th/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a38cf374ff6eedbca262e6c8d07c29016e366425aa1a1c21bd51794e239d36c8 -size 634136 diff --git a/PAN-X.tl/test-00000-of-00001.parquet b/PAN-X.tl/test-00000-of-00001.parquet deleted file mode 100644 index 9cfda652550f67054bebd87919bc15018900972a..0000000000000000000000000000000000000000 --- a/PAN-X.tl/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8f6ac1f1443180dad610a78e6ed02a753486ec4e789477ac7621af4faa6df9ec -size 27355 diff --git a/PAN-X.tl/train-00000-of-00001.parquet b/PAN-X.tl/train-00000-of-00001.parquet deleted file mode 100644 index 991c3f77f747fd7ca9ec9f45c4b8502a9e9976c5..0000000000000000000000000000000000000000 --- a/PAN-X.tl/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5863413eebdd3cd0a952c46346385ff52734e7ac7dcb85041a5d05ff5a9edb35 -size 254325 diff --git 
a/PAN-X.tl/validation-00000-of-00001.parquet b/PAN-X.tl/validation-00000-of-00001.parquet deleted file mode 100644 index c77ec817cb3f07852e90fef5c0a53aed594350f7..0000000000000000000000000000000000000000 --- a/PAN-X.tl/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f09a5fd5c53178087dabd55b0d2b208ce2635128eaffc793562d1345f014fc55 -size 26480 diff --git a/PAN-X.tr/test-00000-of-00001.parquet b/PAN-X.tr/test-00000-of-00001.parquet deleted file mode 100644 index 6191ddc185d03e57f51a75d7ed72c2a1534ed8b3..0000000000000000000000000000000000000000 --- a/PAN-X.tr/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e3c6240cac600f464b504a4814f69a83a3976c683a8ac9161e77a60c2bd1e185 -size 504216 diff --git a/PAN-X.tr/train-00000-of-00001.parquet b/PAN-X.tr/train-00000-of-00001.parquet deleted file mode 100644 index 6c89d4a99ce3f9cff73fb4e248f0227420405787..0000000000000000000000000000000000000000 --- a/PAN-X.tr/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7cd8b827c92b661bfb10dc94f2222471cfe32815867659638f25bc9bc011c867 -size 993853 diff --git a/PAN-X.tr/validation-00000-of-00001.parquet b/PAN-X.tr/validation-00000-of-00001.parquet deleted file mode 100644 index fbef5764f63ecf262a68ff187aefa7d94aaf1eb0..0000000000000000000000000000000000000000 --- a/PAN-X.tr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5ab3be9ea345f26781b0d92312fee33317ee1605f6f4894288fb407e1a1613ef -size 502630 diff --git a/PAN-X.ur/test-00000-of-00001.parquet b/PAN-X.ur/test-00000-of-00001.parquet deleted file mode 100644 index 98745a08691e2ffcfa2e044b767d7a7fb3c04ccc..0000000000000000000000000000000000000000 --- a/PAN-X.ur/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7bac78c43c8834d6190f5043645e5baa14b848f1e10fcf40a85245bb9b50d703 -size 28961 diff --git a/PAN-X.ur/train-00000-of-00001.parquet b/PAN-X.ur/train-00000-of-00001.parquet deleted file mode 100644 index f36075ee539dd32b74867d1a25acda1af93ec61a..0000000000000000000000000000000000000000 --- a/PAN-X.ur/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f903afb92a9d74a9dd8afedd790514ab0ec6820735ad0727372ccb5298986736 -size 553373 diff --git a/PAN-X.ur/validation-00000-of-00001.parquet b/PAN-X.ur/validation-00000-of-00001.parquet deleted file mode 100644 index 31858584c30f70ea424711a4f993908ebbbb7f90..0000000000000000000000000000000000000000 --- a/PAN-X.ur/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b3cba41634fc86314d7697a1179638f4e7a93715ca7f930e43912ba5397a0cc9 -size 28535 diff --git a/PAN-X.vi/test-00000-of-00001.parquet b/PAN-X.vi/test-00000-of-00001.parquet deleted file mode 100644 index 26a3520f779c9679627fe9f0c2a52bacccd417d4..0000000000000000000000000000000000000000 --- a/PAN-X.vi/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:68af95eb4db22410b215097125cf9c1e1cac3ff30591806bbe9e01a2fe393d24 -size 345420 diff --git a/PAN-X.vi/train-00000-of-00001.parquet b/PAN-X.vi/train-00000-of-00001.parquet deleted file mode 100644 index e8a649a323fdc5b0aeda12dfa956e00619553e7a..0000000000000000000000000000000000000000 --- 
a/PAN-X.vi/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0566d39a4fd514ebfd71265c77f3545db9c55b930a844ad643b2e41f6fc43691 -size 688732 diff --git a/PAN-X.vi/validation-00000-of-00001.parquet b/PAN-X.vi/validation-00000-of-00001.parquet deleted file mode 100644 index fb793ff17601b91633a57a3b001bb6c4c517fff1..0000000000000000000000000000000000000000 --- a/PAN-X.vi/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:74587951bb2f85ff6d139b1091c776fc66968815a8611cac07ce8f53dde4211e -size 341479 diff --git a/PAN-X.yo/test-00000-of-00001.parquet b/PAN-X.yo/test-00000-of-00001.parquet deleted file mode 100644 index 4ca4c37a555dfaaf98ae3bb60d306333c0a90d42..0000000000000000000000000000000000000000 --- a/PAN-X.yo/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d95dc1d285bd32256929cd18deae24d8c54a3c939a3815f0a8c53ad8d7cc2f31 -size 5554 diff --git a/PAN-X.yo/train-00000-of-00001.parquet b/PAN-X.yo/train-00000-of-00001.parquet deleted file mode 100644 index 7b927f373cc8cd398507e8d34655c3cdc8c029ad..0000000000000000000000000000000000000000 --- a/PAN-X.yo/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7d5c2baf0ff204c2961f0c0e79f08d7e5518e58cf64f5880d7a4a6e802f4e66a -size 6080 diff --git a/PAN-X.yo/validation-00000-of-00001.parquet b/PAN-X.yo/validation-00000-of-00001.parquet deleted file mode 100644 index e173a63d5b6f6f7d6f0ed53f10173baed4a0f586..0000000000000000000000000000000000000000 --- a/PAN-X.yo/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:737454039717eb386207bc76ce5842be74593a9cd6189f323b01d25b18b17fe8 -size 5703 diff --git a/PAN-X.zh/test-00000-of-00001.parquet b/PAN-X.zh/test-00000-of-00001.parquet deleted file mode 100644 index d2e8cd19334f915e7de88a9270c990df771ee114..0000000000000000000000000000000000000000 --- a/PAN-X.zh/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a3e76a6d5128c8b35b7aa51f5f1f1f59aaaf387fbf4d5003f37dad531ad1d86c -size 511388 diff --git a/PAN-X.zh/train-00000-of-00001.parquet b/PAN-X.zh/train-00000-of-00001.parquet deleted file mode 100644 index 33efd7bb2a411e64e99eb80181c7efcaaf81f512..0000000000000000000000000000000000000000 --- a/PAN-X.zh/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:73e3a3663252528d736ee11576e254b2080afe68fbaaf1381b36f099b3cbcc38 -size 1043632 diff --git a/PAN-X.zh/validation-00000-of-00001.parquet b/PAN-X.zh/validation-00000-of-00001.parquet deleted file mode 100644 index d51da4f67ec1f478d745e88a39b6aac133a12183..0000000000000000000000000000000000000000 --- a/PAN-X.zh/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a73aaf2348d6057d0a1cf1f813958cc3918e8e1936b6494019de2228558865b5 -size 528178 diff --git a/PAWS-X.de/test-00000-of-00001.parquet b/PAWS-X.de/test-00000-of-00001.parquet deleted file mode 100644 index ba3b835eae26ce7b443c1bd3c71b916e41a2552f..0000000000000000000000000000000000000000 --- a/PAWS-X.de/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1281fe7eb19eed79600be27b4e5f0488aa97e9bf985feb70ac618782a3359758 -size 340531 diff --git 
a/PAWS-X.de/train-00000-of-00001.parquet b/PAWS-X.de/train-00000-of-00001.parquet deleted file mode 100644 index 3da3ebc5a162d4745a39803298bb18cb7f5b9efe..0000000000000000000000000000000000000000 --- a/PAWS-X.de/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:73661141dcdcbc814f720705bdcbad3cc17330018ed8b7d44efdd8a4f6805148 -size 8620466 diff --git a/PAWS-X.de/validation-00000-of-00001.parquet b/PAWS-X.de/validation-00000-of-00001.parquet deleted file mode 100644 index 709a4b5e2dd977fa1e36e198267fec6f57c36d50..0000000000000000000000000000000000000000 --- a/PAWS-X.de/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3b22380d35fd81fcdc284630e76d9d356109a1806c4772ee063d60f8a7d20f0e -size 333037 diff --git a/PAWS-X.en/test-00000-of-00001.parquet b/PAWS-X.en/test-00000-of-00001.parquet deleted file mode 100644 index a6b05b8ee427daf543eee0a70d4f541b204e2997..0000000000000000000000000000000000000000 --- a/PAWS-X.en/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a41ee8c9fc135d5992b8013246be8a4e991ed82b4a71fac8e9d4c6c8aadd10fa -size 298432 diff --git a/PAWS-X.en/train-00000-of-00001.parquet b/PAWS-X.en/train-00000-of-00001.parquet deleted file mode 100644 index 6b003ebf6464d4f306b1d3a430b05a9b2241ce52..0000000000000000000000000000000000000000 --- a/PAWS-X.en/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f980c95f0a07d1c9cde39a50f15e140af29d100e096b21d2b661ebcd72765e33 -size 8123610 diff --git a/PAWS-X.en/validation-00000-of-00001.parquet b/PAWS-X.en/validation-00000-of-00001.parquet deleted file mode 100644 index f48673dfff21af575abb365a9c9dd37063213a41..0000000000000000000000000000000000000000 --- a/PAWS-X.en/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:266872eb7f7de8e71e72828fee40ca45bfad19bc5c46c2c8fd7cad45a51b24a1 -size 295597 diff --git a/PAWS-X.es/test-00000-of-00001.parquet b/PAWS-X.es/test-00000-of-00001.parquet deleted file mode 100644 index e95c8d766c33d4fcd2b5a0a8f494fd163d6f2974..0000000000000000000000000000000000000000 --- a/PAWS-X.es/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0f3d3900a6ac902f1c7a89539d2163c28587039ec6fcfc392ab67a93d5038968 -size 332391 diff --git a/PAWS-X.es/train-00000-of-00001.parquet b/PAWS-X.es/train-00000-of-00001.parquet deleted file mode 100644 index 5ac2d3661e2545a5ce183d10b0c19d660254e560..0000000000000000000000000000000000000000 --- a/PAWS-X.es/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e2638ce5439f1678fd3346f232c2d409dde639ee4df60ce7b65445c9612dfa89 -size 8571632 diff --git a/PAWS-X.es/validation-00000-of-00001.parquet b/PAWS-X.es/validation-00000-of-00001.parquet deleted file mode 100644 index 303bcd4cffe7f04355b2f3f4976b71415bc62fbd..0000000000000000000000000000000000000000 --- a/PAWS-X.es/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6c0da3171e9d377580926e510e114e0a16d856383b077c44ff2864e4c374516f -size 325895 diff --git a/PAWS-X.fr/test-00000-of-00001.parquet b/PAWS-X.fr/test-00000-of-00001.parquet deleted file mode 100644 index 
1a63f524c8407790f61f96e2176510c116037aa1..0000000000000000000000000000000000000000 --- a/PAWS-X.fr/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3f8a8622814b61dfbdc92c20ffdad145f6dee3569edae9712fc77394d65c52f0 -size 340002 diff --git a/PAWS-X.fr/train-00000-of-00001.parquet b/PAWS-X.fr/train-00000-of-00001.parquet deleted file mode 100644 index 54327ceef058ab62e785da487714e62426e17e2f..0000000000000000000000000000000000000000 --- a/PAWS-X.fr/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:28d32bbfdb9193a55d57bb75973a2027b95e274574fdbb7dff5da5b5a7b9fa5e -size 8792160 diff --git a/PAWS-X.fr/validation-00000-of-00001.parquet b/PAWS-X.fr/validation-00000-of-00001.parquet deleted file mode 100644 index bda3646ab4889300d60d50c456812cad3e1d06e6..0000000000000000000000000000000000000000 --- a/PAWS-X.fr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f5d16611da294afd4e721cdcad66b787ba8450a10175f2dad0b32f44ca1328a7 -size 332825 diff --git a/PAWS-X.ja/test-00000-of-00001.parquet b/PAWS-X.ja/test-00000-of-00001.parquet deleted file mode 100644 index 22969fd1f6be55ca38a8e7f358c8ca0c69d84ea4..0000000000000000000000000000000000000000 --- a/PAWS-X.ja/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b601c0fb4afe7ca640e34e900ca73895b7efc5286ab53e30dd867346b1b1f3ac -size 398402 diff --git a/PAWS-X.ja/train-00000-of-00001.parquet b/PAWS-X.ja/train-00000-of-00001.parquet deleted file mode 100644 index f285c9c05fba51c8a1e3a3a9d0b7d8e2e30b2ab1..0000000000000000000000000000000000000000 --- a/PAWS-X.ja/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:14ca84f46c10d1af1a1a582a40187f7e928e6c23650c2f47029c18e4f4006141 -size 9347911 diff --git a/PAWS-X.ja/validation-00000-of-00001.parquet b/PAWS-X.ja/validation-00000-of-00001.parquet deleted file mode 100644 index 33937219c73f9b66c94adf843713306507f61192..0000000000000000000000000000000000000000 --- a/PAWS-X.ja/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5de0cb32848a7a55967dab2fa5404ddf7cfb683c8d8dc832ac2575f94b9bde25 -size 389915 diff --git a/PAWS-X.ko/test-00000-of-00001.parquet b/PAWS-X.ko/test-00000-of-00001.parquet deleted file mode 100644 index fefde7f02639f13eb7777dce16224b3718488a30..0000000000000000000000000000000000000000 --- a/PAWS-X.ko/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c5bba07c4d50eeabd3fdecf8db9002eb6172172a72f919a844931a505045fe52 -size 360976 diff --git a/PAWS-X.ko/train-00000-of-00001.parquet b/PAWS-X.ko/train-00000-of-00001.parquet deleted file mode 100644 index b23400b4ddcae30eac40d4d231ea2f522fc39edc..0000000000000000000000000000000000000000 --- a/PAWS-X.ko/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b787972b9337021fb5d69e59f9da1a422b9aae9f099771e59f4bf169a69bc65c -size 9214430 diff --git a/PAWS-X.ko/validation-00000-of-00001.parquet b/PAWS-X.ko/validation-00000-of-00001.parquet deleted file mode 100644 index 9f5b78241ddb372ccae91325d3a904a96623b468..0000000000000000000000000000000000000000 --- a/PAWS-X.ko/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:e428532ec9ba50c7fb7242ad8ff4c8a939d2301474eefdabe2fc9fb9cd561298 -size 350886 diff --git a/PAWS-X.zh/test-00000-of-00001.parquet b/PAWS-X.zh/test-00000-of-00001.parquet deleted file mode 100644 index 918f3d65423842f76cbb365b57e98c6b816b89d6..0000000000000000000000000000000000000000 --- a/PAWS-X.zh/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:af1faf15fe231fc45425c438093363ebf43fe7aa7c34534ad6f0de0a450cdb30 -size 333443 diff --git a/PAWS-X.zh/train-00000-of-00001.parquet b/PAWS-X.zh/train-00000-of-00001.parquet deleted file mode 100644 index f2a6e87f1745b5ffbcbfa77404793f4a7b036ec2..0000000000000000000000000000000000000000 --- a/PAWS-X.zh/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:86b196cc05639253fc40910c8d119591ade329322fdb1297dfa72e4f0f6411be -size 8209895 diff --git a/PAWS-X.zh/validation-00000-of-00001.parquet b/PAWS-X.zh/validation-00000-of-00001.parquet deleted file mode 100644 index 909815592061b760ce4c5b4323abf5489c6f8012..0000000000000000000000000000000000000000 --- a/PAWS-X.zh/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bc44df9e5d3de6117886fbd4afa85367397feb35e67c4dc1617a585f214f0aef -size 335517 diff --git a/README.md b/README.md index 057fdf933593a88853f94ca405ad963c3d965fba..5d5525c905fcc019289a45f08ef08bf4cf79103b 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ task_ids: - part-of-speech paperswithcode_id: xtreme pretty_name: XTREME -config_names: +configs: - MLQA.ar.ar - MLQA.ar.de - MLQA.ar.en @@ -265,32 +265,26 @@ tags: - parallel-sentence-retrieval - paraphrase-identification dataset_info: -- config_name: MLQA.ar.ar +- config_name: XNLI features: - - name: id + - name: language dtype: string - - name: title + - name: sentence1 dtype: string - - name: context + - name: sentence2 dtype: string - - name: question + - name: gold_label dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string splits: - name: test - num_bytes: 8368086 - num_examples: 5335 + num_bytes: 20359500 + num_examples: 75150 - name: validation - num_bytes: 824080 - num_examples: 517 - download_size: 4048180 - dataset_size: 9192166 -- config_name: MLQA.ar.de + num_bytes: 10049303 + num_examples: 37350 + download_size: 17865352 + dataset_size: 30408803 +- config_name: tydiqa features: - name: id dtype: string @@ -307,15 +301,15 @@ dataset_info: - name: text dtype: string splits: - - name: test - num_bytes: 2183914 - num_examples: 1649 + - name: train + num_bytes: 52948607 + num_examples: 49881 - name: validation - num_bytes: 364809 - num_examples: 207 - download_size: 1192825 - dataset_size: 2548723 -- config_name: MLQA.ar.en + num_bytes: 5006461 + num_examples: 5077 + download_size: 63621485 + dataset_size: 57955068 +- config_name: SQuAD features: - name: id dtype: string @@ -332,1165 +326,943 @@ dataset_info: - name: text dtype: string splits: - - name: test - num_bytes: 8225634 - num_examples: 5335 + - name: train + num_bytes: 79317110 + num_examples: 87599 - name: validation - num_bytes: 810061 - num_examples: 517 - download_size: 3998008 - dataset_size: 9035695 -- config_name: MLQA.ar.es + num_bytes: 10472653 + num_examples: 10570 + download_size: 35142551 + dataset_size: 89789763 +- config_name: PAN-X.af features: - - name: id - dtype: string - - name: title - dtype: string - - 
name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3041350 - num_examples: 1978 - name: validation - num_bytes: 228152 - num_examples: 161 - download_size: 1531661 - dataset_size: 3269502 -- config_name: MLQA.ar.hi + num_bytes: 259709 + num_examples: 1000 + - name: test + num_bytes: 257204 + num_examples: 1000 + - name: train + num_bytes: 1321396 + num_examples: 5000 + download_size: 234008884 + dataset_size: 1838309 +- config_name: PAN-X.ar features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3039368 - num_examples: 1831 - name: validation - num_bytes: 281742 - num_examples: 186 - download_size: 1369756 - dataset_size: 3321110 -- config_name: MLQA.ar.vi + num_bytes: 1808303 + num_examples: 10000 + - name: test + num_bytes: 1811983 + num_examples: 10000 + - name: train + num_bytes: 3634136 + num_examples: 20000 + download_size: 234008884 + dataset_size: 7254422 +- config_name: PAN-X.bg features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3290601 - num_examples: 2047 - name: validation - num_bytes: 288418 - num_examples: 163 - download_size: 1667238 - dataset_size: 3579019 -- config_name: MLQA.ar.zh + num_bytes: 2310314 + num_examples: 10000 + - name: test + num_bytes: 2306158 + num_examples: 10000 + - name: train + num_bytes: 4600773 + num_examples: 20000 + download_size: 234008884 + dataset_size: 9217245 +- config_name: PAN-X.bn features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3229844 - num_examples: 1912 - name: validation - num_bytes: 340021 - num_examples: 188 - download_size: 1591445 - dataset_size: 3569865 -- config_name: MLQA.de.ar + num_bytes: 159088 + num_examples: 1000 + - name: test + num_bytes: 159282 + num_examples: 1000 + - name: train + num_bytes: 1568845 + num_examples: 10000 + download_size: 234008884 + dataset_size: 1887215 +- config_name: PAN-X.de features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - 
- name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1619978 - num_examples: 1649 - name: validation - num_bytes: 200146 - num_examples: 207 - download_size: 1044483 - dataset_size: 1820124 -- config_name: MLQA.de.de + num_bytes: 2381565 + num_examples: 10000 + - name: test + num_bytes: 2377639 + num_examples: 10000 + - name: train + num_bytes: 4762352 + num_examples: 20000 + download_size: 234008884 + dataset_size: 9521556 +- config_name: PAN-X.el features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4366074 - num_examples: 4517 - name: validation - num_bytes: 488339 - num_examples: 512 - download_size: 2798050 - dataset_size: 4854413 -- config_name: MLQA.de.en - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 4343116 - num_examples: 4517 - - name: validation - num_bytes: 485866 - num_examples: 512 - download_size: 2778346 - dataset_size: 4828982 -- config_name: MLQA.de.es - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 1716587 - num_examples: 1776 - - name: validation - num_bytes: 170554 - num_examples: 196 - download_size: 1118751 - dataset_size: 1887141 -- config_name: MLQA.de.hi - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 1371046 - num_examples: 1430 - - name: validation - num_bytes: 153843 - num_examples: 163 - download_size: 880652 - dataset_size: 1524889 -- config_name: MLQA.de.vi - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 1688455 - num_examples: 1675 - - name: validation - num_bytes: 216047 - num_examples: 182 - download_size: 1108163 - dataset_size: 1904502 -- config_name: MLQA.de.zh - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 1679152 - num_examples: 1621 - - name: validation - num_bytes: 184290 - num_examples: 190 - download_size: 1045861 - 
dataset_size: 1863442 -- config_name: MLQA.en.ar - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 6739191 - num_examples: 5335 - - name: validation - num_bytes: 630815 - num_examples: 517 - download_size: 3939135 - dataset_size: 7370006 -- config_name: MLQA.en.de - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 5056694 - num_examples: 4517 - - name: validation - num_bytes: 594908 - num_examples: 512 - download_size: 3223196 - dataset_size: 5651602 -- config_name: MLQA.en.en - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 14004592 - num_examples: 11590 - - name: validation - num_bytes: 1329084 - num_examples: 1148 - download_size: 8217519 - dataset_size: 15333676 -- config_name: MLQA.en.es - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 6179221 - num_examples: 5253 - - name: validation - num_bytes: 555434 - num_examples: 500 - download_size: 3776828 - dataset_size: 6734655 -- config_name: MLQA.en.hi - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 6378838 - num_examples: 4918 - - name: validation - num_bytes: 623143 - num_examples: 507 - download_size: 3517340 - dataset_size: 7001981 -- config_name: MLQA.en.vi - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: test - num_bytes: 7056670 - num_examples: 5495 - - name: validation - num_bytes: 640618 - num_examples: 511 - download_size: 4170642 - dataset_size: 7697288 -- config_name: MLQA.en.zh - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: + num_bytes: 2533806 + num_examples: 10000 - name: test - num_bytes: 6539279 - num_examples: 5137 - - name: validation - num_bytes: 608416 - num_examples: 504 - download_size: 3929122 - dataset_size: 7147695 -- config_name: MLQA.es.ar + num_bytes: 2547594 + num_examples: 10000 + - name: train + num_bytes: 5063176 + num_examples: 20000 + download_size: 234008884 + dataset_size: 10144576 +- config_name: PAN-X.en features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - 
name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1740254 - num_examples: 1978 - name: validation - num_bytes: 148621 - num_examples: 161 - download_size: 1107435 - dataset_size: 1888875 -- config_name: MLQA.es.de - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: + num_bytes: 1920069 + num_examples: 10000 - name: test - num_bytes: 1403997 - num_examples: 1776 - - name: validation - num_bytes: 144158 - num_examples: 196 - download_size: 950448 - dataset_size: 1548155 -- config_name: MLQA.es.en + num_bytes: 1916220 + num_examples: 10000 + - name: train + num_bytes: 3823474 + num_examples: 20000 + download_size: 234008884 + dataset_size: 7659763 +- config_name: PAN-X.es features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4362709 - num_examples: 5253 - name: validation - num_bytes: 419040 - num_examples: 500 - download_size: 2842879 - dataset_size: 4781749 -- config_name: MLQA.es.es - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: + num_bytes: 1592525 + num_examples: 10000 - name: test - num_bytes: 4394305 - num_examples: 5253 - - name: validation - num_bytes: 422043 - num_examples: 500 - download_size: 2856931 - dataset_size: 4816348 -- config_name: MLQA.es.hi + num_bytes: 1602291 + num_examples: 10000 + - name: train + num_bytes: 3199161 + num_examples: 20000 + download_size: 234008884 + dataset_size: 6393977 +- config_name: PAN-X.et features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1523495 - num_examples: 1723 - name: validation - num_bytes: 181806 - num_examples: 187 - download_size: 954018 - dataset_size: 1705301 -- config_name: MLQA.es.vi - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: + num_bytes: 2030160 + num_examples: 10000 - name: test - num_bytes: 1747941 - num_examples: 2018 - - name: validation - num_bytes: 176813 - num_examples: 189 - download_size: 1187949 - dataset_size: 1924754 -- config_name: 
MLQA.es.zh + num_bytes: 2021409 + num_examples: 10000 + - name: train + num_bytes: 3023211 + num_examples: 15000 + download_size: 234008884 + dataset_size: 7074780 +- config_name: PAN-X.eu features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1678423 - num_examples: 1947 - name: validation - num_bytes: 126618 - num_examples: 161 - download_size: 1100765 - dataset_size: 1805041 -- config_name: MLQA.hi.ar + num_bytes: 2296335 + num_examples: 10000 + - name: test + num_bytes: 2249835 + num_examples: 10000 + - name: train + num_bytes: 2292327 + num_examples: 10000 + download_size: 234008884 + dataset_size: 6838497 +- config_name: PAN-X.fa features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4445561 - num_examples: 1831 - name: validation - num_bytes: 410396 - num_examples: 186 - download_size: 1542768 - dataset_size: 4855957 -- config_name: MLQA.hi.de + num_bytes: 1782306 + num_examples: 10000 + - name: test + num_bytes: 1770284 + num_examples: 10000 + - name: train + num_bytes: 3529354 + num_examples: 20000 + download_size: 234008884 + dataset_size: 7081944 +- config_name: PAN-X.fi features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3022836 - num_examples: 1430 - name: validation - num_bytes: 301685 - num_examples: 163 - download_size: 1257846 - dataset_size: 3324521 -- config_name: MLQA.hi.en + num_bytes: 2131769 + num_examples: 10000 + - name: test + num_bytes: 2130665 + num_examples: 10000 + - name: train + num_bytes: 4273793 + num_examples: 20000 + download_size: 234008884 + dataset_size: 8536227 +- config_name: PAN-X.fr features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 11449233 - num_examples: 4918 - name: validation - num_bytes: 1097829 - num_examples: 507 - download_size: 4131083 - dataset_size: 12547062 -- config_name: MLQA.hi.es + num_bytes: 1664190 + num_examples: 10000 + - name: test + num_bytes: 1675785 + num_examples: 10000 + 
- name: train + num_bytes: 3335424 + num_examples: 20000 + download_size: 234008884 + dataset_size: 6675399 +- config_name: PAN-X.he features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3862201 - num_examples: 1723 - name: validation - num_bytes: 420374 - num_examples: 187 - download_size: 1493468 - dataset_size: 4282575 -- config_name: MLQA.hi.hi + num_bytes: 2332760 + num_examples: 10000 + - name: test + num_bytes: 2318756 + num_examples: 10000 + - name: train + num_bytes: 4667100 + num_examples: 20000 + download_size: 234008884 + dataset_size: 9318616 +- config_name: PAN-X.hi features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 11810447 - num_examples: 4918 - name: validation - num_bytes: 1136756 - num_examples: 507 - download_size: 4235981 - dataset_size: 12947203 -- config_name: MLQA.hi.vi + num_bytes: 190671 + num_examples: 1000 + - name: test + num_bytes: 196190 + num_examples: 1000 + - name: train + num_bytes: 964212 + num_examples: 5000 + download_size: 234008884 + dataset_size: 1351073 +- config_name: PAN-X.hu features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4743456 - num_examples: 1947 - name: validation - num_bytes: 419078 - num_examples: 177 - download_size: 1704964 - dataset_size: 5162534 -- config_name: MLQA.hi.zh + num_bytes: 2211851 + num_examples: 10000 + - name: test + num_bytes: 2249779 + num_examples: 10000 + - name: train + num_bytes: 4499914 + num_examples: 20000 + download_size: 234008884 + dataset_size: 8961544 +- config_name: PAN-X.id features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4354847 - num_examples: 1767 - name: validation - num_bytes: 424218 - num_examples: 189 - download_size: 1627107 - dataset_size: 4779065 -- config_name: MLQA.vi.ar + num_bytes: 1537979 + num_examples: 10000 + - name: test + num_bytes: 1536879 + num_examples: 10000 + - name: train + num_bytes: 3084007 + num_examples: 20000 + 
download_size: 234008884 + dataset_size: 6158865 +- config_name: PAN-X.it features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 3205157 - num_examples: 2047 - name: validation - num_bytes: 230307 - num_examples: 163 - download_size: 1656661 - dataset_size: 3435464 -- config_name: MLQA.vi.de + num_bytes: 1908529 + num_examples: 10000 + - name: test + num_bytes: 1928408 + num_examples: 10000 + - name: train + num_bytes: 3874663 + num_examples: 20000 + download_size: 234008884 + dataset_size: 7711600 +- config_name: PAN-X.ja features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 2227005 - num_examples: 1675 - name: validation - num_bytes: 277157 - num_examples: 182 - download_size: 1268041 - dataset_size: 2504162 -- config_name: MLQA.vi.en + num_bytes: 6323003 + num_examples: 10000 + - name: test + num_bytes: 6448960 + num_examples: 10000 + - name: train + num_bytes: 12670401 + num_examples: 20000 + download_size: 234008884 + dataset_size: 25442364 +- config_name: PAN-X.jv features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 7843403 - num_examples: 5495 - name: validation - num_bytes: 719245 - num_examples: 511 - download_size: 4071703 - dataset_size: 8562648 -- config_name: MLQA.vi.es + num_bytes: 14600 + num_examples: 100 + - name: test + num_bytes: 16917 + num_examples: 100 + - name: train + num_bytes: 16106 + num_examples: 100 + download_size: 234008884 + dataset_size: 47623 +- config_name: PAN-X.ka features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 2866569 - num_examples: 2018 - name: validation - num_bytes: 283433 - num_examples: 189 - download_size: 1607926 - dataset_size: 3150002 -- config_name: MLQA.vi.hi + num_bytes: 2806901 + num_examples: 10000 + - name: test + num_bytes: 2824641 + num_examples: 10000 + - name: train + num_bytes: 2777362 + num_examples: 10000 + download_size: 234008884 + dataset_size: 8408904 +- config_name: 
PAN-X.kk features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 2776636 - num_examples: 1947 - name: validation - num_bytes: 254979 - num_examples: 177 - download_size: 1366057 - dataset_size: 3031615 -- config_name: MLQA.vi.vi + num_bytes: 238109 + num_examples: 1000 + - name: test + num_bytes: 236724 + num_examples: 1000 + - name: train + num_bytes: 240276 + num_examples: 1000 + download_size: 234008884 + dataset_size: 715109 +- config_name: PAN-X.ko features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 7922057 - num_examples: 5495 - name: validation - num_bytes: 726490 - num_examples: 511 - download_size: 4105388 - dataset_size: 8648547 -- config_name: MLQA.vi.zh + num_bytes: 2138167 + num_examples: 10000 + - name: test + num_bytes: 2138294 + num_examples: 10000 + - name: train + num_bytes: 4284733 + num_examples: 20000 + download_size: 234008884 + dataset_size: 8561194 +- config_name: PAN-X.ml features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 2989632 - num_examples: 1943 - name: validation - num_bytes: 269361 - num_examples: 184 - download_size: 1570393 - dataset_size: 3258993 -- config_name: MLQA.zh.ar + num_bytes: 290755 + num_examples: 1000 + - name: test + num_bytes: 276926 + num_examples: 1000 + - name: train + num_bytes: 2865204 + num_examples: 10000 + download_size: 234008884 + dataset_size: 3432885 +- config_name: PAN-X.mr features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1731455 - num_examples: 1912 - name: validation - num_bytes: 175321 - num_examples: 188 - download_size: 1223863 - dataset_size: 1906776 -- config_name: MLQA.zh.de + num_bytes: 245358 + num_examples: 1000 + - name: test + num_bytes: 255904 + num_examples: 1000 + - name: train + num_bytes: 1248259 + num_examples: 5000 + download_size: 234008884 + dataset_size: 1749521 +- config_name: PAN-X.ms features: - - name: id - dtype: string - - name: title - dtype: 
string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1389990 - num_examples: 1621 - name: validation - num_bytes: 174577 - num_examples: 190 - download_size: 1006829 - dataset_size: 1564567 -- config_name: MLQA.zh.en + num_bytes: 147515 + num_examples: 1000 + - name: test + num_bytes: 147168 + num_examples: 1000 + - name: train + num_bytes: 2965048 + num_examples: 20000 + download_size: 234008884 + dataset_size: 3259731 +- config_name: PAN-X.my features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4450957 - num_examples: 5137 - name: validation - num_bytes: 446840 - num_examples: 504 - download_size: 3108433 - dataset_size: 4897797 -- config_name: MLQA.zh.es + num_bytes: 40428 + num_examples: 100 + - name: test + num_bytes: 37366 + num_examples: 100 + - name: train + num_bytes: 32735 + num_examples: 100 + download_size: 234008884 + dataset_size: 110529 +- config_name: PAN-X.nl features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1736255 - num_examples: 1947 - name: validation - num_bytes: 138045 - num_examples: 161 - download_size: 1223467 - dataset_size: 1874300 -- config_name: MLQA.zh.hi + num_bytes: 2016856 + num_examples: 10000 + - name: test + num_bytes: 2038638 + num_examples: 10000 + - name: train + num_bytes: 4062189 + num_examples: 20000 + download_size: 234008884 + dataset_size: 8117683 +- config_name: PAN-X.pt features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 1578191 - num_examples: 1767 - name: validation - num_bytes: 184373 - num_examples: 189 - download_size: 1044599 - dataset_size: 1762564 -- config_name: MLQA.zh.vi + num_bytes: 1575141 + num_examples: 10000 + - name: test + num_bytes: 1562625 + num_examples: 10000 + - name: train + num_bytes: 3149283 + num_examples: 20000 + download_size: 234008884 + dataset_size: 6287049 +- config_name: PAN-X.ru features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string 
- - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: + - name: validation + num_bytes: 2053169 + num_examples: 10000 - name: test - num_bytes: 1806158 - num_examples: 1943 + num_bytes: 2074145 + num_examples: 10000 + - name: train + num_bytes: 4121791 + num_examples: 20000 + download_size: 234008884 + dataset_size: 8249105 +- config_name: PAN-X.sw + features: + - name: tokens + sequence: string + - name: ner_tags + sequence: + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string + splits: - name: validation - num_bytes: 172906 - num_examples: 184 - download_size: 1268213 - dataset_size: 1979064 -- config_name: MLQA.zh.zh + num_bytes: 136368 + num_examples: 1000 + - name: test + num_bytes: 140231 + num_examples: 1000 + - name: train + num_bytes: 135911 + num_examples: 1000 + download_size: 234008884 + dataset_size: 412510 +- config_name: PAN-X.ta features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers + - name: tokens + sequence: string + - name: ner_tags sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string splits: - - name: test - num_bytes: 4422322 - num_examples: 5137 - name: validation - num_bytes: 443782 - num_examples: 504 - download_size: 3105362 - dataset_size: 4866104 -- config_name: PAN-X.af + num_bytes: 277625 + num_examples: 1000 + - name: test + num_bytes: 278114 + num_examples: 1000 + - name: train + num_bytes: 4122130 + num_examples: 15000 + download_size: 234008884 + dataset_size: 4677869 +- config_name: PAN-X.te features: - name: tokens sequence: string @@ -1508,18 +1280,18 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 1321376 - num_examples: 5000 - name: validation - num_bytes: 259689 + num_bytes: 293281 num_examples: 1000 - name: test - num_bytes: 257184 + num_bytes: 296963 num_examples: 1000 - download_size: 389015 - dataset_size: 1838249 -- config_name: PAN-X.ar + - name: train + num_bytes: 295410 + num_examples: 1000 + download_size: 234008884 + dataset_size: 885654 +- config_name: PAN-X.th features: - name: tokens sequence: string @@ -1537,18 +1309,18 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 3634096 - num_examples: 20000 - name: validation - num_bytes: 1808283 + num_bytes: 13262737 num_examples: 10000 - name: test - num_bytes: 1811963 + num_bytes: 13586928 num_examples: 10000 - download_size: 1567470 - dataset_size: 7254342 -- config_name: PAN-X.bg + - name: train + num_bytes: 27133029 + num_examples: 20000 + download_size: 234008884 + dataset_size: 53982694 +- config_name: PAN-X.tl features: - name: tokens sequence: string @@ -1566,18 +1338,47 @@ dataset_info: - name: langs sequence: string splits: + - name: validation + num_bytes: 114156 + num_examples: 1000 + - name: test + num_bytes: 117904 + num_examples: 1000 - name: train - num_bytes: 4600733 - num_examples: 20000 + num_bytes: 1168717 + num_examples: 10000 + download_size: 234008884 + dataset_size: 1400777 +- config_name: 
PAN-X.tr + features: + - name: tokens + sequence: string + - name: ner_tags + sequence: + class_label: + names: + '0': O + '1': B-PER + '2': I-PER + '3': B-ORG + '4': I-ORG + '5': B-LOC + '6': I-LOC + - name: langs + sequence: string + splits: - name: validation - num_bytes: 2310294 + num_bytes: 1915352 num_examples: 10000 - name: test - num_bytes: 2306138 + num_bytes: 1911503 num_examples: 10000 - download_size: 2030669 - dataset_size: 9217165 -- config_name: PAN-X.bn + - name: train + num_bytes: 3779170 + num_examples: 20000 + download_size: 234008884 + dataset_size: 7606025 +- config_name: PAN-X.ur features: - name: tokens sequence: string @@ -1595,18 +1396,18 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 1568825 - num_examples: 10000 - name: validation - num_bytes: 159068 + num_bytes: 152148 num_examples: 1000 - name: test - num_bytes: 159262 + num_bytes: 151922 num_examples: 1000 - download_size: 364024 - dataset_size: 1887155 -- config_name: PAN-X.de + - name: train + num_bytes: 3072276 + num_examples: 20000 + download_size: 234008884 + dataset_size: 3376346 +- config_name: PAN-X.vi features: - name: tokens sequence: string @@ -1624,18 +1425,18 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 4762312 - num_examples: 20000 - name: validation - num_bytes: 2381545 + num_bytes: 1565143 num_examples: 10000 - name: test - num_bytes: 2377619 + num_bytes: 1580216 num_examples: 10000 - download_size: 2360242 - dataset_size: 9521476 -- config_name: PAN-X.el + - name: train + num_bytes: 3153227 + num_examples: 20000 + download_size: 234008884 + dataset_size: 6298586 +- config_name: PAN-X.yo features: - name: tokens sequence: string @@ -1653,18 +1454,18 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 5063136 - num_examples: 20000 - name: validation - num_bytes: 2533786 - num_examples: 10000 + num_bytes: 13245 + num_examples: 100 - name: test - num_bytes: 2547574 - num_examples: 10000 - download_size: 2271726 - dataset_size: 10144496 -- config_name: PAN-X.en + num_bytes: 13533 + num_examples: 100 + - name: train + num_bytes: 14709 + num_examples: 100 + download_size: 234008884 + dataset_size: 41487 +- config_name: PAN-X.zh features: - name: tokens sequence: string @@ -1682,1115 +1483,1193 @@ dataset_info: - name: langs sequence: string splits: - - name: train - num_bytes: 3823434 - num_examples: 20000 - - name: validation - num_bytes: 1920049 - num_examples: 10000 + - name: validation + num_bytes: 4491325 + num_examples: 10000 + - name: test + num_bytes: 4363172 + num_examples: 10000 + - name: train + num_bytes: 8832051 + num_examples: 20000 + download_size: 234008884 + dataset_size: 17686548 +- config_name: MLQA.ar.ar + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 8368114 + num_examples: 5335 + - name: validation + num_bytes: 824108 + num_examples: 517 + download_size: 75719050 + dataset_size: 9192222 +- config_name: MLQA.ar.de + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 2183942 + num_examples: 1649 + - name: validation + 
num_bytes: 364837 + num_examples: 207 + download_size: 75719050 + dataset_size: 2548779 +- config_name: MLQA.ar.vi + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 3290629 + num_examples: 2047 + - name: validation + num_bytes: 288446 + num_examples: 163 + download_size: 75719050 + dataset_size: 3579075 +- config_name: MLQA.ar.zh + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 3229872 + num_examples: 1912 + - name: validation + num_bytes: 340049 + num_examples: 188 + download_size: 75719050 + dataset_size: 3569921 +- config_name: MLQA.ar.en + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 8225662 + num_examples: 5335 + - name: validation + num_bytes: 810089 + num_examples: 517 + download_size: 75719050 + dataset_size: 9035751 +- config_name: MLQA.ar.es + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: test + num_bytes: 3041378 + num_examples: 1978 + - name: validation + num_bytes: 228180 + num_examples: 161 + download_size: 75719050 + dataset_size: 3269558 +- config_name: MLQA.ar.hi + features: + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: - name: test - num_bytes: 1916200 - num_examples: 10000 - download_size: 1886284 - dataset_size: 7659683 -- config_name: PAN-X.es + num_bytes: 3039396 + num_examples: 1831 + - name: validation + num_bytes: 281770 + num_examples: 186 + download_size: 75719050 + dataset_size: 3321166 +- config_name: MLQA.de.ar features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3199121 - num_examples: 20000 - - name: validation - num_bytes: 1592505 - num_examples: 10000 - name: test - num_bytes: 1602271 - num_examples: 10000 - download_size: 1489562 - dataset_size: 6393897 -- config_name: PAN-X.et + num_bytes: 1620006 + num_examples: 1649 + - name: validation + num_bytes: 200174 + num_examples: 207 + download_size: 75719050 + dataset_size: 1820180 +- config_name: MLQA.de.de features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers 
sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3023171 - num_examples: 15000 - - name: validation - num_bytes: 2030140 - num_examples: 10000 - name: test - num_bytes: 2021389 - num_examples: 10000 - download_size: 1915624 - dataset_size: 7074700 -- config_name: PAN-X.eu + num_bytes: 4366102 + num_examples: 4517 + - name: validation + num_bytes: 488367 + num_examples: 512 + download_size: 75719050 + dataset_size: 4854469 +- config_name: MLQA.de.vi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 2292307 - num_examples: 10000 - - name: validation - num_bytes: 2296315 - num_examples: 10000 - name: test - num_bytes: 2249815 - num_examples: 10000 - download_size: 1393179 - dataset_size: 6838437 -- config_name: PAN-X.fa + num_bytes: 1688483 + num_examples: 1675 + - name: validation + num_bytes: 216075 + num_examples: 182 + download_size: 75719050 + dataset_size: 1904558 +- config_name: MLQA.de.zh features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3529314 - num_examples: 20000 - - name: validation - num_bytes: 1782286 - num_examples: 10000 - name: test - num_bytes: 1770264 - num_examples: 10000 - download_size: 1401208 - dataset_size: 7081864 -- config_name: PAN-X.fi + num_bytes: 1679180 + num_examples: 1621 + - name: validation + num_bytes: 184318 + num_examples: 190 + download_size: 75719050 + dataset_size: 1863498 +- config_name: MLQA.de.en features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4273753 - num_examples: 20000 - - name: validation - num_bytes: 2131749 - num_examples: 10000 - name: test - num_bytes: 2130645 - num_examples: 10000 - download_size: 2459149 - dataset_size: 8536147 -- config_name: PAN-X.fr + num_bytes: 4343144 + num_examples: 4517 + - name: validation + num_bytes: 485894 + num_examples: 512 + download_size: 75719050 + dataset_size: 4829038 +- config_name: MLQA.de.es features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - 
'0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3335384 - num_examples: 20000 - - name: validation - num_bytes: 1664170 - num_examples: 10000 - name: test - num_bytes: 1675765 - num_examples: 10000 - download_size: 1679283 - dataset_size: 6675319 -- config_name: PAN-X.he + num_bytes: 1716615 + num_examples: 1776 + - name: validation + num_bytes: 170582 + num_examples: 196 + download_size: 75719050 + dataset_size: 1887197 +- config_name: MLQA.de.hi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4667060 - num_examples: 20000 - - name: validation - num_bytes: 2332740 - num_examples: 10000 - name: test - num_bytes: 2318736 - num_examples: 10000 - download_size: 2186463 - dataset_size: 9318536 -- config_name: PAN-X.hi + num_bytes: 1371074 + num_examples: 1430 + - name: validation + num_bytes: 153871 + num_examples: 163 + download_size: 75719050 + dataset_size: 1524945 +- config_name: MLQA.vi.ar features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 964192 - num_examples: 5000 - - name: validation - num_bytes: 190651 - num_examples: 1000 - name: test - num_bytes: 196170 - num_examples: 1000 - download_size: 266086 - dataset_size: 1351013 -- config_name: PAN-X.hu + num_bytes: 3205185 + num_examples: 2047 + - name: validation + num_bytes: 230335 + num_examples: 163 + download_size: 75719050 + dataset_size: 3435520 +- config_name: MLQA.vi.de features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4499874 - num_examples: 20000 - - name: validation - num_bytes: 2211831 - num_examples: 10000 - name: test - num_bytes: 2249759 - num_examples: 10000 - download_size: 2399390 - dataset_size: 8961464 -- config_name: PAN-X.id + num_bytes: 2227033 + num_examples: 1675 + - name: validation + num_bytes: 277185 + num_examples: 182 + download_size: 75719050 + dataset_size: 2504218 +- config_name: MLQA.vi.vi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG 
- '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3083967 - num_examples: 20000 - - name: validation - num_bytes: 1537959 - num_examples: 10000 - name: test - num_bytes: 1536859 - num_examples: 10000 - download_size: 1412049 - dataset_size: 6158785 -- config_name: PAN-X.it + num_bytes: 7922085 + num_examples: 5495 + - name: validation + num_bytes: 726518 + num_examples: 511 + download_size: 75719050 + dataset_size: 8648603 +- config_name: MLQA.vi.zh features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3874623 - num_examples: 20000 - - name: validation - num_bytes: 1908509 - num_examples: 10000 - name: test - num_bytes: 1928388 - num_examples: 10000 - download_size: 1855798 - dataset_size: 7711520 -- config_name: PAN-X.ja + num_bytes: 2989660 + num_examples: 1943 + - name: validation + num_bytes: 269389 + num_examples: 184 + download_size: 75719050 + dataset_size: 3259049 +- config_name: MLQA.vi.en features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 12670361 - num_examples: 20000 - - name: validation - num_bytes: 6322983 - num_examples: 10000 - name: test - num_bytes: 6448940 - num_examples: 10000 - download_size: 2465674 - dataset_size: 25442284 -- config_name: PAN-X.jv + num_bytes: 7843431 + num_examples: 5495 + - name: validation + num_bytes: 719273 + num_examples: 511 + download_size: 75719050 + dataset_size: 8562704 +- config_name: MLQA.vi.es features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 16086 - num_examples: 100 - - name: validation - num_bytes: 14580 - num_examples: 100 - name: test - num_bytes: 16897 - num_examples: 100 - download_size: 20475 - dataset_size: 47563 -- config_name: PAN-X.ka + num_bytes: 2866597 + num_examples: 2018 + - name: validation + num_bytes: 283461 + num_examples: 189 + download_size: 75719050 + dataset_size: 3150058 +- config_name: MLQA.vi.hi features: - - name: tokens - sequence: string - - name: ner_tags - sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: 
string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 2777342 - num_examples: 10000 - - name: validation - num_bytes: 2806881 - num_examples: 10000 - name: test - num_bytes: 2824621 - num_examples: 10000 - download_size: 1817280 - dataset_size: 8408844 -- config_name: PAN-X.kk + num_bytes: 2776664 + num_examples: 1947 + - name: validation + num_bytes: 255007 + num_examples: 177 + download_size: 75719050 + dataset_size: 3031671 +- config_name: MLQA.zh.ar features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 240256 - num_examples: 1000 - - name: validation - num_bytes: 238089 - num_examples: 1000 - name: test - num_bytes: 236704 - num_examples: 1000 - download_size: 160554 - dataset_size: 715049 -- config_name: PAN-X.ko + num_bytes: 1731483 + num_examples: 1912 + - name: validation + num_bytes: 175349 + num_examples: 188 + download_size: 75719050 + dataset_size: 1906832 +- config_name: MLQA.zh.de features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4284693 - num_examples: 20000 - - name: validation - num_bytes: 2138147 - num_examples: 10000 - name: test - num_bytes: 2138274 - num_examples: 10000 - download_size: 2539591 - dataset_size: 8561114 -- config_name: PAN-X.ml + num_bytes: 1390018 + num_examples: 1621 + - name: validation + num_bytes: 174605 + num_examples: 190 + download_size: 75719050 + dataset_size: 1564623 +- config_name: MLQA.zh.vi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 2865184 - num_examples: 10000 - - name: validation - num_bytes: 290735 - num_examples: 1000 - name: test - num_bytes: 276906 - num_examples: 1000 - download_size: 852955 - dataset_size: 3432825 -- config_name: PAN-X.mr + num_bytes: 1806186 + num_examples: 1943 + - name: validation + num_bytes: 172934 + num_examples: 184 + download_size: 75719050 + dataset_size: 1979120 +- config_name: MLQA.zh.zh features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: 
answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 1248239 - num_examples: 5000 - - name: validation - num_bytes: 245338 - num_examples: 1000 - name: test - num_bytes: 255884 - num_examples: 1000 - download_size: 347215 - dataset_size: 1749461 -- config_name: PAN-X.ms + num_bytes: 4422350 + num_examples: 5137 + - name: validation + num_bytes: 443810 + num_examples: 504 + download_size: 75719050 + dataset_size: 4866160 +- config_name: MLQA.zh.en features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 2965008 - num_examples: 20000 - - name: validation - num_bytes: 147495 - num_examples: 1000 - name: test - num_bytes: 147148 - num_examples: 1000 - download_size: 708795 - dataset_size: 3259651 -- config_name: PAN-X.my + num_bytes: 4450985 + num_examples: 5137 + - name: validation + num_bytes: 446868 + num_examples: 504 + download_size: 75719050 + dataset_size: 4897853 +- config_name: MLQA.zh.es features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 32715 - num_examples: 100 - - name: validation - num_bytes: 40408 - num_examples: 100 - name: test - num_bytes: 37346 - num_examples: 100 - download_size: 39008 - dataset_size: 110469 -- config_name: PAN-X.nl + num_bytes: 1736283 + num_examples: 1947 + - name: validation + num_bytes: 138073 + num_examples: 161 + download_size: 75719050 + dataset_size: 1874356 +- config_name: MLQA.zh.hi features: - - name: tokens - sequence: string - - name: ner_tags - sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4062149 - num_examples: 20000 - - name: validation - num_bytes: 2016836 - num_examples: 10000 - name: test - num_bytes: 2038618 - num_examples: 10000 - download_size: 1943893 - dataset_size: 8117603 -- config_name: PAN-X.pt + num_bytes: 1578219 + num_examples: 1767 + - name: validation + num_bytes: 184401 + num_examples: 189 + download_size: 75719050 + dataset_size: 1762620 +- config_name: MLQA.en.ar features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + 
dtype: string splits: - - name: train - num_bytes: 3149243 - num_examples: 20000 - - name: validation - num_bytes: 1575121 - num_examples: 10000 - name: test - num_bytes: 1562605 - num_examples: 10000 - download_size: 1540478 - dataset_size: 6286969 -- config_name: PAN-X.ru + num_bytes: 6739219 + num_examples: 5335 + - name: validation + num_bytes: 630843 + num_examples: 517 + download_size: 75719050 + dataset_size: 7370062 +- config_name: MLQA.en.de features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4121751 - num_examples: 20000 - - name: validation - num_bytes: 2053149 - num_examples: 10000 - name: test - num_bytes: 2074125 - num_examples: 10000 - download_size: 2127730 - dataset_size: 8249025 -- config_name: PAN-X.sw + num_bytes: 5056722 + num_examples: 4517 + - name: validation + num_bytes: 594936 + num_examples: 512 + download_size: 75719050 + dataset_size: 5651658 +- config_name: MLQA.en.vi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 135891 - num_examples: 1000 - - name: validation - num_bytes: 136348 - num_examples: 1000 - name: test - num_bytes: 140211 - num_examples: 1000 - download_size: 87435 - dataset_size: 412450 -- config_name: PAN-X.ta + num_bytes: 7056698 + num_examples: 5495 + - name: validation + num_bytes: 640646 + num_examples: 511 + download_size: 75719050 + dataset_size: 7697344 +- config_name: MLQA.en.zh features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 4122090 - num_examples: 15000 - - name: validation - num_bytes: 277605 - num_examples: 1000 - name: test - num_bytes: 278094 - num_examples: 1000 - download_size: 1044729 - dataset_size: 4677789 -- config_name: PAN-X.te + num_bytes: 6539307 + num_examples: 5137 + - name: validation + num_bytes: 608444 + num_examples: 504 + download_size: 75719050 + dataset_size: 7147751 +- config_name: MLQA.en.en features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 
295390 - num_examples: 1000 - - name: validation - num_bytes: 293261 - num_examples: 1000 - name: test - num_bytes: 296943 - num_examples: 1000 - download_size: 200516 - dataset_size: 885594 -- config_name: PAN-X.th + num_bytes: 14004648 + num_examples: 11590 + - name: validation + num_bytes: 1329112 + num_examples: 1148 + download_size: 75719050 + dataset_size: 15333760 +- config_name: MLQA.en.es features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 27132989 - num_examples: 20000 - - name: validation - num_bytes: 13262717 - num_examples: 10000 - name: test - num_bytes: 13586908 - num_examples: 10000 - download_size: 2569566 - dataset_size: 53982614 -- config_name: PAN-X.tl + num_bytes: 6179249 + num_examples: 5253 + - name: validation + num_bytes: 555462 + num_examples: 500 + download_size: 75719050 + dataset_size: 6734711 +- config_name: MLQA.en.hi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 1168697 - num_examples: 10000 - - name: validation - num_bytes: 114136 - num_examples: 1000 - name: test - num_bytes: 117884 - num_examples: 1000 - download_size: 308160 - dataset_size: 1400717 -- config_name: PAN-X.tr + num_bytes: 6378866 + num_examples: 4918 + - name: validation + num_bytes: 623171 + num_examples: 507 + download_size: 75719050 + dataset_size: 7002037 +- config_name: MLQA.es.ar features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3779130 - num_examples: 20000 - - name: validation - num_bytes: 1915332 - num_examples: 10000 - name: test - num_bytes: 1911483 - num_examples: 10000 - download_size: 2000699 - dataset_size: 7605945 -- config_name: PAN-X.ur + num_bytes: 1740282 + num_examples: 1978 + - name: validation + num_bytes: 148649 + num_examples: 161 + download_size: 75719050 + dataset_size: 1888931 +- config_name: MLQA.es.de features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3072236 - num_examples: 20000 - - name: 
validation - num_bytes: 152128 - num_examples: 1000 - name: test - num_bytes: 151902 - num_examples: 1000 - download_size: 610869 - dataset_size: 3376266 -- config_name: PAN-X.vi + num_bytes: 1404025 + num_examples: 1776 + - name: validation + num_bytes: 144186 + num_examples: 196 + download_size: 75719050 + dataset_size: 1548211 +- config_name: MLQA.es.vi features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 3153187 - num_examples: 20000 - - name: validation - num_bytes: 1565123 - num_examples: 10000 - name: test - num_bytes: 1580196 - num_examples: 10000 - download_size: 1375631 - dataset_size: 6298506 -- config_name: PAN-X.yo + num_bytes: 1747969 + num_examples: 2018 + - name: validation + num_bytes: 176841 + num_examples: 189 + download_size: 75719050 + dataset_size: 1924810 +- config_name: MLQA.es.zh features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 14689 - num_examples: 100 - - name: validation - num_bytes: 13225 - num_examples: 100 - name: test - num_bytes: 13513 - num_examples: 100 - download_size: 17337 - dataset_size: 41427 -- config_name: PAN-X.zh + num_bytes: 1678451 + num_examples: 1947 + - name: validation + num_bytes: 126646 + num_examples: 161 + download_size: 75719050 + dataset_size: 1805097 +- config_name: MLQA.es.en features: - - name: tokens - sequence: string - - name: ner_tags + - name: id + dtype: string + - name: title + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers sequence: - class_label: - names: - '0': O - '1': B-PER - '2': I-PER - '3': B-ORG - '4': I-ORG - '5': B-LOC - '6': I-LOC - - name: langs - sequence: string + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 8832011 - num_examples: 20000 - - name: validation - num_bytes: 4491305 - num_examples: 10000 - name: test - num_bytes: 4363152 - num_examples: 10000 - download_size: 2083198 - dataset_size: 17686468 -- config_name: PAWS-X.de + num_bytes: 4362737 + num_examples: 5253 + - name: validation + num_bytes: 419068 + num_examples: 500 + download_size: 75719050 + dataset_size: 4781805 +- config_name: MLQA.es.es features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 12451823 - num_examples: 49380 - - name: validation - num_bytes: 499997 - num_examples: 2000 - name: test - num_bytes: 510182 - num_examples: 2000 - download_size: 9294034 - dataset_size: 13462002 -- config_name: PAWS-X.en + num_bytes: 4394333 + 
num_examples: 5253 + - name: validation + num_bytes: 422071 + num_examples: 500 + download_size: 75719050 + dataset_size: 4816404 +- config_name: MLQA.es.hi features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 11827659 - num_examples: 49175 - - name: validation - num_bytes: 478279 - num_examples: 2000 - name: test - num_bytes: 480726 - num_examples: 2000 - download_size: 8717639 - dataset_size: 12786664 -- config_name: PAWS-X.es + num_bytes: 1523523 + num_examples: 1723 + - name: validation + num_bytes: 181834 + num_examples: 187 + download_size: 75719050 + dataset_size: 1705357 +- config_name: MLQA.hi.ar features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 12462047 - num_examples: 49401 - - name: validation - num_bytes: 494057 - num_examples: 1961 - name: test - num_bytes: 505035 - num_examples: 2000 - download_size: 9229918 - dataset_size: 13461139 -- config_name: PAWS-X.fr + num_bytes: 4445589 + num_examples: 1831 + - name: validation + num_bytes: 410424 + num_examples: 186 + download_size: 75719050 + dataset_size: 4856013 +- config_name: MLQA.hi.de features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 12948452 - num_examples: 49399 - - name: validation - num_bytes: 516099 - num_examples: 1988 - name: test - num_bytes: 521019 - num_examples: 2000 - download_size: 9464987 - dataset_size: 13985570 -- config_name: PAWS-X.ja + num_bytes: 3022864 + num_examples: 1430 + - name: validation + num_bytes: 301713 + num_examples: 163 + download_size: 75719050 + dataset_size: 3324577 +- config_name: MLQA.hi.vi features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 14695593 - num_examples: 49401 - - name: validation - num_bytes: 647762 - num_examples: 2000 - name: test - num_bytes: 654628 - num_examples: 2000 - download_size: 10136228 - dataset_size: 15997983 -- config_name: PAWS-X.ko + num_bytes: 4743484 + num_examples: 1947 + - name: validation + num_bytes: 419106 + num_examples: 177 + download_size: 75719050 + dataset_size: 5162590 +- config_name: MLQA.hi.zh features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 13542597 - num_examples: 49164 - - name: validation - num_bytes: 540775 - num_examples: 2000 - name: test - num_bytes: 547966 - num_examples: 1999 - 
download_size: 9926292 - dataset_size: 14631338 -- config_name: PAWS-X.zh + num_bytes: 4354875 + num_examples: 1767 + - name: validation + num_bytes: 424246 + num_examples: 189 + download_size: 75719050 + dataset_size: 4779121 +- config_name: MLQA.hi.en features: - - name: sentence1 + - name: id dtype: string - - name: sentence2 + - name: title dtype: string - - name: label + - name: context + dtype: string + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - - name: train - num_bytes: 10469652 - num_examples: 49401 - - name: validation - num_bytes: 459108 - num_examples: 2000 - name: test - num_bytes: 460626 - num_examples: 2000 - download_size: 8878855 - dataset_size: 11389386 -- config_name: SQuAD + num_bytes: 11449261 + num_examples: 4918 + - name: validation + num_bytes: 1097857 + num_examples: 507 + download_size: 75719050 + dataset_size: 12547118 +- config_name: MLQA.hi.es features: - name: id dtype: string @@ -2807,33 +2686,39 @@ dataset_info: - name: text dtype: string splits: - - name: train - num_bytes: 79316858 - num_examples: 87599 + - name: test + num_bytes: 3862229 + num_examples: 1723 - name: validation - num_bytes: 10472597 - num_examples: 10570 - download_size: 16272656 - dataset_size: 89789455 -- config_name: XNLI + num_bytes: 420402 + num_examples: 187 + download_size: 75719050 + dataset_size: 4282631 +- config_name: MLQA.hi.hi features: - - name: language + - name: id dtype: string - - name: sentence1 + - name: title dtype: string - - name: sentence2 + - name: context dtype: string - - name: gold_label + - name: question dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string splits: - name: test - num_bytes: 20359372 - num_examples: 75150 + num_bytes: 11810475 + num_examples: 4918 - name: validation - num_bytes: 10049239 - num_examples: 37350 - download_size: 8881623 - dataset_size: 30408611 + num_bytes: 1136784 + num_examples: 507 + download_size: 75719050 + dataset_size: 12947259 - config_name: XQuAD.ar features: - name: id @@ -2850,10 +2735,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 1722775 + num_bytes: 1722799 num_examples: 1190 - download_size: 263032 - dataset_size: 1722775 + download_size: 1582988 + dataset_size: 1722799 - config_name: XQuAD.de features: - name: id @@ -2870,11 +2755,31 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 1283277 + num_bytes: 1283301 num_examples: 1190 - download_size: 241987 - dataset_size: 1283277 -- config_name: XQuAD.el + download_size: 669810 + dataset_size: 1283301 +- config_name: XQuAD.vi + features: + - name: id + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: validation + num_bytes: 1477239 + num_examples: 1190 + download_size: 911401 + dataset_size: 1477239 +- config_name: XQuAD.zh features: - name: id dtype: string @@ -2890,10 +2795,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 2206666 + num_bytes: 984241 num_examples: 1190 - download_size: 324409 - dataset_size: 2206666 + download_size: 808652 + dataset_size: 984241 - config_name: XQuAD.en features: - name: id @@ -2910,10 +2815,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 1116099 + num_bytes: 1116123 num_examples: 1190 - download_size: 212402 - dataset_size: 1116099 + 
download_size: 609383 + dataset_size: 1116123 - config_name: XQuAD.es features: - name: id @@ -2930,10 +2835,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 1273475 + num_bytes: 1273499 num_examples: 1190 - download_size: 236904 - dataset_size: 1273475 + download_size: 684322 + dataset_size: 1273499 - config_name: XQuAD.hi features: - name: id @@ -2950,10 +2855,30 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 2682951 + num_bytes: 2682975 + num_examples: 1190 + download_size: 1680538 + dataset_size: 2682975 +- config_name: XQuAD.el + features: + - name: id + dtype: string + - name: context + dtype: string + - name: question + dtype: string + - name: answers + sequence: + - name: answer_start + dtype: int32 + - name: text + dtype: string + splits: + - name: validation + num_bytes: 2206690 num_examples: 1190 - download_size: 322113 - dataset_size: 2682951 + download_size: 1918889 + dataset_size: 2206690 - config_name: XQuAD.ru features: - name: id @@ -2970,10 +2895,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 2136966 + num_bytes: 2136990 num_examples: 1190 - download_size: 321758 - dataset_size: 2136966 + download_size: 1896368 + dataset_size: 2136990 - config_name: XQuAD.th features: - name: id @@ -2990,10 +2915,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 2854935 + num_bytes: 2854959 num_examples: 1190 - download_size: 337337 - dataset_size: 2854935 + download_size: 1809143 + dataset_size: 2854959 - config_name: XQuAD.tr features: - name: id @@ -3010,126 +2935,226 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 1210739 + num_bytes: 1210763 num_examples: 1190 - download_size: 228394 - dataset_size: 1210739 -- config_name: XQuAD.vi + download_size: 729506 + dataset_size: 1210763 +- config_name: bucc18.de + features: + - name: source_sentence + dtype: string + - name: target_sentence + dtype: string + - name: source_lang + dtype: string + - name: target_lang + dtype: string + splits: + - name: validation + num_bytes: 248707 + num_examples: 1038 + - name: test + num_bytes: 2325701 + num_examples: 9580 + download_size: 30719200 + dataset_size: 2574408 +- config_name: bucc18.fr + features: + - name: source_sentence + dtype: string + - name: target_sentence + dtype: string + - name: source_lang + dtype: string + - name: target_lang + dtype: string + splits: + - name: validation + num_bytes: 212513 + num_examples: 929 + - name: test + num_bytes: 2082419 + num_examples: 9086 + download_size: 22706544 + dataset_size: 2294932 +- config_name: bucc18.zh + features: + - name: source_sentence + dtype: string + - name: target_sentence + dtype: string + - name: source_lang + dtype: string + - name: target_lang + dtype: string + splits: + - name: validation + num_bytes: 55739 + num_examples: 257 + - name: test + num_bytes: 415925 + num_examples: 1899 + download_size: 7114794 + dataset_size: 471664 +- config_name: bucc18.ru + features: + - name: source_sentence + dtype: string + - name: target_sentence + dtype: string + - name: source_lang + dtype: string + - name: target_lang + dtype: string + splits: + - name: validation + num_bytes: 761347 + num_examples: 2374 + - name: test + num_bytes: 4641678 + num_examples: 14435 + download_size: 41354312 + dataset_size: 5403025 +- config_name: PAWS-X.de + features: + - name: sentence1 + dtype: string + - name: sentence2 + dtype: string + - name: label + dtype: string + splits: + - name: validation + num_bytes: 500009 + num_examples: 2000 
+ - name: test + num_bytes: 510194 + num_examples: 2000 + - name: train + num_bytes: 12451883 + num_examples: 49380 + download_size: 30282057 + dataset_size: 13462086 +- config_name: PAWS-X.en features: - - name: id + - name: sentence1 dtype: string - - name: context + - name: sentence2 dtype: string - - name: question + - name: label dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string splits: - name: validation - num_bytes: 1477215 - num_examples: 1190 - download_size: 237674 - dataset_size: 1477215 -- config_name: XQuAD.zh + num_bytes: 478291 + num_examples: 2000 + - name: test + num_bytes: 480738 + num_examples: 2000 + - name: train + num_bytes: 11827719 + num_examples: 49175 + download_size: 30282057 + dataset_size: 12786748 +- config_name: PAWS-X.es features: - - name: id + - name: sentence1 dtype: string - - name: context + - name: sentence2 dtype: string - - name: question + - name: label dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string splits: - name: validation - num_bytes: 984217 - num_examples: 1190 - download_size: 205798 - dataset_size: 984217 -- config_name: bucc18.de + num_bytes: 494069 + num_examples: 1961 + - name: test + num_bytes: 505047 + num_examples: 2000 + - name: train + num_bytes: 12462107 + num_examples: 49401 + download_size: 30282057 + dataset_size: 13461223 +- config_name: PAWS-X.fr features: - - name: source_sentence - dtype: string - - name: target_sentence + - name: sentence1 dtype: string - - name: source_lang + - name: sentence2 dtype: string - - name: target_lang + - name: label dtype: string splits: - name: validation - num_bytes: 248691 - num_examples: 1038 + num_bytes: 516111 + num_examples: 1988 - name: test - num_bytes: 2325685 - num_examples: 9580 - download_size: 1636130 - dataset_size: 2574376 -- config_name: bucc18.fr + num_bytes: 521031 + num_examples: 2000 + - name: train + num_bytes: 12948512 + num_examples: 49399 + download_size: 30282057 + dataset_size: 13985654 +- config_name: PAWS-X.ja features: - - name: source_sentence - dtype: string - - name: target_sentence + - name: sentence1 dtype: string - - name: source_lang + - name: sentence2 dtype: string - - name: target_lang + - name: label dtype: string splits: - name: validation - num_bytes: 212497 - num_examples: 929 + num_bytes: 647774 + num_examples: 2000 - name: test - num_bytes: 2082403 - num_examples: 9086 - download_size: 1437096 - dataset_size: 2294900 -- config_name: bucc18.ru + num_bytes: 654640 + num_examples: 2000 + - name: train + num_bytes: 14695653 + num_examples: 49401 + download_size: 30282057 + dataset_size: 15998067 +- config_name: PAWS-X.ko features: - - name: source_sentence - dtype: string - - name: target_sentence + - name: sentence1 dtype: string - - name: source_lang + - name: sentence2 dtype: string - - name: target_lang + - name: label dtype: string splits: - name: validation - num_bytes: 761331 - num_examples: 2374 + num_bytes: 540787 + num_examples: 2000 - name: test - num_bytes: 4641646 - num_examples: 14435 - download_size: 3074476 - dataset_size: 5402977 -- config_name: bucc18.zh + num_bytes: 547978 + num_examples: 1999 + - name: train + num_bytes: 13542657 + num_examples: 49164 + download_size: 30282057 + dataset_size: 14631422 +- config_name: PAWS-X.zh features: - - name: source_sentence - dtype: string - - name: target_sentence + - name: sentence1 dtype: string - - name: source_lang + - name: sentence2 dtype: string - - name: 
target_lang + - name: label dtype: string splits: - name: validation - num_bytes: 55723 - num_examples: 257 + num_bytes: 459120 + num_examples: 2000 - name: test - num_bytes: 415909 - num_examples: 1899 - download_size: 320378 - dataset_size: 471632 + num_bytes: 460638 + num_examples: 2000 + - name: train + num_bytes: 10469712 + num_examples: 49401 + download_size: 30282057 + dataset_size: 11389470 - config_name: tatoeba.afr features: - name: source_sentence @@ -3142,10 +3167,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 250635 + num_bytes: 179651 num_examples: 1000 - download_size: 47676 - dataset_size: 250635 + download_size: 59635 + dataset_size: 179651 - config_name: tatoeba.ara features: - name: source_sentence @@ -3158,10 +3183,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 263650 + num_bytes: 192666 num_examples: 1000 - download_size: 51228 - dataset_size: 263650 + download_size: 72650 + dataset_size: 192666 - config_name: tatoeba.ben features: - name: source_sentence @@ -3174,10 +3199,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 282703 + num_bytes: 211719 num_examples: 1000 - download_size: 51362 - dataset_size: 282703 + download_size: 91703 + dataset_size: 211719 - config_name: tatoeba.bul features: - name: source_sentence @@ -3190,11 +3215,11 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 293279 + num_bytes: 222295 num_examples: 1000 - download_size: 62454 - dataset_size: 293279 -- config_name: tatoeba.cmn + download_size: 102279 + dataset_size: 222295 +- config_name: tatoeba.deu features: - name: source_sentence dtype: string @@ -3206,11 +3231,11 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 259931 + num_bytes: 225583 num_examples: 1000 - download_size: 58281 - dataset_size: 259931 -- config_name: tatoeba.deu + download_size: 105567 + dataset_size: 225583 +- config_name: tatoeba.cmn features: - name: source_sentence dtype: string @@ -3222,10 +3247,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 296567 + num_bytes: 188947 num_examples: 1000 - download_size: 79066 - dataset_size: 296567 + download_size: 68931 + dataset_size: 188947 - config_name: tatoeba.ell features: - name: source_sentence @@ -3238,10 +3263,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 269961 + num_bytes: 198977 num_examples: 1000 - download_size: 52251 - dataset_size: 269961 + download_size: 78961 + dataset_size: 198977 - config_name: tatoeba.est features: - name: source_sentence @@ -3254,10 +3279,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 250728 + num_bytes: 179744 num_examples: 1000 - download_size: 49968 - dataset_size: 250728 + download_size: 59728 + dataset_size: 179744 - config_name: tatoeba.eus features: - name: source_sentence @@ -3270,10 +3295,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 257068 + num_bytes: 186084 num_examples: 1000 - download_size: 54271 - dataset_size: 257068 + download_size: 66068 + dataset_size: 186084 - config_name: tatoeba.fin features: - name: source_sentence @@ -3286,10 +3311,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 266669 + num_bytes: 195685 num_examples: 1000 - download_size: 60580 - dataset_size: 266669 + download_size: 75669 + dataset_size: 195685 - config_name: tatoeba.fra features: - name: source_sentence @@ -3302,10 +3327,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 
271018 + num_bytes: 200034 num_examples: 1000 - download_size: 60925 - dataset_size: 271018 + download_size: 80018 + dataset_size: 200034 - config_name: tatoeba.heb features: - name: source_sentence @@ -3318,10 +3343,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 274500 + num_bytes: 203516 num_examples: 1000 - download_size: 57306 - dataset_size: 274500 + download_size: 83500 + dataset_size: 203516 - config_name: tatoeba.hin features: - name: source_sentence @@ -3334,10 +3359,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 313558 + num_bytes: 242574 num_examples: 1000 - download_size: 68816 - dataset_size: 313558 + download_size: 122558 + dataset_size: 242574 - config_name: tatoeba.hun features: - name: source_sentence @@ -3350,10 +3375,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 259889 + num_bytes: 188905 num_examples: 1000 - download_size: 58096 - dataset_size: 259889 + download_size: 68889 + dataset_size: 188905 - config_name: tatoeba.ind features: - name: source_sentence @@ -3366,10 +3391,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 265844 + num_bytes: 194860 num_examples: 1000 - download_size: 57047 - dataset_size: 265844 + download_size: 74844 + dataset_size: 194860 - config_name: tatoeba.ita features: - name: source_sentence @@ -3382,10 +3407,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 256833 + num_bytes: 185849 num_examples: 1000 - download_size: 52422 - dataset_size: 256833 + download_size: 65833 + dataset_size: 185849 - config_name: tatoeba.jav features: - name: source_sentence @@ -3398,10 +3423,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 53068 + num_bytes: 38529 num_examples: 205 - download_size: 15208 - dataset_size: 53068 + download_size: 13913 + dataset_size: 38529 - config_name: tatoeba.jpn features: - name: source_sentence @@ -3414,10 +3439,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 284083 + num_bytes: 213099 num_examples: 1000 - download_size: 66620 - dataset_size: 284083 + download_size: 93083 + dataset_size: 213099 - config_name: tatoeba.kat features: - name: source_sentence @@ -3430,10 +3455,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 214646 + num_bytes: 161696 num_examples: 746 - download_size: 41759 - dataset_size: 214646 + download_size: 72160 + dataset_size: 161696 - config_name: tatoeba.kaz features: - name: source_sentence @@ -3446,10 +3471,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 157003 + num_bytes: 116194 num_examples: 575 - download_size: 35693 - dataset_size: 157003 + download_size: 47178 + dataset_size: 116194 - config_name: tatoeba.kor features: - name: source_sentence @@ -3462,10 +3487,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 270139 + num_bytes: 199155 num_examples: 1000 - download_size: 61210 - dataset_size: 270139 + download_size: 79139 + dataset_size: 199155 - config_name: tatoeba.mal features: - name: source_sentence @@ -3478,10 +3503,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 225934 + num_bytes: 177173 num_examples: 687 - download_size: 51077 - dataset_size: 225934 + download_size: 94717 + dataset_size: 177173 - config_name: tatoeba.mar features: - name: source_sentence @@ -3494,10 +3519,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 291542 + num_bytes: 220558 num_examples: 1000 - download_size: 56575 
- dataset_size: 291542 + download_size: 100542 + dataset_size: 220558 - config_name: tatoeba.nld features: - name: source_sentence @@ -3510,10 +3535,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 264263 + num_bytes: 193279 num_examples: 1000 - download_size: 59774 - dataset_size: 264263 + download_size: 73263 + dataset_size: 193279 - config_name: tatoeba.pes features: - name: source_sentence @@ -3526,10 +3551,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 284719 + num_bytes: 213735 num_examples: 1000 - download_size: 64642 - dataset_size: 284719 + download_size: 93719 + dataset_size: 213735 - config_name: tatoeba.por features: - name: source_sentence @@ -3542,10 +3567,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 266185 + num_bytes: 195201 num_examples: 1000 - download_size: 58250 - dataset_size: 266185 + download_size: 75185 + dataset_size: 195201 - config_name: tatoeba.rus features: - name: source_sentence @@ -3558,10 +3583,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 283472 + num_bytes: 212488 num_examples: 1000 - download_size: 61601 - dataset_size: 283472 + download_size: 92472 + dataset_size: 212488 - config_name: tatoeba.spa features: - name: source_sentence @@ -3574,10 +3599,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 263266 + num_bytes: 192282 num_examples: 1000 - download_size: 57055 - dataset_size: 263266 + download_size: 72266 + dataset_size: 192282 - config_name: tatoeba.swh features: - name: source_sentence @@ -3590,10 +3615,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 94957 + num_bytes: 67283 num_examples: 390 - download_size: 19362 - dataset_size: 94957 + download_size: 20467 + dataset_size: 67283 - config_name: tatoeba.tam features: - name: source_sentence @@ -3606,10 +3631,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 98078 + num_bytes: 76297 num_examples: 307 - download_size: 23648 - dataset_size: 98078 + download_size: 39441 + dataset_size: 76297 - config_name: tatoeba.tel features: - name: source_sentence @@ -3622,10 +3647,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 69837 + num_bytes: 53239 num_examples: 234 - download_size: 18260 - dataset_size: 69837 + download_size: 25143 + dataset_size: 53239 - config_name: tatoeba.tgl features: - name: source_sentence @@ -3638,10 +3663,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 259138 + num_bytes: 188154 num_examples: 1000 - download_size: 53699 - dataset_size: 259138 + download_size: 68138 + dataset_size: 188154 - config_name: tatoeba.tha features: - name: source_sentence @@ -3654,10 +3679,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 167866 + num_bytes: 128974 num_examples: 548 - download_size: 39659 - dataset_size: 167866 + download_size: 63198 + dataset_size: 128974 - config_name: tatoeba.tur features: - name: source_sentence @@ -3670,10 +3695,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 262885 + num_bytes: 191901 num_examples: 1000 - download_size: 54137 - dataset_size: 262885 + download_size: 71885 + dataset_size: 191901 - config_name: tatoeba.urd features: - name: source_sentence @@ -3686,10 +3711,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 279712 + num_bytes: 208728 num_examples: 1000 - download_size: 60399 - dataset_size: 279712 + download_size: 88712 + dataset_size: 208728 - 
config_name: tatoeba.vie features: - name: source_sentence @@ -3702,35 +3727,10 @@ dataset_info: dtype: string splits: - name: validation - num_bytes: 282407 + num_bytes: 211423 num_examples: 1000 - download_size: 66746 - dataset_size: 282407 -- config_name: tydiqa - features: - - name: id - dtype: string - - name: title - dtype: string - - name: context - dtype: string - - name: question - dtype: string - - name: answers - sequence: - - name: answer_start - dtype: int32 - - name: text - dtype: string - splits: - - name: train - num_bytes: 52948467 - num_examples: 49881 - - name: validation - num_bytes: 5006433 - num_examples: 5077 - download_size: 29402238 - dataset_size: 57954900 + download_size: 91407 + dataset_size: 211423 - config_name: udpos.Afrikaans features: - name: tokens @@ -3757,17 +3757,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 586370 - num_examples: 1315 - name: validation - num_bytes: 91290 + num_bytes: 91302 num_examples: 194 - name: test - num_bytes: 174244 + num_bytes: 174256 num_examples: 425 - download_size: 193788 - dataset_size: 851904 + - name: train + num_bytes: 586382 + num_examples: 1315 + download_size: 355216681 + dataset_size: 851940 - config_name: udpos.Arabic features: - name: tokens @@ -3794,17 +3794,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 4453682 - num_examples: 6075 - name: validation - num_bytes: 593650 + num_bytes: 593662 num_examples: 909 - name: test - num_bytes: 973822 + num_bytes: 973834 num_examples: 1680 - download_size: 1186113 - dataset_size: 6021154 + - name: train + num_bytes: 4453694 + num_examples: 6075 + download_size: 355216681 + dataset_size: 6021190 - config_name: udpos.Basque features: - name: tokens @@ -3831,17 +3831,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 1327713 - num_examples: 5396 - name: validation - num_bytes: 438671 + num_bytes: 438683 num_examples: 1798 - name: test - num_bytes: 444644 + num_bytes: 444656 num_examples: 1799 - download_size: 703094 - dataset_size: 2211028 + - name: train + num_bytes: 1327725 + num_examples: 5396 + download_size: 355216681 + dataset_size: 2211064 - config_name: udpos.Bulgarian features: - name: tokens @@ -3868,54 +3868,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 2689767 - num_examples: 8907 - name: validation - num_bytes: 347117 + num_bytes: 347129 num_examples: 1115 - name: test - num_bytes: 339947 + num_bytes: 339959 num_examples: 1116 - download_size: 926186 - dataset_size: 3376831 -- config_name: udpos.Chinese - features: - - name: tokens - sequence: string - - name: pos_tags - sequence: - class_label: - names: - '0': ADJ - '1': ADP - '2': ADV - '3': AUX - '4': CCONJ - '5': DET - '6': INTJ - '7': NOUN - '8': NUM - '9': PART - '10': PRON - '11': PROPN - '12': PUNCT - '13': SCONJ - '14': SYM - '15': VERB - '16': X - splits: - name: train - num_bytes: 4218891 - num_examples: 18998 - - name: validation - num_bytes: 594448 - num_examples: 3038 - - name: test - num_bytes: 1236051 - num_examples: 5528 - download_size: 1471747 - dataset_size: 6049390 + num_bytes: 2689779 + num_examples: 8907 + download_size: 355216681 + dataset_size: 3376867 - config_name: udpos.Dutch features: - name: tokens @@ -3942,17 +3905,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 4517994 - num_examples: 18051 - name: validation - num_bytes: 393592 + num_bytes: 393604 num_examples: 1394 - name: test - num_bytes: 397904 + num_bytes: 397916 num_examples: 1471 - 
download_size: 1410982 - dataset_size: 5309490 + - name: train + num_bytes: 4518018 + num_examples: 18051 + download_size: 355216681 + dataset_size: 5309538 - config_name: udpos.English features: - name: tokens @@ -3979,17 +3942,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 6225509 - num_examples: 21253 - name: validation - num_bytes: 1042040 + num_bytes: 1042052 num_examples: 3974 - name: test - num_bytes: 1421148 + num_bytes: 1421160 num_examples: 5440 - download_size: 2116535 - dataset_size: 8688697 + - name: train + num_bytes: 6225545 + num_examples: 21253 + download_size: 355216681 + dataset_size: 8688757 - config_name: udpos.Estonian features: - name: tokens @@ -4016,17 +3979,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 6614893 - num_examples: 25749 - name: validation - num_bytes: 814171 + num_bytes: 814183 num_examples: 3125 - name: test - num_bytes: 1065701 + num_bytes: 1065713 num_examples: 3760 - download_size: 2619121 - dataset_size: 8494765 + - name: train + num_bytes: 6614929 + num_examples: 25749 + download_size: 355216681 + dataset_size: 8494825 - config_name: udpos.Finnish features: - name: tokens @@ -4053,17 +4016,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 5613706 - num_examples: 27198 - name: validation - num_bytes: 656646 + num_bytes: 656658 num_examples: 3239 - name: test - num_bytes: 1025726 + num_bytes: 1025738 num_examples: 4422 - download_size: 2503217 - dataset_size: 7296078 + - name: train + num_bytes: 5613742 + num_examples: 27198 + download_size: 355216681 + dataset_size: 7296138 - config_name: udpos.French features: - name: tokens @@ -4090,17 +4053,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 10118933 - num_examples: 47308 - name: validation - num_bytes: 1294096 + num_bytes: 1294108 num_examples: 5979 - name: test - num_bytes: 1731049 + num_bytes: 1731061 num_examples: 9465 - download_size: 3378680 - dataset_size: 13144078 + - name: train + num_bytes: 10118993 + num_examples: 47308 + download_size: 355216681 + dataset_size: 13144162 - config_name: udpos.German features: - name: tokens @@ -4127,17 +4090,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 54773777 - num_examples: 166849 - name: validation - num_bytes: 6044838 + num_bytes: 6044862 num_examples: 19233 - name: test - num_bytes: 7345863 + num_bytes: 7345899 num_examples: 22458 - download_size: 18623155 - dataset_size: 68164478 + - name: train + num_bytes: 54773981 + num_examples: 166849 + download_size: 355216681 + dataset_size: 68164742 - config_name: udpos.Greek features: - name: tokens @@ -4164,17 +4127,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 8932104 - num_examples: 28152 - name: validation - num_bytes: 1062447 + num_bytes: 1062459 num_examples: 2559 - name: test - num_bytes: 1028665 + num_bytes: 1028677 num_examples: 2809 - download_size: 2763293 - dataset_size: 11023216 + - name: train + num_bytes: 8932140 + num_examples: 28152 + download_size: 355216681 + dataset_size: 11023276 - config_name: udpos.Hebrew features: - name: tokens @@ -4201,17 +4164,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 2505691 - num_examples: 5241 - name: validation - num_bytes: 210013 + num_bytes: 210025 num_examples: 484 - name: test - num_bytes: 223865 + num_bytes: 223877 num_examples: 491 - download_size: 624771 - dataset_size: 2939569 + - name: train + num_bytes: 2505703 + num_examples: 5241 + 
download_size: 355216681 + dataset_size: 2939605 - config_name: udpos.Hindi features: - name: tokens @@ -4238,17 +4201,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 6690250 - num_examples: 13304 - name: validation - num_bytes: 839702 + num_bytes: 839714 num_examples: 1659 - name: test - num_bytes: 1400225 + num_bytes: 1400237 num_examples: 2684 - download_size: 1468314 - dataset_size: 8930177 + - name: train + num_bytes: 6690274 + num_examples: 13304 + download_size: 355216681 + dataset_size: 8930225 - config_name: udpos.Hungarian features: - name: tokens @@ -4275,17 +4238,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 372226 - num_examples: 910 - name: validation - num_bytes: 215879 + num_bytes: 215891 num_examples: 441 - name: test - num_bytes: 193728 + num_bytes: 193740 num_examples: 449 - download_size: 251882 - dataset_size: 781833 + - name: train + num_bytes: 372238 + num_examples: 910 + download_size: 355216681 + dataset_size: 781869 - config_name: udpos.Indonesian features: - name: tokens @@ -4312,17 +4275,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 1710678 - num_examples: 4477 - name: validation - num_bytes: 220863 + num_bytes: 220875 num_examples: 559 - name: test - num_bytes: 557101 + num_bytes: 557113 num_examples: 1557 - download_size: 684225 - dataset_size: 2488642 + - name: train + num_bytes: 1710690 + num_examples: 4477 + download_size: 355216681 + dataset_size: 2488678 - config_name: udpos.Italian features: - name: tokens @@ -4349,17 +4312,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 11299293 - num_examples: 29685 - name: validation - num_bytes: 988996 + num_bytes: 989008 num_examples: 2278 - name: test - num_bytes: 1337869 + num_bytes: 1337881 num_examples: 3518 - download_size: 3256246 - dataset_size: 13626158 + - name: train + num_bytes: 11299329 + num_examples: 29685 + download_size: 355216681 + dataset_size: 13626218 - config_name: udpos.Japanese features: - name: tokens @@ -4386,18 +4349,52 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 2792951 - num_examples: 7125 - - name: validation - num_bytes: 200356 - num_examples: 511 + - name: validation + num_bytes: 200368 + num_examples: 511 + - name: test + num_bytes: 928914 + num_examples: 2372 + - name: train + num_bytes: 2792963 + num_examples: 7125 + download_size: 355216681 + dataset_size: 3922245 +- config_name: udpos.Kazakh + features: + - name: tokens + sequence: string + - name: pos_tags + sequence: + class_label: + names: + '0': ADJ + '1': ADP + '2': ADV + '3': AUX + '4': CCONJ + '5': DET + '6': INTJ + '7': NOUN + '8': NUM + '9': PART + '10': PRON + '11': PROPN + '12': PUNCT + '13': SCONJ + '14': SYM + '15': VERB + '16': X + splits: - name: test - num_bytes: 928902 - num_examples: 2372 - download_size: 1012282 - dataset_size: 3922209 -- config_name: udpos.Kazakh + num_bytes: 228936 + num_examples: 1047 + - name: train + num_bytes: 11450 + num_examples: 31 + download_size: 355216681 + dataset_size: 240386 +- config_name: udpos.Korean features: - name: tokens sequence: string @@ -4423,15 +4420,18 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 11438 - num_examples: 31 + - name: validation + num_bytes: 782599 + num_examples: 3016 - name: test - num_bytes: 228924 - num_examples: 1047 - download_size: 76300 - dataset_size: 240362 -- config_name: udpos.Korean + num_bytes: 1162551 + num_examples: 4276 + - name: train + num_bytes: 7341303 + 
num_examples: 27410 + download_size: 355216681 + dataset_size: 9286453 +- config_name: udpos.Chinese features: - name: tokens sequence: string @@ -4457,17 +4457,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 7341267 - num_examples: 27410 - name: validation - num_bytes: 782587 - num_examples: 3016 + num_bytes: 594460 + num_examples: 3038 - name: test - num_bytes: 1162539 - num_examples: 4276 - download_size: 3115101 - dataset_size: 9286393 + num_bytes: 1236063 + num_examples: 5528 + - name: train + num_bytes: 4218915 + num_examples: 18998 + download_size: 355216681 + dataset_size: 6049438 - config_name: udpos.Marathi features: - name: tokens @@ -4494,17 +4494,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 59023 - num_examples: 373 - name: validation - num_bytes: 8497 + num_bytes: 8509 num_examples: 46 - name: test - num_bytes: 7871 + num_bytes: 7883 num_examples: 47 - download_size: 22133 - dataset_size: 75391 + - name: train + num_bytes: 59035 + num_examples: 373 + download_size: 355216681 + dataset_size: 75427 - config_name: udpos.Persian features: - name: tokens @@ -4531,17 +4531,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 2400776 - num_examples: 4798 - name: validation - num_bytes: 317053 + num_bytes: 317065 num_examples: 599 - name: test - num_bytes: 320683 + num_bytes: 320695 num_examples: 600 - download_size: 606912 - dataset_size: 3038512 + - name: train + num_bytes: 2400788 + num_examples: 4798 + download_size: 355216681 + dataset_size: 3038548 - config_name: udpos.Portuguese features: - name: tokens @@ -4568,17 +4568,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 7669556 - num_examples: 17992 - name: validation - num_bytes: 712397 + num_bytes: 712409 num_examples: 1770 - name: test - num_bytes: 1082582 + num_bytes: 1082594 num_examples: 2681 - download_size: 2505672 - dataset_size: 9464535 + - name: train + num_bytes: 7669580 + num_examples: 17992 + download_size: 355216681 + dataset_size: 9464583 - config_name: udpos.Russian features: - name: tokens @@ -4605,17 +4605,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 24230098 - num_examples: 67435 - name: validation - num_bytes: 3457031 + num_bytes: 3457043 num_examples: 9960 - name: test - num_bytes: 4236693 + num_bytes: 4236717 num_examples: 11336 - download_size: 8818512 - dataset_size: 31923822 + - name: train + num_bytes: 24230182 + num_examples: 67435 + download_size: 355216681 + dataset_size: 31923942 - config_name: udpos.Spanish features: - name: tokens @@ -4642,17 +4642,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 13858406 - num_examples: 28492 - name: validation - num_bytes: 1498765 + num_bytes: 1498777 num_examples: 3054 - name: test - num_bytes: 1476500 + num_bytes: 1476512 num_examples: 3147 - download_size: 4347905 - dataset_size: 16833671 + - name: train + num_bytes: 13858442 + num_examples: 28492 + download_size: 355216681 + dataset_size: 16833731 - config_name: udpos.Tagalog features: - name: tokens @@ -4680,10 +4680,10 @@ dataset_info: '16': X splits: - name: test - num_bytes: 5153 + num_bytes: 5165 num_examples: 55 - download_size: 3345 - dataset_size: 5153 + download_size: 355216681 + dataset_size: 5165 - config_name: udpos.Tamil features: - name: tokens @@ -4710,17 +4710,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 202596 - num_examples: 400 - name: validation - num_bytes: 40031 + num_bytes: 40043 
num_examples: 80 - name: test - num_bytes: 62366 + num_bytes: 62378 num_examples: 120 - download_size: 73764 - dataset_size: 304993 + - name: train + num_bytes: 202608 + num_examples: 400 + download_size: 355216681 + dataset_size: 305029 - config_name: udpos.Telugu features: - name: tokens @@ -4747,17 +4747,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 138049 - num_examples: 1051 - name: validation - num_bytes: 17990 + num_bytes: 18002 num_examples: 131 - name: test - num_bytes: 19575 + num_bytes: 19587 num_examples: 146 - download_size: 46045 - dataset_size: 175614 + - name: train + num_bytes: 138061 + num_examples: 1051 + download_size: 355216681 + dataset_size: 175650 - config_name: udpos.Thai features: - name: tokens @@ -4785,10 +4785,10 @@ dataset_info: '16': X splits: - name: test - num_bytes: 561336 + num_bytes: 561348 num_examples: 1000 - download_size: 92925 - dataset_size: 561336 + download_size: 355216681 + dataset_size: 561348 - config_name: udpos.Turkish features: - name: tokens @@ -4815,17 +4815,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 704405 - num_examples: 3664 - name: validation - num_bytes: 186455 + num_bytes: 186467 num_examples: 988 - name: test - num_bytes: 827382 + num_bytes: 827394 num_examples: 4785 - download_size: 581177 - dataset_size: 1718242 + - name: train + num_bytes: 704417 + num_examples: 3664 + download_size: 355216681 + dataset_size: 1718278 - config_name: udpos.Urdu features: - name: tokens @@ -4852,17 +4852,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 2107362 - num_examples: 4043 - name: validation - num_bytes: 284261 + num_bytes: 284273 num_examples: 552 - name: test - num_bytes: 288553 + num_bytes: 288565 num_examples: 535 - download_size: 499594 - dataset_size: 2680176 + - name: train + num_bytes: 2107374 + num_examples: 4043 + download_size: 355216681 + dataset_size: 2680212 - config_name: udpos.Vietnamese features: - name: tokens @@ -4889,17 +4889,17 @@ dataset_info: '15': VERB '16': X splits: - - name: train - num_bytes: 367335 - num_examples: 1400 - name: validation - num_bytes: 206188 + num_bytes: 206200 num_examples: 800 - name: test - num_bytes: 214063 + num_bytes: 214075 num_examples: 800 - download_size: 181239 - dataset_size: 787586 + - name: train + num_bytes: 367347 + num_examples: 1400 + download_size: 355216681 + dataset_size: 787622 - config_name: udpos.Yoruba features: - name: tokens @@ -4927,1161 +4927,10 @@ dataset_info: '16': X splits: - name: test - num_bytes: 44656 + num_bytes: 44668 num_examples: 100 - download_size: 10151 - dataset_size: 44656 -configs: -- config_name: MLQA.ar.ar - data_files: - - split: test - path: MLQA.ar.ar/test-* - - split: validation - path: MLQA.ar.ar/validation-* -- config_name: MLQA.ar.de - data_files: - - split: test - path: MLQA.ar.de/test-* - - split: validation - path: MLQA.ar.de/validation-* -- config_name: MLQA.ar.en - data_files: - - split: test - path: MLQA.ar.en/test-* - - split: validation - path: MLQA.ar.en/validation-* -- config_name: MLQA.ar.es - data_files: - - split: test - path: MLQA.ar.es/test-* - - split: validation - path: MLQA.ar.es/validation-* -- config_name: MLQA.ar.hi - data_files: - - split: test - path: MLQA.ar.hi/test-* - - split: validation - path: MLQA.ar.hi/validation-* -- config_name: MLQA.ar.vi - data_files: - - split: test - path: MLQA.ar.vi/test-* - - split: validation - path: MLQA.ar.vi/validation-* -- config_name: MLQA.ar.zh - data_files: - - split: test - path: 
MLQA.ar.zh/test-* - - split: validation - path: MLQA.ar.zh/validation-* -- config_name: MLQA.de.ar - data_files: - - split: test - path: MLQA.de.ar/test-* - - split: validation - path: MLQA.de.ar/validation-* -- config_name: MLQA.de.de - data_files: - - split: test - path: MLQA.de.de/test-* - - split: validation - path: MLQA.de.de/validation-* -- config_name: MLQA.de.en - data_files: - - split: test - path: MLQA.de.en/test-* - - split: validation - path: MLQA.de.en/validation-* -- config_name: MLQA.de.es - data_files: - - split: test - path: MLQA.de.es/test-* - - split: validation - path: MLQA.de.es/validation-* -- config_name: MLQA.de.hi - data_files: - - split: test - path: MLQA.de.hi/test-* - - split: validation - path: MLQA.de.hi/validation-* -- config_name: MLQA.de.vi - data_files: - - split: test - path: MLQA.de.vi/test-* - - split: validation - path: MLQA.de.vi/validation-* -- config_name: MLQA.de.zh - data_files: - - split: test - path: MLQA.de.zh/test-* - - split: validation - path: MLQA.de.zh/validation-* -- config_name: MLQA.en.ar - data_files: - - split: test - path: MLQA.en.ar/test-* - - split: validation - path: MLQA.en.ar/validation-* -- config_name: MLQA.en.de - data_files: - - split: test - path: MLQA.en.de/test-* - - split: validation - path: MLQA.en.de/validation-* -- config_name: MLQA.en.en - data_files: - - split: test - path: MLQA.en.en/test-* - - split: validation - path: MLQA.en.en/validation-* -- config_name: MLQA.en.es - data_files: - - split: test - path: MLQA.en.es/test-* - - split: validation - path: MLQA.en.es/validation-* -- config_name: MLQA.en.hi - data_files: - - split: test - path: MLQA.en.hi/test-* - - split: validation - path: MLQA.en.hi/validation-* -- config_name: MLQA.en.vi - data_files: - - split: test - path: MLQA.en.vi/test-* - - split: validation - path: MLQA.en.vi/validation-* -- config_name: MLQA.en.zh - data_files: - - split: test - path: MLQA.en.zh/test-* - - split: validation - path: MLQA.en.zh/validation-* -- config_name: MLQA.es.ar - data_files: - - split: test - path: MLQA.es.ar/test-* - - split: validation - path: MLQA.es.ar/validation-* -- config_name: MLQA.es.de - data_files: - - split: test - path: MLQA.es.de/test-* - - split: validation - path: MLQA.es.de/validation-* -- config_name: MLQA.es.en - data_files: - - split: test - path: MLQA.es.en/test-* - - split: validation - path: MLQA.es.en/validation-* -- config_name: MLQA.es.es - data_files: - - split: test - path: MLQA.es.es/test-* - - split: validation - path: MLQA.es.es/validation-* -- config_name: MLQA.es.hi - data_files: - - split: test - path: MLQA.es.hi/test-* - - split: validation - path: MLQA.es.hi/validation-* -- config_name: MLQA.es.vi - data_files: - - split: test - path: MLQA.es.vi/test-* - - split: validation - path: MLQA.es.vi/validation-* -- config_name: MLQA.es.zh - data_files: - - split: test - path: MLQA.es.zh/test-* - - split: validation - path: MLQA.es.zh/validation-* -- config_name: MLQA.hi.ar - data_files: - - split: test - path: MLQA.hi.ar/test-* - - split: validation - path: MLQA.hi.ar/validation-* -- config_name: MLQA.hi.de - data_files: - - split: test - path: MLQA.hi.de/test-* - - split: validation - path: MLQA.hi.de/validation-* -- config_name: MLQA.hi.en - data_files: - - split: test - path: MLQA.hi.en/test-* - - split: validation - path: MLQA.hi.en/validation-* -- config_name: MLQA.hi.es - data_files: - - split: test - path: MLQA.hi.es/test-* - - split: validation - path: MLQA.hi.es/validation-* -- config_name: MLQA.hi.hi - data_files: - - split: test 
- path: MLQA.hi.hi/test-* - - split: validation - path: MLQA.hi.hi/validation-* -- config_name: MLQA.hi.vi - data_files: - - split: test - path: MLQA.hi.vi/test-* - - split: validation - path: MLQA.hi.vi/validation-* -- config_name: MLQA.hi.zh - data_files: - - split: test - path: MLQA.hi.zh/test-* - - split: validation - path: MLQA.hi.zh/validation-* -- config_name: MLQA.vi.ar - data_files: - - split: test - path: MLQA.vi.ar/test-* - - split: validation - path: MLQA.vi.ar/validation-* -- config_name: MLQA.vi.de - data_files: - - split: test - path: MLQA.vi.de/test-* - - split: validation - path: MLQA.vi.de/validation-* -- config_name: MLQA.vi.en - data_files: - - split: test - path: MLQA.vi.en/test-* - - split: validation - path: MLQA.vi.en/validation-* -- config_name: MLQA.vi.es - data_files: - - split: test - path: MLQA.vi.es/test-* - - split: validation - path: MLQA.vi.es/validation-* -- config_name: MLQA.vi.hi - data_files: - - split: test - path: MLQA.vi.hi/test-* - - split: validation - path: MLQA.vi.hi/validation-* -- config_name: MLQA.vi.vi - data_files: - - split: test - path: MLQA.vi.vi/test-* - - split: validation - path: MLQA.vi.vi/validation-* -- config_name: MLQA.vi.zh - data_files: - - split: test - path: MLQA.vi.zh/test-* - - split: validation - path: MLQA.vi.zh/validation-* -- config_name: MLQA.zh.ar - data_files: - - split: test - path: MLQA.zh.ar/test-* - - split: validation - path: MLQA.zh.ar/validation-* -- config_name: MLQA.zh.de - data_files: - - split: test - path: MLQA.zh.de/test-* - - split: validation - path: MLQA.zh.de/validation-* -- config_name: MLQA.zh.en - data_files: - - split: test - path: MLQA.zh.en/test-* - - split: validation - path: MLQA.zh.en/validation-* -- config_name: MLQA.zh.es - data_files: - - split: test - path: MLQA.zh.es/test-* - - split: validation - path: MLQA.zh.es/validation-* -- config_name: MLQA.zh.hi - data_files: - - split: test - path: MLQA.zh.hi/test-* - - split: validation - path: MLQA.zh.hi/validation-* -- config_name: MLQA.zh.vi - data_files: - - split: test - path: MLQA.zh.vi/test-* - - split: validation - path: MLQA.zh.vi/validation-* -- config_name: MLQA.zh.zh - data_files: - - split: test - path: MLQA.zh.zh/test-* - - split: validation - path: MLQA.zh.zh/validation-* -- config_name: PAN-X.af - data_files: - - split: train - path: PAN-X.af/train-* - - split: validation - path: PAN-X.af/validation-* - - split: test - path: PAN-X.af/test-* -- config_name: PAN-X.ar - data_files: - - split: train - path: PAN-X.ar/train-* - - split: validation - path: PAN-X.ar/validation-* - - split: test - path: PAN-X.ar/test-* -- config_name: PAN-X.bg - data_files: - - split: train - path: PAN-X.bg/train-* - - split: validation - path: PAN-X.bg/validation-* - - split: test - path: PAN-X.bg/test-* -- config_name: PAN-X.bn - data_files: - - split: train - path: PAN-X.bn/train-* - - split: validation - path: PAN-X.bn/validation-* - - split: test - path: PAN-X.bn/test-* -- config_name: PAN-X.de - data_files: - - split: train - path: PAN-X.de/train-* - - split: validation - path: PAN-X.de/validation-* - - split: test - path: PAN-X.de/test-* -- config_name: PAN-X.el - data_files: - - split: train - path: PAN-X.el/train-* - - split: validation - path: PAN-X.el/validation-* - - split: test - path: PAN-X.el/test-* -- config_name: PAN-X.en - data_files: - - split: train - path: PAN-X.en/train-* - - split: validation - path: PAN-X.en/validation-* - - split: test - path: PAN-X.en/test-* -- config_name: PAN-X.es - data_files: - - split: train - path: 
PAN-X.es/train-* - - split: validation - path: PAN-X.es/validation-* - - split: test - path: PAN-X.es/test-* -- config_name: PAN-X.et - data_files: - - split: train - path: PAN-X.et/train-* - - split: validation - path: PAN-X.et/validation-* - - split: test - path: PAN-X.et/test-* -- config_name: PAN-X.eu - data_files: - - split: train - path: PAN-X.eu/train-* - - split: validation - path: PAN-X.eu/validation-* - - split: test - path: PAN-X.eu/test-* -- config_name: PAN-X.fa - data_files: - - split: train - path: PAN-X.fa/train-* - - split: validation - path: PAN-X.fa/validation-* - - split: test - path: PAN-X.fa/test-* -- config_name: PAN-X.fi - data_files: - - split: train - path: PAN-X.fi/train-* - - split: validation - path: PAN-X.fi/validation-* - - split: test - path: PAN-X.fi/test-* -- config_name: PAN-X.fr - data_files: - - split: train - path: PAN-X.fr/train-* - - split: validation - path: PAN-X.fr/validation-* - - split: test - path: PAN-X.fr/test-* -- config_name: PAN-X.he - data_files: - - split: train - path: PAN-X.he/train-* - - split: validation - path: PAN-X.he/validation-* - - split: test - path: PAN-X.he/test-* -- config_name: PAN-X.hi - data_files: - - split: train - path: PAN-X.hi/train-* - - split: validation - path: PAN-X.hi/validation-* - - split: test - path: PAN-X.hi/test-* -- config_name: PAN-X.hu - data_files: - - split: train - path: PAN-X.hu/train-* - - split: validation - path: PAN-X.hu/validation-* - - split: test - path: PAN-X.hu/test-* -- config_name: PAN-X.id - data_files: - - split: train - path: PAN-X.id/train-* - - split: validation - path: PAN-X.id/validation-* - - split: test - path: PAN-X.id/test-* -- config_name: PAN-X.it - data_files: - - split: train - path: PAN-X.it/train-* - - split: validation - path: PAN-X.it/validation-* - - split: test - path: PAN-X.it/test-* -- config_name: PAN-X.ja - data_files: - - split: train - path: PAN-X.ja/train-* - - split: validation - path: PAN-X.ja/validation-* - - split: test - path: PAN-X.ja/test-* -- config_name: PAN-X.jv - data_files: - - split: train - path: PAN-X.jv/train-* - - split: validation - path: PAN-X.jv/validation-* - - split: test - path: PAN-X.jv/test-* -- config_name: PAN-X.ka - data_files: - - split: train - path: PAN-X.ka/train-* - - split: validation - path: PAN-X.ka/validation-* - - split: test - path: PAN-X.ka/test-* -- config_name: PAN-X.kk - data_files: - - split: train - path: PAN-X.kk/train-* - - split: validation - path: PAN-X.kk/validation-* - - split: test - path: PAN-X.kk/test-* -- config_name: PAN-X.ko - data_files: - - split: train - path: PAN-X.ko/train-* - - split: validation - path: PAN-X.ko/validation-* - - split: test - path: PAN-X.ko/test-* -- config_name: PAN-X.ml - data_files: - - split: train - path: PAN-X.ml/train-* - - split: validation - path: PAN-X.ml/validation-* - - split: test - path: PAN-X.ml/test-* -- config_name: PAN-X.mr - data_files: - - split: train - path: PAN-X.mr/train-* - - split: validation - path: PAN-X.mr/validation-* - - split: test - path: PAN-X.mr/test-* -- config_name: PAN-X.ms - data_files: - - split: train - path: PAN-X.ms/train-* - - split: validation - path: PAN-X.ms/validation-* - - split: test - path: PAN-X.ms/test-* -- config_name: PAN-X.my - data_files: - - split: train - path: PAN-X.my/train-* - - split: validation - path: PAN-X.my/validation-* - - split: test - path: PAN-X.my/test-* -- config_name: PAN-X.nl - data_files: - - split: train - path: PAN-X.nl/train-* - - split: validation - path: PAN-X.nl/validation-* - - split: test - path: 
PAN-X.nl/test-* -- config_name: PAN-X.pt - data_files: - - split: train - path: PAN-X.pt/train-* - - split: validation - path: PAN-X.pt/validation-* - - split: test - path: PAN-X.pt/test-* -- config_name: PAN-X.ru - data_files: - - split: train - path: PAN-X.ru/train-* - - split: validation - path: PAN-X.ru/validation-* - - split: test - path: PAN-X.ru/test-* -- config_name: PAN-X.sw - data_files: - - split: train - path: PAN-X.sw/train-* - - split: validation - path: PAN-X.sw/validation-* - - split: test - path: PAN-X.sw/test-* -- config_name: PAN-X.ta - data_files: - - split: train - path: PAN-X.ta/train-* - - split: validation - path: PAN-X.ta/validation-* - - split: test - path: PAN-X.ta/test-* -- config_name: PAN-X.te - data_files: - - split: train - path: PAN-X.te/train-* - - split: validation - path: PAN-X.te/validation-* - - split: test - path: PAN-X.te/test-* -- config_name: PAN-X.th - data_files: - - split: train - path: PAN-X.th/train-* - - split: validation - path: PAN-X.th/validation-* - - split: test - path: PAN-X.th/test-* -- config_name: PAN-X.tl - data_files: - - split: train - path: PAN-X.tl/train-* - - split: validation - path: PAN-X.tl/validation-* - - split: test - path: PAN-X.tl/test-* -- config_name: PAN-X.tr - data_files: - - split: train - path: PAN-X.tr/train-* - - split: validation - path: PAN-X.tr/validation-* - - split: test - path: PAN-X.tr/test-* -- config_name: PAN-X.ur - data_files: - - split: train - path: PAN-X.ur/train-* - - split: validation - path: PAN-X.ur/validation-* - - split: test - path: PAN-X.ur/test-* -- config_name: PAN-X.vi - data_files: - - split: train - path: PAN-X.vi/train-* - - split: validation - path: PAN-X.vi/validation-* - - split: test - path: PAN-X.vi/test-* -- config_name: PAN-X.yo - data_files: - - split: train - path: PAN-X.yo/train-* - - split: validation - path: PAN-X.yo/validation-* - - split: test - path: PAN-X.yo/test-* -- config_name: PAN-X.zh - data_files: - - split: train - path: PAN-X.zh/train-* - - split: validation - path: PAN-X.zh/validation-* - - split: test - path: PAN-X.zh/test-* -- config_name: PAWS-X.de - data_files: - - split: train - path: PAWS-X.de/train-* - - split: validation - path: PAWS-X.de/validation-* - - split: test - path: PAWS-X.de/test-* -- config_name: PAWS-X.en - data_files: - - split: train - path: PAWS-X.en/train-* - - split: validation - path: PAWS-X.en/validation-* - - split: test - path: PAWS-X.en/test-* -- config_name: PAWS-X.es - data_files: - - split: train - path: PAWS-X.es/train-* - - split: validation - path: PAWS-X.es/validation-* - - split: test - path: PAWS-X.es/test-* -- config_name: PAWS-X.fr - data_files: - - split: train - path: PAWS-X.fr/train-* - - split: validation - path: PAWS-X.fr/validation-* - - split: test - path: PAWS-X.fr/test-* -- config_name: PAWS-X.ja - data_files: - - split: train - path: PAWS-X.ja/train-* - - split: validation - path: PAWS-X.ja/validation-* - - split: test - path: PAWS-X.ja/test-* -- config_name: PAWS-X.ko - data_files: - - split: train - path: PAWS-X.ko/train-* - - split: validation - path: PAWS-X.ko/validation-* - - split: test - path: PAWS-X.ko/test-* -- config_name: PAWS-X.zh - data_files: - - split: train - path: PAWS-X.zh/train-* - - split: validation - path: PAWS-X.zh/validation-* - - split: test - path: PAWS-X.zh/test-* -- config_name: SQuAD - data_files: - - split: train - path: SQuAD/train-* - - split: validation - path: SQuAD/validation-* -- config_name: XNLI - data_files: - - split: test - path: XNLI/test-* - - split: validation - 
path: XNLI/validation-* -- config_name: XQuAD.ar - data_files: - - split: validation - path: XQuAD.ar/validation-* -- config_name: XQuAD.de - data_files: - - split: validation - path: XQuAD.de/validation-* -- config_name: XQuAD.el - data_files: - - split: validation - path: XQuAD.el/validation-* -- config_name: XQuAD.en - data_files: - - split: validation - path: XQuAD.en/validation-* -- config_name: XQuAD.es - data_files: - - split: validation - path: XQuAD.es/validation-* -- config_name: XQuAD.hi - data_files: - - split: validation - path: XQuAD.hi/validation-* -- config_name: XQuAD.ru - data_files: - - split: validation - path: XQuAD.ru/validation-* -- config_name: XQuAD.th - data_files: - - split: validation - path: XQuAD.th/validation-* -- config_name: XQuAD.tr - data_files: - - split: validation - path: XQuAD.tr/validation-* -- config_name: XQuAD.vi - data_files: - - split: validation - path: XQuAD.vi/validation-* -- config_name: XQuAD.zh - data_files: - - split: validation - path: XQuAD.zh/validation-* -- config_name: bucc18.de - data_files: - - split: validation - path: bucc18.de/validation-* - - split: test - path: bucc18.de/test-* -- config_name: bucc18.fr - data_files: - - split: validation - path: bucc18.fr/validation-* - - split: test - path: bucc18.fr/test-* -- config_name: bucc18.ru - data_files: - - split: validation - path: bucc18.ru/validation-* - - split: test - path: bucc18.ru/test-* -- config_name: bucc18.zh - data_files: - - split: validation - path: bucc18.zh/validation-* - - split: test - path: bucc18.zh/test-* -- config_name: tatoeba.afr - data_files: - - split: validation - path: tatoeba.afr/validation-* -- config_name: tatoeba.ara - data_files: - - split: validation - path: tatoeba.ara/validation-* -- config_name: tatoeba.ben - data_files: - - split: validation - path: tatoeba.ben/validation-* -- config_name: tatoeba.bul - data_files: - - split: validation - path: tatoeba.bul/validation-* -- config_name: tatoeba.cmn - data_files: - - split: validation - path: tatoeba.cmn/validation-* -- config_name: tatoeba.deu - data_files: - - split: validation - path: tatoeba.deu/validation-* -- config_name: tatoeba.ell - data_files: - - split: validation - path: tatoeba.ell/validation-* -- config_name: tatoeba.est - data_files: - - split: validation - path: tatoeba.est/validation-* -- config_name: tatoeba.eus - data_files: - - split: validation - path: tatoeba.eus/validation-* -- config_name: tatoeba.fin - data_files: - - split: validation - path: tatoeba.fin/validation-* -- config_name: tatoeba.fra - data_files: - - split: validation - path: tatoeba.fra/validation-* -- config_name: tatoeba.heb - data_files: - - split: validation - path: tatoeba.heb/validation-* -- config_name: tatoeba.hin - data_files: - - split: validation - path: tatoeba.hin/validation-* -- config_name: tatoeba.hun - data_files: - - split: validation - path: tatoeba.hun/validation-* -- config_name: tatoeba.ind - data_files: - - split: validation - path: tatoeba.ind/validation-* -- config_name: tatoeba.ita - data_files: - - split: validation - path: tatoeba.ita/validation-* -- config_name: tatoeba.jav - data_files: - - split: validation - path: tatoeba.jav/validation-* -- config_name: tatoeba.jpn - data_files: - - split: validation - path: tatoeba.jpn/validation-* -- config_name: tatoeba.kat - data_files: - - split: validation - path: tatoeba.kat/validation-* -- config_name: tatoeba.kaz - data_files: - - split: validation - path: tatoeba.kaz/validation-* -- config_name: tatoeba.kor - data_files: - - split: 
validation - path: tatoeba.kor/validation-* -- config_name: tatoeba.mal - data_files: - - split: validation - path: tatoeba.mal/validation-* -- config_name: tatoeba.mar - data_files: - - split: validation - path: tatoeba.mar/validation-* -- config_name: tatoeba.nld - data_files: - - split: validation - path: tatoeba.nld/validation-* -- config_name: tatoeba.pes - data_files: - - split: validation - path: tatoeba.pes/validation-* -- config_name: tatoeba.por - data_files: - - split: validation - path: tatoeba.por/validation-* -- config_name: tatoeba.rus - data_files: - - split: validation - path: tatoeba.rus/validation-* -- config_name: tatoeba.spa - data_files: - - split: validation - path: tatoeba.spa/validation-* -- config_name: tatoeba.swh - data_files: - - split: validation - path: tatoeba.swh/validation-* -- config_name: tatoeba.tam - data_files: - - split: validation - path: tatoeba.tam/validation-* -- config_name: tatoeba.tel - data_files: - - split: validation - path: tatoeba.tel/validation-* -- config_name: tatoeba.tgl - data_files: - - split: validation - path: tatoeba.tgl/validation-* -- config_name: tatoeba.tha - data_files: - - split: validation - path: tatoeba.tha/validation-* -- config_name: tatoeba.tur - data_files: - - split: validation - path: tatoeba.tur/validation-* -- config_name: tatoeba.urd - data_files: - - split: validation - path: tatoeba.urd/validation-* -- config_name: tatoeba.vie - data_files: - - split: validation - path: tatoeba.vie/validation-* -- config_name: tydiqa - data_files: - - split: train - path: tydiqa/train-* - - split: validation - path: tydiqa/validation-* -- config_name: udpos.Afrikaans - data_files: - - split: train - path: udpos.Afrikaans/train-* - - split: validation - path: udpos.Afrikaans/validation-* - - split: test - path: udpos.Afrikaans/test-* -- config_name: udpos.Arabic - data_files: - - split: train - path: udpos.Arabic/train-* - - split: validation - path: udpos.Arabic/validation-* - - split: test - path: udpos.Arabic/test-* -- config_name: udpos.Basque - data_files: - - split: train - path: udpos.Basque/train-* - - split: validation - path: udpos.Basque/validation-* - - split: test - path: udpos.Basque/test-* -- config_name: udpos.Bulgarian - data_files: - - split: train - path: udpos.Bulgarian/train-* - - split: validation - path: udpos.Bulgarian/validation-* - - split: test - path: udpos.Bulgarian/test-* -- config_name: udpos.Chinese - data_files: - - split: train - path: udpos.Chinese/train-* - - split: validation - path: udpos.Chinese/validation-* - - split: test - path: udpos.Chinese/test-* -- config_name: udpos.Dutch - data_files: - - split: train - path: udpos.Dutch/train-* - - split: validation - path: udpos.Dutch/validation-* - - split: test - path: udpos.Dutch/test-* -- config_name: udpos.English - data_files: - - split: train - path: udpos.English/train-* - - split: validation - path: udpos.English/validation-* - - split: test - path: udpos.English/test-* -- config_name: udpos.Estonian - data_files: - - split: train - path: udpos.Estonian/train-* - - split: validation - path: udpos.Estonian/validation-* - - split: test - path: udpos.Estonian/test-* -- config_name: udpos.Finnish - data_files: - - split: train - path: udpos.Finnish/train-* - - split: validation - path: udpos.Finnish/validation-* - - split: test - path: udpos.Finnish/test-* -- config_name: udpos.French - data_files: - - split: train - path: udpos.French/train-* - - split: validation - path: udpos.French/validation-* - - split: test - path: 
udpos.French/test-* -- config_name: udpos.German - data_files: - - split: train - path: udpos.German/train-* - - split: validation - path: udpos.German/validation-* - - split: test - path: udpos.German/test-* -- config_name: udpos.Greek - data_files: - - split: train - path: udpos.Greek/train-* - - split: validation - path: udpos.Greek/validation-* - - split: test - path: udpos.Greek/test-* -- config_name: udpos.Hebrew - data_files: - - split: train - path: udpos.Hebrew/train-* - - split: validation - path: udpos.Hebrew/validation-* - - split: test - path: udpos.Hebrew/test-* -- config_name: udpos.Hindi - data_files: - - split: train - path: udpos.Hindi/train-* - - split: validation - path: udpos.Hindi/validation-* - - split: test - path: udpos.Hindi/test-* -- config_name: udpos.Hungarian - data_files: - - split: train - path: udpos.Hungarian/train-* - - split: validation - path: udpos.Hungarian/validation-* - - split: test - path: udpos.Hungarian/test-* -- config_name: udpos.Indonesian - data_files: - - split: train - path: udpos.Indonesian/train-* - - split: validation - path: udpos.Indonesian/validation-* - - split: test - path: udpos.Indonesian/test-* -- config_name: udpos.Italian - data_files: - - split: train - path: udpos.Italian/train-* - - split: validation - path: udpos.Italian/validation-* - - split: test - path: udpos.Italian/test-* -- config_name: udpos.Japanese - data_files: - - split: train - path: udpos.Japanese/train-* - - split: validation - path: udpos.Japanese/validation-* - - split: test - path: udpos.Japanese/test-* -- config_name: udpos.Kazakh - data_files: - - split: train - path: udpos.Kazakh/train-* - - split: test - path: udpos.Kazakh/test-* -- config_name: udpos.Korean - data_files: - - split: train - path: udpos.Korean/train-* - - split: validation - path: udpos.Korean/validation-* - - split: test - path: udpos.Korean/test-* -- config_name: udpos.Marathi - data_files: - - split: train - path: udpos.Marathi/train-* - - split: validation - path: udpos.Marathi/validation-* - - split: test - path: udpos.Marathi/test-* -- config_name: udpos.Persian - data_files: - - split: train - path: udpos.Persian/train-* - - split: validation - path: udpos.Persian/validation-* - - split: test - path: udpos.Persian/test-* -- config_name: udpos.Portuguese - data_files: - - split: train - path: udpos.Portuguese/train-* - - split: validation - path: udpos.Portuguese/validation-* - - split: test - path: udpos.Portuguese/test-* -- config_name: udpos.Russian - data_files: - - split: train - path: udpos.Russian/train-* - - split: validation - path: udpos.Russian/validation-* - - split: test - path: udpos.Russian/test-* -- config_name: udpos.Spanish - data_files: - - split: train - path: udpos.Spanish/train-* - - split: validation - path: udpos.Spanish/validation-* - - split: test - path: udpos.Spanish/test-* -- config_name: udpos.Tagalog - data_files: - - split: test - path: udpos.Tagalog/test-* -- config_name: udpos.Tamil - data_files: - - split: train - path: udpos.Tamil/train-* - - split: validation - path: udpos.Tamil/validation-* - - split: test - path: udpos.Tamil/test-* -- config_name: udpos.Telugu - data_files: - - split: train - path: udpos.Telugu/train-* - - split: validation - path: udpos.Telugu/validation-* - - split: test - path: udpos.Telugu/test-* -- config_name: udpos.Thai - data_files: - - split: test - path: udpos.Thai/test-* -- config_name: udpos.Turkish - data_files: - - split: train - path: udpos.Turkish/train-* - - split: validation - path: 
udpos.Turkish/validation-* - - split: test - path: udpos.Turkish/test-* -- config_name: udpos.Urdu - data_files: - - split: train - path: udpos.Urdu/train-* - - split: validation - path: udpos.Urdu/validation-* - - split: test - path: udpos.Urdu/test-* -- config_name: udpos.Vietnamese - data_files: - - split: train - path: udpos.Vietnamese/train-* - - split: validation - path: udpos.Vietnamese/validation-* - - split: test - path: udpos.Vietnamese/test-* -- config_name: udpos.Yoruba - data_files: - - split: test - path: udpos.Yoruba/test-* + download_size: 355216681 + dataset_size: 44668 --- # Dataset Card for "xtreme" @@ -6116,9 +4965,9 @@ configs: - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) -- **Size of downloaded dataset files:** 15.88 GB -- **Size of the generated dataset:** 1.08 GB -- **Total amount of disk used:** 16.96 GB +- **Size of downloaded dataset files:** 15143.21 MB +- **Size of the generated dataset:** 1027.42 MB +- **Total amount of disk used:** 16170.64 MB ### Dataset Summary @@ -6152,9 +5001,9 @@ Niger-Congo languages Swahili and Yoruba, spoken in Africa. #### MLQA.ar.ar -- **Size of downloaded dataset files:** 75.72 MB -- **Size of the generated dataset:** 9.20 MB -- **Total amount of disk used:** 84.91 MB +- **Size of downloaded dataset files:** 72.21 MB +- **Size of the generated dataset:** 8.77 MB +- **Total amount of disk used:** 80.98 MB An example of 'validation' looks as follows. ``` @@ -6163,9 +5012,9 @@ An example of 'validation' looks as follows. #### MLQA.ar.de -- **Size of downloaded dataset files:** 75.72 MB -- **Size of the generated dataset:** 2.55 MB -- **Total amount of disk used:** 78.27 MB +- **Size of downloaded dataset files:** 72.21 MB +- **Size of the generated dataset:** 2.43 MB +- **Total amount of disk used:** 74.64 MB An example of 'validation' looks as follows. ``` @@ -6174,9 +5023,9 @@ An example of 'validation' looks as follows. #### MLQA.ar.en -- **Size of downloaded dataset files:** 75.72 MB -- **Size of the generated dataset:** 9.04 MB -- **Total amount of disk used:** 84.76 MB +- **Size of downloaded dataset files:** 72.21 MB +- **Size of the generated dataset:** 8.62 MB +- **Total amount of disk used:** 80.83 MB An example of 'validation' looks as follows. ``` @@ -6185,9 +5034,9 @@ An example of 'validation' looks as follows. #### MLQA.ar.es -- **Size of downloaded dataset files:** 75.72 MB -- **Size of the generated dataset:** 3.27 MB -- **Total amount of disk used:** 78.99 MB +- **Size of downloaded dataset files:** 72.21 MB +- **Size of the generated dataset:** 3.12 MB +- **Total amount of disk used:** 75.33 MB An example of 'validation' looks as follows. ``` @@ -6196,9 +5045,9 @@ An example of 'validation' looks as follows. #### MLQA.ar.hi -- **Size of downloaded dataset files:** 75.72 MB -- **Size of the generated dataset:** 3.32 MB -- **Total amount of disk used:** 79.04 MB +- **Size of downloaded dataset files:** 72.21 MB +- **Size of the generated dataset:** 3.17 MB +- **Total amount of disk used:** 75.38 MB An example of 'validation' looks as follows. 
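As a minimal sketch (not part of the original card), such a config can be loaded with the `datasets` library; the config name `MLQA.ar.hi` is taken from the listing above, and the printed fields assume the usual SQuAD-style schema (`id`, `title`, `context`, `question`, `answers`):

```
# Hedged example: the config name and field names are assumptions based on
# the config listing above and the SQuAD-style features described in this card.
from datasets import load_dataset

# MLQA.xx.yy configs ship only "test" and "validation" splits.
mlqa_val = load_dataset("xtreme", "MLQA.ar.hi", split="validation")

print(mlqa_val)                 # row count and column names
print(mlqa_val[0]["question"])  # first validation question
```

Any other config name from the YAML listing (for example `XQuAD.ar` or `PAN-X.de`) can be substituted in the same call.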
``` diff --git a/SQuAD/train-00000-of-00001.parquet b/SQuAD/train-00000-of-00001.parquet deleted file mode 100644 index 0ca4abbf4bf271f37fe75c16bf67e6e71d799f76..0000000000000000000000000000000000000000 --- a/SQuAD/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:19672e8619417c5c1c14b3b5364baf61d982a60248c4fa713b850cd3414d5b29 -size 14453979 diff --git a/SQuAD/validation-00000-of-00001.parquet b/SQuAD/validation-00000-of-00001.parquet deleted file mode 100644 index c546c36d3a1e277c0a0e73c8eb25dd33c3fe3f3e..0000000000000000000000000000000000000000 --- a/SQuAD/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6e08fffdb70075f85c1cdbbf2be2589af4a6958ad0924d2849ad474ea0db6d48 -size 1818677 diff --git a/XNLI/test-00000-of-00001.parquet b/XNLI/test-00000-of-00001.parquet deleted file mode 100644 index b408cc7677261a95caab25764a8d0f9d141d03c2..0000000000000000000000000000000000000000 --- a/XNLI/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5c0d8d6fddb7efeba51d89d8fb81c93ee79e2547ecc6b7d92833f563a3ce8809 -size 5908398 diff --git a/XNLI/validation-00000-of-00001.parquet b/XNLI/validation-00000-of-00001.parquet deleted file mode 100644 index 892cc2a15d0d354370508892e10ea7ca14db8249..0000000000000000000000000000000000000000 --- a/XNLI/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8bde49b947eae1341b88197a2d62d1a1437e8a263d8e7c404f7d1711a98a2bd6 -size 2973225 diff --git a/XQuAD.ar/validation-00000-of-00001.parquet b/XQuAD.ar/validation-00000-of-00001.parquet deleted file mode 100644 index 89260c3ed5a790b6613f1671545ead898fd9a4a6..0000000000000000000000000000000000000000 --- a/XQuAD.ar/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1f7e1c101c7b82266db837cad87bc6d5aa652fbb46a8d68b3d9c470aa453b1d5 -size 263032 diff --git a/XQuAD.de/validation-00000-of-00001.parquet b/XQuAD.de/validation-00000-of-00001.parquet deleted file mode 100644 index e61f27825c45b8d1b0272b261d65557c0c0658f1..0000000000000000000000000000000000000000 --- a/XQuAD.de/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f7332837d3c4d58beaddc48819c70552e02fecfca3bd12b6b8d5ed44da46ba76 -size 241987 diff --git a/XQuAD.el/validation-00000-of-00001.parquet b/XQuAD.el/validation-00000-of-00001.parquet deleted file mode 100644 index 9188bcd64039301ec7e12a0e6077da7aa339bcc7..0000000000000000000000000000000000000000 --- a/XQuAD.el/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:76132f89d76bc4ac40a0022577cbc075310b00a40197d862f64360c362a4b433 -size 324409 diff --git a/XQuAD.en/validation-00000-of-00001.parquet b/XQuAD.en/validation-00000-of-00001.parquet deleted file mode 100644 index e10a2c9cac1e32a1198267b4cf78d77a9c70c0b6..0000000000000000000000000000000000000000 --- a/XQuAD.en/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a0501635ad8e0e89b416eaa66d31ec9b5df0038416135f620820e380e0870c69 -size 212402 diff --git a/XQuAD.es/validation-00000-of-00001.parquet b/XQuAD.es/validation-00000-of-00001.parquet deleted file mode 100644 index 
f03f5a8bcad74fc0d1d8990976d674be15d81ace..0000000000000000000000000000000000000000 --- a/XQuAD.es/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:15616d6bc38492d66499837e858b2d1b80ce8ecfbfcb22406686cf86147428d3 -size 236904 diff --git a/XQuAD.hi/validation-00000-of-00001.parquet b/XQuAD.hi/validation-00000-of-00001.parquet deleted file mode 100644 index adb9db66923509ecf6bf1e59a4eba348a313f9b3..0000000000000000000000000000000000000000 --- a/XQuAD.hi/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da6529634c829c4d56e8a468c45695da743c9ce782022af3130622234e774863 -size 322113 diff --git a/XQuAD.ru/validation-00000-of-00001.parquet b/XQuAD.ru/validation-00000-of-00001.parquet deleted file mode 100644 index 5ba412e63641d41e06d56c0865310ea4d15de16e..0000000000000000000000000000000000000000 --- a/XQuAD.ru/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e3141314d8a8e3c409731b174ccfdfb50ce06c1e6ef5386a1df07bd17684d135 -size 321758 diff --git a/XQuAD.th/validation-00000-of-00001.parquet b/XQuAD.th/validation-00000-of-00001.parquet deleted file mode 100644 index 22d3ec5ccda06c798420a3ec8d112ccf470d1e59..0000000000000000000000000000000000000000 --- a/XQuAD.th/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ec5459b3527a4533a3dad5452bb339f3eadebecf74b2f4790376b75e3bf2f857 -size 337337 diff --git a/XQuAD.tr/validation-00000-of-00001.parquet b/XQuAD.tr/validation-00000-of-00001.parquet deleted file mode 100644 index c8c9eff2f5bdaec6ded4762f5751d9cd68242030..0000000000000000000000000000000000000000 --- a/XQuAD.tr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1754dfd0b9bd95ee4e247d749978a59cbecfece7a8be01140f9cc859cf58b5a4 -size 228394 diff --git a/XQuAD.vi/validation-00000-of-00001.parquet b/XQuAD.vi/validation-00000-of-00001.parquet deleted file mode 100644 index 7f28a5d45112a4d0a616cb4f9b01d2507faa23e1..0000000000000000000000000000000000000000 --- a/XQuAD.vi/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e07a7499f9d27debb966341da0d92f7dbdea30b43f4a31f3c311a398dd91147e -size 237674 diff --git a/XQuAD.zh/validation-00000-of-00001.parquet b/XQuAD.zh/validation-00000-of-00001.parquet deleted file mode 100644 index 2ac3d52d82e9a13cce4c92f2d58d3ba3ac287f55..0000000000000000000000000000000000000000 --- a/XQuAD.zh/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9c720125d8ebde410c72df6caa2c7e74de23e06716c76da92c0db16016235d73 -size 205798 diff --git a/bucc18.de/test-00000-of-00001.parquet b/bucc18.de/test-00000-of-00001.parquet deleted file mode 100644 index 0eefe06907e2ae59dabc1822232adda791d83485..0000000000000000000000000000000000000000 --- a/bucc18.de/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9443315b7afb068c8c68ac62ba33955277fd64e72cbb9687fb01d620fe2c270e -size 1477634 diff --git a/bucc18.de/validation-00000-of-00001.parquet b/bucc18.de/validation-00000-of-00001.parquet deleted file mode 100644 index dec5c546cff117cc62f41b327ea46619ad32d95d..0000000000000000000000000000000000000000 --- a/bucc18.de/validation-00000-of-00001.parquet +++ 
/dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5f259d8cfc5d0adaf716d0f2b3820a84acf72d72e43eabee0c874934d36db581 -size 158496 diff --git a/bucc18.fr/test-00000-of-00001.parquet b/bucc18.fr/test-00000-of-00001.parquet deleted file mode 100644 index 5382997ee35f94a3f2a8aa5bb4c14454ffe88243..0000000000000000000000000000000000000000 --- a/bucc18.fr/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:03519c9320608177ee664ef64f58afd6b497d733e9e17804060ae46203952c26 -size 1306859 diff --git a/bucc18.fr/validation-00000-of-00001.parquet b/bucc18.fr/validation-00000-of-00001.parquet deleted file mode 100644 index 4095f4e5788e95aaa347c3f3af6f19b8356bfb17..0000000000000000000000000000000000000000 --- a/bucc18.fr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e6bfa5d5e71ca2d60fd5d091a2cb1544652b32fcc18f30bc49f10a2007111f7 -size 130237 diff --git a/bucc18.ru/test-00000-of-00001.parquet b/bucc18.ru/test-00000-of-00001.parquet deleted file mode 100644 index 75514e7c77659e0d350ec14f8319601c6e602476..0000000000000000000000000000000000000000 --- a/bucc18.ru/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d6a43c4c932769aa850f639c4e0fc88867783c9e23fddfa8094de1e808953274 -size 2646080 diff --git a/bucc18.ru/validation-00000-of-00001.parquet b/bucc18.ru/validation-00000-of-00001.parquet deleted file mode 100644 index cc1c2432fc383386d66776168bc041c5160fba63..0000000000000000000000000000000000000000 --- a/bucc18.ru/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:636ac47a00c4f8dbaa29baf44c4b28c0ec56ed1b9a78ef7c34abbd9a4417448e -size 428396 diff --git a/bucc18.zh/test-00000-of-00001.parquet b/bucc18.zh/test-00000-of-00001.parquet deleted file mode 100644 index 4d9f2fcab1d59b45e9e27847bd4b5b40a62b4386..0000000000000000000000000000000000000000 --- a/bucc18.zh/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6f3235e88867dbb860ed45f6fd1eb7e0579a5a06b60c4aea923d4df9b4ae83af -size 279660 diff --git a/bucc18.zh/validation-00000-of-00001.parquet b/bucc18.zh/validation-00000-of-00001.parquet deleted file mode 100644 index c9a02200462f94c8212c9e262fc14e6c7120752c..0000000000000000000000000000000000000000 --- a/bucc18.zh/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6a7ba18d302f407ef069bf2eebd2598d9c9ecd82cbab0a7ce3302f7c13656266 -size 40718 diff --git a/dataset_infos.json b/dataset_infos.json new file mode 100644 index 0000000000000000000000000000000000000000..cc42ff3127dd37987f1e72d1c47438958c503d95 --- /dev/null +++ b/dataset_infos.json @@ -0,0 +1 @@ +{"XNLI": {"description": "\nThe Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and\n2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into\n14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,\nHindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the\ncorresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. 
The corpus is made to\nevaluate how to perform inference in any language (including low-resources ones like Swahili or Urdu) when only\nEnglish NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI\nis an evaluation benchmark.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"language": {"dtype": "string", "id": null, "_type": "Value"}, "sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XNLI", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 20359500, "num_examples": 75150, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10049303, "num_examples": 37350, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 17865352, "post_processing_size": null, "dataset_size": 30408803, "size_in_bytes": 48274155}, "tydiqa": {"description": "Gold passage task (GoldP): Given a passage that is guaranteed to contain the\n answer, predict the single contiguous span of characters that answers the question. 
This is more similar to\n existing reading comprehension datasets (as opposed to the information-seeking task outlined above).\n This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing\n a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,\n XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:\n only the gold answer passage is provided rather than the entire Wikipedia article;\n unanswerable questions have been discarded, similar to MLQA and XQuAD;\n we evaluate with the SQuAD 1.1 metrics like XQuAD; and\n Thai and Japanese are removed since the lack of whitespace breaks some tools.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{tydiqa,\n title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},\n author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}\n year = {2020},\n journal = {Transactions of the Association for Computational Linguistics}\n }\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/tydiqa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tydiqa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 52948607, "num_examples": 49881, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 5006461, "num_examples": 5077, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-train.json": {"num_bytes": 58004076, "checksum": "cefc8e09ff2548d9b10a678d3a6bbbe5bc036be543f92418819ea676c97be23b"}, 
"https://storage.googleapis.com/tydiqa/v1.1/tydiqa-goldp-v1.1-dev.json": {"num_bytes": 5617409, "checksum": "b286e0f34bc7f52259359989716f369b160565bd12ad8f3a3e311f9b0dbad1c0"}}, "download_size": 63621485, "post_processing_size": null, "dataset_size": 57955068, "size_in_bytes": 121576553}, "SQuAD": {"description": "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\n archivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "SQuAD", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 79317110, "num_examples": 87599, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 10472653, "num_examples": 10570, "dataset_name": "xtreme"}}, "download_checksums": {"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json": {"num_bytes": 30288272, "checksum": "3527663986b8295af4f7fcdff1ba1ff3f72d07d61a20f487cb238a6ef92fd955"}, "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json": {"num_bytes": 4854279, "checksum": "95aa6a52d5d6a735563366753ca50492a658031da74f301ac5238b03966972c9"}}, "download_size": 
35142551, "post_processing_size": null, "dataset_size": 89789763, "size_in_bytes": 124932314}, "PAN-X.af": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.af", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 259709, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 257204, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1321396, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 1838309, "size_in_bytes": 235847193}, "PAN-X.ar": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. 
It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1808303, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1811983, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3634136, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7254422, "size_in_bytes": 241263306}, "PAN-X.bg": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. 
The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.bg", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2310314, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2306158, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4600773, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 9217245, "size_in_bytes": 243226129}, "PAN-X.bn": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. 
It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.bn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 159088, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 159282, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1568845, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 1887215, "size_in_bytes": 235896099}, "PAN-X.de": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. 
The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2381565, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2377639, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4762352, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 9521556, "size_in_bytes": 243530440}, "PAN-X.el": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2533806, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2547594, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5063176, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 10144576, "size_in_bytes": 244153460}, "PAN-X.en": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1920069, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1916220, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3823474, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7659763, "size_in_bytes": 241668647}, "PAN-X.es": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1592525, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1602291, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3199161, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6393977, "size_in_bytes": 240402861}, "PAN-X.et": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.et", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2030160, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2021409, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3023211, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7074780, "size_in_bytes": 241083664}, "PAN-X.eu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.eu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2296335, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249835, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2292327, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6838497, "size_in_bytes": 240847381}, "PAN-X.fa": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.fa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1782306, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1770284, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3529354, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7081944, "size_in_bytes": 241090828}, "PAN-X.fi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.fi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2131769, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2130665, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4273793, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8536227, "size_in_bytes": 242545111}, "PAN-X.fr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1664190, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1675785, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3335424, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6675399, "size_in_bytes": 240684283}, "PAN-X.he": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.he", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2332760, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2318756, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4667100, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 9318616, "size_in_bytes": 243327500}, "PAN-X.hi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 190671, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 196190, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 964212, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 1351073, "size_in_bytes": 235359957}, "PAN-X.hu": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.hu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2211851, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2249779, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4499914, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8961544, "size_in_bytes": 242970428}, "PAN-X.id": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.id", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1537979, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1536879, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3084007, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6158865, "size_in_bytes": 240167749}, "PAN-X.it": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.it", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1908529, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1928408, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3874663, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7711600, "size_in_bytes": 241720484}, "PAN-X.ja": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 6323003, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 6448960, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12670401, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 25442364, "size_in_bytes": 259451248}, "PAN-X.jv": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.jv", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 14600, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 16917, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 16106, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 47623, "size_in_bytes": 234056507}, "PAN-X.ka": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ka", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2806901, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2824641, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2777362, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8408904, "size_in_bytes": 242417788}, "PAN-X.kk": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.kk", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 238109, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 236724, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 240276, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 715109, "size_in_bytes": 234723993}, "PAN-X.ko": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2138167, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2138294, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4284733, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8561194, "size_in_bytes": 242570078}, "PAN-X.ml": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ml", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 290755, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 276926, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2865204, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 3432885, "size_in_bytes": 237441769}, "PAN-X.mr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.mr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 245358, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 255904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1248259, "num_examples": 5000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 1749521, "size_in_bytes": 235758405}, "PAN-X.ms": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ms", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 147515, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 147168, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2965048, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 3259731, "size_in_bytes": 237268615}, "PAN-X.my": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.my", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40428, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 37366, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 32735, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 110529, "size_in_bytes": 234119413}, "PAN-X.nl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.nl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2016856, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2038638, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4062189, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8117683, "size_in_bytes": 242126567}, "PAN-X.pt": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.pt", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1575141, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1562625, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3149283, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6287049, "size_in_bytes": 240295933}, "PAN-X.ru": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2053169, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2074145, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4121791, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 8249105, "size_in_bytes": 242257989}, "PAN-X.sw": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.sw", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 136368, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 140231, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 135911, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 412510, "size_in_bytes": 234421394}, "PAN-X.ta": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ta", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 277625, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 278114, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4122130, "num_examples": 15000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 4677869, "size_in_bytes": 238686753}, "PAN-X.te": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.te", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 293281, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 296963, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 295410, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 885654, "size_in_bytes": 234894538}, "PAN-X.th": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13262737, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13586928, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 27133029, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 53982694, "size_in_bytes": 287991578}, "PAN-X.tl": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
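Every PAN-X config above records the same `download_checksums` entry: one shared `panx_dataset.zip` archive (~234 MB, per `download_size`) with a fixed sha256. As a minimal illustrative sketch, not part of the dataset tooling itself, the recorded checksum can be re-verified before extraction; it assumes the archive has already been fetched into the working directory under the filename taken from the URL above.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through SHA-256 so a large archive never loads fully into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Checksum recorded in the metadata above for panx_dataset.zip.
EXPECTED = "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"

if sha256_of("panx_dataset.zip") != EXPECTED:
    raise ValueError("panx_dataset.zip does not match the recorded sha256 checksum")
```

Streaming in 1 MiB chunks keeps memory flat regardless of archive size.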
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.tl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 114156, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 117904, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1168717, "num_examples": 10000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 1400777, "size_in_bytes": 235409661}, "PAN-X.tr": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1915352, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1911503, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3779170, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 7606025, "size_in_bytes": 241614909}, "PAN-X.ur": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.ur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 152148, "num_examples": 1000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 151922, "num_examples": 1000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3072276, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 3376346, "size_in_bytes": 237385230}, "PAN-X.vi": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1565143, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1580216, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 3153227, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 6298586, "size_in_bytes": 240307470}, "PAN-X.yo": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.yo", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 13245, "num_examples": 100, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 13533, "num_examples": 100, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14709, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 41487, "size_in_bytes": 234050371}, "PAN-X.zh": {"description": "The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been\nconstructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset\ncan be loaded with the DaNLP package:\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{pan-x,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},\n volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}\n year={2017}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/afshinrahimi/mmner", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 7, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "langs": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAN-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 4491325, "num_examples": 10000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4363172, "num_examples": 10000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8832051, "num_examples": 20000, "dataset_name": "xtreme"}}, "download_checksums": {"https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip": {"num_bytes": 234008884, "checksum": "164814b64f749ad000988b5ab45050b5116913b3db466ab173008955ddf649a4"}}, "download_size": 234008884, "post_processing_size": null, "dataset_size": 17686548, "size_in_bytes": 251695432}, "MLQA.ar.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
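The `features` blocks above describe the PAN-X schema: parallel `tokens`, `ner_tags` (a sequence over a 7-class `ClassLabel`), and `langs` sequences. A short sketch of reading one config with the `datasets` library follows; the config name `PAN-X.nl` is taken from the metadata above, and on newer `datasets` releases loading a script-based dataset like this may additionally require `trust_remote_code=True`.

```python
from datasets import load_dataset

# Any PAN-X.* config shares the tokens / ner_tags / langs schema described above.
panx = load_dataset("xtreme", "PAN-X.nl", split="validation")

# ner_tags is a Sequence of ClassLabel; .feature.names recovers the label strings.
label_names = panx.features["ner_tags"].feature.names  # ["O", "B-PER", ..., "I-LOC"]

example = panx[0]
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(f"{token}\t{label_names[tag_id]}")
```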
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8368114, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 824108, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9192222, "size_in_bytes": 84911272}, "MLQA.ar.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2183942, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 364837, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2548779, "size_in_bytes": 78267829}, "MLQA.ar.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3290629, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 288446, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3579075, "size_in_bytes": 79298125}, "MLQA.ar.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3229872, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 340049, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3569921, "size_in_bytes": 79288971}, "MLQA.ar.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 8225662, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 810089, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 9035751, "size_in_bytes": 84754801}, "MLQA.ar.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3041378, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 228180, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3269558, "size_in_bytes": 78988608}, "MLQA.ar.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.ar.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3039396, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 281770, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3321166, "size_in_bytes": 79040216}, "MLQA.de.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1620006, "num_examples": 1649, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 200174, "num_examples": 207, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1820180, "size_in_bytes": 77539230}, "MLQA.de.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4366102, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 488367, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4854469, "size_in_bytes": 80573519}, "MLQA.de.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1688483, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 216075, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1904558, "size_in_bytes": 77623608}, "MLQA.de.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1679180, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184318, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1863498, "size_in_bytes": 77582548}, "MLQA.de.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4343144, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 485894, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4829038, "size_in_bytes": 80548088}, "MLQA.de.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1716615, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 170582, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1887197, "size_in_bytes": 77606247}, "MLQA.de.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.de.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1371074, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 153871, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1524945, "size_in_bytes": 77243995}, "MLQA.vi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3205185, "num_examples": 2047, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 230335, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3435520, "size_in_bytes": 79154570}, "MLQA.vi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2227033, "num_examples": 1675, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 277185, "num_examples": 182, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 2504218, "size_in_bytes": 78223268}, "MLQA.vi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7922085, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 726518, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8648603, "size_in_bytes": 84367653}, "MLQA.vi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2989660, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 269389, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3259049, "size_in_bytes": 78978099}, "MLQA.vi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7843431, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 719273, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 8562704, "size_in_bytes": 84281754}, "MLQA.vi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2866597, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 283461, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3150058, "size_in_bytes": 78869108}, "MLQA.vi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.vi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 2776664, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 255007, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3031671, "size_in_bytes": 78750721}, "MLQA.zh.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1731483, "num_examples": 1912, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 175349, "num_examples": 188, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1906832, "size_in_bytes": 77625882}, "MLQA.zh.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1390018, "num_examples": 1621, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 174605, "num_examples": 190, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1564623, "size_in_bytes": 77283673}, "MLQA.zh.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1806186, "num_examples": 1943, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 172934, "num_examples": 184, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1979120, "size_in_bytes": 77698170}, "MLQA.zh.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4422350, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 443810, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4866160, "size_in_bytes": 80585210}, "MLQA.zh.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4450985, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 446868, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4897853, "size_in_bytes": 80616903}, "MLQA.zh.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1736283, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 138073, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1874356, "size_in_bytes": 77593406}, "MLQA.zh.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.zh.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1578219, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 184401, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1762620, "size_in_bytes": 77481670}, "MLQA.en.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6739219, "num_examples": 5335, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 630843, "num_examples": 517, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7370062, "size_in_bytes": 83089112}, "MLQA.en.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5056722, "num_examples": 4517, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 594936, "num_examples": 512, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5651658, "size_in_bytes": 81370708}, "MLQA.en.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 7056698, "num_examples": 5495, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 640646, "num_examples": 511, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7697344, "size_in_bytes": 83416394}, "MLQA.en.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6539307, "num_examples": 5137, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 608444, "num_examples": 504, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7147751, "size_in_bytes": 82866801}, "MLQA.en.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 14004648, "num_examples": 11590, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1329112, "num_examples": 1148, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 15333760, "size_in_bytes": 91052810}, "MLQA.en.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6179249, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 555462, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 6734711, "size_in_bytes": 82453761}, "MLQA.en.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.en.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 6378866, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 623171, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 7002037, "size_in_bytes": 82721087}, "MLQA.es.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1740282, "num_examples": 1978, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 148649, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1888931, "size_in_bytes": 77607981}, "MLQA.es.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1404025, "num_examples": 1776, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 144186, "num_examples": 196, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1548211, "size_in_bytes": 77267261}, "MLQA.es.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1747969, "num_examples": 2018, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 176841, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1924810, "size_in_bytes": 77643860}, "MLQA.es.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1678451, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 126646, "num_examples": 161, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1805097, "size_in_bytes": 77524147}, "MLQA.es.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4362737, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419068, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4781805, "size_in_bytes": 80500855}, "MLQA.es.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4394333, "num_examples": 5253, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 422071, "num_examples": 500, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4816404, "size_in_bytes": 80535454}, "MLQA.es.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.es.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1523523, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 181834, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 1705357, "size_in_bytes": 77424407}, "MLQA.hi.ar": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4445589, "num_examples": 1831, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 410424, "num_examples": 186, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4856013, "size_in_bytes": 80575063}, "MLQA.hi.de": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3022864, "num_examples": 1430, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 301713, "num_examples": 163, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 3324577, "size_in_bytes": 79043627}, "MLQA.hi.vi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4743484, "num_examples": 1947, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 419106, "num_examples": 177, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 5162590, "size_in_bytes": 80881640}, "MLQA.hi.zh": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 4354875, "num_examples": 1767, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 424246, "num_examples": 189, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4779121, "size_in_bytes": 80498171}, "MLQA.hi.en": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11449261, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1097857, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12547118, "size_in_bytes": 88266168}, "MLQA.hi.es": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 3862229, "num_examples": 1723, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 420402, "num_examples": 187, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 4282631, "size_in_bytes": 80001681}, "MLQA.hi.hi": {"description": " MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.\nMLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,\nGerman, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between\n4 different languages on average.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "@article{lewis2019mlqa,\ntitle={MLQA: Evaluating Cross-lingual Extractive Question Answering},\nauthor={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},\njournal={arXiv preprint arXiv:1910.07475},\nyear={2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/MLQA", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "MLQA.hi.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 11810475, "num_examples": 4918, "dataset_name": "xtreme"}, "validation": {"name": "validation", "num_bytes": 1136784, "num_examples": 507, "dataset_name": "xtreme"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip": {"num_bytes": 75719050, "checksum": "246e8089933d13007fe80684d5c5c0713d6834cf8b3b4a0ec7c66f0a0d2baac8"}}, "download_size": 75719050, "post_processing_size": null, "dataset_size": 12947259, "size_in_bytes": 88666309}, "XQuAD.ar": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.ar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1722799, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ar.json": {"num_bytes": 1582988, "checksum": "abdabd7afed5c635d99cca0f3f0d0c9d9ed0bc77451e963c2e4e0638c29e486d"}}, "download_size": 1582988, "post_processing_size": null, "dataset_size": 1722799, "size_in_bytes": 3305787}, "XQuAD.de": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1283301, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.de.json": {"num_bytes": 669810, "checksum": "990b5d746746ed65ed4702ea5f35f99ffa4e2f1c390c07d003642acd937916f9"}}, "download_size": 669810, "post_processing_size": null, "dataset_size": 1283301, "size_in_bytes": 1953111}, "XQuAD.vi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.vi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1477239, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.vi.json": {"num_bytes": 911401, "checksum": "f619a1eb11fb42d3ab0834259e488a65f585447ef6154437bfb7199d85161a04"}}, "download_size": 911401, "post_processing_size": null, "dataset_size": 1477239, "size_in_bytes": 2388640}, "XQuAD.zh": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 984241, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.zh.json": {"num_bytes": 808652, "checksum": "691d0b3359bc6b8faa8de931dfdfe21d50a65861ae348e32a0d1a0190b0c8835"}}, "download_size": 808652, "post_processing_size": null, "dataset_size": 984241, "size_in_bytes": 1792893}, "XQuAD.en": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1116123, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.en.json": {"num_bytes": 609383, "checksum": "e4c57d1c9143aaa1c5d265ba5987a65f4e69528d2a98f29d6e75019b10344f29"}}, "download_size": 609383, "post_processing_size": null, "dataset_size": 1116123, "size_in_bytes": 1725506}, "XQuAD.es": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1273499, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.es.json": {"num_bytes": 684322, "checksum": "dcbae93ec3a9f4b9e78fd834a171d6f96c1a875e10e15b7530b7e4ef4971e37e"}}, "download_size": 684322, "post_processing_size": null, "dataset_size": 1273499, "size_in_bytes": 1957821}, "XQuAD.hi": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.hi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2682975, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.hi.json": {"num_bytes": 1680538, "checksum": "df2cce3532b37e9beb8979704b5c9a4bf874358f105395a298b89427b43b9d24"}}, "download_size": 1680538, "post_processing_size": null, "dataset_size": 2682975, "size_in_bytes": 4363513}, "XQuAD.el": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.el", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2206690, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.el.json": {"num_bytes": 1918889, "checksum": "821cf0f88e73fa258fd2f548b19b6ec39f7025059e16f6f9fc8cd797c9c3663e"}}, "download_size": 1918889, "post_processing_size": null, "dataset_size": 2206690, "size_in_bytes": 4125579}, "XQuAD.ru": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2136990, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.ru.json": {"num_bytes": 1896368, "checksum": "208d5b1aa154c52b1b5c5eda16281e455e8fd198cdb9af3f469f0d6037d973bf"}}, "download_size": 1896368, "post_processing_size": null, "dataset_size": 2136990, "size_in_bytes": 4033358}, "XQuAD.th": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.th", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 2854959, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.th.json": {"num_bytes": 1809143, "checksum": "5cdda11d0e1e075f7872abf4e6ae830388ce7f617964d542308e9ae4257e0f43"}}, "download_size": 1809143, "post_processing_size": null, "dataset_size": 2854959, "size_in_bytes": 4664102}, "XQuAD.tr": {"description": "XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question\nanswering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from\nthe development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into\nten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,\nthe dataset is entirely parallel across 11 languages.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n @article{Artetxe:etal:2019,\n author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},\n title = {On the cross-lingual transferability of monolingual representations},\n journal = {CoRR},\n volume = {abs/1910.11856},\n year = {2019},\n archivePrefix = {arXiv},\n eprint = {1910.11856}\n}\n\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/deepmind/xquad", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"answer_start": {"dtype": "int32", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "XQuAD.tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1210763, "num_examples": 1190, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/deepmind/xquad/raw/master/xquad.tr.json": {"num_bytes": 729506, "checksum": "92179a564774b7696100d144c1e10870d0a966b6fccbdd254a65b9d2ab1971cc"}}, "download_size": 729506, "post_processing_size": null, "dataset_size": 1210763, "size_in_bytes": 1940269}, "bucc18.de": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "bucc18.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 248707, "num_examples": 1038, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2325701, "num_examples": 9580, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-de-en.training-gold.tar.bz2": {"num_bytes": 28189548, "checksum": "766e0fdebbd1438fb87c21254828eb13c8b997d8fbab002103dd060dcac50c5c"}, "https://comparable.limsi.fr/bucc2018/bucc2018-de-en.sample-gold.tar.bz2": {"num_bytes": 2529652, "checksum": "8e16ba8b02ef8d648b06adfbd6dfb188f43524e18f97b2b12a14a086caac62f3"}}, "download_size": 30719200, "post_processing_size": null, "dataset_size": 2574408, "size_in_bytes": 33293608}, "bucc18.fr": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "bucc18.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 212513, "num_examples": 929, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 2082419, "num_examples": 9086, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.training-gold.tar.bz2": {"num_bytes": 20757713, "checksum": "53c0d0e7dd97dc89593a2db25a26c5f0ccdc7113e8451263bb0c80e4c4c7dc30"}, "https://comparable.limsi.fr/bucc2018/bucc2018-fr-en.sample-gold.tar.bz2": {"num_bytes": 1948831, "checksum": "df9eb3966954e163c9264076f7c9c1eb56d9d8a91855f9d3afbf2c0fdaef0a08"}}, "download_size": 22706544, "post_processing_size": null, "dataset_size": 2294932, "size_in_bytes": 25001476}, "bucc18.zh": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "bucc18.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 55739, "num_examples": 257, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 415925, "num_examples": 1899, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.training-gold.tar.bz2": {"num_bytes": 6344925, "checksum": "3facb71798277c8f44dc78c1f8ae2110f254d0e14799f3508eedd54b4236877a"}, "https://comparable.limsi.fr/bucc2018/bucc2018-zh-en.sample-gold.tar.bz2": {"num_bytes": 769869, "checksum": "a3425be5c0320ee131a0927b66c3e29befb3b481ebf1b87257e660e514bc16ac"}}, "download_size": 7114794, "post_processing_size": null, "dataset_size": 471664, "size_in_bytes": 7586458}, "bucc18.ru": {"description": "Building and Using Comparable Corpora\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://comparable.limsi.fr/bucc2018/", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "bucc18.ru", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 761347, "num_examples": 2374, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4641678, "num_examples": 14435, "dataset_name": "xtreme"}}, "download_checksums": {"https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.training-gold.tar.bz2": {"num_bytes": 37085079, "checksum": "1895df56e936ca3d4f5b12299ceffe0b7ff4806584c40bdaa3ae1d445f25afa5"}, "https://comparable.limsi.fr/bucc2018/bucc2018-ru-en.sample-gold.tar.bz2": {"num_bytes": 4269233, "checksum": "fce3cabc7ee50ddb4b18aa6fb090e2669c8383d2a29fc97eed6ae70fed9a23e5"}}, "download_size": 41354312, "post_processing_size": null, "dataset_size": 5403025, "size_in_bytes": 46757337}, "PAWS-X.de": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.de", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 500009, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 510194, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12451883, "num_examples": 49380, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13462086, "size_in_bytes": 43744143}, "PAWS-X.en": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.en", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 478291, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 480738, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11827719, "num_examples": 49175, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 12786748, "size_in_bytes": 43068805}, "PAWS-X.es": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.es", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 494069, "num_examples": 1961, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 505047, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12462107, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13461223, "size_in_bytes": 43743280}, "PAWS-X.fr": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.fr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 516111, "num_examples": 1988, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 521031, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 12948512, "num_examples": 49399, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 13985654, "size_in_bytes": 44267711}, "PAWS-X.ja": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.ja", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 647774, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 654640, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 14695653, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 15998067, "size_in_bytes": 46280124}, "PAWS-X.ko": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.ko", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 540787, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 547978, "num_examples": 1999, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13542657, "num_examples": 49164, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 14631422, "size_in_bytes": 44913479}, "PAWS-X.zh": {"description": "\nThis dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training\npairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All\ntranslated pairs are sourced from examples in PAWS-Wiki.\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @InProceedings{pawsx2019emnlp,\n title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},\n author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},\n booktitle = {Proc. 
of EMNLP},\n year = {2019}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/google-research-datasets/paws/tree/master/pawsx", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "PAWS-X.zh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 459120, "num_examples": 2000, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 460638, "num_examples": 2000, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 10469712, "num_examples": 49401, "dataset_name": "xtreme"}}, "download_checksums": {"https://storage.googleapis.com/paws/pawsx/x-final.tar.gz": {"num_bytes": 30282057, "checksum": "4146db499101d66e68ae4c8ed3cf9dadecd625f44b7d8cf3d4a0fe93afc4fd9f"}}, "download_size": 30282057, "post_processing_size": null, "dataset_size": 11389470, "size_in_bytes": 41671527}, "tatoeba.afr": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.afr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179651, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.afr-eng.afr": {"num_bytes": 30586, "checksum": "7bb9e073ad8422d6bfdec7c9ebdcef8ac486e72b237200e447923a8b921a0a56"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.afr-eng.eng": {"num_bytes": 29049, "checksum": "0b700c125efb4030b4cc50d9d765d5884afc24f39296f29b028a1b2a8512034f"}}, "download_size": 59635, "post_processing_size": null, "dataset_size": 179651, "size_in_bytes": 239286}, "tatoeba.ara": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.ara", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192666, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ara-eng.ara": {"num_bytes": 43582, "checksum": "e67a0ae072b79cd9e8eb09f166c3bc0b23488d39f5720f2ee0a8350ae17b719f"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ara-eng.eng": {"num_bytes": 29068, "checksum": "3c11838c963f598a52dcf2f452b666353538257001db5c59c3a5f54a999b336b"}}, "download_size": 72650, "post_processing_size": null, "dataset_size": 192666, "size_in_bytes": 265316}, "tatoeba.ben": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.ben", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211719, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ben-eng.ben": {"num_bytes": 65990, "checksum": "ac3385695d6a6c7e5d18e38ad4b8b7d3780f3df23dd0ff3f539071b8269a8613"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ben-eng.eng": {"num_bytes": 25713, "checksum": "67fbe75fec549d436c3356b6d6f8dd53179b6a908661b5d507d28c7fee83350e"}}, "download_size": 91703, "post_processing_size": null, "dataset_size": 211719, "size_in_bytes": 303422}, "tatoeba.bul": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.bul", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 222295, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.bul-eng.bul": {"num_bytes": 65500, "checksum": "f9fa90cf3599d8c87f7a6ed22f5d648e3ce6687c705656a8c8ea088d891f79d5"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.bul-eng.eng": {"num_bytes": 36779, "checksum": "4b03b3b52ffb7cf4286e0c4453c90910a3021546f160bdf0e4d39d1f45bfbc0b"}}, "download_size": 102279, "post_processing_size": null, "dataset_size": 222295, "size_in_bytes": 324574}, "tatoeba.deu": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.deu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 225583, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.deu-eng.deu": {"num_bytes": 57121, "checksum": "edfa6f75a42554df388f45891735d5e4214158a99def4b73b5908af4a3054551"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.deu-eng.eng": {"num_bytes": 48446, "checksum": "eb9cc83a42f2c4b22f310d05311207e41abb56c2a084666cac3ee0f84d2d0b84"}}, "download_size": 105567, "post_processing_size": null, "dataset_size": 225583, "size_in_bytes": 331150}, "tatoeba.cmn": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.cmn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188947, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.cmn-eng.cmn": {"num_bytes": 33410, "checksum": "965d033966fcd186c89741ad49ab4b0a0b2bbd33e02666635ff3b2be23c1ac1f"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.cmn-eng.eng": {"num_bytes": 35521, "checksum": "29ba36232488f5806aceccac57c59c5e750ddd08edb40eef417e3ada9ff9a239"}}, "download_size": 68931, "post_processing_size": null, "dataset_size": 188947, "size_in_bytes": 257878}, "tatoeba.ell": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.ell", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 198977, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ell-eng.ell": {"num_bytes": 52202, "checksum": "4fb3d3d30bdafd15100dfad5c4680f8f2ed5ca87ed0a6122e2fe2aa21fee65e8"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ell-eng.eng": {"num_bytes": 26759, "checksum": "1f630710e718e2f85a4e757d3c7f3d6e78ded0b25c99653b4c552138318d9ffe"}}, "download_size": 78961, "post_processing_size": null, "dataset_size": 198977, "size_in_bytes": 277938}, "tatoeba.est": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.est", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 179744, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.est-eng.est": {"num_bytes": 29996, "checksum": "94856999ef35e5357502d7ecf50419d0108b99270e507d9c57f8b283bd1be9c5"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.est-eng.eng": {"num_bytes": 29732, "checksum": "bb033b10596178452aecf2d97ad25580251375e7d224d8b38dad3d93d69b1e4f"}}, "download_size": 59728, "post_processing_size": null, "dataset_size": 179744, "size_in_bytes": 239472}, "tatoeba.eus": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.eus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 186084, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.eus-eng.eus": {"num_bytes": 34060, "checksum": "4255fb70a6c268b09fcc59a9b308f0fcaaf45ef45e66fc55bf3c80eac4d8c97b"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.eus-eng.eng": {"num_bytes": 32008, "checksum": "1e80f0174ad544697fd69ddcf21287ca10c5e3cacba2fc42bf1d68c460d14ba2"}}, "download_size": 66068, "post_processing_size": null, "dataset_size": 186084, "size_in_bytes": 252152}, "tatoeba.fin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.fin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195685, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.fin-eng.fin": {"num_bytes": 39857, "checksum": "8db3c734f755d578445947f1182f40faf2a9a0eca37561dd248717c088802d60"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.fin-eng.eng": {"num_bytes": 35812, "checksum": "322e610359f4d24852e673bbe4524d52c26dbf980aca0760e95c66dc21ecd504"}}, "download_size": 75669, "post_processing_size": null, "dataset_size": 195685, "size_in_bytes": 271354}, "tatoeba.fra": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.fra", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 200034, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.fra-eng.fra": {"num_bytes": 43727, "checksum": "644172ff9642fefa9e41c29b7d6f44196518e84350dc44d4992e943c0cca92b6"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.fra-eng.eng": {"num_bytes": 36291, "checksum": "5634220f8a26a9a23b84753a9aec0b0832e6bdaa9da3f83e0bd84c928c3f46e3"}}, "download_size": 80018, "post_processing_size": null, "dataset_size": 200034, "size_in_bytes": 280052}, "tatoeba.heb": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.heb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 203516, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.heb-eng.heb": {"num_bytes": 47660, "checksum": "4a07ca4b8a6fb7ab499791573a2454730f47acbe209359d7b9372a9f6094a102"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.heb-eng.eng": {"num_bytes": 35840, "checksum": "73c27ed0f76c1d2da199230f05489749e10bb67ab879c6dfee8ca9807d6bd99c"}}, "download_size": 83500, "post_processing_size": null, "dataset_size": 203516, "size_in_bytes": 287016}, "tatoeba.hin": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.hin", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 242574, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.hin-eng.hin": {"num_bytes": 88468, "checksum": "15e4fb0a394be4438319f1d6955d1aea226e2a8c5ad38798b23b76ae43d742ed"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.hin-eng.eng": {"num_bytes": 34090, "checksum": "adfce2269a55dbac69b25c0f4f6eb89e0f9383165485925a2e042e61b9480562"}}, "download_size": 122558, "post_processing_size": null, "dataset_size": 242574, "size_in_bytes": 365132}, "tatoeba.hun": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.hun", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188905, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.hun-eng.hun": {"num_bytes": 35335, "checksum": "56bd0682be8c1db6568313650b3310d641cc8d0019d12dd7caf201302350eeac"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.hun-eng.eng": {"num_bytes": 33554, "checksum": "5323266e91ddee67ed1ae00d6bbac0cdf3d37749d1b2da3459bf0d424bc71383"}}, "download_size": 68889, "post_processing_size": null, "dataset_size": 188905, "size_in_bytes": 257794}, "tatoeba.ind": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.ind", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 194860, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ind-eng.ind": {"num_bytes": 39969, "checksum": "4f03cd70cba071f746eacd3ebf6b60fd5a8377ce18b4cc52edec6721f05f352a"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ind-eng.eng": {"num_bytes": 34875, "checksum": "62a3f5127b60eb3526b8fa3994a68fa1a1f114f3a395307a8808a3517c05ffc5"}}, "download_size": 74844, "post_processing_size": null, "dataset_size": 194860, "size_in_bytes": 269704}, "tatoeba.ita": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.ita", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 185849, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ita-eng.ita": {"num_bytes": 34237, "checksum": "9b3f369d0ed92273b46dd3b983721636e3d15024ce7125f5103229249c386d26"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.ita-eng.eng": {"num_bytes": 31596, "checksum": "738bf8f981e42d285f4f08bc09238782d285b02050e6e95287aa4e998bb7b24b"}}, "download_size": 65833, "post_processing_size": null, "dataset_size": 185849, "size_in_bytes": 251682}, "tatoeba.jav": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.jav", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 38529, "num_examples": 205, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.jav-eng.jav": {"num_bytes": 7457, "checksum": "ad88399db8f94c2a040aa53e7e862225964fac9308a3beb3d5b38f3eca2f827f"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.jav-eng.eng": {"num_bytes": 6456, "checksum": "172776353690f6c047ea21da969fa6979980d692fff1cfbac17eb25851423760"}}, "download_size": 13913, "post_processing_size": null, "dataset_size": 38529, "size_in_bytes": 52442}, "tatoeba.jpn": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.jpn", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213099, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.jpn-eng.jpn": {"num_bytes": 53844, "checksum": "56040bd6949170a631039d9f8f4c6440db8761b0065c9686feba55c99a320d46"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.jpn-eng.eng": {"num_bytes": 39239, "checksum": "b42129b34e1bf225ccc25fc00e532a6113af98adbc6605b93021bd8aadeb68b6"}}, "download_size": 93083, "post_processing_size": null, "dataset_size": 213099, "size_in_bytes": 306182}, "tatoeba.kat": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.kat", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 161696, "num_examples": 746, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kat-eng.kat": {"num_bytes": 50967, "checksum": "6ef69b5efbf355597ed91eb355b33a5f524bdf0875dbeaaccf6375badc20e29b"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kat-eng.eng": {"num_bytes": 21193, "checksum": "d70a14aa64fd7c6b545f11aea754a632e1cbecb91af27fcf6a98a8449a48a8e7"}}, "download_size": 72160, "post_processing_size": null, "dataset_size": 161696, "size_in_bytes": 233856}, "tatoeba.kaz": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.kaz", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 116194, "num_examples": 575, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kaz-eng.kaz": {"num_bytes": 29687, "checksum": "f20c682582a80b6aa10f3b933db93bc314449b554ce611e263bc75990b319aef"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kaz-eng.eng": {"num_bytes": 17491, "checksum": "0ab684e7032c6520540d5785adf00ef206d097221d0dd4dc9bcaabd64068e10d"}}, "download_size": 47178, "post_processing_size": null, "dataset_size": 116194, "size_in_bytes": 163372}, "tatoeba.kor": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.kor", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 199155, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kor-eng.kor": {"num_bytes": 44054, "checksum": "e550c84184ec35b1a0dab3154284719511a21746e53c40f46eb6ab08179e9188"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.kor-eng.eng": {"num_bytes": 35085, "checksum": "f900cf3c9b72ed5a400e1804702863ff3df00be58eb060902e02285d0e68fab3"}}, "download_size": 79139, "post_processing_size": null, "dataset_size": 199155, "size_in_bytes": 278294}, "tatoeba.mal": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.mal", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 177173, "num_examples": 687, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.mal-eng.mal": {"num_bytes": 72952, "checksum": "1a896f54f85a454fb0123864049c65921ae9dfd0cafda6deef8060f0104d965e"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.mal-eng.eng": {"num_bytes": 21765, "checksum": "4d80cdbb844cd4e33f874e5dc45c1cdda4f80998034448f7eb56b8b6532a6622"}}, "download_size": 94717, "post_processing_size": null, "dataset_size": 177173, "size_in_bytes": 271890}, "tatoeba.mar": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.mar", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 220558, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.mar-eng.mar": {"num_bytes": 72652, "checksum": "b2931584fbe62062beb97cc939e4d208ace5ee56f15808860ab14e130fd3c576"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.mar-eng.eng": {"num_bytes": 27890, "checksum": "709d09b697dca053c814b9d525b72cb47cb011aa860c6598f3e2b1b3dd1280dd"}}, "download_size": 100542, "post_processing_size": null, "dataset_size": 220558, "size_in_bytes": 321100}, "tatoeba.nld": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.nld", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 193279, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.nld-eng.nld": {"num_bytes": 37866, "checksum": "d564d4ce1c621ccaefdbe9f5cb08eacccc7bf2a0b58666303e84ca9c7973bdb7"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.nld-eng.eng": {"num_bytes": 35397, "checksum": "3b8836749df573a53235b85ed6771f31bf2de428f520d2d6a1dd94b61b4ef057"}}, "download_size": 73263, "post_processing_size": null, "dataset_size": 193279, "size_in_bytes": 266542}, "tatoeba.pes": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.pes", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 213735, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.pes-eng.pes": {"num_bytes": 58866, "checksum": "f1553713723491fe5876e1060b18fb4abf0c77be3ba06db2e3307e83aedbbb32"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.pes-eng.eng": {"num_bytes": 34853, "checksum": "b5c6cf8c8d93ff8f2fe26b53f3ee29b62db9c6f7dcddcb086ba48953f4ce926b"}}, "download_size": 93719, "post_processing_size": null, "dataset_size": 213735, "size_in_bytes": 307454}, "tatoeba.por": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.por", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 195201, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.por-eng.por": {"num_bytes": 39331, "checksum": "b0c926a232c9889a87a1a970f9399c5618c2d95baf204321e9da794c0aec16f5"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.por-eng.eng": {"num_bytes": 35854, "checksum": "deb4568cfb7b7cbbc060a7fe97c4639fb4680842f4fcd28df791ffdbb753855a"}}, "download_size": 75185, "post_processing_size": null, "dataset_size": 195201, "size_in_bytes": 270386}, "tatoeba.rus": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.rus", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 212488, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.rus-eng.rus": {"num_bytes": 58822, "checksum": "446ff2cae66053c2277d9735b2c2df6b786cae258385f7ade7bed68d8835c6a0"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.rus-eng.eng": {"num_bytes": 33650, "checksum": "7b26f52d6085b7c4944d6f5f6f5b6e1932085b42112f1444db515ce59e878fb8"}}, "download_size": 92472, "post_processing_size": null, "dataset_size": 212488, "size_in_bytes": 304960}, "tatoeba.spa": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.spa", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 192282, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.spa-eng.spa": {"num_bytes": 37490, "checksum": "f9628cea40481e8251f0999718bd893cff0f261752f5e526b3bc20284e2ca018"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.spa-eng.eng": {"num_bytes": 34776, "checksum": "89e4470f4572040b1ca94b3edad97dcd8bd2f0141f072e12933b8659dadf917d"}}, "download_size": 72266, "post_processing_size": null, "dataset_size": 192282, "size_in_bytes": 264548}, "tatoeba.swh": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.swh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 67283, "num_examples": 390, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.swh-eng.swh": {"num_bytes": 9645, "checksum": "1c672915446c336cc378676e6dbf91eb54d27bbfd0c61563d349265bc6374753"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.swh-eng.eng": {"num_bytes": 10822, "checksum": "e8539647caff9e329776ae863b6224d432923c6e4e9256b9df92ca58ff282eac"}}, "download_size": 20467, "post_processing_size": null, "dataset_size": 67283, "size_in_bytes": 87750}, "tatoeba.tam": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.tam", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 76297, "num_examples": 307, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tam-eng.tam": {"num_bytes": 30553, "checksum": "bde87fb1ddedccf6c7a2b70ffdd19a959a573c113c2e7a041c4b623fb2170bde"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tam-eng.eng": {"num_bytes": 8888, "checksum": "e7c5106acdd100214a161970b2a5c31e7386e5b6a963e3d3afdf30412c90ac53"}}, "download_size": 39441, "post_processing_size": null, "dataset_size": 76297, "size_in_bytes": 115738}, "tatoeba.tel": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.tel", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 53239, "num_examples": 234, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tel-eng.tel": {"num_bytes": 18337, "checksum": "7e1a1bcd106cce650a09e2f042f1354b55b29bea2bcfa86554dfa0ad12ce8976"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tel-eng.eng": {"num_bytes": 6806, "checksum": "1efc2ef57d9b1ecebfc4baa45e86fd793e38473304e9c043aebabc3a1b29a294"}}, "download_size": 25143, "post_processing_size": null, "dataset_size": 53239, "size_in_bytes": 78382}, "tatoeba.tgl": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.tgl", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 188154, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tgl-eng.tgl": {"num_bytes": 36506, "checksum": "f99165dc05190b99f6574fe24db884ff85d111612a25e7a37323f001aafc2a6e"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tgl-eng.eng": {"num_bytes": 31632, "checksum": "e7c9beda3f3072a968a34a7226a66d1ebf1dcb33cf002805dc752f80a7c620ae"}}, "download_size": 68138, "post_processing_size": null, "dataset_size": 188154, "size_in_bytes": 256292}, "tatoeba.tha": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.tha", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 128974, "num_examples": 548, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tha-eng.tha": {"num_bytes": 44759, "checksum": "65c7b3c01a56a1ac8971e72e0ea8e74a027718dc84044d8802c0ab36395a3156"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tha-eng.eng": {"num_bytes": 18439, "checksum": "2881c82d2c5fa59cf0a68bc9e012f5e0b0a716f7357cbecf77c247efc2fd7294"}}, "download_size": 63198, "post_processing_size": null, "dataset_size": 128974, "size_in_bytes": 192172}, "tatoeba.tur": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.tur", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 191901, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tur-eng.tur": {"num_bytes": 37607, "checksum": "1ffa0acc006018b3105abda41a4d4ca42f3c122964a49b71793546367b079a86"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.tur-eng.eng": {"num_bytes": 34278, "checksum": "a768df7dd3d1344f872a458b32c3a65e24f8381826ccb16ba6677426176c8121"}}, "download_size": 71885, "post_processing_size": null, "dataset_size": 191901, "size_in_bytes": 263786}, "tatoeba.urd": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.urd", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 208728, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.urd-eng.urd": {"num_bytes": 56819, "checksum": "2efc22dc61885a9454aeeee68c8b841c7f9138d53ba644a82308bd210140450b"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.urd-eng.eng": {"num_bytes": 31893, "checksum": "dab35fda3f73b3fd86b6b9f9f9f6242430961aa5d1ac247adbc646867df79cec"}}, "download_size": 88712, "post_processing_size": null, "dataset_size": 208728, "size_in_bytes": 297440}, "tatoeba.vie": {"description": "This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.\nFor each language, we have selected 1000 English sentences and their translations, if available. Please check\nthis paper for a description of the languages, their families and scripts as well as baseline results.\nPlease note that the English sentences are not identical for all language pairs. This means that the results are\nnot directly comparable across languages. In particular, the sentences tend to have less variety for several\nlow-resource languages, e.g. \"Tom needed water\", \"Tom needs water\", \"Tom is getting water\", ...\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": " @article{tatoeba,\n title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},\n author={Artetxe, Mikel and Schwenk, Holger},\n journal={arXiv:1812.10464v2},\n year={2018}\n}\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", "license": "", "features": {"source_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "target_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "source_lang": {"dtype": "string", "id": null, "_type": "Value"}, "target_lang": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "tatoeba.vie", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 211423, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.vie-eng.vie": {"num_bytes": 52721, "checksum": "6dbb02d778b0bfc8678cd85f87db76de55dd7e409a26fe32ad42d50e0f1fff77"}, "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.vie-eng.eng": {"num_bytes": 38686, "checksum": "a1f60bd8ae6c42224a4c050d2aa1ff4242d14827d64d7831e96ecf2b2c367f5f"}}, "download_size": 91407, "post_processing_size": null, "dataset_size": 211423, "size_in_bytes": 302830}, "udpos.Afrikaans": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Afrikaans", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 91302, "num_examples": 194, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 174256, "num_examples": 425, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 586382, "num_examples": 1315, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 851940, "size_in_bytes": 356068621}, "udpos.Arabic": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Arabic", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 593662, "num_examples": 909, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 973834, "num_examples": 1680, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4453694, "num_examples": 6075, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 6021190, "size_in_bytes": 361237871}, "udpos.Basque": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Basque", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 438683, "num_examples": 1798, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 444656, "num_examples": 1799, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1327725, "num_examples": 5396, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2211064, "size_in_bytes": 357427745}, "udpos.Bulgarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Bulgarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 347129, "num_examples": 1115, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 339959, "num_examples": 1116, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2689779, "num_examples": 8907, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3376867, "size_in_bytes": 358593548}, "udpos.Dutch": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Dutch", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 393604, "num_examples": 1394, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 397916, "num_examples": 1471, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4518018, "num_examples": 18051, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5309538, "size_in_bytes": 360526219}, "udpos.English": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.English", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1042052, "num_examples": 3974, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1421160, "num_examples": 5440, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6225545, "num_examples": 21253, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8688757, "size_in_bytes": 363905438}, "udpos.Estonian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Estonian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 814183, "num_examples": 3125, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1065713, "num_examples": 3760, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6614929, "num_examples": 25749, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8494825, "size_in_bytes": 363711506}, "udpos.Finnish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Finnish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 656658, "num_examples": 3239, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1025738, "num_examples": 4422, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 5613742, "num_examples": 27198, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 7296138, "size_in_bytes": 362512819}, "udpos.French": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.French", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1294108, "num_examples": 5979, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1731061, "num_examples": 9465, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 10118993, "num_examples": 47308, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 13144162, "size_in_bytes": 368360843}, "udpos.German": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.German", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 6044862, "num_examples": 19233, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 7345899, "num_examples": 22458, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 54773981, "num_examples": 166849, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 68164742, "size_in_bytes": 423381423}, "udpos.Greek": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Greek", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1062459, "num_examples": 2559, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1028677, "num_examples": 2809, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 8932140, "num_examples": 28152, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 11023276, "size_in_bytes": 366239957}, "udpos.Hebrew": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Hebrew", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 210025, "num_examples": 484, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 223877, "num_examples": 491, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2505703, "num_examples": 5241, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2939605, "size_in_bytes": 358156286}, "udpos.Hindi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Hindi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 839714, "num_examples": 1659, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1400237, "num_examples": 2684, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 6690274, "num_examples": 13304, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 8930225, "size_in_bytes": 364146906}, "udpos.Hungarian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Hungarian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 215891, "num_examples": 441, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 193740, "num_examples": 449, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 372238, "num_examples": 910, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 781869, "size_in_bytes": 355998550}, "udpos.Indonesian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Indonesian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 220875, "num_examples": 559, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 557113, "num_examples": 1557, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 1710690, "num_examples": 4477, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2488678, "size_in_bytes": 357705359}, "udpos.Italian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Italian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 989008, "num_examples": 2278, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1337881, "num_examples": 3518, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11299329, "num_examples": 29685, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 13626218, "size_in_bytes": 368842899}, "udpos.Japanese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Japanese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 200368, "num_examples": 511, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 928914, "num_examples": 2372, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2792963, "num_examples": 7125, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3922245, "size_in_bytes": 359138926}, "udpos.Kazakh": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Kazakh", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 228936, "num_examples": 1047, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 11450, "num_examples": 31, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 240386, "size_in_bytes": 355457067}, "udpos.Korean": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Korean", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 782599, "num_examples": 3016, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1162551, "num_examples": 4276, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7341303, "num_examples": 27410, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9286453, "size_in_bytes": 364503134}, "udpos.Chinese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Chinese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 594460, "num_examples": 3038, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1236063, "num_examples": 5528, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 4218915, "num_examples": 18998, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 6049438, "size_in_bytes": 361266119}, "udpos.Marathi": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Marathi", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 8509, "num_examples": 46, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 7883, "num_examples": 47, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 59035, "num_examples": 373, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 75427, "size_in_bytes": 355292108}, "udpos.Persian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Persian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 317065, "num_examples": 599, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 320695, "num_examples": 600, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2400788, "num_examples": 4798, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 3038548, "size_in_bytes": 358255229}, "udpos.Portuguese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Portuguese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 712409, "num_examples": 1770, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1082594, "num_examples": 2681, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 7669580, "num_examples": 17992, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 9464583, "size_in_bytes": 364681264}, "udpos.Russian": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Russian", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 3457043, "num_examples": 9960, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 4236717, "num_examples": 11336, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 24230182, "num_examples": 67435, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 31923942, "size_in_bytes": 387140623}, "udpos.Spanish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Spanish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 1498777, "num_examples": 3054, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 1476512, "num_examples": 3147, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 13858442, "num_examples": 28492, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 16833731, "size_in_bytes": 372050412}, "udpos.Tagalog": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Tagalog", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5165, "num_examples": 55, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 5165, "size_in_bytes": 355221846}, "udpos.Tamil": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Tamil", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40043, "num_examples": 80, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 62378, "num_examples": 120, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 202608, "num_examples": 400, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 305029, "size_in_bytes": 355521710}, "udpos.Telugu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Telugu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 18002, "num_examples": 131, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 19587, "num_examples": 146, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 138061, "num_examples": 1051, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 175650, "size_in_bytes": 355392331}, "udpos.Thai": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Thai", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 561348, "num_examples": 1000, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 561348, "size_in_bytes": 355778029}, "udpos.Turkish": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Turkish", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 186467, "num_examples": 988, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 827394, "num_examples": 4785, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 704417, "num_examples": 3664, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 1718278, "size_in_bytes": 356934959}, "udpos.Urdu": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Urdu", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 284273, "num_examples": 552, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 288565, "num_examples": 535, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 2107374, "num_examples": 4043, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 2680212, "size_in_bytes": 357896893}, "udpos.Vietnamese": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Vietnamese", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 206200, "num_examples": 800, "dataset_name": "xtreme"}, "test": {"name": "test", "num_bytes": 214075, "num_examples": 800, "dataset_name": "xtreme"}, "train": {"name": "train", "num_bytes": 367347, "num_examples": 1400, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 787622, "size_in_bytes": 356004303}, "udpos.Yoruba": {"description": "Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological\nfeatures, and syntactic dependencies) across different human languages. UD is an open community effort with over 200\ncontributors producing more than 100 treebanks in over 70 languages. If you\u2019re new to UD, you should start by reading\nthe first part of the Short Introduction and then browsing the annotation guidelines.\n\nThe Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of\nthe cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages\n(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of\nsyntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,\nand availability of training data. 
Among these are many under-studied languages, such as the Dravidian languages Tamil\n(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the\nNiger-Congo languages Swahili and Yoruba, spoken in Africa.\n", "citation": "\n@article{hu2020xtreme,\n author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},\n title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},\n journal = {CoRR},\n volume = {abs/2003.11080},\n year = {2020},\n archivePrefix = {arXiv},\n eprint = {2003.11080}\n}\n", "homepage": "https://github.com/google-research/xtreme\thttps://universaldependencies.org/", "license": "", "features": {"tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 17, "names": ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"], "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "xtreme", "config_name": "udpos.Yoruba", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 44668, "num_examples": 100, "dataset_name": "xtreme"}}, "download_checksums": {"https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz": {"num_bytes": 355216681, "checksum": "5ff973e44345a5f69b94cc1427158e14e851c967d58773cc2ac5a1d3adaca409"}}, "download_size": 355216681, "post_processing_size": null, "dataset_size": 44668, "size_in_bytes": 355261349}} \ No newline at end of file diff --git a/tatoeba.afr/validation-00000-of-00001.parquet b/tatoeba.afr/validation-00000-of-00001.parquet deleted file mode 100644 index c2ee1917b3145821b0920944bfd918eaa0df46e6..0000000000000000000000000000000000000000 --- a/tatoeba.afr/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:253d33ce000b2e8ba1108d987c7425f2ebb69f4cecb665cbffd4e0206d4e4100 -size 47676 diff --git a/tatoeba.ara/validation-00000-of-00001.parquet b/tatoeba.ara/validation-00000-of-00001.parquet deleted file mode 100644 index 5bf40242dd73a9db28c540b3812416a4ec2550e5..0000000000000000000000000000000000000000 --- a/tatoeba.ara/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:647d7ee51da954f60eb53e1dda2d0e481193bc539606871edf2c6d92241f0324 -size 51228 diff --git a/tatoeba.ben/validation-00000-of-00001.parquet b/tatoeba.ben/validation-00000-of-00001.parquet deleted file mode 100644 index f81c6fdcbc1ecf2c08d71c618e87df01cc4643f7..0000000000000000000000000000000000000000 --- a/tatoeba.ben/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1baf0f768d695415208b1f54bb1430563768121cf710f63f763ba8c2d947a665 -size 51362 diff --git a/tatoeba.bul/validation-00000-of-00001.parquet b/tatoeba.bul/validation-00000-of-00001.parquet deleted file mode 100644 index 34763b629305a9225d4b5199e81026ff75ef3214..0000000000000000000000000000000000000000 --- a/tatoeba.bul/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:0fc4840d9264c616d75d0772658d2246644c6ae10a483a561ff7153637fab381 -size 62454 diff --git a/tatoeba.cmn/validation-00000-of-00001.parquet b/tatoeba.cmn/validation-00000-of-00001.parquet deleted file mode 100644 index e00466a97c1238a59a1033f23ac38a22cd9b34a5..0000000000000000000000000000000000000000 --- a/tatoeba.cmn/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5c9932eec126dd5e504dd36dd9c78221f178e27584ff4d046a98bc1ebf0ebe5d -size 58281 diff --git a/tatoeba.deu/validation-00000-of-00001.parquet b/tatoeba.deu/validation-00000-of-00001.parquet deleted file mode 100644 index 71232f59a02b7473dd63395f5bd8e28081f34a61..0000000000000000000000000000000000000000 --- a/tatoeba.deu/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3a0b0eb0bfb330a72c615f126318d92720885f8e8321c976e90cbb14fb62b82f -size 79066 diff --git a/tatoeba.ell/validation-00000-of-00001.parquet b/tatoeba.ell/validation-00000-of-00001.parquet deleted file mode 100644 index fa99641108e579c13968725da514e2005b34251b..0000000000000000000000000000000000000000 --- a/tatoeba.ell/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e14bbdabff69a565a3111adc4753b37d7ce2f0ef28316b4768f58383af1c1199 -size 52251 diff --git a/tatoeba.est/validation-00000-of-00001.parquet b/tatoeba.est/validation-00000-of-00001.parquet deleted file mode 100644 index 48005ad0717476ddb0a8b064e09629b7bcba8e92..0000000000000000000000000000000000000000 --- a/tatoeba.est/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:98faf8696fb43519c9cab5409989bbf35ed42bff1e9e81f6382de13b5890f8b0 -size 49968 diff --git a/tatoeba.eus/validation-00000-of-00001.parquet b/tatoeba.eus/validation-00000-of-00001.parquet deleted file mode 100644 index 700af9245c496b1e14fa0afd479948ea43646781..0000000000000000000000000000000000000000 --- a/tatoeba.eus/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:73aac38292d95dab169b83b49917426d6ab2341c45a4f57c63231d8f1c257654 -size 54271 diff --git a/tatoeba.fin/validation-00000-of-00001.parquet b/tatoeba.fin/validation-00000-of-00001.parquet deleted file mode 100644 index c07fa96cd5a374c5f20374135ea41a95d42a74cc..0000000000000000000000000000000000000000 --- a/tatoeba.fin/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6aeb32dd0537d873c749ab54a310607352eabe254e606e6e9ba906daf6f30791 -size 60580 diff --git a/tatoeba.fra/validation-00000-of-00001.parquet b/tatoeba.fra/validation-00000-of-00001.parquet deleted file mode 100644 index a07f206801d44bc4d0795af47580d2373194474b..0000000000000000000000000000000000000000 --- a/tatoeba.fra/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:52e353621bab3a0b8f27bbb46d7d9edaa6ff2b39b4c8ef238df2448e6bac94a6 -size 60925 diff --git a/tatoeba.heb/validation-00000-of-00001.parquet b/tatoeba.heb/validation-00000-of-00001.parquet deleted file mode 100644 index 756183de69c91c8658abde6df687f5f39143a99f..0000000000000000000000000000000000000000 --- a/tatoeba.heb/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:928d700071d4ed6fb29baa2068ddfd50e2da140116049a40e61966a7c63c8e58 -size 57306 diff --git a/tatoeba.hin/validation-00000-of-00001.parquet b/tatoeba.hin/validation-00000-of-00001.parquet deleted file mode 100644 index f7e4a2e571c7f401c11bc7e85f680f51267b2355..0000000000000000000000000000000000000000 --- a/tatoeba.hin/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aef2ed0ef53f7c83406450c7475459756a14965ce3adaa2f0ab767c3be02fb9d -size 68816 diff --git a/tatoeba.hun/validation-00000-of-00001.parquet b/tatoeba.hun/validation-00000-of-00001.parquet deleted file mode 100644 index 733854e446003c24877e88487d25994cbac69618..0000000000000000000000000000000000000000 --- a/tatoeba.hun/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:11313bc460b063f9fa32a2ee14e71f616c7a8424db2fb84bd62ac866f8442fc8 -size 58096 diff --git a/tatoeba.ind/validation-00000-of-00001.parquet b/tatoeba.ind/validation-00000-of-00001.parquet deleted file mode 100644 index 1bf17e651e7e3cd821ff2207dab87cd3a3fefad7..0000000000000000000000000000000000000000 --- a/tatoeba.ind/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d5eec2a4fec3d8a5faee990b752cc8438b1117fada567e6445187762fbb79728 -size 57047 diff --git a/tatoeba.ita/validation-00000-of-00001.parquet b/tatoeba.ita/validation-00000-of-00001.parquet deleted file mode 100644 index 8f26afc2dd7dfe0b87cd0fb8fa63b7ea3c6d9950..0000000000000000000000000000000000000000 --- a/tatoeba.ita/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d8637ce2fb58bc35928acbbb78ab0a3883aa8e52aa3811cda2da471ee3252e76 -size 52422 diff --git a/tatoeba.jav/validation-00000-of-00001.parquet b/tatoeba.jav/validation-00000-of-00001.parquet deleted file mode 100644 index df6bf7b4217fffae53b943ff700565e67b4db784..0000000000000000000000000000000000000000 --- a/tatoeba.jav/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7364fe192faeda9a1f9e4cd4ec822eac3c006238062cc4413a1861631e309f27 -size 15208 diff --git a/tatoeba.jpn/validation-00000-of-00001.parquet b/tatoeba.jpn/validation-00000-of-00001.parquet deleted file mode 100644 index 213cca0a1f757f0579c1f3bb94a30d7acbeaeb01..0000000000000000000000000000000000000000 --- a/tatoeba.jpn/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b6d81c3fb53343faae217e38dad70e7b313cbe44dd28016de5277677e85afbfc -size 66620 diff --git a/tatoeba.kat/validation-00000-of-00001.parquet b/tatoeba.kat/validation-00000-of-00001.parquet deleted file mode 100644 index c78e4d2165084d6598c8db3d1f2766bad2b875e7..0000000000000000000000000000000000000000 --- a/tatoeba.kat/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e05f0bc074fce55d0ce5f4dc9c728a153fffb16b8f277ce0bc667826500ffa4f -size 41759 diff --git a/tatoeba.kaz/validation-00000-of-00001.parquet b/tatoeba.kaz/validation-00000-of-00001.parquet deleted file mode 100644 index 44bc5a9935969da4b7c52a2d05e6b864ca7fecb3..0000000000000000000000000000000000000000 --- a/tatoeba.kaz/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:88081be33d2ba6ae2f55a1a22398cb723e605b4d42ae9c1c51ad1004457ad204 -size 35693 diff --git a/tatoeba.kor/validation-00000-of-00001.parquet b/tatoeba.kor/validation-00000-of-00001.parquet deleted file mode 100644 index e40c10e4669efdd6180f4dc27e638c3a0466569c..0000000000000000000000000000000000000000 --- a/tatoeba.kor/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:375c53e160194d790e894398b81d533a99c8c25c035be54ca5819f6c092f4eb5 -size 61210 diff --git a/tatoeba.mal/validation-00000-of-00001.parquet b/tatoeba.mal/validation-00000-of-00001.parquet deleted file mode 100644 index 6b1db65cb2a509c51e72f44b1c6fd60f78a31046..0000000000000000000000000000000000000000 --- a/tatoeba.mal/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9521e08bd90485d9db20b8bfb6278e165ed58125a276b5c46a48db4a36b253ba -size 51077 diff --git a/tatoeba.mar/validation-00000-of-00001.parquet b/tatoeba.mar/validation-00000-of-00001.parquet deleted file mode 100644 index c4e43715f9cd0d9246be262a342ae030a1fb7ca0..0000000000000000000000000000000000000000 --- a/tatoeba.mar/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:450884ce2d7a8a7932265b414b702ed5eb5695829b3f3f0dd271fda2630934c0 -size 56575 diff --git a/tatoeba.nld/validation-00000-of-00001.parquet b/tatoeba.nld/validation-00000-of-00001.parquet deleted file mode 100644 index 6ffedf20b76c5fea23d219155e24b0724c25a307..0000000000000000000000000000000000000000 --- a/tatoeba.nld/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:40406f6da6cfe262c2f1c350f9cda458242e32d64c016dfff547166ca0dd811e -size 59774 diff --git a/tatoeba.pes/validation-00000-of-00001.parquet b/tatoeba.pes/validation-00000-of-00001.parquet deleted file mode 100644 index b4962a0a5c241f6fd3c3f7272d33208823c98621..0000000000000000000000000000000000000000 --- a/tatoeba.pes/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e61e0164a167e404477b29ec8b45ead4686ede3afa683a1c1102297b9287fe9f -size 64642 diff --git a/tatoeba.por/validation-00000-of-00001.parquet b/tatoeba.por/validation-00000-of-00001.parquet deleted file mode 100644 index 9431dffb5a06d1c4df7a26e7aae895ee4f1e2085..0000000000000000000000000000000000000000 --- a/tatoeba.por/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:edf65a12c3b07f61dfb3dd124ee16e99489a1f31fbddbbbb8baabfe09bf44b88 -size 58250 diff --git a/tatoeba.rus/validation-00000-of-00001.parquet b/tatoeba.rus/validation-00000-of-00001.parquet deleted file mode 100644 index eba11a08415647f16b6c4667b5e05cf327741181..0000000000000000000000000000000000000000 --- a/tatoeba.rus/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:df7046815fb5ca6411a7d4b8f6b3ff0c4f9317b87f0e2a876d0c19c4e837f6fd -size 61601 diff --git a/tatoeba.spa/validation-00000-of-00001.parquet b/tatoeba.spa/validation-00000-of-00001.parquet deleted file mode 100644 index bd626912f38b59f2157c202f75f19388cacdffe0..0000000000000000000000000000000000000000 --- a/tatoeba.spa/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:a91a969cecfa6f416938fc4b7c0ff3cd814cec6932cfeddf174c0472e9e3aae6 -size 57055 diff --git a/tatoeba.swh/validation-00000-of-00001.parquet b/tatoeba.swh/validation-00000-of-00001.parquet deleted file mode 100644 index a57f9544e913e4f992240adc38a92f77ebb92cb1..0000000000000000000000000000000000000000 --- a/tatoeba.swh/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3c3f99af2b4453f9a37734402fb403be0db4d2a6535a60f565098de9ec54e61f -size 19362 diff --git a/tatoeba.tam/validation-00000-of-00001.parquet b/tatoeba.tam/validation-00000-of-00001.parquet deleted file mode 100644 index d10c9dc95370cd27a0623c2495cd0db99602713a..0000000000000000000000000000000000000000 --- a/tatoeba.tam/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1bfd02ce5aaf8e68c9b0a7a2c0257b80b311cc58feb7b2945d4bfe8c9cc7fda6 -size 23648 diff --git a/tatoeba.tel/validation-00000-of-00001.parquet b/tatoeba.tel/validation-00000-of-00001.parquet deleted file mode 100644 index 5cb39a69bec3e48ee6827913054af8343d37a705..0000000000000000000000000000000000000000 --- a/tatoeba.tel/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e994978040ab6f1f7a261404d0af5dcfcd5171ebbd18c2602e938a771241c1a5 -size 18260 diff --git a/tatoeba.tgl/validation-00000-of-00001.parquet b/tatoeba.tgl/validation-00000-of-00001.parquet deleted file mode 100644 index e23092d7c7b5220895bfd6f1935540337d846a83..0000000000000000000000000000000000000000 --- a/tatoeba.tgl/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0bfa772b90f31e8fa72a549accb2cbe1500615dc641536d574f1046b9400292b -size 53699 diff --git a/tatoeba.tha/validation-00000-of-00001.parquet b/tatoeba.tha/validation-00000-of-00001.parquet deleted file mode 100644 index 0840884dcaa97d4194aa3135f95c4bf923ed46f4..0000000000000000000000000000000000000000 --- a/tatoeba.tha/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:423b00ac30e712915d0aa1ffa73a562916748fdae1b7e91e53af9ae746e52bec -size 39659 diff --git a/tatoeba.tur/validation-00000-of-00001.parquet b/tatoeba.tur/validation-00000-of-00001.parquet deleted file mode 100644 index 09f7d72393f6163cc79b74a4f81b6538085bea03..0000000000000000000000000000000000000000 --- a/tatoeba.tur/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:145b6159e73eb4d9a8be577481845722fa03be25ff9784ec70b50fff49ecea6b -size 54137 diff --git a/tatoeba.urd/validation-00000-of-00001.parquet b/tatoeba.urd/validation-00000-of-00001.parquet deleted file mode 100644 index c52664e10f97471c1efb36ed161506407fa66e37..0000000000000000000000000000000000000000 --- a/tatoeba.urd/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf8f48588eed612d9927ab3cfb542e637e47d6389bae65ff4de2b85ab81030f5 -size 60399 diff --git a/tatoeba.vie/validation-00000-of-00001.parquet b/tatoeba.vie/validation-00000-of-00001.parquet deleted file mode 100644 index a273c5d1ac5596a71bade4ee6ee5668ea930c7e8..0000000000000000000000000000000000000000 --- a/tatoeba.vie/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:938c7af51a38c1810936a0b052cd15d39b0168cef93d81f366c623662ea38481 -size 66746 diff --git a/tydiqa/train-00000-of-00001.parquet b/tydiqa/train-00000-of-00001.parquet deleted file mode 100644 index 45887e874f29b8549f639a7b3697053c565d3217..0000000000000000000000000000000000000000 --- a/tydiqa/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd1a8c2a27be3a14ec22c9990665d719e93ce4be341427fd53f9001be954f416 -size 26918058 diff --git a/tydiqa/validation-00000-of-00001.parquet b/tydiqa/validation-00000-of-00001.parquet deleted file mode 100644 index 03c660b9462b2b8d9b960738d9a52442bb5e25ff..0000000000000000000000000000000000000000 --- a/tydiqa/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0dbad5124a1b0521c180e89aa5431c23eb246f5c305a722a0fcafa35e589ff07 -size 2484180 diff --git a/udpos.Afrikaans/test-00000-of-00001.parquet b/udpos.Afrikaans/test-00000-of-00001.parquet deleted file mode 100644 index 29ccbc73d6862a3d201672f1c60e6462332ef1ad..0000000000000000000000000000000000000000 --- a/udpos.Afrikaans/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b0df2dd9d4519387ac1e8ca0a2bf1472b3493fb00378649744d4fb4847a0c3f0 -size 44369 diff --git a/udpos.Afrikaans/train-00000-of-00001.parquet b/udpos.Afrikaans/train-00000-of-00001.parquet deleted file mode 100644 index aebb3574ca7484c2a6b3948affc7db5b88629123..0000000000000000000000000000000000000000 --- a/udpos.Afrikaans/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8541052ead5eb51265bf6ff61d4ec9860ca0acaf7658ec162758d53c6488a5dd -size 125755 diff --git a/udpos.Afrikaans/validation-00000-of-00001.parquet b/udpos.Afrikaans/validation-00000-of-00001.parquet deleted file mode 100644 index 2461456bcc221415fb034b4ed5b327b5ea539350..0000000000000000000000000000000000000000 --- a/udpos.Afrikaans/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:be8c20eef7a57863035b38eb367c1b187caba6382a9470863691b9190198871d -size 23664 diff --git a/udpos.Arabic/test-00000-of-00001.parquet b/udpos.Arabic/test-00000-of-00001.parquet deleted file mode 100644 index c5e785a3ecf4a587807f7e6881c8bd0dd54a15cb..0000000000000000000000000000000000000000 --- a/udpos.Arabic/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:28a59cb1ea6712fc88eab3f52b241d23991059990045d3ba85f8febc7761c4dc -size 214916 diff --git a/udpos.Arabic/train-00000-of-00001.parquet b/udpos.Arabic/train-00000-of-00001.parquet deleted file mode 100644 index c44f8dfa9354516438047b18c835e8276c4ac175..0000000000000000000000000000000000000000 --- a/udpos.Arabic/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:52a61be1aee7cf8ea86f394939b2250e8c2822d23e71e9e9ec5c9a4d3c0e60f2 -size 849651 diff --git a/udpos.Arabic/validation-00000-of-00001.parquet b/udpos.Arabic/validation-00000-of-00001.parquet deleted file mode 100644 index 3330fb2fa28d29e809cc2d5d9b0c3e86cb4d6f34..0000000000000000000000000000000000000000 --- a/udpos.Arabic/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ea48f253921ae521bbc5b77e326935e9053b99a446c1d81c0b3ada2b241c87f8 -size 121546 diff --git 
a/udpos.Basque/test-00000-of-00001.parquet b/udpos.Basque/test-00000-of-00001.parquet deleted file mode 100644 index 9c5dfd05896384a879d1760c91ce797cfcc193fd..0000000000000000000000000000000000000000 --- a/udpos.Basque/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c1223715cb1ca132c2f93d8edb2ccd3e54340bb27ae0d87012e344dce81a9a9b -size 141893 diff --git a/udpos.Basque/train-00000-of-00001.parquet b/udpos.Basque/train-00000-of-00001.parquet deleted file mode 100644 index 6060716e05897f66b069a35f00e7babc27b5c5b0..0000000000000000000000000000000000000000 --- a/udpos.Basque/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:112b9ad7b7174948805927744466ae63e9353bf9b5742fa8ec9b48ac577260a6 -size 421174 diff --git a/udpos.Basque/validation-00000-of-00001.parquet b/udpos.Basque/validation-00000-of-00001.parquet deleted file mode 100644 index bad54792ee62edb33066d14e5832888b6cb0dfa7..0000000000000000000000000000000000000000 --- a/udpos.Basque/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f4d82a5b461466c9246c2aaeffd928e1149281a075602d9c0e081d250cd600de -size 140027 diff --git a/udpos.Bulgarian/test-00000-of-00001.parquet b/udpos.Bulgarian/test-00000-of-00001.parquet deleted file mode 100644 index a898b417f136b8197d87492f54755696822801ee..0000000000000000000000000000000000000000 --- a/udpos.Bulgarian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c843dc8ae3ecbda81cb1743c7823968af301c5dc67f5c03a5bf57b9c96c8fa8e -size 102163 diff --git a/udpos.Bulgarian/train-00000-of-00001.parquet b/udpos.Bulgarian/train-00000-of-00001.parquet deleted file mode 100644 index a24c98e682a283352c670bf60bf0c924c7264552..0000000000000000000000000000000000000000 --- a/udpos.Bulgarian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6058b57eafed437616f8731b5167f9b04d8fa1e03002dd6b85637a40a2024e90 -size 720206 diff --git a/udpos.Bulgarian/validation-00000-of-00001.parquet b/udpos.Bulgarian/validation-00000-of-00001.parquet deleted file mode 100644 index 538502287c1a77e917924f00d95f2e7ed978693d..0000000000000000000000000000000000000000 --- a/udpos.Bulgarian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c3de80908d2742b6c200561d3fc5f2eccec029bf907c8836031e30f7173c6677 -size 103817 diff --git a/udpos.Chinese/test-00000-of-00001.parquet b/udpos.Chinese/test-00000-of-00001.parquet deleted file mode 100644 index c2dbdbf1466ce50ea984e1bcee62da39d8375c45..0000000000000000000000000000000000000000 --- a/udpos.Chinese/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c45a1020924a931db87176b85b690b97081f3dcf3269167d23a92a89f563ee1b -size 298180 diff --git a/udpos.Chinese/train-00000-of-00001.parquet b/udpos.Chinese/train-00000-of-00001.parquet deleted file mode 100644 index 5a0a2a31e787fedfe1e12200423226fef0167953..0000000000000000000000000000000000000000 --- a/udpos.Chinese/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:689eb8008a63156a1d09bee495b129a3e9e62a6b9c799076c120bd540b791c59 -size 1015171 diff --git a/udpos.Chinese/validation-00000-of-00001.parquet b/udpos.Chinese/validation-00000-of-00001.parquet 
deleted file mode 100644 index 7a1ff7d51d362b8004f8ae9442f0126ade51b464..0000000000000000000000000000000000000000 --- a/udpos.Chinese/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:24b2a9a01456a1b3987dbdd66ce0046f542f1f49f98e428af1d8990856022aff -size 158396 diff --git a/udpos.Dutch/test-00000-of-00001.parquet b/udpos.Dutch/test-00000-of-00001.parquet deleted file mode 100644 index 0e11651783712f94a04e6e1ce6da33df70ab2179..0000000000000000000000000000000000000000 --- a/udpos.Dutch/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7cbdcb92ebab183c05e12b714e9072dae0197a20a0080393fc2fca68e7c5beb7 -size 104912 diff --git a/udpos.Dutch/train-00000-of-00001.parquet b/udpos.Dutch/train-00000-of-00001.parquet deleted file mode 100644 index 653243c6e8ebcd42f4f7551b8d8de68659c4f8ca..0000000000000000000000000000000000000000 --- a/udpos.Dutch/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:42ad2ab50173f1f7b436eb046aa049d7539687cbaa495cf86402cda154693dfb -size 1198233 diff --git a/udpos.Dutch/validation-00000-of-00001.parquet b/udpos.Dutch/validation-00000-of-00001.parquet deleted file mode 100644 index b4d99507671458ab92245119472e347baf8a93a4..0000000000000000000000000000000000000000 --- a/udpos.Dutch/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b0cb8f2f9877682bb670fb4be6d901ec376ead560745400373865bc7aa08f9a5 -size 107837 diff --git a/udpos.English/test-00000-of-00001.parquet b/udpos.English/test-00000-of-00001.parquet deleted file mode 100644 index 64b7ae293442bfbd7f9398df5ae04530796f0bce..0000000000000000000000000000000000000000 --- a/udpos.English/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7f0dd8683f3d3fdfd13c9584fe102713637a764b68ae6863b208231124297104 -size 366880 diff --git a/udpos.English/train-00000-of-00001.parquet b/udpos.English/train-00000-of-00001.parquet deleted file mode 100644 index c4aadc306be2582e4a176545049c7a979a2fdebb..0000000000000000000000000000000000000000 --- a/udpos.English/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:788bf430b772850e9fa4f6f83f2cf3503051f96cf528067fd91180693b7cf1ea -size 1484961 diff --git a/udpos.English/validation-00000-of-00001.parquet b/udpos.English/validation-00000-of-00001.parquet deleted file mode 100644 index 1014c5c0b10a857796add5699649ec37170b8991..0000000000000000000000000000000000000000 --- a/udpos.English/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:06095dce8703ae4932754692f92c2750a3f2233949295c52c01b844e2eda1404 -size 264694 diff --git a/udpos.Estonian/test-00000-of-00001.parquet b/udpos.Estonian/test-00000-of-00001.parquet deleted file mode 100644 index fc095ffb596318e979bf9f5b0e3e31446b793d24..0000000000000000000000000000000000000000 --- a/udpos.Estonian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:398df4627e647815addcfde28b4b5765d3d158165a0561c6ebf1d904b3ffca93 -size 324088 diff --git a/udpos.Estonian/train-00000-of-00001.parquet b/udpos.Estonian/train-00000-of-00001.parquet deleted file mode 100644 index d6dd66c6ef48799ca8237eaa206a2215b1e17ea5..0000000000000000000000000000000000000000 
--- a/udpos.Estonian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8d264defbd01ab40b5c4626b5349d4e2ed23f14a9aba0384a859177d6918fa08 -size 2029706 diff --git a/udpos.Estonian/validation-00000-of-00001.parquet b/udpos.Estonian/validation-00000-of-00001.parquet deleted file mode 100644 index 1012541b6a42c0dc56c6d4accc17dae7a30b0b95..0000000000000000000000000000000000000000 --- a/udpos.Estonian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b4691c1631e7a7ef57b05f27c57ce3f503d997eac5118b9d35f2564983184852 -size 265327 diff --git a/udpos.Finnish/test-00000-of-00001.parquet b/udpos.Finnish/test-00000-of-00001.parquet deleted file mode 100644 index 062b7ec72b0e7d8200fee66c7a3c162249b94e28..0000000000000000000000000000000000000000 --- a/udpos.Finnish/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f49a139ecccda79231cc44cfd959b35326cc8aa12595a4da186ba7b27f6c712c -size 360707 diff --git a/udpos.Finnish/train-00000-of-00001.parquet b/udpos.Finnish/train-00000-of-00001.parquet deleted file mode 100644 index 3d7893bd95433d981ad24dc65a2becee49cc000d..0000000000000000000000000000000000000000 --- a/udpos.Finnish/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:123e104a4ffc6316ddf9d7f202fba7156113d2fd382253e343d7d43f0df28fb2 -size 1905926 diff --git a/udpos.Finnish/validation-00000-of-00001.parquet b/udpos.Finnish/validation-00000-of-00001.parquet deleted file mode 100644 index 4c4a5bcafcd1f13593a7fe2cc07745d0810245f4..0000000000000000000000000000000000000000 --- a/udpos.Finnish/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8ed2c9afbda4fd17af76151b9291e78a9b4d5f229884065532d2616b965a9ac3 -size 236584 diff --git a/udpos.French/test-00000-of-00001.parquet b/udpos.French/test-00000-of-00001.parquet deleted file mode 100644 index ad8e8a7242aecd27b430d5943c3659ae9dcc7052..0000000000000000000000000000000000000000 --- a/udpos.French/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8b49a6380856299b44c6c5262ec51d30211e53bf674bddcc4d9c1b399ec951d9 -size 428204 diff --git a/udpos.French/train-00000-of-00001.parquet b/udpos.French/train-00000-of-00001.parquet deleted file mode 100644 index 2bfc7fb9d8211d5e0b8b5f2eb1da77933608ddc3..0000000000000000000000000000000000000000 --- a/udpos.French/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da498ca606c7b9fb87569a0300103a44d6154192d025b1d984e2c8efaa00d20e -size 2611124 diff --git a/udpos.French/validation-00000-of-00001.parquet b/udpos.French/validation-00000-of-00001.parquet deleted file mode 100644 index 9d90be808e6d1fa95b01282b93699f48cc6726ff..0000000000000000000000000000000000000000 --- a/udpos.French/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f720deb783ffd2e265072738ff617684cd2a88313f51ca5bfdc08f3967fba80c -size 339352 diff --git a/udpos.German/test-00000-of-00001.parquet b/udpos.German/test-00000-of-00001.parquet deleted file mode 100644 index a66985814726593ff81928adae8af6c7d2bf1b87..0000000000000000000000000000000000000000 --- a/udpos.German/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:5fddb9a5dfa0d9850a8c624216c12acccc7d4d1a74705c8154fdf354d1efc443 -size 1995300 diff --git a/udpos.German/train-00000-of-00001.parquet b/udpos.German/train-00000-of-00001.parquet deleted file mode 100644 index 818af513010ee4f10d864df1ae759896c686e828..0000000000000000000000000000000000000000 --- a/udpos.German/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c89cc7e1600ffa722795a2a8a272bd9b49321e5506b9ed2481e26c12f78d4ad6 -size 14962896 diff --git a/udpos.German/validation-00000-of-00001.parquet b/udpos.German/validation-00000-of-00001.parquet deleted file mode 100644 index ebe2f57b57892850da2eded4e337992f7fa1434c..0000000000000000000000000000000000000000 --- a/udpos.German/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2c9ecaad86df347ad3a87c62237aafc0fd838b8e7635a73049eb296905dfe952 -size 1664959 diff --git a/udpos.Greek/test-00000-of-00001.parquet b/udpos.Greek/test-00000-of-00001.parquet deleted file mode 100644 index ae8b8e269ec659c607dba5eea502556f348c090c..0000000000000000000000000000000000000000 --- a/udpos.Greek/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fa2107509a990d17389ed3a871253b9cc2dd1cd7c5142fa2755670ddcb28ad88 -size 276114 diff --git a/udpos.Greek/train-00000-of-00001.parquet b/udpos.Greek/train-00000-of-00001.parquet deleted file mode 100644 index ab010acaa747c45f9dc54db0d2bba75019cb60e7..0000000000000000000000000000000000000000 --- a/udpos.Greek/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ea69c1304abcfd14965a90d3b155961262d2f916e82e5a97dbfa57e2a96a7d3a -size 2209999 diff --git a/udpos.Greek/validation-00000-of-00001.parquet b/udpos.Greek/validation-00000-of-00001.parquet deleted file mode 100644 index 6e1b9c07546dbe7b9cd627f83ff460ebc068b71f..0000000000000000000000000000000000000000 --- a/udpos.Greek/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bd8213dd15f5912854a6b830bc54184c5ec445b79ab1ba15cd383cf77870c0e3 -size 277180 diff --git a/udpos.Hebrew/test-00000-of-00001.parquet b/udpos.Hebrew/test-00000-of-00001.parquet deleted file mode 100644 index 0912921662d96496a5e187c1ae23cdf6e059b2c3..0000000000000000000000000000000000000000 --- a/udpos.Hebrew/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:407158cc99439bdc6a092dba8260efc86606a333e6b8eb62de57fcbe5209a6e1 -size 50816 diff --git a/udpos.Hebrew/train-00000-of-00001.parquet b/udpos.Hebrew/train-00000-of-00001.parquet deleted file mode 100644 index bd7dcee2468d613aad510104bb11e8e1f9ab820e..0000000000000000000000000000000000000000 --- a/udpos.Hebrew/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:14165cad20653cbc2194156aa77882745ac999b2e26b70e9c6ba01391db67891 -size 525289 diff --git a/udpos.Hebrew/validation-00000-of-00001.parquet b/udpos.Hebrew/validation-00000-of-00001.parquet deleted file mode 100644 index 90c65fc00ec03d96d853398a0dba012d10692b2a..0000000000000000000000000000000000000000 --- a/udpos.Hebrew/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:892c9df82d3253448300bfc6d341fc4735963835620e130be0f5b9dfcc3abf40 -size 
48666 diff --git a/udpos.Hindi/test-00000-of-00001.parquet b/udpos.Hindi/test-00000-of-00001.parquet deleted file mode 100644 index 0f8292185050fec233b1f4f37c5ae2cc4906b319..0000000000000000000000000000000000000000 --- a/udpos.Hindi/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9581f286ee6967aa9b998fb98884d54b9795fc47ffaf719579ae3b6d88a681fc -size 244001 diff --git a/udpos.Hindi/train-00000-of-00001.parquet b/udpos.Hindi/train-00000-of-00001.parquet deleted file mode 100644 index 4651a5fd40ba41521766de5970aac0090463d668..0000000000000000000000000000000000000000 --- a/udpos.Hindi/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6b59d60d6a7802f6389383cf2ff62552a6a3246ef1510d3a98fd8469bcab51c9 -size 1083090 diff --git a/udpos.Hindi/validation-00000-of-00001.parquet b/udpos.Hindi/validation-00000-of-00001.parquet deleted file mode 100644 index 93033064a8f280b5db8883ef7528af88ac523f20..0000000000000000000000000000000000000000 --- a/udpos.Hindi/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fdfaa5e097b693b014544324f28f14930c6e32d7d136aa3cb4dcaaef1b9fa37b -size 141223 diff --git a/udpos.Hungarian/test-00000-of-00001.parquet b/udpos.Hungarian/test-00000-of-00001.parquet deleted file mode 100644 index c2d0085febed39a8c45dc15ce71b89ddb7be9af5..0000000000000000000000000000000000000000 --- a/udpos.Hungarian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fbcc933cd339cbc5ede7fba03c9d57bbecbee91e682a5fdcaa5700ca211a7d96 -size 65600 diff --git a/udpos.Hungarian/train-00000-of-00001.parquet b/udpos.Hungarian/train-00000-of-00001.parquet deleted file mode 100644 index 442ee600d88d34708c3f3bbf7a049e9917e61e70..0000000000000000000000000000000000000000 --- a/udpos.Hungarian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d3b599c71940f4389a37ad25a4849929f1b49c5b27f2168c657ca82681bfe88a -size 117572 diff --git a/udpos.Hungarian/validation-00000-of-00001.parquet b/udpos.Hungarian/validation-00000-of-00001.parquet deleted file mode 100644 index 974bd54906e9fab2b8dfde68b1f8bef736ac9cce..0000000000000000000000000000000000000000 --- a/udpos.Hungarian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:983be7b0c05e0db7972273ddc9f97e8f753b70e4400caa48eaf35c5ad35db28e -size 68710 diff --git a/udpos.Indonesian/test-00000-of-00001.parquet b/udpos.Indonesian/test-00000-of-00001.parquet deleted file mode 100644 index e0f5c2ebb0b16f3cd55e414cfdcc24ac26b1494a..0000000000000000000000000000000000000000 --- a/udpos.Indonesian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bb0af18126e03c1560db19e886b76b8e833991c9efb710ed645afd7b94346e36 -size 147720 diff --git a/udpos.Indonesian/train-00000-of-00001.parquet b/udpos.Indonesian/train-00000-of-00001.parquet deleted file mode 100644 index 259dee1cd357f5d153508d35499611409169f882..0000000000000000000000000000000000000000 --- a/udpos.Indonesian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:99eb58fc3c34116b6585a758d966460bfc1c423948d1d37eda0bb5d7a969f77a -size 470175 diff --git a/udpos.Indonesian/validation-00000-of-00001.parquet 
b/udpos.Indonesian/validation-00000-of-00001.parquet deleted file mode 100644 index f09ea18d74592a28e4386130df3e08b0bb8f9364..0000000000000000000000000000000000000000 --- a/udpos.Indonesian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:53686584967c36c08224130043018c209e3da112bd727819fc2006d17689b4f7 -size 66330 diff --git a/udpos.Italian/test-00000-of-00001.parquet b/udpos.Italian/test-00000-of-00001.parquet deleted file mode 100644 index 62d6361563c80b577fa388d82dec1f26bb9a4c42..0000000000000000000000000000000000000000 --- a/udpos.Italian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:197c3730c40a0b367f8ae3e5ce1b585439fdaf754e74d769371da22ad8dcfcec -size 343541 diff --git a/udpos.Italian/train-00000-of-00001.parquet b/udpos.Italian/train-00000-of-00001.parquet deleted file mode 100644 index 5433d1e256b646770ee5dcccfc93f0cc471c0c6b..0000000000000000000000000000000000000000 --- a/udpos.Italian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7ed8c1298cc791e9ed67fff78c5892b174b6860aed6400f5c23d6d8855e64ecc -size 2679902 diff --git a/udpos.Italian/validation-00000-of-00001.parquet b/udpos.Italian/validation-00000-of-00001.parquet deleted file mode 100644 index 0aa2a2c53c93a8c00e5ee437db64b6f969a54406..0000000000000000000000000000000000000000 --- a/udpos.Italian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a099952ce54dcd1e5b37742ee7570abb52dcf57cd93f802ea8f7e15bad27a843 -size 232803 diff --git a/udpos.Japanese/test-00000-of-00001.parquet b/udpos.Japanese/test-00000-of-00001.parquet deleted file mode 100644 index 23a667e486c155ed0010da5ff7a4091889d4e686..0000000000000000000000000000000000000000 --- a/udpos.Japanese/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f68c2c86f81b8c566524963cde9218e94432b09516813514fadf929550d98405 -size 226251 diff --git a/udpos.Japanese/train-00000-of-00001.parquet b/udpos.Japanese/train-00000-of-00001.parquet deleted file mode 100644 index 51529398c9f407c21aa9c428837d586880048cb0..0000000000000000000000000000000000000000 --- a/udpos.Japanese/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:389ad2a0aa6b775eb4e0d81dbef7e7848033af35d3d24b07b85f1c9540e7608a -size 729400 diff --git a/udpos.Japanese/validation-00000-of-00001.parquet b/udpos.Japanese/validation-00000-of-00001.parquet deleted file mode 100644 index c4ab2f482b9536788ac1cd998e0c95351bffd1ca..0000000000000000000000000000000000000000 --- a/udpos.Japanese/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6ba2de60acb3cf235c674cea211d8254f816f6a57d2e89f8dca620808968b64f -size 56631 diff --git a/udpos.Kazakh/test-00000-of-00001.parquet b/udpos.Kazakh/test-00000-of-00001.parquet deleted file mode 100644 index 67b57235f4751a8c5f70655e44e315c711019aaf..0000000000000000000000000000000000000000 --- a/udpos.Kazakh/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:278d4b31b32d68a72bf20ee8e400e832158245db15c64f49f02dbd1d8b5eb3b0 -size 70260 diff --git a/udpos.Kazakh/train-00000-of-00001.parquet b/udpos.Kazakh/train-00000-of-00001.parquet deleted file mode 100644 index 
7c93ef8ff601fd71f2f3752eafebd1bf60df02d6..0000000000000000000000000000000000000000 --- a/udpos.Kazakh/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ccee7b08d244a4892d7a194bb7686a5cb12ecf45792faf37880ef25c0345217b -size 6040 diff --git a/udpos.Korean/test-00000-of-00001.parquet b/udpos.Korean/test-00000-of-00001.parquet deleted file mode 100644 index 68c4ecf38562ea0a7ee5cfb252b515d78f99c461..0000000000000000000000000000000000000000 --- a/udpos.Korean/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:04e8337a74202a0b905ae4608901dddb9513bf5cf4ab6f71721fb1582642e219 -size 407115 diff --git a/udpos.Korean/train-00000-of-00001.parquet b/udpos.Korean/train-00000-of-00001.parquet deleted file mode 100644 index de4fdca4fbecfab12dc5801f143381876ed9b02a..0000000000000000000000000000000000000000 --- a/udpos.Korean/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1d6e58d0ae238408237e9a4d1ee57eb672e2745db0540787ead3569d045604db -size 2429204 diff --git a/udpos.Korean/validation-00000-of-00001.parquet b/udpos.Korean/validation-00000-of-00001.parquet deleted file mode 100644 index 41232a8db2f51ef11128e0ff9654ec7d0c7ceaa3..0000000000000000000000000000000000000000 --- a/udpos.Korean/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:337eb1a0c03c29d52d99c83bb8f14dbb57ad45e1aa90bd8dee23ccf8dab65d96 -size 278782 diff --git a/udpos.Marathi/test-00000-of-00001.parquet b/udpos.Marathi/test-00000-of-00001.parquet deleted file mode 100644 index cc56a260841538a2bb0504f9e99458663fda3a22..0000000000000000000000000000000000000000 --- a/udpos.Marathi/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2b973b1b73f5b0316251444a5aae2f32b0b7e2f36c1d166743144b80f3c77487 -size 4141 diff --git a/udpos.Marathi/train-00000-of-00001.parquet b/udpos.Marathi/train-00000-of-00001.parquet deleted file mode 100644 index 22a538e43802026a7f3069e40ec1c8c08a0948bb..0000000000000000000000000000000000000000 --- a/udpos.Marathi/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b5298eb7f517af46ce28602917d286e5afc05fbcb1874d1886d2f49b7ade1042 -size 13572 diff --git a/udpos.Marathi/validation-00000-of-00001.parquet b/udpos.Marathi/validation-00000-of-00001.parquet deleted file mode 100644 index 25348a55abec3478d10a3d713916de4302d8b677..0000000000000000000000000000000000000000 --- a/udpos.Marathi/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2e0914b937674e162c48a80627990727e0f06833f0ece716da871b963983562f -size 4420 diff --git a/udpos.Persian/test-00000-of-00001.parquet b/udpos.Persian/test-00000-of-00001.parquet deleted file mode 100644 index 1d80e0b653ae82b54e1fab0097b51e2a6e51e515..0000000000000000000000000000000000000000 --- a/udpos.Persian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8b87594ce1d4da0c772c2a968b1c325ad54fab0dca0911e96480d81baae6c098 -size 67280 diff --git a/udpos.Persian/train-00000-of-00001.parquet b/udpos.Persian/train-00000-of-00001.parquet deleted file mode 100644 index 793fa48b4a675a1628dfeab09ec72a039d11c7ed..0000000000000000000000000000000000000000 --- 
a/udpos.Persian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6f959072289ed786fb7995d3a6235ad766f0dfe246d19ab816727d8170ab5504 -size 472370 diff --git a/udpos.Persian/validation-00000-of-00001.parquet b/udpos.Persian/validation-00000-of-00001.parquet deleted file mode 100644 index 7911ce6686387c8845e4643fc35923fbd6c8c350..0000000000000000000000000000000000000000 --- a/udpos.Persian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fe8edf3498e529a7834f3b9089c1ef0f348e2b9f49e15879ea78900a73a8f6a7 -size 67262 diff --git a/udpos.Portuguese/test-00000-of-00001.parquet b/udpos.Portuguese/test-00000-of-00001.parquet deleted file mode 100644 index 8785f32364fd9576f25352b0d8a9dc1ad382476d..0000000000000000000000000000000000000000 --- a/udpos.Portuguese/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aa21c00ee2ff51601abc23a0feae103d73a2d13f4828f1ceb70e47aa84a951d0 -size 292834 diff --git a/udpos.Portuguese/train-00000-of-00001.parquet b/udpos.Portuguese/train-00000-of-00001.parquet deleted file mode 100644 index b4cf7165ebdcf8936ad08779e215f3e3bb3742cb..0000000000000000000000000000000000000000 --- a/udpos.Portuguese/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c96384047cade0f01b7f5b9f7e3caf429400d62637f580b4f9d7934b11bed39c -size 2018290 diff --git a/udpos.Portuguese/validation-00000-of-00001.parquet b/udpos.Portuguese/validation-00000-of-00001.parquet deleted file mode 100644 index 7443b678c19d089d3d07c0293be539e6a66b4341..0000000000000000000000000000000000000000 --- a/udpos.Portuguese/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5b005d231c3d96a82fbd0aa4f4cc87003cfa0076bea126eaac4cebb80301c4ec -size 194548 diff --git a/udpos.Russian/test-00000-of-00001.parquet b/udpos.Russian/test-00000-of-00001.parquet deleted file mode 100644 index 086f778993b9c2c42d6999c1d6ae94decebe85c7..0000000000000000000000000000000000000000 --- a/udpos.Russian/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8d241173fa5f896eeef877b6ae061933b7c60b26f6501ff8cd22505e6db302f6 -size 1198168 diff --git a/udpos.Russian/train-00000-of-00001.parquet b/udpos.Russian/train-00000-of-00001.parquet deleted file mode 100644 index 83a2be3ccb39916a1f77843f3945283ff3d277af..0000000000000000000000000000000000000000 --- a/udpos.Russian/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7dae257bca56d7dc182ff5dcc3d41434f83dde2a715986f004b55f1dc600e7fd -size 6652570 diff --git a/udpos.Russian/validation-00000-of-00001.parquet b/udpos.Russian/validation-00000-of-00001.parquet deleted file mode 100644 index 51ff1631183ccc393c0d01f1057a9668efa1bc5b..0000000000000000000000000000000000000000 --- a/udpos.Russian/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aea29cdd94bb9e5530b26c6fd4f227c035811e9a3573b39b1873008aeb5f3eea -size 967774 diff --git a/udpos.Spanish/test-00000-of-00001.parquet b/udpos.Spanish/test-00000-of-00001.parquet deleted file mode 100644 index 50978176c7e13badaae48c6fa58288266bb07600..0000000000000000000000000000000000000000 --- a/udpos.Spanish/test-00000-of-00001.parquet +++ /dev/null @@ 
-1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6c8d939473c2a149a5847712372a2c67e9c188842dc1fe07c330dfb6296f8af4 -size 381163 diff --git a/udpos.Spanish/train-00000-of-00001.parquet b/udpos.Spanish/train-00000-of-00001.parquet deleted file mode 100644 index cc85edadac3d3ee4971c86b4c6a0892efb6c9adc..0000000000000000000000000000000000000000 --- a/udpos.Spanish/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4480be7b7db1ea1a3c8825b9dbef934003722cb117fdb1e718224b2a39cd1e7f -size 3572017 diff --git a/udpos.Spanish/validation-00000-of-00001.parquet b/udpos.Spanish/validation-00000-of-00001.parquet deleted file mode 100644 index 68b92351e906e8b33a2cc6311b8568250fd635ab..0000000000000000000000000000000000000000 --- a/udpos.Spanish/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8d64d9642ecd648e32afb6f377361aa24b04b387fa3bace4297ac07eac674f3e -size 394725 diff --git a/udpos.Tagalog/test-00000-of-00001.parquet b/udpos.Tagalog/test-00000-of-00001.parquet deleted file mode 100644 index f74e1da75eb91318883836c29b05055262f40f93..0000000000000000000000000000000000000000 --- a/udpos.Tagalog/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf5856da6aa0325533ee43de9663eb63d4489bcc65526bd4a4b5386c3c38fb63 -size 3345 diff --git a/udpos.Tamil/test-00000-of-00001.parquet b/udpos.Tamil/test-00000-of-00001.parquet deleted file mode 100644 index f64f01ba3f65866527d27271feda86bb66b4d181..0000000000000000000000000000000000000000 --- a/udpos.Tamil/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:57eb3c9f44854f40af7106c0fa84705d1d41518ea6e50c800251f4d3a1af33bf -size 16443 diff --git a/udpos.Tamil/train-00000-of-00001.parquet b/udpos.Tamil/train-00000-of-00001.parquet deleted file mode 100644 index 5eaf1d9080da7fb8f61ad8f0eaf5f4cf3a3b0e25..0000000000000000000000000000000000000000 --- a/udpos.Tamil/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:44049afbf1a7f65d27e7ac78b94d26e1038df993f9bdd65e351b9f701ad51deb -size 45030 diff --git a/udpos.Tamil/validation-00000-of-00001.parquet b/udpos.Tamil/validation-00000-of-00001.parquet deleted file mode 100644 index a28f6d867aa900667d49403051e78159c239e347..0000000000000000000000000000000000000000 --- a/udpos.Tamil/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5a863cdb72e40aae39db72e113141e4caea865563a0d2d5e9a80fd3925b6a705 -size 12291 diff --git a/udpos.Telugu/test-00000-of-00001.parquet b/udpos.Telugu/test-00000-of-00001.parquet deleted file mode 100644 index 8affa88ee5072e51118a9ffd2dae4089f68ce885..0000000000000000000000000000000000000000 --- a/udpos.Telugu/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1be1475cbe152ea2ef3c5356afcdd805b03a912dc9ef4f6f9e6b74d8c3c7d6c1 -size 7370 diff --git a/udpos.Telugu/train-00000-of-00001.parquet b/udpos.Telugu/train-00000-of-00001.parquet deleted file mode 100644 index cf157251c35ab047f3fa7649c114ec610f239ea1..0000000000000000000000000000000000000000 --- a/udpos.Telugu/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:28e2225b7cf52df96af98ff347ab92c18256e854f99cdb526e711cd59a003f68 -size 
31720 diff --git a/udpos.Telugu/validation-00000-of-00001.parquet b/udpos.Telugu/validation-00000-of-00001.parquet deleted file mode 100644 index a34c63b7b90dce0149749e18c013d1a0401cb9bd..0000000000000000000000000000000000000000 --- a/udpos.Telugu/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:976dd6c11ceae4700f69281c8cc985fbef82f760515b0d97d8483b682f8a2477 -size 6955 diff --git a/udpos.Thai/test-00000-of-00001.parquet b/udpos.Thai/test-00000-of-00001.parquet deleted file mode 100644 index 9d3ceb9984eabab9bce46b517a1426d14d0daccf..0000000000000000000000000000000000000000 --- a/udpos.Thai/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6c581543ff2b2717917a887497a178e5594872bca6ef597f2df416a28992f4c5 -size 92925 diff --git a/udpos.Turkish/test-00000-of-00001.parquet b/udpos.Turkish/test-00000-of-00001.parquet deleted file mode 100644 index 2045e9549bafb57a35638103c7d3592904692056..0000000000000000000000000000000000000000 --- a/udpos.Turkish/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:16b893cf2b78b94a8cd4a4c805af33c3069002fd530424698f8867ca03b170cc -size 268046 diff --git a/udpos.Turkish/train-00000-of-00001.parquet b/udpos.Turkish/train-00000-of-00001.parquet deleted file mode 100644 index f64d49e782589e5f6fe688640bbf4f55f7169b46..0000000000000000000000000000000000000000 --- a/udpos.Turkish/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a1fb7c1956831b8009060745c467a285fc4a919cb694c3a8ba062dadf42edbe3 -size 247349 diff --git a/udpos.Turkish/validation-00000-of-00001.parquet b/udpos.Turkish/validation-00000-of-00001.parquet deleted file mode 100644 index f3aad12da71ff27198f6b79746b89e9761471604..0000000000000000000000000000000000000000 --- a/udpos.Turkish/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:23da6395e0d1e0df424acb1eedcbcac1fcde254c1848137c8ab48def5613c953 -size 65782 diff --git a/udpos.Urdu/test-00000-of-00001.parquet b/udpos.Urdu/test-00000-of-00001.parquet deleted file mode 100644 index 258152a116b371763a5eee5ad270b87e5646acf3..0000000000000000000000000000000000000000 --- a/udpos.Urdu/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ccbaa83129cee670c9e3eab016cea05d12813d47be17036983c0af92fa806257 -size 56682 diff --git a/udpos.Urdu/train-00000-of-00001.parquet b/udpos.Urdu/train-00000-of-00001.parquet deleted file mode 100644 index f7621022519f03a38de4b9c362afbc1b5f962475..0000000000000000000000000000000000000000 --- a/udpos.Urdu/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1dfe4a08718b50e84174d8920c4190e77652051dfea57d51c0c6d9b2c3f26b8f -size 386898 diff --git a/udpos.Urdu/validation-00000-of-00001.parquet b/udpos.Urdu/validation-00000-of-00001.parquet deleted file mode 100644 index 9281fd3d340d83be9d5832e7d2b095135851a180..0000000000000000000000000000000000000000 --- a/udpos.Urdu/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ce614ae721fb295948b4151a333b455de578be8ac266316aff8c88a73412f8bb -size 56014 diff --git a/udpos.Vietnamese/test-00000-of-00001.parquet b/udpos.Vietnamese/test-00000-of-00001.parquet deleted file mode 100644 
index ffab2d663697725c992dd00527a38a577ae34e07..0000000000000000000000000000000000000000 --- a/udpos.Vietnamese/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:78f30f2f0c7a60ae27962b39f359c347860e424f78c4ecd358e41d8f6063ef52 -size 50373 diff --git a/udpos.Vietnamese/train-00000-of-00001.parquet b/udpos.Vietnamese/train-00000-of-00001.parquet deleted file mode 100644 index 4c1737a34212a1054ea32a25b76e8b8254be0909..0000000000000000000000000000000000000000 --- a/udpos.Vietnamese/train-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2ac2b66a51d87510c499ac98fecc4a556d0e3e5d80bb8134f5c4a36a8a56f8fe -size 83589 diff --git a/udpos.Vietnamese/validation-00000-of-00001.parquet b/udpos.Vietnamese/validation-00000-of-00001.parquet deleted file mode 100644 index 69daea33cc4b090e3c806c7a2e4836899d98c6ea..0000000000000000000000000000000000000000 --- a/udpos.Vietnamese/validation-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a9a44bcd5e274670ae218efd0b2d55d996238501963fcdc866197e992beed223 -size 47277 diff --git a/udpos.Yoruba/test-00000-of-00001.parquet b/udpos.Yoruba/test-00000-of-00001.parquet deleted file mode 100644 index 41bbcdeba9e8d4b6d6ead68c376e8a6024707d20..0000000000000000000000000000000000000000 --- a/udpos.Yoruba/test-00000-of-00001.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:82bb90dc1176f8d8545c8caf56fc4a0e867a3eca36803ec637993f2ad67ab8b9 -size 10151 diff --git a/xtreme.py b/xtreme.py new file mode 100644 index 0000000000000000000000000000000000000000..6a16fff932ccbca2c0c6cd95c9f074cbb21c218d --- /dev/null +++ b/xtreme.py @@ -0,0 +1,940 @@ +"""XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization.""" + + +import csv +import json +import os +import textwrap + +import datasets + + +# BibTeX citation for the XTREME benchmark paper +_CITATION = """\ +@article{hu2020xtreme, + author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson}, + title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization}, + journal = {CoRR}, + volume = {abs/2003.11080}, + year = {2020}, + archivePrefix = {arXiv}, + eprint = {2003.11080} +} +""" + +# Description of the overall benchmark +_DESCRIPTION = """\ +The Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark evaluates the cross-lingual +generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages +(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of +syntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks, +and availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil +(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the +Niger-Congo languages Swahili and Yoruba, spoken in Africa.
+""" +_MLQA_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"] +_XQUAD_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi", "el", "ru", "th", "tr"] +_PAWSX_LANG = ["de", "en", "es", "fr", "ja", "ko", "zh"] +_BUCC_LANG = ["de", "fr", "zh", "ru"] +_TATOEBA_LANG = [ + "afr", + "ara", + "ben", + "bul", + "deu", + "cmn", + "ell", + "est", + "eus", + "fin", + "fra", + "heb", + "hin", + "hun", + "ind", + "ita", + "jav", + "jpn", + "kat", + "kaz", + "kor", + "mal", + "mar", + "nld", + "pes", + "por", + "rus", + "spa", + "swh", + "tam", + "tel", + "tgl", + "tha", + "tur", + "urd", + "vie", +] + +_UD_POS_LANG = [ + "Afrikaans", + "Arabic", + "Basque", + "Bulgarian", + "Dutch", + "English", + "Estonian", + "Finnish", + "French", + "German", + "Greek", + "Hebrew", + "Hindi", + "Hungarian", + "Indonesian", + "Italian", + "Japanese", + "Kazakh", + "Korean", + "Chinese", + "Marathi", + "Persian", + "Portuguese", + "Russian", + "Spanish", + "Tagalog", + "Tamil", + "Telugu", + "Thai", + "Turkish", + "Urdu", + "Vietnamese", + "Yoruba", +] +_PAN_X_LANG = [ + "af", + "ar", + "bg", + "bn", + "de", + "el", + "en", + "es", + "et", + "eu", + "fa", + "fi", + "fr", + "he", + "hi", + "hu", + "id", + "it", + "ja", + "jv", + "ka", + "kk", + "ko", + "ml", + "mr", + "ms", + "my", + "nl", + "pt", + "ru", + "sw", + "ta", + "te", + "th", + "tl", + "tr", + "ur", + "vi", + "yo", + "zh", +] + +_NAMES = ["XNLI", "tydiqa", "SQuAD"] +for lang in _PAN_X_LANG: + _NAMES.append(f"PAN-X.{lang}") +for lang1 in _MLQA_LANG: + for lang2 in _MLQA_LANG: + _NAMES.append(f"MLQA.{lang1}.{lang2}") +for lang in _XQUAD_LANG: + _NAMES.append(f"XQuAD.{lang}") +for lang in _BUCC_LANG: + _NAMES.append(f"bucc18.{lang}") +for lang in _PAWSX_LANG: + _NAMES.append(f"PAWS-X.{lang}") +for lang in _TATOEBA_LANG: + _NAMES.append(f"tatoeba.{lang}") +for lang in _UD_POS_LANG: + _NAMES.append(f"udpos.{lang}") + +_DESCRIPTIONS = { + "tydiqa": textwrap.dedent( + """Gold passage task (GoldP): Given a passage that is guaranteed to contain the + answer, predict the single contiguous span of characters that answers the question. This is more similar to + existing reading comprehension datasets (as opposed to the information-seeking task outlined above). + This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing + a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1, + XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways: + only the gold answer passage is provided rather than the entire Wikipedia article; + unanswerable questions have been discarded, similar to MLQA and XQuAD; + we evaluate with the SQuAD 1.1 metrics like XQuAD; and + Thai and Japanese are removed since the lack of whitespace breaks some tools. + """ + ), + "XNLI": textwrap.dedent( + """ + The Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and + 2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into + 14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, + Hindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the + corresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. 
The corpus is made to + evaluate how to perform inference in any language (including low-resource ones like Swahili or Urdu) when only + English NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI + is an evaluation benchmark.""" + ), + "PAWS-X": textwrap.dedent( + """ + This dataset contains 23,659 human-translated PAWS evaluation pairs and 296,406 machine-translated training + pairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All + translated pairs are sourced from examples in PAWS-Wiki.""" + ), + "XQuAD": textwrap.dedent( + """\ + XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question + answering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from + the development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into + ten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently, + the dataset is entirely parallel across 11 languages.""" + ), + "MLQA": textwrap.dedent( + """\ + MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance. + MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic, + German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between + 4 different languages on average.""" + ), + "tatoeba": textwrap.dedent( + """\ + This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17. + For each language, we have selected 1000 English sentences and their translations, if available. Please check + the LASER paper (Artetxe and Schwenk, 2019) for a description of the languages, their families and scripts as well as baseline results. + Please note that the English sentences are not identical for all language pairs. This means that the results are + not directly comparable across languages. In particular, the sentences tend to have less variety for several + low-resource languages, e.g. "Tom needed water", "Tom needs water", "Tom is getting water", ... + """ + ), + "bucc18": textwrap.dedent( + """BUCC 2018 shared task (Building and Using Comparable Corpora): identify pairs of parallel sentences in comparable corpora. + """ + ), + "udpos": textwrap.dedent( + """\ + Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological + features, and syntactic dependencies) across different human languages. UD is an open community effort with over 200 + contributors producing more than 100 treebanks in over 70 languages. If you’re new to UD, you should start by reading + the first part of the Short Introduction and then browsing the annotation guidelines. + """ + ), + "SQuAD": textwrap.dedent( + """\ + Stanford Question Answering Dataset (SQuAD) is a reading comprehension \ + dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \ + articles, where the answer to every question is a segment of text, or span, \ + from the corresponding reading passage, or the question might be unanswerable.""" + ), + "PAN-X": textwrap.dedent( + """\ + The WikiANN dataset (Pan et al. 2017) provides NER annotations for PER, ORG and LOC. It has been + constructed using the linked entities in Wikipedia pages for 282 different languages."""
+ ), +} +_CITATIONS = { + "tydiqa": textwrap.dedent( + """\ + @article{tydiqa, + title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages}, + author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}, + year = {2020}, + journal = {Transactions of the Association for Computational Linguistics} + }""" + ), + "XNLI": textwrap.dedent( + """\ + @InProceedings{conneau2018xnli, + author = {Conneau, Alexis + and Rinott, Ruty + and Lample, Guillaume + and Williams, Adina + and Bowman, Samuel R. + and Schwenk, Holger + and Stoyanov, Veselin}, + title = {XNLI: Evaluating Cross-lingual Sentence Representations}, + booktitle = {Proceedings of the 2018 Conference on Empirical Methods + in Natural Language Processing}, + year = {2018}, + publisher = {Association for Computational Linguistics}, + location = {Brussels, Belgium}, + }""" + ), + "XQuAD": textwrap.dedent( + """ + @article{Artetxe:etal:2019, + author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama}, + title = {On the cross-lingual transferability of monolingual representations}, + journal = {CoRR}, + volume = {abs/1910.11856}, + year = {2019}, + archivePrefix = {arXiv}, + eprint = {1910.11856} + } + """ + ), + "MLQA": textwrap.dedent( + """\ + @article{lewis2019mlqa, + title={MLQA: Evaluating Cross-lingual Extractive Question Answering}, + author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger}, + journal={arXiv preprint arXiv:1910.07475}, + year={2019} + }""" + ), + "PAWS-X": textwrap.dedent( + """\ + @InProceedings{pawsx2019emnlp, + title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}}, + author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason}, + booktitle = {Proc.
of EMNLP}, + year = {2019} + }""" + ), + "tatoeba": textwrap.dedent( + """\ + @article{tatoeba, + title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond}, + author={Artetxe, Mikel and Schwenk, Holger}, + journal={arXiv:1812.10464v2}, + year={2018} + }""" + ), + "bucc18": textwrap.dedent(""""""), + "udpos": textwrap.dedent(""""""), + "SQuAD": textwrap.dedent( + """\ + @article{2016arXiv160605250R, + author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, + Konstantin and {Liang}, Percy}, + title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", + journal = {arXiv e-prints}, + year = 2016, + eid = {arXiv:1606.05250}, + pages = {arXiv:1606.05250}, + archivePrefix = {arXiv}, + eprint = {1606.05250}, +}""" + ), + "PAN-X": textwrap.dedent( + """\ + @inproceedings{pan-x, + title={Cross-lingual Name Tagging and Linking for 282 Languages}, + author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng}, + booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + year={2017} + }""" + ), +} + +_TEXT_FEATURES = { + "XNLI": { + "language": "language", + "sentence1": "sentence1", + "sentence2": "sentence2", + }, + "tydiqa": { + "id": "id", + "title": "title", + "context": "context", + "question": "question", + "answers": "answers", + }, + "XQuAD": { + "id": "id", + "context": "context", + "question": "question", + "answers": "answers", + }, + "MLQA": { + "id": "id", + "title": "title", + "context": "context", + "question": "question", + "answers": "answers", + }, + "tatoeba": { + "source_sentence": "", + "target_sentence": "", + "source_lang": "", + "target_lang": "", + }, + "bucc18": { + "source_sentence": "", + "target_sentence": "", + "source_lang": "", + "target_lang": "", + }, + "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"}, + "udpos": {"tokens": "", "pos_tags": ""}, + "SQuAD": { + "id": "id", + "title": "title", + "context": "context", + "question": "question", + "answers": "answers", + }, + "PAN-X": {"tokens": "", "ner_tags": "", "lang": ""}, +} +_DATA_URLS = { + "tydiqa": "https://storage.googleapis.com/tydiqa/", + "XNLI": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip", + "XQuAD": "https://github.com/deepmind/xquad/raw/master/", + "MLQA": "https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip", + "PAWS-X": "https://storage.googleapis.com/paws/pawsx/x-final.tar.gz", + "bucc18": "https://comparable.limsi.fr/bucc2018/", + "tatoeba": "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/", + "udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz", + "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/", + "PAN-X": "https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip", +} + +_URLS = { + "tydiqa": "https://github.com/google-research-datasets/tydiqa", + "XQuAD": "https://github.com/deepmind/xquad", + "XNLI": "https://www.nyu.edu/projects/bowman/xnli/", + "MLQA": "https://github.com/facebookresearch/MLQA", + "PAWS-X": "https://github.com/google-research-datasets/paws/tree/master/pawsx", + "bucc18": "https://comparable.limsi.fr/bucc2018/", + "tatoeba": "https://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md", + "udpos": "https://universaldependencies.org/", + "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/", + "PAN-X": "https://github.com/afshinrahimi/mmner",
+} + + +class XtremeConfig(datasets.BuilderConfig): + """BuilderConfig for XTREME.""" + + def __init__(self, data_url, citation, url, text_features, **kwargs): + """ + Args: + data_url: `string`, URL from which to download the raw task data + citation: `string`, BibTeX citation for the underlying task dataset + url: `string`, homepage of the underlying task dataset + text_features: `dict[string, string]`, map from the name of the feature + dict for each text field to the name of the column in the tsv file + **kwargs: keyword arguments forwarded to super. + """ + super(XtremeConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs) + self.text_features = text_features + self.data_url = data_url + self.citation = citation + self.url = url + + +class Xtreme(datasets.GeneratorBasedBuilder): + """Builder for the XTREME benchmark: one configuration per task/language combination in _NAMES.""" + + VERSION = datasets.Version("0.1.0") + BUILDER_CONFIGS = [ + XtremeConfig( + name=name, + description=_DESCRIPTIONS[name.split(".")[0]], + citation=_CITATIONS[name.split(".")[0]], + text_features=_TEXT_FEATURES[name.split(".")[0]], + data_url=_DATA_URLS[name.split(".")[0]], + url=_URLS[name.split(".")[0]], + ) + for name in _NAMES + ] + + def _info(self): + features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()} + if "answers" in features.keys(): + features["answers"] = datasets.features.Sequence( + { + "answer_start": datasets.Value("int32"), + "text": datasets.Value("string"), + } + ) + if self.config.name.startswith("PAWS-X"): + features = PawsxParser.features + elif self.config.name == "XNLI": + features["gold_label"] = datasets.Value("string") + elif self.config.name.startswith("udpos"): + features = UdposParser.features + elif self.config.name.startswith("PAN-X"): + features = PanxParser.features + return datasets.DatasetInfo( + # This is the description that will appear on the datasets page. + description=self.config.description + "\n" + _DESCRIPTION, + features=datasets.Features(features), + # If there's a common (input, target) tuple from the features, + # specify them here. They'll be used if as_supervised=True in + # builder.as_dataset.
+ supervised_keys=None, + # Homepage of the dataset for documentation + homepage="https://github.com/google-research/xtreme" + "\t" + self.config.url, + citation=self.config.citation + "\n" + _CITATION, + ) + + def _split_generators(self, dl_manager): + """Returns SplitGenerators.""" + if self.config.name == "tydiqa": + train_url = "v1.1/tydiqa-goldp-v1.1-train.json" + dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json" + urls_to_download = { + "train": self.config.data_url + train_url, + "dev": self.config.data_url + dev_url, + } + dl_dir = dl_manager.download_and_extract(urls_to_download) + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + # These kwargs will be passed to _generate_examples + gen_kwargs={"filepath": dl_dir["train"]}, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + # These kwargs will be passed to _generate_examples + gen_kwargs={"filepath": dl_dir["dev"]}, + ), + ] + if self.config.name == "XNLI": + dl_dir = dl_manager.download_and_extract(self.config.data_url) + data_dir = os.path.join(dl_dir, "XNLI-1.0") + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")}, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")}, + ), + ] + + if self.config.name.startswith("MLQA"): + mlqa_downloaded_files = dl_manager.download_and_extract(self.config.data_url) + l1 = self.config.name.split(".")[1] + l2 = self.config.name.split(".")[2] + return [ + datasets.SplitGenerator( + name=datasets.Split.TEST, + # These kwargs will be passed to _generate_examples + gen_kwargs={ + "filepath": os.path.join( + os.path.join(mlqa_downloaded_files, "MLQA_V1/test"), + f"test-context-{l1}-question-{l2}.json", + ) + }, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + # These kwargs will be passed to _generate_examples + gen_kwargs={ + "filepath": os.path.join( + os.path.join(mlqa_downloaded_files, "MLQA_V1/dev"), + f"dev-context-{l1}-question-{l2}.json", + ) + }, + ), + ] + + if self.config.name.startswith("XQuAD"): + lang = self.config.name.split(".")[1] + xquad_downloaded_file = dl_manager.download_and_extract(self.config.data_url + f"xquad.{lang}.json") + return [ + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + # These kwargs will be passed to _generate_examples + gen_kwargs={"filepath": xquad_downloaded_file}, + ), + ] + if self.config.name.startswith("PAWS-X"): + return PawsxParser.split_generators(dl_manager=dl_manager, config=self.config) + elif self.config.name.startswith("tatoeba"): + lang = self.config.name.split(".")[1] + + tatoeba_source_data = dl_manager.download_and_extract(self.config.data_url + f"tatoeba.{lang}-eng.{lang}") + tatoeba_eng_data = dl_manager.download_and_extract(self.config.data_url + f"tatoeba.{lang}-eng.eng") + return [ + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + # These kwargs will be passed to _generate_examples + gen_kwargs={"filepath": (tatoeba_source_data, tatoeba_eng_data)}, + ), + ] + if self.config.name.startswith("bucc18"): + lang = self.config.name.split(".")[1] + bucc18_dl_test_archive = dl_manager.download( + self.config.data_url + f"bucc2018-{lang}-en.training-gold.tar.bz2" + ) + bucc18_dl_dev_archive = dl_manager.download( + self.config.data_url + f"bucc2018-{lang}-en.sample-gold.tar.bz2" + ) + return [ + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={"filepath": 
dl_manager.iter_archive(bucc18_dl_dev_archive)}, + ), + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={"filepath": dl_manager.iter_archive(bucc18_dl_test_archive)}, + ), + ] + if self.config.name.startswith("udpos"): + return UdposParser.split_generators(dl_manager=dl_manager, config=self.config) + + if self.config.name == "SQuAD": + urls_to_download = { + "train": self.config.data_url + "train-v1.1.json", + "dev": self.config.data_url + "dev-v1.1.json", + } + downloaded_files = dl_manager.download_and_extract(urls_to_download) + + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={"filepath": downloaded_files["train"]}, + ), + datasets.SplitGenerator( + name=datasets.Split.VALIDATION, + gen_kwargs={"filepath": downloaded_files["dev"]}, + ), + ] + + if self.config.name.startswith("PAN-X"): + return PanxParser.split_generators(dl_manager=dl_manager, config=self.config) + + def _generate_examples(self, filepath=None, **kwargs): + """Yields (key, example) tuples for the selected config.""" + + if self.config.name == "tydiqa" or self.config.name.startswith("MLQA") or self.config.name == "SQuAD": + with open(filepath, encoding="utf-8") as f: + data = json.load(f) + for article in data["data"]: + title = article.get("title", "").strip() + for paragraph in article["paragraphs"]: + context = paragraph["context"].strip() + for qa in paragraph["qas"]: + question = qa["question"].strip() + id_ = qa["id"] + + answer_starts = [answer["answer_start"] for answer in qa["answers"]] + answers = [answer["text"].strip() for answer in qa["answers"]] + + # Features currently used are "context", "question", and "answers". + # Others are extracted here for ease of future expansion. + yield id_, { + "title": title, + "context": context, + "question": question, + "id": id_, + "answers": { + "answer_start": answer_starts, + "text": answers, + }, + } + if self.config.name == "XNLI": + with open(filepath, encoding="utf-8") as f: + data = csv.DictReader(f, delimiter="\t") + for id_, row in enumerate(data): + yield id_, { + "sentence1": row["sentence1"], + "sentence2": row["sentence2"], + "language": row["language"], + "gold_label": row["gold_label"], + } + if self.config.name.startswith("PAWS-X"): + yield from PawsxParser.generate_examples(config=self.config, filepath=filepath, **kwargs) + if self.config.name.startswith("XQuAD"): + with open(filepath, encoding="utf-8") as f: + xquad = json.load(f) + for article in xquad["data"]: + for paragraph in article["paragraphs"]: + context = paragraph["context"].strip() + for qa in paragraph["qas"]: + question = qa["question"].strip() + id_ = qa["id"] + + answer_starts = [answer["answer_start"] for answer in qa["answers"]] + answers = [answer["text"].strip() for answer in qa["answers"]] + + # Features currently used are "context", "question", and "answers". + # Others are extracted here for ease of future expansion.
+ yield id_, { + "context": context, + "question": question, + "id": id_, + "answers": { + "answer_start": answer_starts, + "text": answers, + }, + } + if self.config.name.startswith("bucc18"): + lang = self.config.name.split(".")[1] + data_dir = f"bucc2018/{lang}-en" + for path, file in filepath: + if path.startswith(data_dir): + csv_content = [line.decode("utf-8") for line in file] + if path.endswith("en"): + target_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None))) + elif path.endswith("gold"): + source_target_ids = list(csv.reader(csv_content, delimiter="\t", quotechar=None)) + else: + source_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None))) + + for id_, (source_id, target_id) in enumerate(source_target_ids): + yield id_, { + "source_sentence": source_sentences[source_id], + "target_sentence": target_sentences[target_id], + "source_lang": source_id, + "target_lang": target_id, + } + if self.config.name.startswith("tatoeba"): + source_file = filepath[0] + target_file = filepath[1] + source_sentences = [] + target_sentences = [] + with open(source_file, encoding="utf-8") as f1: + for row in f1: + source_sentences.append(row) + with open(target_file, encoding="utf-8") as f2: + for row in f2: + target_sentences.append(row) + for i in range(len(source_sentences)): + yield i, { + "source_sentence": source_sentences[i], + "target_sentence": target_sentences[i], + "source_lang": source_file.split(".")[-1], + "target_lang": "eng", + } + if self.config.name.startswith("udpos"): + yield from UdposParser.generate_examples(config=self.config, filepath=filepath, **kwargs) + if self.config.name.startswith("PAN-X"): + yield from PanxParser.generate_examples(filepath=filepath, **kwargs) + + +class PanxParser: + + features = datasets.Features( + { + "tokens": datasets.Sequence(datasets.Value("string")), + "ner_tags": datasets.Sequence( + datasets.features.ClassLabel( + names=[ + "O", + "B-PER", + "I-PER", + "B-ORG", + "I-ORG", + "B-LOC", + "I-LOC", + ] + ) + ), + "langs": datasets.Sequence(datasets.Value("string")), + } + ) + + @staticmethod + def split_generators(dl_manager=None, config=None): + data_dir = dl_manager.download_and_extract(config.data_url) + lang = config.name.split(".")[1] + archive = os.path.join(data_dir, lang + ".tar.gz") + split_filenames = { + datasets.Split.TRAIN: "train", + datasets.Split.VALIDATION: "dev", + datasets.Split.TEST: "test", + } + return [ + datasets.SplitGenerator( + name=split, + gen_kwargs={ + "filepath": dl_manager.iter_archive(archive), + "filename": split_filenames[split], + }, + ) + for split in split_filenames + ] + + @staticmethod + def generate_examples(filepath=None, filename=None): + idx = 1 + for path, file in filepath: + if path.endswith(filename): + tokens = [] + ner_tags = [] + langs = [] + for line in file: + line = line.decode("utf-8") + if line == "" or line == "\n": + if tokens: + yield idx, { + "tokens": tokens, + "ner_tags": ner_tags, + "langs": langs, + } + idx += 1 + tokens = [] + ner_tags = [] + langs = [] + else: + # pan-x data is tab separated + splits = line.split("\t") + # strip out en: prefix + langs.append(splits[0][:2]) + tokens.append(splits[0][3:]) + if len(splits) > 1: + ner_tags.append(splits[-1].replace("\n", "")) + else: + # examples have no label in test set + ner_tags.append("O") + if tokens: + yield idx, { + "tokens": tokens, + "ner_tags": ner_tags, + "langs": langs, + } + + +class PawsxParser: + + features = datasets.Features( + { + "sentence1": 
datasets.Value("string"), + "sentence2": datasets.Value("string"), + "label": datasets.Value("string"), + } + ) + + @staticmethod + def split_generators(dl_manager=None, config=None): + lang = config.name.split(".")[1] + archive = dl_manager.download(config.data_url) + split_filenames = { + datasets.Split.TRAIN: "translated_train.tsv" if lang != "en" else "train.tsv", + datasets.Split.VALIDATION: "dev_2k.tsv", + datasets.Split.TEST: "test_2k.tsv", + } + return [ + datasets.SplitGenerator( + name=split, + gen_kwargs={"filepath": dl_manager.iter_archive(archive), "filename": split_filenames[split]}, + ) + for split in split_filenames + ] + + @staticmethod + def generate_examples(config=None, filepath=None, filename=None): + lang = config.name.split(".")[1] + for path, file in filepath: + if f"/{lang}/" in path and path.endswith(filename): + lines = (line.decode("utf-8") for line in file) + data = csv.reader(lines, delimiter="\t") + next(data) # skip header + for id_, row in enumerate(data): + if len(row) == 4: + yield id_, { + "sentence1": row[1], + "sentence2": row[2], + "label": row[3], + } + + +class UdposParser: + + features = datasets.Features( + { + "tokens": datasets.Sequence(datasets.Value("string")), + "pos_tags": datasets.Sequence( + datasets.features.ClassLabel( + names=[ + "ADJ", + "ADP", + "ADV", + "AUX", + "CCONJ", + "DET", + "INTJ", + "NOUN", + "NUM", + "PART", + "PRON", + "PROPN", + "PUNCT", + "SCONJ", + "SYM", + "VERB", + "X", + ] + ) + ), + } + ) + + @staticmethod + def split_generators(dl_manager=None, config=None): + archive = dl_manager.download(config.data_url) + split_names = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"} + split_generators = { + split: datasets.SplitGenerator( + name=split, + gen_kwargs={ + "filepath": dl_manager.iter_archive(archive), + "split": split_names[split], + }, + ) + for split in split_names + } + lang = config.name.split(".")[1] + if lang in ["Tagalog", "Thai", "Yoruba"]: + return [split_generators["test"]] + elif lang == "Kazakh": + return [split_generators["train"], split_generators["test"]] + else: + return [split_generators["train"], split_generators["validation"], split_generators["test"]] + + @staticmethod + def generate_examples(config=None, filepath=None, split=None): + lang = config.name.split(".")[1] + idx = 0 + for path, file in filepath: + if f"_{lang}" in path and split in path and path.endswith(".conllu"): + # For lang other than [see below], we exclude Arabic-NYUAD which does not contains any words, only _ + if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path: + lines = (line.decode("utf-8") for line in file) + data = csv.reader(lines, delimiter="\t", quoting=csv.QUOTE_NONE) + tokens = [] + pos_tags = [] + for id_row, row in enumerate(data): + if len(row) >= 10 and row[1] != "_" and row[3] != "_": + tokens.append(row[1]) + pos_tags.append(row[3]) + if len(row) == 0 and len(tokens) > 0: + yield idx, { + "tokens": tokens, + "pos_tags": pos_tags, + } + idx += 1 + tokens = [] + pos_tags = []