Commit 4269340
Parent(s):
Update files from the datasets library (from 1.0.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
- .gitattributes +27 -0
- dataset_infos.json +0 -0
- dummy/MLQA.en.ar/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.de/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.en/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.es/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.hi/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.vi/1.0.0/dummy_data.zip +3 -0
- dummy/MLQA.en.zh/1.0.0/dummy_data.zip +3 -0
- dummy/XNLI/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.ar/1.0.0/dummy_data-zip-extracted/dummy_data/xquad.ar.json +54 -0
- dummy/XQuAD.ar/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.de/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.el/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.en/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.hi/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.ru/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.th/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.tr/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.vi/1.0.0/dummy_data.zip +3 -0
- dummy/XQuAD.zh/1.0.0/dummy_data.zip +3 -0
- dummy/XQuADes/1.0.0/dummy_data.zip +3 -0
- dummy/tydiqa/1.0.0/dummy_data.zip +3 -0
- xtreme.py +913 -0
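
The xtreme.py script added below builds one configuration per task (and per language or language pair) from its _NAMES list. As a rough illustration of how such a script is typically consumed once the repository is live (not part of this commit), a config can be loaded through the datasets API; the config name, split, and printed field below are only examples:

from datasets import load_dataset

# "MLQA.en.de" is one of the config names generated in xtreme.py; any entry of
# _NAMES (e.g. "XNLI", "XQuAD.ar", "tydiqa") can be substituted.
mlqa = load_dataset("xtreme", "MLQA.en.de")

# The MLQA configs in this script only define test and validation splits.
print(mlqa["validation"][0]["question"])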
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json
ADDED
The diff for this file is too large to render.
dummy/MLQA.en.ar/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:990692ce4bfc8d381f355deb5ea48b5fb86e0e9b2d9309d1f425444ee1e20429
+size 2736
dummy/MLQA.en.de/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79351f97b800279f218f9a27b7134a47f4db7a42509ddb4eb3c7f05a0c8841a0
+size 2806
dummy/MLQA.en.en/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b364bf29b16f4f3888b432c0bba0d33f21eaa804cea41be5d2721f42c6c04c7
+size 2650
dummy/MLQA.en.es/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:176d98d301de90a03da7b5dec2c5524ab1ba11e6fdce38ee1402f27702b68577
+size 2678
dummy/MLQA.en.hi/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b60ccd3cc2c1d93631fafc3fed06442ecf5ca91f57a3b0eef8322d9bfa9c0ac4
+size 3138
dummy/MLQA.en.vi/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e0e98d15adbeff967f8471bfead318dfdf4b6bf45a44a2785c2cfda63c1f2d1
+size 2158
dummy/MLQA.en.zh/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b72beb6d43b8e2de8e960fb3b872aee5345d5cf9cfbf69eaa9d8a80ba02069be
+size 2558
dummy/XNLI/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a7d892ba5e1bc03f8d213b9425f7e162a893f99cb5514285ceb2332304d0808
+size 3734
dummy/XQuAD.ar/1.0.0/dummy_data-zip-extracted/dummy_data/xquad.ar.json
ADDED
@@ -0,0 +1,54 @@
+{
+    "data": [
+        {
+            "paragraphs": [
+                {
+                    "context": "Die Verteidigung der Panthers gab nur 308 Punkte ab und belegte den sechsten Platz in der Liga, w\u00e4hrend sie die NFL mit 24 Interceptions in dieser Kategorie anf\u00fchrte und sich mit vier Pro Bowl-Selektionen r\u00fchmen konnte. Pro Bowl Defensive Tackle Kawann Short f\u00fchrte das Team mit 11 Sacks an, erzwang zudem drei Fumbles und erzielte zwei Fumble Recoverys. Mario Addison, ebenfalls Lineman, addierte 6\u00bd Sacks hinzu. Die Panthers-Line pr\u00e4sentierte auch den erfahrenen Defensive End Jared Allen, einen 5-fachen Pro-Bowler, der mit 136 Sacks der aktive Anf\u00fchrer in der NFL-Kategorie Karriere-Sacks war, sowie den Defensive End Kony Ealy, der 5 Sacks in nur 9 Starts erzielte. Nach ihnen wurden zwei der drei Linebacker der Panthers ausgew\u00e4hlt, um im Pro Bowl zu spielen: Thomas Davis und Luke Kuechly. Davis erzielte 5\u00bd Sacks, vier erzwungene Fumbles und vier Interceptions, w\u00e4hrend Kuechly das Team bei den Tackles anf\u00fchrte (118), zwei Fumbles erzwang und vier P\u00e4sse abfing. Carolinas Secondarys bestanden aus dem Pro Bowl-Safety Kurt Coleman, der das Team mit einem Karrierehoch von sieben Interceptions anf\u00fchrte und gleichzeitig 88 Tackles erzielen konnte, und Pro Bowl-Cornerback Josh Norman, der sich w\u00e4hrend der Saison zur Shutdown Corner entwickelte und vier Interceptions erzielte, von denen zwei zu Touchdowns f\u00fcr sein Team wurden.",
+                    "qas": [
+                        {
+                            "answers": [
+                                {
+                                    "answer_start": 38,
+                                    "text": "308"
+                                }
+                            ],
+                            "id": "56beb4343aeaaa14008c925b",
+                            "question": "Wie viele Punkte gab die Verteidigung der Panthers ab?"
+                        },
+                        {
+                            "answers": [
+                                {
+                                    "answer_start": 527,
+                                    "text": "136"
+                                }
+                            ],
+                            "id": "56beb4343aeaaa14008c925c",
+                            "question": "Wie viele Sacks erzielte Jared Allen in seiner Karriere?"
+                        },
+                        {
+                            "answers": [
+                                {
+                                    "answer_start": 921,
+                                    "text": "118"
+                                }
+                            ],
+                            "id": "56beb4343aeaaa14008c925d",
+                            "question": "Wie viele Tackles wurden bei Luke Kuechly registriert?"
+                        },
+                        {
+                            "answers": [
+                                {
+                                    "answer_start": 179,
+                                    "text": "vier"
+                                }
+                            ],
+                            "id": "56beb4343aeaaa14008c925e",
+                            "question": "Wie viele B\u00e4lle fing Josh Norman ab?"
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
dummy/XQuAD.ar/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbb0d35654aa78e61707be84a4eadd523d5e328f4c0a4cc4508eb355ac2dd8f0
+size 1303
dummy/XQuAD.de/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c93c0084328f990580dbe8721455748d72745bb1015e747eab8031a523981391
+size 1303
dummy/XQuAD.el/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeb25f02c5536e1cafc92764245c91b5a365deb6016a943695bac02b17f568ed
+size 1303
dummy/XQuAD.en/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cdbcfabbfe70a3e3cdcfe162b982637347775d33ff4d6984087294d06e05935
+size 1303
dummy/XQuAD.hi/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:113c40c582bd3b8312655dd1a9ec8941148a6d14e80188b0ce1258d144f80428
+size 1303
dummy/XQuAD.ru/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a18dcfc14a3fc2f57dcb4e9eed79bf0ab3c25fe98810ad0783276a0294e9430
+size 1303
dummy/XQuAD.th/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:087cc1ea600945a407c84383be563a05b03b55318f24576e5f2032e87cfebc2f
+size 1303
dummy/XQuAD.tr/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b42627b74f3985b537308d5ff43fe7a7e8c694232096ecfc02ac39c5bcd0817c
+size 1303
dummy/XQuAD.vi/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c7d6969b60f84f5ee4e13a153aea6236c3bb39d68c9c030dece946cbfe70822
+size 1303
dummy/XQuAD.zh/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3be20c666cd67d2ee133514092fe5e3e360cfe7543c315e7b3379c6657342b62
+size 1303
dummy/XQuADes/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceff401dab183eb4e7d3e6ffbf85de216d64bf5610e0b5e82f681684f4f479a1
+size 1303
dummy/tydiqa/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b27090c80d2f66215a1bdb5b4241eb5a0a9562783ecd26e12fc2f3b7d87987bc
+size 1699
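
Each dummy_data.zip above is tracked by Git LFS (per the .gitattributes rules), so what is committed is a three-line pointer: the LFS spec version, the SHA-256 of the real archive, and its size in bytes. A minimal sketch of reading such a pointer from a local checkout (the path is illustrative, and it only works while the file is still stored as a pointer rather than the smudged archive):

# Parse a Git LFS pointer file into its key/value fields.
def read_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("dummy/tydiqa/1.0.0/dummy_data.zip")
print(pointer["oid"], pointer["size"])  # sha256:b27090... 1699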
xtreme.py
ADDED
@@ -0,0 +1,913 @@
+"""TODO(xtreme): Add a description here."""
+
+from __future__ import absolute_import, division, print_function
+
+import csv
+import glob
+import json
+import os
+import textwrap
+
+import six
+
+import datasets
+
+
+# TODO(xtreme): BibTeX citation
+_CITATION = """\
+@article{hu2020xtreme,
+author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},
+title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},
+journal = {CoRR},
+volume = {abs/2003.11080},
+year = {2020},
+archivePrefix = {arXiv},
+eprint = {2003.11080}
+}
+"""
+
+# TODO(xtrem):
+_DESCRIPTION = """\
+The Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of
+the cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages
+(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of
+syntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,
+and availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil
+(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the
+Niger-Congo languages Swahili and Yoruba, spoken in Africa.
+"""
+_MLQA_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
+_XQUAD_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi", "el", "ru", "th", "tr"]
+_PAWSX_LANG = ["de", "en", "es", "fr", "ja", "ko", "zh"]
+_BUCC_LANG = ["de", "fr", "zh", "ru"]
+_TATOEBA_LANG = [
+    "afr",
+    "ara",
+    "ben",
+    "bul",
+    "deu",
+    "cmn",
+    "ell",
+    "est",
+    "eus",
+    "fin",
+    "fra",
+    "heb",
+    "hin",
+    "hun",
+    "ind",
+    "ita",
+    "jav",
+    "jpn",
+    "kat",
+    "kaz",
+    "kor",
+    "mal",
+    "mar",
+    "nld",
+    "pes",
+    "por",
+    "rus",
+    "spa",
+    "swh",
+    "tam",
+    "tgl",
+    "tha",
+    "tur",
+    "urd",
+    "vie",
+]
+
+_UD_POS_LANG = [
+    "Afrikaans",
+    "Arabic",
+    "Basque",
+    "Bulgarian",
+    "Dutch",
+    "English",
+    "Estonian",
+    "Finnish",
+    "French",
+    "German",
+    "Greek",
+    "Hebrew",
+    "Hindi",
+    "Hungarian",
+    "Indonesian",
+    "Italian",
+    "Japanese",
+    "Kazakh",
+    "Korean",
+    "Chinese",
+    "Marathi",
+    "Persian",
+    "Portuguese",
+    "Russian",
+    "Spanish",
+    "Tagalog",
+    "Tamil",
+    "Telugu",
+    "Thai",
+    "Turkish",
+    "Urdu",
+    "Vietnamese",
+    "Yoruba",
+]
+_PAN_X_LANG = [
+    "af",
+    "ar",
+    "bg",
+    "bn",
+    "de",
+    "el",
+    "en",
+    "es",
+    "et",
+    "eu",
+    "fa",
+    "fi",
+    "fr",
+    "he",
+    "hi",
+    "hu",
+    "id",
+    "it",
+    "ja",
+    "jv",
+    "ka",
+    "kk",
+    "ko",
+    "ml",
+    "mr",
+    "ms",
+    "my",
+    "nl",
+    "pt",
+    "ru",
+    "sw",
+    "ta",
+    "te",
+    "th",
+    "tl",
+    "tr",
+    "ur",
+    "vi",
+    "yo",
+    "zh",
+]
+_PAN_X_FOLDER = "AmazonPhotos.zip"
+_NAMES = ["XNLI", "tydiqa", "SQuAD"]
+for lang in _PAN_X_LANG:
+    _NAMES.append("PAN-X.{}".format(lang))
+for lang1 in _MLQA_LANG:
+    for lang2 in _MLQA_LANG:
+        _NAMES.append("MLQA.{}.{}".format(lang1, lang2))
+for lang in _XQUAD_LANG:
+    _NAMES.append("XQuAD.{}".format(lang))
+for lang in _BUCC_LANG:
+    _NAMES.append("bucc18.{}".format(lang))
+for lang in _PAWSX_LANG:
+    _NAMES.append("PAWS-X.{}".format(lang))
+for lang in _TATOEBA_LANG:
+    _NAMES.append("tatoeba.{}".format(lang))
+for lang in _UD_POS_LANG:
+    _NAMES.append("udpos.{}".format(lang))
+
+_DESCRIPTIONS = {
+    "tydiqa": textwrap.dedent(
+        """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
+answer, predict the single contiguous span of characters that answers the question. This is more similar to
+existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
+This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
+a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
+XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
+only the gold answer passage is provided rather than the entire Wikipedia article;
+unanswerable questions have been discarded, similar to MLQA and XQuAD;
+we evaluate with the SQuAD 1.1 metrics like XQuAD; and
+Thai and Japanese are removed since the lack of whitespace breaks some tools.
+"""
+    ),
+    "XNLI": textwrap.dedent(
+        """
+The Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and
+2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into
+14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,
+Hindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the
+corresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. The corpus is made to
+evaluate how to perform inference in any language (including low-resources ones like Swahili or Urdu) when only
+English NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI
+is an evaluation benchmark."""
+    ),
+    "PAWS-X": textwrap.dedent(
+        """
+This dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training
+pairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All
+translated pairs are sourced from examples in PAWS-Wiki."""
+    ),
+    "XQuAD": textwrap.dedent(
+        """\
+XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question
+answering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from
+the development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into
+ten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,
+the dataset is entirely parallel across 11 languages."""
+    ),
+    "MLQA": textwrap.dedent(
+        """\
+MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
+MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
+German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
+4 different languages on average."""
+    ),
+    "tatoeba": textwrap.dedent(
+        """\
+his data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.
+
+For each languages, we have selected 1000 English sentences and their translations, if available. Please check
+this paper for a description of the languages, their families and scripts as well as baseline results.
+
+Please note that the English sentences are not identical for all language pairs. This means that the results are
+not directly comparable across languages. In particular, the sentences tend to have less variety for several
+low-resource languages, e.g. "Tom needed water", "Tom needs water", "Tom is getting water", ...
+"""
+    ),
+    "bucc18": textwrap.dedent(
+        """Building and Using Comparable Corpora
+"""
+    ),
+    "udpos": textwrap.dedent(
+        """\
+Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological
+features, and syntactic dependencies) across different human languages. UD is an open community effort with over 200
+contributors producing more than 100 treebanks in over 70 languages. If you’re new to UD, you should start by reading
+the first part of the Short Introduction and then browsing the annotation guidelines.
+"""
+    ),
+    "SQuAD": textwrap.dedent(
+        """\
+Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
+dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
+articles, where the answer to every question is a segment of text, or span, \
+from the corresponding reading passage, or the question might be unanswerable."""
+    ),
+    "PAN-X": textwrap.dedent(
+        """\
+The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been
+constructed using the linked entities in Wikipedia pages for 282 different languages including Danish. The dataset
+can be loaded with the DaNLP package:"""
+    ),
+}
+_CITATIONS = {
+    "tydiqa": textwrap.dedent(
+        (
+            """\
+@article{tydiqa,
+title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
+author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}
+year = {2020},
+journal = {Transactions of the Association for Computational Linguistics}
+}"""
+        )
+    ),
+    "XNLI": textwrap.dedent(
+        """\
+@InProceedings{conneau2018xnli,
+author = {Conneau, Alexis
+and Rinott, Ruty
+and Lample, Guillaume
+and Williams, Adina
+and Bowman, Samuel R.
+and Schwenk, Holger
+and Stoyanov, Veselin},
+title = {XNLI: Evaluating Cross-lingual Sentence Representations},
+booktitle = {Proceedings of the 2018 Conference on Empirical Methods
+in Natural Language Processing},
+year = {2018},
+publisher = {Association for Computational Linguistics},
+location = {Brussels, Belgium},
+}"""
+    ),
+    "XQuAD": textwrap.dedent(
+        """
+@article{Artetxe:etal:2019,
+author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},
+title = {On the cross-lingual transferability of monolingual representations},
+journal = {CoRR},
+volume = {abs/1910.11856},
+year = {2019},
+archivePrefix = {arXiv},
+eprint = {1910.11856}
+}
+"""
+    ),
+    "MLQA": textwrap.dedent(
+        """\
+@article{lewis2019mlqa,
+title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
+author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
+journal={arXiv preprint arXiv:1910.07475},
+year={2019}"""
+    ),
+    "PAWS-X": textwrap.dedent(
+        """\
+@InProceedings{pawsx2019emnlp,
+title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},
+author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},
+booktitle = {Proc. of EMNLP},
+year = {2019}
+}"""
+    ),
+    "tatoeba": textwrap.dedent(
+        """\
+@article{tatoeba,
+title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},
+author={Mikel, Artetxe and Holger, Schwenk,},
+journal={arXiv:1812.10464v2},
+year={2018}
+}"""
+    ),
+    "bucc18": textwrap.dedent(""""""),
+    "udpos": textwrap.dedent(""""""),
+    "SQuAD": textwrap.dedent(
+        """\
+@article{2016arXiv160605250R,
+author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
+Konstantin and {Liang}, Percy},
+title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
+journal = {arXiv e-prints},
+year = 2016,
+eid = {arXiv:1606.05250},
+pages = {arXiv:1606.05250},
+archivePrefix = {arXiv},
+eprint = {1606.05250},
+}"""
+    ),
+    "PAN-X": textwrap.dedent(
+        """\
+@article{pan-x,
+title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},
+author={Xiaoman, Pan and Boliang, Zhang and Jonathan, May and Joel, Nothman and Kevin, Knight and Heng, Ji},
+volume={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers}
+year={2017}
+}"""
+    ),
+}
+
+_TEXT_FEATURES = {
+    "XNLI": {"language": "language", "sentence1": "sentence1", "sentence2": "sentence2"},
+    "tydiqa": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
+    "XQuAD": {"id": "id", "context": "context", "question": "question", "answers": "answers"},
+    "MLQA": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
+    "tatoeba": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
+    "bucc18": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
+    "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"},
+    "udpos": {"word": "", "pos_tag": ""},
+    "SQuAD": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
+    "PAN-X": {"word": "", "ner_tag": "", "lang": ""},
+}
+_DATA_URLS = {
+    "tydiqa": "https://storage.googleapis.com/tydiqa/",
+    "XNLI": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip",
+    "XQuAD": "https://github.com/deepmind/xquad/raw/master/",
+    "MLQA": "https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip",
+    "PAWS-X": "https://storage.googleapis.com/paws/pawsx/x-final.tar.gz",
+    "bucc18": "https://comparable.limsi.fr/bucc2018/",
+    "tatoeba": "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1",
+    "udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
+    "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/",
+    "PAN-X": "",
+}
+
+_URLS = {
+    "tydiqa": "https://github.com/google-research-datasets/tydiqa",
+    "XQuAD": "https://github.com/deepmind/xquad",
+    "XNLI": "https://www.nyu.edu/projects/bowman/xnli/",
+    "MLQA": "https://github.com/facebookresearch/MLQA",
+    "PAWS-X": "https://github.com/google-research-datasets/paws/tree/master/pawsx",
+    "bucc18": "https://comparable.limsi.fr/bucc2018/",
+    "tatoeba": "https://github.com/facebookresearch/LASER/blob/master/data/tatoeba/v1/README.md",
+    "udpos": "https://universaldependencies.org/",
+    "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/",
+    "PAN-X": "",
+}
+
+
+class XtremeConfig(datasets.BuilderConfig):
+    """BuilderConfig for Break"""
+
+    def __init__(self, data_url, citation, url, text_features, **kwargs):
+        """
+
+        Args:
+          text_features: `dict[string, string]`, map from the name of the feature
+            dict for each text field to the name of the column in the tsv file
+          label_column:
+          label_classes
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(XtremeConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+        self.text_features = text_features
+        self.data_url = data_url
+        self.citation = citation
+        self.url = url
+
+
+class Xtreme(datasets.GeneratorBasedBuilder):
+    """TODO(xtreme): Short description of my dataset."""
+
+    # TODO(xtreme): Set up version.
+    VERSION = datasets.Version("0.1.0")
+    BUILDER_CONFIGS = [
+        XtremeConfig(
+            name=name,
+            description=_DESCRIPTIONS[name.split(".")[0]],
+            citation=_CITATIONS[name.split(".")[0]],
+            text_features=_TEXT_FEATURES[name.split(".")[0]],
+            data_url=_DATA_URLS[name.split(".")[0]],
+            url=_URLS[name.split(".")[0]],
+        )
+        for name in _NAMES
+    ]
+
+    @property
+    def manual_download_instructions(self):
+        if self.config.name.startswith("PAN-X"):
+            return """\
+You need to manually download the AmazonPhotos.zip file on Amazon Cloud Drive
+(https://www.amazon.com/clouddrive/share/d3KGCRCIYwhKJF0H3eWA26hjg2ZCRhjpEQtDL70FSBN). The folder containing the saved file
+can be used to load the dataset via `datasets.load_dataset("xtreme", data_dir="<path/to/folder>").
+"""
+        return None
+
+    def _info(self):
+        # TODO(xtreme): Specifies the datasets.DatasetInfo object
+        features = {text_feature: datasets.Value("string") for text_feature in six.iterkeys(self.config.text_features)}
+        if "answers" in features.keys():
+            features["answers"] = datasets.features.Sequence(
+                {"answer_start": datasets.Value("int32"), "text": datasets.Value("string")}
+            )
+        if self.config.name.startswith("PAWS-X"):
+            features["label"] = datasets.Value("string")
+        if self.config.name == "XNLI":
+            features["gold_label"] = datasets.Value("string")
+
+        if self.config.name.startswith("PAN-X"):
+            features = datasets.Features(
+                {
+                    "words": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(datasets.Value("string")),
+                    "langs": datasets.Sequence(datasets.Value("string")),
+                }
+            )
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=self.config.description + "\n" + _DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                features
+                # These are the features of your dataset like images, labels ...
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="https://github.com/google-research/xtreme" + "\t" + self.config.url,
+            citation=self.config.citation + "\n" + _CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # TODO(xtreme): Downloads the data and defines the splits
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+
+        if self.config.name == "tydiqa":
+            train_url = "v1.1/tydiqa-goldp-v1.1-train.json"
+            dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json"
+            urls_to_download = {
+                "train": os.path.join(self.config.data_url, train_url),
+                "dev": os.path.join(self.config.data_url, dev_url),
+            }
+            dl_dir = dl_manager.download_and_extract(urls_to_download)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": dl_dir["train"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": dl_dir["dev"]},
+                ),
+            ]
+        if self.config.name == "XNLI":
+            dl_dir = dl_manager.download_and_extract(self.config.data_url)
+            data_dir = os.path.join(dl_dir, "XNLI-1.0")
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")}
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")}
+                ),
+            ]
+
+        if self.config.name.startswith("MLQA"):
+            mlqa_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
+            l1 = self.config.name.split(".")[1]
+            l2 = self.config.name.split(".")[2]
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            os.path.join(mlqa_downloaded_files, "MLQA_V1/test"),
+                            "test-context-{}-question-{}.json".format(l1, l2),
+                        )
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            os.path.join(mlqa_downloaded_files, "MLQA_V1/dev"),
+                            "dev-context-{}-question-{}.json".format(l1, l2),
+                        )
+                    },
+                ),
+            ]
+
+        if self.config.name.startswith("XQuAD"):
+            lang = self.config.name.split(".")[1]
+            xquad_downloaded_file = dl_manager.download_and_extract(
+                os.path.join(self.config.data_url, "xquad.{}.json".format(lang))
+            )
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": xquad_downloaded_file},
+                ),
+            ]
+        if self.config.name.startswith("PAWS-X"):
+            lang = self.config.name.split(".")[1]
+            paws_x_dir = dl_manager.download_and_extract(self.config.data_url)
+            data_dir = os.path.join(paws_x_dir, "x-final", lang)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(data_dir, "dev_2k.tsv")},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(data_dir, "test_2k.tsv")},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(data_dir, "translated_train.tsv")
+                        if lang != "en"
+                        else os.path.join(data_dir, "train.tsv")
+                    },
+                ),
+            ]
+        elif self.config.name.startswith("tatoeba"):
+            lang = self.config.name.split(".")[1]
+
+            tatoeba_source_data = dl_manager.download_and_extract(
+                os.path.join(self.config.data_url, "tatoeba.{}-eng.{}".format(lang, lang))
+            )
+            tatoeba_eng_data = dl_manager.download_and_extract(
+                os.path.join(self.config.data_url, "tatoeba.{}-eng.eng".format(lang))
+            )
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": (tatoeba_source_data, tatoeba_eng_data)},
+                ),
+            ]
+        if self.config.name.startswith("bucc18"):
+            lang = self.config.name.split(".")[1]
+            bucc18_dl_test_dir = dl_manager.download_and_extract(
+                os.path.join(self.config.data_url, "bucc2018-{}-en.training-gold.tar.bz2".format(lang))
+            )
+            bucc18_dl_dev_dir = dl_manager.download_and_extract(
+                os.path.join(self.config.data_url, "bucc2018-{}-en.sample-gold.tar.bz2".format(lang))
+            )
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(bucc18_dl_dev_dir, "bucc2018", lang + "-en")},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(bucc18_dl_test_dir, "bucc2018", lang + "-en")},
+                ),
+            ]
+        if self.config.name.startswith("udpos"):
+            udpos_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
+            data_dir = os.path.join(udpos_downloaded_files, "ud-treebanks-v2.5")
+
+            lang = self.config.name.split(".")[1]
+            data_dir = os.path.join(data_dir, "*_" + lang + "*")
+            folders = sorted(glob.glob(data_dir))
+
+            if lang == "Kazakh":
+                return [
+                    datasets.SplitGenerator(
+                        name=datasets.Split.TEST,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "test" in file and file.endswith(".conllu")
+                            ]
+                        },
+                    ),
+                    datasets.SplitGenerator(
+                        name=datasets.Split.TRAIN,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "train" in file and file.endswith(".conllu")
+                            ]
+                        },
+                    ),
+                ]
+            elif lang == "Tagalog" or lang == "Thai" or lang == "Yoruba":
+                return [
+                    datasets.SplitGenerator(
+                        name=datasets.Split.TEST,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "test" in file and file.endswith(".conllu")
+                            ]
+                        },
+                    )
+                ]
+            else:
+                return [
+                    datasets.SplitGenerator(
+                        name=datasets.Split.VALIDATION,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "NYUAD" not in folder and "dev" in file and file.endswith(".conllu")
+                            ]
+                            # we exclude Arabic NYUAD which deos not contains any word, only _
+                        },
+                    ),
+                    datasets.SplitGenerator(
+                        name=datasets.Split.TEST,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "NYUAD" not in folder and "test" in file and file.endswith(".conllu")
+                            ]
+                        },
+                    ),
+                    datasets.SplitGenerator(
+                        name=datasets.Split.TRAIN,
+                        # These kwargs will be passed to _generate_examples
+                        gen_kwargs={
+                            "filepath": [
+                                os.path.join(folder, file)
+                                for folder in folders
+                                for file in sorted(os.listdir(folder))
+                                if "NYUAD" not in folder and "train" in file and file.endswith(".conllu")
+                            ]
+                        },
+                    ),
+                ]
+
+        if self.config.name == "SQuAD":
+
+            urls_to_download = {
+                "train": os.path.join(self.config.data_url, "train-v1.1.json"),
+                "dev": os.path.join(self.config.data_url, "dev-v1.1.json"),
+            }
+            downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+            return [
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}
+                ),
+            ]
+
+        if self.config.name.startswith("PAN-X"):
+            path_to_manual_folder = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+            panx_path = os.path.join(path_to_manual_folder, _PAN_X_FOLDER)
+            if not os.path.exists(panx_path):
+                raise FileNotFoundError(
+                    "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('xtreme', data_dir=...)` that includes {}. Manual download instructions: {}".format(
+                        panx_path, _PAN_X_FOLDER, self.manual_download_instructions
+                    )
+                )
+
+            panx_dl_dir = dl_manager.extract(panx_path)
+            lang = self.config.name.split(".")[1]
+            lang_folder = dl_manager.extract(os.path.join(panx_dl_dir, "panx_dataset", lang + ".tar.gz"))
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(lang_folder, "dev")
+                        # we exclude Arabic NYUAD which deos not contains any word, only _
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(lang_folder, "test")},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={"filepath": os.path.join(lang_folder, "train")},
+                ),
+            ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        # TODO(xtreme): Yields (key, example) tuples from the dataset
+
+        if self.config.name == "tydiqa" or self.config.name.startswith("MLQA") or self.config.name == "SQuAD":
+            with open(filepath, encoding="utf-8") as f:
+                data = json.load(f)
+                for article in data["data"]:
+                    title = article.get("title", "").strip()
+                    for paragraph in article["paragraphs"]:
+                        context = paragraph["context"].strip()
+                        for qa in paragraph["qas"]:
+                            question = qa["question"].strip()
+                            id_ = qa["id"]
+
+                            answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                            answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                            # Features currently used are "context", "question", and "answers".
+                            # Others are extracted here for the ease of future expansions.
+                            yield id_, {
+                                "title": title,
+                                "context": context,
+                                "question": question,
+                                "id": id_,
+                                "answers": {"answer_start": answer_starts, "text": answers},
+                            }
+        if self.config.name == "XNLI":
+            with open(filepath, encoding="utf-8") as f:
+                data = csv.DictReader(f, delimiter="\t")
+                for id_, row in enumerate(data):
+                    yield id_, {
+                        "sentence1": row["sentence1"],
+                        "sentence2": row["sentence2"],
+                        "language": row["language"],
+                        "gold_label": row["gold_label"],
+                    }
+        if self.config.name.startswith("PAWS-X"):
+            with open(filepath, encoding="utf-8") as f:
+                data = csv.reader(f, delimiter="\t")
+                next(data) # skip header
+                for id_, row in enumerate(data):
+                    if len(row) == 4:
+                        yield id_, {"sentence1": row[1], "sentence2": row[2], "label": row[3]}
+        if self.config.name.startswith("XQuAD"):
+            with open(filepath, encoding="utf-8") as f:
+                xquad = json.load(f)
+                for article in xquad["data"]:
+                    for paragraph in article["paragraphs"]:
+                        context = paragraph["context"].strip()
+                        for qa in paragraph["qas"]:
+                            question = qa["question"].strip()
+                            id_ = qa["id"]
+
+                            answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                            answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                            # Features currently used are "context", "question", and "answers".
+                            # Others are extracted here for the ease of future expansions.
+                            yield id_, {
+                                "context": context,
+                                "question": question,
+                                "id": id_,
+                                "answers": {"answer_start": answer_starts, "text": answers},
+                            }
+        if self.config.name.startswith("bucc18"):
+            files = sorted(os.listdir(filepath))
+            target_file = "/"
+            source_file = "/"
+            source_target_file = "/"
+            for file in files:
+                if file.endswith("en"):
+                    target_file = os.path.join(filepath, file)
+                elif file.endswith("gold"):
+                    source_target_file = os.path.join(filepath, file)
+                else:
+                    source_file = os.path.join(filepath, file)
+            with open(target_file, encoding="utf-8") as f:
+                data = csv.reader(f, delimiter="\t")
+                target_sentences = [row for row in data]
+            with open(source_file, encoding="utf-8") as f:
+                data = csv.reader(f, delimiter="\t")
+                source_sentences = [row for row in data]
+            with open(source_target_file, encoding="utf-8") as f:
+                data = csv.reader(f, delimiter="\t")
+                source_target_ids = [row for row in data]
+            for id_, pair in enumerate(source_target_ids):
+                source_id = pair[0]
+                target_id = pair[1]
+                source_sent = ""
+                target_sent = ""
+                for i in range(len(source_sentences)):
+                    if source_sentences[i][0] == source_id:
+                        source_sent = source_sentences[i][1]
+                        source_id = source_sentences[i][0]
+                        break
+                for j in range(len(target_sentences)):
+                    if target_sentences[j][0] == target_id:
+                        target_sent = target_sentences[j][1]
+                        target_id = target_sentences[j][0]
+                        break
+                yield id_, {
+                    "source_sentence": source_sent,
+                    "target_sentence": target_sent,
+                    "source_lang": source_id,
+                    "target_lang": target_id,
+                }
+        if self.config.name.startswith("tatoeba"):
+            source_file = filepath[0]
+            target_file = filepath[1]
+            source_sentences = []
+            target_sentences = []
+            with open(source_file, encoding="utf-8") as f1:
+                for row in f1:
+                    source_sentences.append(row)
+            with open(target_file, encoding="utf-8") as f2:
+                for row in f2:
+                    target_sentences.append(row)
+            for i in range(len(source_sentences)):
+                yield i, {
+                    "source_sentence": source_sentences[i],
+                    "target_sentence": target_sentences[i],
+                    "source_lang": source_file.split(".")[-1],
+                    "target_lang": "eng",
+                }
+        if self.config.name.startswith("udpos"):
+            for id_file, file in enumerate(filepath):
+                with open(file, encoding="utf-8") as f:
+                    data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                    for id_row, row in enumerate(data):
+                        if len(row) >= 10 and row[1] != "_":
+                            yield str(id_file) + "_" + str(id_row), {"word": row[1], "pos_tag": row[3]}
+        if self.config.name.startswith("PAN-X"):
+            guid_index = 1
+            with open(filepath, encoding="utf-8") as f:
+                words = []
+                ner_tags = []
+                langs = []
+                for line in f:
+                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                        if words:
+                            yield guid_index, {"words": words, "ner_tags": ner_tags, "langs": langs}
+                            guid_index += 1
+                            words = []
+                            ner_tags = []
+                            langs = []
+                    else:
+                        # pan-x data is tab separated
+                        splits = line.split("\t")
+                        # strip out en: prefix
+                        langs.append(splits[0][:2])
+                        words.append(splits[0][3:])
+                        if len(splits) > 1:
+                            ner_tags.append(splits[-1].replace("\n", ""))
+                        else:
+                            # examples have no label in test set
+                            ner_tags.append("O")