system (HF staff) committed
Commit 7eeece4 (0 parents)

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,387 @@
+ ---
+ annotations_creators:
+ - no-annotation
+ language_creators:
+ - found
+ languages:
+ - af
+ - am
+ - an
+ - as
+ - av
+ - ay
+ - bs
+ - ce
+ - co
+ - cv
+ - dv
+ - eo
+ - gl
+ - gn
+ - gu
+ - ha
+ - hr
+ - ht
+ - ia
+ - id
+ - ie
+ - ig
+ - io
+ - ja
+ - jv
+ - km
+ - ko
+ - ku
+ - kv
+ - ky
+ - lb
+ - lg
+ - li
+ - ln
+ - lo
+ - mg
+ - mi
+ - ml
+ - mn
+ - mr
+ - ms
+ - my
+ - ne
+ - om
+ - or
+ - os
+ - pa
+ - rm
+ - rw
+ - sc
+ - sd
+ - si
+ - sk
+ - sn
+ - so
+ - sr
+ - su
+ - sw
+ - ta
+ - th
+ - tl
+ - tn
+ - to
+ - ug
+ - vi
+ - vo
+ - wa
+ - wo
+ - xh
+ - yo
+ - zh
+ - ar
+ - az
+ - ba
+ - be
+ - bg
+ - bn
+ - bo
+ - br
+ - ca
+ - cs
+ - cy
+ - da
+ - de
+ - el
+ - en
+ - es
+ - et
+ - eu
+ - fa
+ - fi
+ - fo
+ - fr
+ - fy
+ - ga
+ - gd
+ - gv
+ - he
+ - hi
+ - hu
+ - hy
+ - is
+ - it
+ - ka
+ - kk
+ - kn
+ - kw
+ - la
+ - lt
+ - lv
+ - mk
+ - mt
+ - nb
+ - nl
+ - nn
+ - nv
+ - oc
+ - pl
+ - ps
+ - pt
+ - qu
+ - ro
+ - ru
+ - sa
+ - sh
+ - sl
+ - sq
+ - sv
+ - te
+ - tg
+ - tk
+ - tr
+ - tt
+ - uk
+ - ur
+ - uz
+ - yi
+ - ace
+ - als
+ - ang
+ - arz
+ - ast
+ - azb
+ - bar
+ - bcl
+ - bho
+ - bjn
+ - bpy
+ - bxr
+ - cbk
+ - cdo
+ - ceb
+ - chr
+ - ckb
+ - crh
+ - csb
+ - diq
+ - dsb
+ - dty
+ - egl
+ - ext
+ - frp
+ - fur
+ - gag
+ - glk
+ - hak
+ - hif
+ - hsb
+ - ilo
+ - jam
+ - jbo
+ - kaa
+ - kab
+ - kbd
+ - koi
+ - kok
+ - krc
+ - ksh
+ - lad
+ - lez
+ - lij
+ - lmo
+ - lrc
+ - ltg
+ - lzh
+ - mai
+ - mdf
+ - mhr
+ - min
+ - mrj
+ - mwl
+ - myv
+ - mzn
+ - nan
+ - nap
+ - nci
+ - nds
+ - new
+ - nrm
+ - nso
+ - olo
+ - pag
+ - pam
+ - pap
+ - pcd
+ - pdc
+ - pfl
+ - pnb
+ - rue
+ - rup
+ - sah
+ - scn
+ - sco
+ - sgs
+ - sme
+ - srn
+ - stq
+ - szl
+ - tcy
+ - tet
+ - tyv
+ - udm
+ - vec
+ - vep
+ - vls
+ - vro
+ - war
+ - wuu
+ - xmf
+ - zea
+ - other-roa-tara
+ - other-zh-yue
+ - other-map-bms
+ - other-nds-nl
+ - other-be-tarask
+ licenses:
+ - odbl-1-0
+ multilinguality:
+ - multilingual
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - original
+ task_categories:
+ - text-classification
+ task_ids:
+ - text-classification-other-language-identification
+ ---
+
+ # Dataset Card for wili_2018
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** https://zenodo.org/record/841984
+ - **Repository:** [Needs More Information]
+ - **Paper:** https://arxiv.org/pdf/1801.07779
+ - **Leaderboard:** [Needs More Information]
+ - **Point of Contact:** Martin Thoma (info@martin-thoma.de)
+
+ ### Dataset Summary
+
+ WiLI-2018, the Wikipedia language identification benchmark dataset, contains 235,000 paragraphs covering 235 languages. The dataset is balanced across languages, and a train-test split is provided.
+
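+ The dataset can be loaded with the `datasets` library; a minimal sketch:
+
+ ```
+ from datasets import load_dataset
+
+ # Downloads the train/test files and builds the splits
+ dataset = load_dataset("wili_2018")
+ print(dataset["train"][0])  # {'sentence': ..., 'label': ...}
+ ```
+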
+ ### Supported Tasks and Leaderboards
+
+ [Needs More Information]
+
+ ### Languages
+
+ 235 different languages; the full list of language codes appears in the YAML tags above.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ ```
+ {
+     'label': 207,
+     'sentence': 'Ti Turkia ket maysa a demokrata, sekular, unitario, batay-linteg a republika nga addaan ti taga-ugma a tinawtawid a kultura. Ti Turkia ket umadadu a naipatipon iti Laud babaen ti panagkameng kadagiti organisasion a kas ti Konsilo iti Europa, NATO, OECD, OSCE ken ti G-20 a dagiti kangrunaan nga ekonomia. Ti Turkia ket nangrugi a nakitulag ti napno a panagkameng iti Kappon ti Europa idi 2005, nga isu ket maysa idin a kumaduaan a kameng iti Europeano a Komunidad ti Ekonomia manipud idi 1963 ken nakadanon ti maysa a tulagan ti kappon ti aduana idi 1995. Ti Turkia ket nagtaraken iti asideg a kultural, politikal, ekonomiko ken industria a panakibiang iti Tengnga a Daya, dagiti Turko nga estado iti Tengnga nga Asia ken dagiti pagilian ti Aprika babaen ti panagkameng kadagiti organisasion a kas ti Turko a Konsilo, Nagsaupan nga Administrasion iti Turko nga Arte ken Kultura, Organisasion iti Islamiko a Panagtitinnulong ken ti Organisasion ti Ekonomiko a Panagtitinnulong.'
+ }
+ ```
+
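+ The `label` column is a `ClassLabel`, so integer ids can be mapped back to language codes with `int2str`; a small sketch:
+
+ ```
+ label_feature = dataset["train"].features["label"]
+ print(label_feature.int2str(207))  # 'ilo' (Ilocano), matching the example above
+ ```
+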
+ ### Data Fields
+
+ - `sentence`: a paragraph of Wikipedia text (string)
+ - `label`: the language of the paragraph, encoded as a `ClassLabel` over 235 language codes
+
+ ### Data Splits
+
+ The train and test splits each contain 117,500 paragraphs (235,000 in total).
+
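+ The split sizes can be checked on the loaded `DatasetDict`; a quick sketch reusing the `dataset` object from the loading example above:
+
+ ```
+ print({split: ds.num_rows for split, ds in dataset.items()})
+ # {'train': 117500, 'test': 117500}
+ ```
+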
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [Needs More Information]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [Needs More Information]
+
+ #### Who are the source language producers?
+
+ [Needs More Information]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [Needs More Information]
+
+ #### Who are the annotators?
+
+ [Needs More Information]
+
+ ### Personal and Sensitive Information
+
+ [Needs More Information]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [Needs More Information]
+
+ ### Discussion of Biases
+
+ [Needs More Information]
+
+ ### Other Known Limitations
+
+ [Needs More Information]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ The dataset was initially created by Martin Thoma.
+
+ ### Licensing Information
+
+ ODC Open Database License (ODbL) v1.0
+
+ ### Citation Information
+
+ ```
+ @dataset{thoma_martin_2018_841984,
+   author    = {Thoma, Martin},
+   title     = {{WiLI-2018 - Wikipedia Language Identification database}},
+   month     = jan,
+   year      = 2018,
+   publisher = {Zenodo},
+   version   = {1.0.0},
+   doi       = {10.5281/zenodo.841984},
+   url       = {https://doi.org/10.5281/zenodo.841984}
+ }
+ ```
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"WiLI-2018 dataset": {"description": "It is a benchmark dataset for language identification and contains 235000 paragraphs of 235 languages\n", "citation": "@dataset{thoma_martin_2018_841984,\n author = {Thoma, Martin},\n title = {{WiLI-2018 - Wikipedia Language Identification database}},\n month = jan,\n year = 2018,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.841984},\n url = {https://doi.org/10.5281/zenodo.841984}\n}\n", "homepage": "https://zenodo.org/record/841984", "license": "ODC Open Database License v1.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 235, "names": ["cdo", "glk", "jam", "lug", "san", "rue", "wol", "new", "mwl", "bre", "ara", "hye", "xmf", "ext", "cor", "yor", "div", "asm", "lat", "cym", "hif", "ace", "kbd", "tgk", "rus", "nso", "mya", "msa", "ava", "cbk", "urd", "deu", "swa", "pus", "bxr", "udm", "csb", "yid", "vro", "por", "pdc", "eng", "tha", "hat", "lmo", "pag", "jav", "chv", "nan", "sco", "kat", "bho", "bos", "kok", "oss", "mri", "fry", "cat", "azb", "kin", "hin", "sna", "dan", "egl", "mkd", "ron", "bul", "hrv", "som", "pam", "nav", "ksh", "nci", "khm", "sgs", "srn", "bar", "cos", "ckb", "pfl", "arz", "roa-tara", "fra", "mai", "zh-yue", "guj", "fin", "kir", "vol", "hau", "afr", "uig", "lao", "swe", "slv", "kor", "szl", "srp", "dty", "nrm", "dsb", "ind", "wln", "pnb", "ukr", "bpy", "vie", "tur", "aym", "lit", "zea", "pol", "est", "scn", "vls", "stq", "gag", "grn", "kaz", "ben", "pcd", "bjn", "krc", "amh", "diq", "ltz", "ita", "kab", "bel", "ang", "mhr", "che", "koi", "glv", "ido", "fao", "bak", "isl", "bcl", "tet", "jpn", "kur", "map-bms", "tyv", "olo", "arg", "ori", "lim", "tel", "lin", "roh", "sqi", "xho", "mlg", "fas", "hbs", "tam", "aze", "lad", "nob", "sin", "gla", "nap", "snd", "ast", "mal", "mdf", "tsn", "nds", "tgl", "nno", "sun", "lzh", "jbo", "crh", "pap", "oci", "hak", "uzb", "zho", "hsb", "sme", "mlt", "vep", "lez", "nld", "nds-nl", "mrj", "spa", "ceb", "ina", "heb", "hun", "que", "kaa", "mar", "vec", "frp", "ell", "sah", "eus", "ces", "slk", "chr", "lij", "nep", "srd", "ilo", "be-tarask", "bod", "orm", "war", "glg", "mon", "gle", "min", "ibo", "ile", "epo", "lav", "lrc", "als", "mzn", "rup", "fur", "tat", "myv", "pan", "ton", "kom", "wuu", "tcy", "tuk", "kan", "ltg"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "wili_2018", "config_name": "WiLI-2018 dataset", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 65408201, "num_examples": 117500, "dataset_name": "wili_2018"}, "test": {"name": "test", "num_bytes": 66491260, "num_examples": 117500, "dataset_name": "wili_2018"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u": {"num_bytes": 64716393, "checksum": "895b3892a1edba1702b0f2117b756204ccc177a1c285420234bdb5d717ad4100"}, "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ": {"num_bytes": 65799958, "checksum": "663f32b6f7d8a26b83e251803d386f29dcd558762125f4f8289f2cef067d4ce8"}}, "download_size": 130516351, "post_processing_size": null, "dataset_size": 131899461, "size_in_bytes": 262415812}}
dummy/WiLI-2018 dataset/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e870a8d86efe03d0497f8d3fe1f4adee41d50b042bc82b1de4c158d5221d369
+ size 4225
wili_2018.py ADDED
@@ -0,0 +1,333 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """WiLI-2018, the Wikipedia language identification benchmark dataset"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+
+ _CITATION = """\
+ @dataset{thoma_martin_2018_841984,
+   author    = {Thoma, Martin},
+   title     = {{WiLI-2018 - Wikipedia Language Identification database}},
+   month     = jan,
+   year      = 2018,
+   publisher = {Zenodo},
+   version   = {1.0.0},
+   doi       = {10.5281/zenodo.841984},
+   url       = {https://doi.org/10.5281/zenodo.841984}
+ }
+ """
+
+ _DESCRIPTION = """\
+ It is a benchmark dataset for language identification and contains 235000 paragraphs of 235 languages
+ """
+
+ _HOMEPAGE = "https://zenodo.org/record/841984"
+
+ _LICENSE = "ODC Open Database License v1.0"
+
+
+ # Google Drive mirrors of the train/test CSV files
+ _TRAIN_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1ZzlIQvw1KNBG97QQCfdatvVrrbeLaM1u"
+ _TEST_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1Xx4kFc1Xdzz8AhDasxZ0cSa-a35EQSDZ"
+
+ _CLASSES = [
+     "cdo",
+     "glk",
+     "jam",
+     "lug",
+     "san",
+     "rue",
+     "wol",
+     "new",
+     "mwl",
+     "bre",
+     "ara",
+     "hye",
+     "xmf",
+     "ext",
+     "cor",
+     "yor",
+     "div",
+     "asm",
+     "lat",
+     "cym",
+     "hif",
+     "ace",
+     "kbd",
+     "tgk",
+     "rus",
+     "nso",
+     "mya",
+     "msa",
+     "ava",
+     "cbk",
+     "urd",
+     "deu",
+     "swa",
+     "pus",
+     "bxr",
+     "udm",
+     "csb",
+     "yid",
+     "vro",
+     "por",
+     "pdc",
+     "eng",
+     "tha",
+     "hat",
+     "lmo",
+     "pag",
+     "jav",
+     "chv",
+     "nan",
+     "sco",
+     "kat",
+     "bho",
+     "bos",
+     "kok",
+     "oss",
+     "mri",
+     "fry",
+     "cat",
+     "azb",
+     "kin",
+     "hin",
+     "sna",
+     "dan",
+     "egl",
+     "mkd",
+     "ron",
+     "bul",
+     "hrv",
+     "som",
+     "pam",
+     "nav",
+     "ksh",
+     "nci",
+     "khm",
+     "sgs",
+     "srn",
+     "bar",
+     "cos",
+     "ckb",
+     "pfl",
+     "arz",
+     "roa-tara",
+     "fra",
+     "mai",
+     "zh-yue",
+     "guj",
+     "fin",
+     "kir",
+     "vol",
+     "hau",
+     "afr",
+     "uig",
+     "lao",
+     "swe",
+     "slv",
+     "kor",
+     "szl",
+     "srp",
+     "dty",
+     "nrm",
+     "dsb",
+     "ind",
+     "wln",
+     "pnb",
+     "ukr",
+     "bpy",
+     "vie",
+     "tur",
+     "aym",
+     "lit",
+     "zea",
+     "pol",
+     "est",
+     "scn",
+     "vls",
+     "stq",
+     "gag",
+     "grn",
+     "kaz",
+     "ben",
+     "pcd",
+     "bjn",
+     "krc",
+     "amh",
+     "diq",
+     "ltz",
+     "ita",
+     "kab",
+     "bel",
+     "ang",
+     "mhr",
+     "che",
+     "koi",
+     "glv",
+     "ido",
+     "fao",
+     "bak",
+     "isl",
+     "bcl",
+     "tet",
+     "jpn",
+     "kur",
+     "map-bms",
+     "tyv",
+     "olo",
+     "arg",
+     "ori",
+     "lim",
+     "tel",
+     "lin",
+     "roh",
+     "sqi",
+     "xho",
+     "mlg",
+     "fas",
+     "hbs",
+     "tam",
+     "aze",
+     "lad",
+     "nob",
+     "sin",
+     "gla",
+     "nap",
+     "snd",
+     "ast",
+     "mal",
+     "mdf",
+     "tsn",
+     "nds",
+     "tgl",
+     "nno",
+     "sun",
+     "lzh",
+     "jbo",
+     "crh",
+     "pap",
+     "oci",
+     "hak",
+     "uzb",
+     "zho",
+     "hsb",
+     "sme",
+     "mlt",
+     "vep",
+     "lez",
+     "nld",
+     "nds-nl",
+     "mrj",
+     "spa",
+     "ceb",
+     "ina",
+     "heb",
+     "hun",
+     "que",
+     "kaa",
+     "mar",
+     "vec",
+     "frp",
+     "ell",
+     "sah",
+     "eus",
+     "ces",
+     "slk",
+     "chr",
+     "lij",
+     "nep",
+     "srd",
+     "ilo",
+     "be-tarask",
+     "bod",
+     "orm",
+     "war",
+     "glg",
+     "mon",
+     "gle",
+     "min",
+     "ibo",
+     "ile",
+     "epo",
+     "lav",
+     "lrc",
+     "als",
+     "mzn",
+     "rup",
+     "fur",
+     "tat",
+     "myv",
+     "pan",
+     "ton",
+     "kom",
+     "wuu",
+     "tcy",
+     "tuk",
+     "kan",
+     "ltg",
+ ]
+
+
+ class Wili_2018(datasets.GeneratorBasedBuilder):
+     """WiLI Language Identification Dataset"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="WiLI-2018 dataset",
+             version=VERSION,
+             description="Plain text import of WiLI-2018",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(
+                 {"sentence": datasets.Value("string"), "label": datasets.features.ClassLabel(names=_CLASSES)}
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             for id_, line in enumerate(f):
+                 # Each line is "<paragraph>,<label>"; split from the right so
+                 # commas inside the paragraph text are preserved.
+                 text, label = line.rsplit(",", 1)
+                 text = text.strip('"')
+                 # Labels in the source files are 1-based; shift to the
+                 # 0-based indices expected by ClassLabel.
+                 label = int(label.strip())
+                 yield id_, {"sentence": text, "label": label - 1}
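
The builder above can also be exercised locally by pointing `datasets.load_dataset` at the script file rather than the Hub name; a minimal sketch:

```
from datasets import load_dataset

# Load via the local script instead of the Hub identifier
dataset = load_dataset("./wili_2018.py")
print(dataset["train"].features["label"].num_classes)  # 235
```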