Commit 9a57a87 by kunkuk (parent: 07dbbb5)

Added voxlingua.py

Files changed (2):
  1. load_script.py +0 -0
  2. voxlingua.py +192 -0
load_script.py DELETED (empty file, no content changes to show)
voxlingua.py ADDED
@@ -0,0 +1,192 @@
import collections
import textwrap

import datasets

# Dataset card metadata; placeholders until the card is written.
_DESCRIPTION = "tba"
_URL = "tba"
_CITATION = "tba"
_LICENSE = "tba"


class VoxLinguaConfig(datasets.BuilderConfig):
    """BuilderConfig for the VoxLingua107 corpus."""

    def __init__(
        self,
        features,
        url,
        data_url=None,
        supervised_keys=None,
        task_templates=None,
        **kwargs,
    ):
        super(VoxLinguaConfig, self).__init__(version=datasets.Version("1.9.0", ""), **kwargs)
        self.features = features
        self.data_url = data_url
        self.url = url
        self.supervised_keys = supervised_keys
        self.task_templates = task_templates


def _languages():
    """Create the sorted dictionary of language codes and language names.

    Language names are still blank ("tba"). Note: 106 codes are listed here,
    while the ClassLabel in the "lid" config declares 107 classes.

    Returns:
        The sorted dictionary as an instance of `collections.OrderedDict`.
    """
    langs = {
        "af": "",
        "am": "",
        "ar": "",
        "as": "",
        "az": "",
        "ba": "",
        "be": "",
        "bg": "",
        "bn": "",
        "bo": "",
        "br": "",
        "bs": "",
        "ca": "",
        "ceb": "",
        "cs": "",
        "cy": "",
        "da": "",
        "de": "",
        "el": "",
        "en": "",
        "eo": "",
        "es": "",
        "et": "",
        "eu": "",
        "fa": "",
        "fi": "",
        "fo": "",
        "fr": "",
        "gl": "",
        "gn": "",
        "gu": "",
        "gv": "",
        "ha": "",
        "haw": "",
        "hi": "",
        "hr": "",
        "ht": "",
        "hu": "",
        "hy": "",
        "ia": "",
        "id": "",
        "is": "",
        "it": "",
        "iw": "",
        "ja": "",
        "jw": "",
        "ka": "",
        "kk": "",
        "km": "",
        "kn": "",
        "ko": "",
        "la": "",
        "lb": "",
        "ln": "",
        "lo": "",
        "lt": "",
        "lv": "",
        "mg": "",
        "mi": "",
        "mk": "",
        "ml": "",
        "mn": "",
        "mr": "",
        "ms": "",
        "mt": "",
        "my": "",
        "ne": "",
        "nl": "",
        "nn": "",
        "no": "",
        "oc": "",
        "pa": "",
        "pl": "",
        "ps": "",
        "pt": "",
        "ro": "",
        "ru": "",
        "sa": "",
        "sco": "",
        "sd": "",
        "si": "",
        "sk": "",
        "sl": "",
        "sn": "",
        "so": "",
        "sq": "",
        "sr": "",
        "su": "",
        "sv": "",
        "sw": "",
        "ta": "",
        "te": "",
        "tg": "",
        "th": "",
        "tk": "",
        "tl": "",
        "tr": "",
        "tt": "",
        "uk": "",
        "ur": "",
        "uz": "",
        "vi": "",
        "war": "",
        "yi": "",
        "yo": "",
        "zh": "",
    }
    return collections.OrderedDict(sorted(langs.items()))


class VoxLingua(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        VoxLinguaConfig(
            name="lid",
            description=textwrap.dedent(
                """ lid tbd """
            ),
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    # Numeric placeholder labels for the 107 languages.
                    "label": datasets.ClassLabel(
                        names=[f"{i}" for i in range(107)]
                    ),
                }
            ),
            supervised_keys=("file", "label"),
            url="http://bark.phon.ioc.ee/voxlingua107/",
            data_url="http://bark.phon.ioc.ee/voxlingua107/{language}",
        )
    ]

    BUILDER_CONFIG_CLASS = VoxLinguaConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=self.config.supervised_keys,
            homepage=self.config.url,
            citation=_CITATION,
            task_templates=self.config.task_templates,
        )

    def _split_generators(self, dl_manager):
        # Training audio is distributed as one zip archive per language code,
        # e.g. http://bark.phon.ioc.ee/voxlingua107/af.zip.
        train_data_urls = [self.config.url + f"{key}.zip" for key in _languages()]
        downloaded_files_train = dl_manager.download(train_data_urls)
        # The development data is a single archive.
        dev_data_url = [self.config.url + "dev.zip"]
        downloaded_files_dev = dl_manager.download(dev_data_url)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"archive_path": downloaded_files_train}),
            # `datasets.Split` has no DEV member; the dev archive backs VALIDATION.
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": downloaded_files_dev}),
        ]

    def _generate_examples(self, archive_path, split=None):
        # Still a stub: must yield (key, example) pairs matching the features above.
        raise NotImplementedError("example generation is not implemented yet")
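
The `_generate_examples` stub is the main missing piece of this commit. Below is a minimal sketch of one way to fill it in, under assumptions the commit does not confirm: that `_split_generators` is switched from `dl_manager.download` to `dl_manager.download_and_extract` so `archive_path` holds local directories, that those directories arrive in the same sorted order as `_languages()`, and that every `.wav` file inside a training archive belongs to that archive's language.

    # Requires `import os` alongside the other imports at the top of voxlingua.py.
    def _generate_examples(self, archive_path, split=None):
        # Hypothetical sketch: walk each extracted per-language directory and
        # yield one example per wav file, labelled by the directory's position
        # in the sorted `_languages()` ordering (matching the numeric ClassLabel).
        key = 0
        for lang_idx, extracted_dir in enumerate(archive_path):
            for root, _, filenames in os.walk(extracted_dir):
                for filename in sorted(filenames):
                    if filename.endswith(".wav"):
                        path = os.path.join(root, filename)
                        yield key, {
                            "file": path,
                            "audio": path,  # the Audio feature decodes the file lazily
                            "label": lang_idx,  # int index into the ClassLabel names
                        }
                        key += 1

The dev split would need separate handling: a single dev.zip cannot be labelled by archive order, and how its labels are stored is not visible from this commit.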
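Once example generation works, the script would be consumed like any other `datasets` loading script. A usage sketch; the script path below is illustrative, standing in for wherever this file ends up locally or on the Hub:

    from datasets import load_dataset

    # "lid" selects the only BuilderConfig defined above.
    ds = load_dataset("./voxlingua.py", "lid")
    print(ds["train"].features["label"])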