error fix
snow-mountain.py  CHANGED  (+102 / -27)
@@ -18,7 +18,6 @@ import os
 import csv
 import json
 import datasets
-import zipfile
 import pandas as pd
 from scipy.io import wavfile
 
@@ -95,40 +94,116 @@ class Test(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-        downloaded_files = dl_manager.download(_FILES[self.config.name])
-
-        data_size = ['500', '1000', '2500', 'short', 'full']
-
-        splits = []
-        for size in data_size:
-            splits.append(
-                datasets.SplitGenerator(
-                    name=f"train_{size}",
-                    gen_kwargs={
-                        "filepath": downloaded_files[f"train_{size}"],
-                        "dl_manager": dl_manager,
-                    },
-                )
-            )
-            splits.append(
-                datasets.SplitGenerator(
-                    name=f"val_{size}",
-                    gen_kwargs={
-                        "filepath": downloaded_files[f"val_{size}"],
-                        "dl_manager": dl_manager,
-                    },
-                )
-            )
-        splits.append(
-            datasets.SplitGenerator(
-                name="test_common",
-                gen_kwargs={
-                    "filepath": downloaded_files["test_common"],
-                    "dl_manager": dl_manager,
-                },
-            )
-        )
-        return splits
+        downloaded_files = dl_manager.download(_FILES[self.config.name])
+
+        train_splits = [
+            datasets.SplitGenerator(
+                name="train_500",
+                gen_kwargs={
+                    "filepath": downloaded_files["train_500"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="train_1000",
+                gen_kwargs={
+                    "filepath": downloaded_files["train_1000"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="train_2500",
+                gen_kwargs={
+                    "filepath": downloaded_files["train_2500"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="train_short",
+                gen_kwargs={
+                    "filepath": downloaded_files["train_short"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="train_full",
+                gen_kwargs={
+                    "filepath": downloaded_files["train_full"],
+                },
+            ),
+        ]
+
+        dev_splits = [
+            datasets.SplitGenerator(
+                name="val_500",
+                gen_kwargs={
+                    "filepath": downloaded_files["val_500"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="val_1000",
+                gen_kwargs={
+                    "filepath": downloaded_files["val_1000"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="val_2500",
+                gen_kwargs={
+                    "filepath": downloaded_files["val_2500"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="val_short",
+                gen_kwargs={
+                    "filepath": downloaded_files["val_short"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name="val_full",
+                gen_kwargs={
+                    "filepath": downloaded_files["val_full"],
+                },
+            ),
+        ]
+
+        test_splits = [
+            datasets.SplitGenerator(
+                name="test_common",
+                gen_kwargs={
+                    "filepath": downloaded_files["test_common"],
+                },
+            ),
+        ]
+        return train_splits + dev_splits + test_splits
+
+        # data_size = ['500', '1000', '2500', 'short', 'full']
+
+        # splits = []
+        # for size in data_size:
+        #     splits.append(
+        #         datasets.SplitGenerator(
+        #             name=f"train_{size}",
+        #             gen_kwargs={
+        #                 "filepath": downloaded_files[f"train_{size}"],
+        #                 "dl_manager": dl_manager,
+        #             },
+        #         )
+        #     )
+        #     splits.append(
+        #         datasets.SplitGenerator(
+        #             name=f"val_{size}",
+        #             gen_kwargs={
+        #                 "filepath": downloaded_files[f"val_{size}"],
+        #                 "dl_manager": dl_manager,
+        #             },
+        #         )
+        #     )
+        # splits.append(
+        #     datasets.SplitGenerator(
+        #         name="test_common",
+        #         gen_kwargs={
+        #             "filepath": downloaded_files["test_common"],
+        #             "dl_manager": dl_manager,
+        #         },
+        #     )
+        # )
+        # return splits
 
 
     def _generate_examples(self, filepath, dl_manager):
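For context, a minimal sketch of how the splits registered above would be consumed. This is not part of the commit; the repository id and config name below are placeholders, not values taken from this repo, and should be replaced with whatever the script's _FILES/config setup actually defines.

from datasets import load_dataset

# Placeholders (assumptions, not from this commit): substitute the real
# dataset repository id and one of the config names the script supports.
REPO_ID = "ORG/snow-mountain"
CONFIG = "CONFIG_NAME"

# Every SplitGenerator name returned by _split_generators is exposed as a
# loadable split.
train_500 = load_dataset(REPO_ID, CONFIG, split="train_500")
val_500 = load_dataset(REPO_ID, CONFIG, split="val_500")
test_common = load_dataset(REPO_ID, CONFIG, split="test_common")

# Note: recent versions of `datasets` may also require trust_remote_code=True
# when loading a script-based dataset like this one.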