Progress
Browse files
.gitignore
CHANGED
@@ -6,3 +6,6 @@ venv
|
|
6 |
|
7 |
# MacOS
|
8 |
.DS_Store
|
|
|
|
|
|
|
|
6 |
|
7 |
# MacOS
|
8 |
.DS_Store
|
9 |
+
|
10 |
+
_common_voice.py
|
11 |
+
_giga_speech.py
|
nena_speech_1_0.py
CHANGED
@@ -92,63 +92,58 @@ class NENASpeech(datasets.GeneratorBasedBuilder):
|
|
92 |
)
|
93 |
|
94 |
def _split_generators(self, dl_manager):
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
# # set the audio feature and the path to the extracted file
|
151 |
-
# path = os.path.join(local_extracted_archive_paths, path) if local_extracted_archive_paths else path
|
152 |
-
# result["audio"] = {"path": path, "bytes": file.read()}
|
153 |
-
# result["path"] = path
|
154 |
-
# yield path, result
|
|
|
92 |
)
|
93 |
|
94 |
def _split_generators(self, dl_manager):
    """Download per-split audio archives and transcript files and build
    the SplitGenerator objects for this dialect.

    The dialect is taken from the builder config name. For each of the
    train/dev/test splits one audio archive and one transcript TSV are
    downloaded; archives are extracted locally only when not streaming
    (in streaming mode examples are read straight out of the archive).
    """
    dialect = self.config.name
    splits = ("train", "dev", "test")

    # One audio archive per split for the configured dialect.
    audio_urls = {s: _AUDIO_URL.format(dialect=dialect, split=s) for s in splits}
    archive_paths = dl_manager.download(audio_urls)
    if dl_manager.is_streaming:
        local_extracted_archive_paths = {}
    else:
        local_extracted_archive_paths = dl_manager.extract(archive_paths)

    # Transcript TSVs are small; download (and extract) them eagerly.
    meta_urls = {s: _TRANSCRIPT_URL.format(dialect=dialect, split=s) for s in splits}
    meta_paths = dl_manager.download_and_extract(meta_urls)

    # Map the on-disk split labels onto the canonical datasets split names.
    canonical_names = {
        "train": datasets.Split.TRAIN,
        "dev": datasets.Split.VALIDATION,
        "test": datasets.Split.TEST,
    }
    return [
        datasets.SplitGenerator(
            name=canonical_names.get(s, s),
            gen_kwargs={
                "local_extracted_archive_paths": local_extracted_archive_paths.get(s),
                "archive": dl_manager.iter_archive(archive_paths.get(s)),
                "meta_path": meta_paths[s],
            },
        )
        for s in splits
    ]
|
126 |
+
|
127 |
+
def _generate_examples(self, local_extracted_archive_paths, archive, meta_path):
    """Yield (key, example) pairs by joining the transcript TSV against
    the audio files contained in the downloaded archive.

    TSV rows are indexed by their mp3 file name; each archive member
    whose base name has a matching row becomes one example, with the
    audio bytes read directly from the archive stream.
    """
    data_fields = list(self._info().features.keys())

    # First pass: index every transcript row by its audio file name.
    metadata = {}
    with open(meta_path, encoding="utf-8") as meta_file:
        rows = csv.DictReader(meta_file, delimiter="\t", quoting=csv.QUOTE_NONE)
        for row in tqdm(rows, desc="Reading metadata..."):
            # Normalise the path column so it always carries the mp3 suffix.
            if not row["path"].endswith(".mp3"):
                row["path"] += ".mp3"
            # If a row is missing declared feature columns, fill them
            # with empty strings so every example has the same schema.
            for field in data_fields:
                if field not in row:
                    row[field] = ""
            metadata[row["path"]] = row

    # Second pass: walk the archive and emit examples for known files.
    for path, file in archive:
        _, filename = os.path.split(path)
        if filename not in metadata:
            continue
        result = dict(metadata[filename])
        # Point the audio feature at the extracted file when one exists
        # (non-streaming); otherwise keep the in-archive path.
        if local_extracted_archive_paths:
            path = os.path.join(local_extracted_archive_paths, path)
        result["audio"] = {"path": path, "bytes": file.read()}
        result["path"] = path
        yield path, result
|
|
|
|
|
|
|
|
|
|
transcript/{burmi → curmi copy}/dev.tsv
RENAMED
File without changes
|
transcript/{burmi → curmi copy}/test.tsv
RENAMED
File without changes
|
transcript/{burmi → curmi copy}/train.tsv
RENAMED
File without changes
|