add speaker prompts
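Adds per-speaker prompt audio to every example: a new audio_speaker_prompt feature lists the audio paths belonging to the example's speaker, presumably for prompt/reference conditioning. The lookup is memoised with functools.lru_cache via a hashable DataFrame wrapper, the dev split is now built per speaker in parallel with process_map (two utterances per speaker; speakers with fewer than ten utterances are skipped), and dev utterances are excluded from the remaining data.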
libritts-r-aligned.py (+58 -16)
@@ -15,6 +15,7 @@ from multiprocessing import cpu_count
 from phones.convert import Converter
 import torchaudio
 import torchaudio.transforms as AT
+from functools import lru_cache
 
 logger = datasets.logging.get_logger(__name__)
 
@@ -64,6 +65,12 @@ _URLS = {
     "train-other-500": _URL + "train_other_500.tar.gz",
 }
 
+@lru_cache(maxsize=1000)
+def get_speaker_prompts(speaker, hash_ds):
+    ds = hash_ds.df
+    speaker_prompts = ds[ds["speaker"] == speaker]
+    speaker_prompts = tuple(speaker_prompts["audio"])
+    return speaker_prompts
 
 class LibriTTSAlignConfig(datasets.BuilderConfig):
     """BuilderConfig for LibriTTSAlign."""
@@ -106,7 +113,8 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 "phones": datasets.Sequence(datasets.Value("string")),
                 "phone_durations": datasets.Sequence(datasets.Value("int32")),
                 # audio feature
-                "audio": datasets.Value("string")
+                "audio": datasets.Value("string"),
+                "audio_speaker_prompt": datasets.Sequence(datasets.Value("string")),
             }
 
         return datasets.DatasetInfo(
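With the feature declared above, consumers see audio_speaker_prompt as a sequence of strings next to audio. A hypothetical loading sketch; the script path, split name, and trust_remote_code flag are assumptions, not part of this diff:

    from datasets import load_dataset

    # load the builder script directly; path and split are illustrative
    ds = load_dataset("./libritts-r-aligned.py", split="train", trust_remote_code=True)

    ex = ds[0]
    print(ex["audio"])                 # path to the target utterance
    print(ex["audio_speaker_prompt"])  # list of paths from the same speaker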
@@ -159,19 +167,25 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
         speakers = data_all["speaker"].unique()
         # seed for reproducibility
         np.random.seed(42)
-
-
-
-
-
-
-
-
-
-
-
-
+        self.data_all = data_all
+        del data_all
+        data_dev_all = [
+            x for x in
+            process_map(
+                self._create_dev_split,
+                speakers,
+                chunksize=1000,
+                max_workers=_MAX_WORKERS,
+                desc="creating dev split",
+                tqdm_class=tqdm,
+            )
+            if x is not None
+        ]
+        data_dev_all = pd.concat(data_dev_all)
+        data_all = self.data_all
         data_all = data_all[data_all["speaker"].isin(data_dev_all["speaker"].unique())]
+        data_all = data_all[~data_all["basename"].isin(data_dev_all["basename"].unique())]
+        del self.data_all
         self.speaker2idxs = {}
         self.speaker2idxs["all"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_dev_all["speaker"].unique())))}
         self.speaker2idxs["train"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_train["speaker"].unique())))}
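process_map comes from tqdm.contrib.concurrent: it maps a function over the inputs in a process pool while showing a progress bar and returns a list, which the `if x is not None` filter then cleans of skipped speakers. Stashing data_all on self before the call is presumably what makes the frame reachable from the pickled bound method in the workers, and _MAX_WORKERS is presumably derived from the cpu_count import visible in the first hunk. A minimal, self-contained sketch of the same map-then-filter pattern, with a toy function and inputs that are not from the repo:

    from tqdm.contrib.concurrent import process_map

    def work(n):
        # stand-in for per-speaker work; None signals "skip this input"
        return n * n if n % 2 == 0 else None

    if __name__ == "__main__":
        results = [
            x for x in process_map(work, range(10_000), chunksize=100, max_workers=4)
            if x is not None
        ]
        print(len(results))  # 5000 even squares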
@@ -194,6 +208,15 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
         self.alignments_ds = None
         self.data = None
         return splits
+
+    def _create_dev_split(self, speaker):
+        data_speaker = self.data_all[self.data_all["speaker"] == speaker]
+        if len(data_speaker) < 10:
+            print(f"Speaker {speaker} has only {len(data_speaker)} samples, skipping")
+            return None
+        else:
+            data_speaker = data_speaker.sample(2)
+            return data_speaker
 
     def _create_alignments_ds(self, name, url):
         self.empty_textgrids = 0
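_create_dev_split keeps the dev set balanced: exactly two utterances per speaker, with speakers under ten utterances contributing nothing, which is why the caller also filters data_all down to the surviving speakers. Since np.random.seed(42) was set earlier and sample is called without random_state, the draws presumably come from NumPy's seeded global state. A toy illustration of the selection rule, with invented data:

    import numpy as np
    import pandas as pd

    np.random.seed(42)
    df = pd.DataFrame({
        "speaker": ["a"] * 12 + ["b"] * 3,
        "basename": [f"utt{i:02d}" for i in range(15)],
    })

    def create_dev_split(speaker):
        data_speaker = df[df["speaker"] == speaker]
        if len(data_speaker) < 10:
            return None                # too few samples, skip speaker
        return data_speaker.sample(2)  # two dev utterances per speaker

    dev = pd.concat(
        [s for s in map(create_dev_split, df["speaker"].unique()) if s is not None]
    )
    print(dev)  # two rows, both from speaker "a"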
@@ -251,7 +274,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
         if self.empty_textgrids > 0:
             logger.warning(f"Found {self.empty_textgrids} empty textgrids")
         del self.ds, self.phone_cache, self.phone_converter
-
+        df = pd.DataFrame(
             entries,
             columns=[
                 "phones",
@@ -264,6 +287,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 "basename",
             ],
         )
+        return df
 
     def _create_entry(self, dsi_idx):
         dsi, idx = dsi_idx
@@ -298,7 +322,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             if start >= end:
                 self.empty_textgrids += 1
                 return None
-
+
         return (
             phones,
             durations,
@@ -312,10 +336,13 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, ds):
         j = 0
+        hash_col = "audio"
+        hash_ds = HashableDataFrame(ds, hash_col)
         for i, row in ds.iterrows():
             # 10kB is the minimum size of a wav file for our purposes
             if Path(row["audio"]).stat().st_size >= 10_000:
                 if len(row["phones"]) < 384:
+                    speaker_prompts = get_speaker_prompts(row["speaker"], hash_ds)
                     result = {
                         "id": row["basename"],
                         "speaker": row["speaker"],
@@ -325,6 +352,21 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                         "phones": row["phones"],
                         "phone_durations": row["duration"],
                         "audio": str(row["audio"]),
+                        "audio_speaker_prompt": speaker_prompts,
                     }
                     yield j, result
-                    j += 1
+                    j += 1
+
+class HashableDataFrame():
+    def __init__(self, df, hash_col):
+        self.df = df
+        self.hash_col = hash_col
+        self.hash = hashlib.md5(self.df[self.hash_col].values).hexdigest()
+        # to integer
+        self.hash = int(self.hash, 16)
+
+    def __hash__(self):
+        return self.hash
+
+    def __eq__(self, other):
+        return self.hash == other.hash
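The HashableDataFrame wrapper exists because functools.lru_cache requires hashable arguments and pandas DataFrames are not hashable: hashing one column stands in for the frame's identity, so repeated get_speaker_prompts calls for the same speaker hit the cache instead of refiltering the frame. Two details worth noting: the diff adds no import hashlib, so the module is presumably imported elsewhere in the script, and the returned prompt tuple also contains the current utterance's own path, since the lookup filters on speaker alone. Below is a self-contained sketch of the pattern, not the commit's exact code; it deviates in two labelled places: hashlib.md5 over the raw .values of a string column raises TypeError (object arrays do not expose a buffer), so the sketch hashes via pd.util.hash_pandas_object, and __eq__ gains an isinstance guard.

    import hashlib
    from functools import lru_cache

    import pandas as pd

    class HashableDataFrame:
        """Wraps a DataFrame so it can serve as an lru_cache key."""

        def __init__(self, df, hash_col):
            self.df = df
            self.hash_col = hash_col
            # Hash one column as the frame's identity. hash_pandas_object
            # yields uint64s whose raw buffer md5 can digest; md5 over the
            # plain .values of a string column would raise TypeError.
            digest = hashlib.md5(
                pd.util.hash_pandas_object(df[hash_col], index=False).values
            ).hexdigest()
            self.hash = int(digest, 16)

        def __hash__(self):
            return self.hash

        def __eq__(self, other):
            return isinstance(other, HashableDataFrame) and self.hash == other.hash

    @lru_cache(maxsize=1000)
    def get_speaker_prompts(speaker, hash_ds):
        # Filtered once per (speaker, frame) pair; repeat calls hit the cache.
        ds = hash_ds.df
        return tuple(ds.loc[ds["speaker"] == speaker, "audio"])

    df = pd.DataFrame({
        "speaker": ["a", "a", "b"],
        "audio": ["a1.wav", "a2.wav", "b1.wav"],
    })
    hdf = HashableDataFrame(df, "audio")
    assert get_speaker_prompts("a", hdf) == ("a1.wav", "a2.wav")
    get_speaker_prompts("a", HashableDataFrame(df, "audio"))  # same key, cache hit
    assert get_speaker_prompts.cache_info().hits == 1

Because the cache key is the (speaker, wrapper) pair and equal column values hash identically, a fresh wrapper around the same frame still reuses cached entries, as the final assertion shows.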