import os
from os.path import expanduser
import shutil
from soundfile import LibsndfileError
from datasets import load_dataset, DatasetDict, Audio
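# run-time configuration is taken from environment variables:
# DIRECTION (language pair), DATASET_ID (subset index), NUM_PROC (worker count), HF_ORG (Hub organization)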
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
dataset_id = os.getenv("DATASET_ID", 0)
num_proc = int(os.getenv("NUM_PROC", 1))
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
audio_loader = Audio()
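# choose the speaker embedding backend; each backend class exposes get_speaker_embedding(array, sampling_rate)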
se_model = os.getenv("SE_MODEL", "metavoice")
if se_model == "metavoice":
    from speaker_embedding_metavoice import MetaVoiceSE
    speaker_embedder = MetaVoiceSE()
elif se_model == "pyannote":
    from speaker_embedding_pyannote import PyannoteSE
    speaker_embedder = PyannoteSE()
elif se_model == "w2vbert-600m":
    from speaker_embedding_hf import Wav2VecEmbedding
    speaker_embedder = Wav2VecEmbedding()
elif se_model == "xlsr-2b":
    from speaker_embedding_hf import XLSR2BEmbedding
    speaker_embedder = XLSR2BEmbedding()
elif se_model == "hubert-xl":
    from speaker_embedding_hf import HuBERTXLEmbedding
    speaker_embedder = HuBERTXLEmbedding()
else:
    raise ValueError(f"unknown speaker embedding: {se_model}")
def error_file(example):
    """Return False when any side of the pair contains audio that libsndfile fails to decode."""
    for side in sides:
        try:
            audio_loader.decode_example(example[f"{side}.audio"])
        except LibsndfileError:
            return False
    return True
print(f"Num examples: {len(dataset)}")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())
print(f"Num examples (after filtering): {len(dataset)}")
def speaker_embedding(example):
    """Attach a speaker embedding per audio side; multi-frame embeddings are mean-pooled and also kept in full."""
    for side in sides:
        embedding = speaker_embedder.get_speaker_embedding(
            example[f"{side}.audio"]["array"], example[f"{side}.audio"]["sampling_rate"]
        )
        if embedding.ndim == 1:
            example[f"{side}.audio.speaker_embedding"] = embedding
        else:
            example[f"{side}.audio.speaker_embedding"] = embedding.mean(0)
            example[f"{side}.audio.speaker_embedding.full"] = embedding
    return example
dataset = dataset.map(
    function=speaker_embedding,
    remove_columns=[f"{s}.audio" for s in sides]
    + [f"{s}.url" for s in sides]
    + [f"{s}.duration_start" for s in sides]
    + [f"{s}.duration_end" for s in sides],
    num_proc=num_proc,
    desc="attach speaker embedding dataset"
)
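# upload the per-subset result to the Hub under a dataset name that encodes the embedding model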
DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.speaker-embedding.{se_model}", config_name=f"subset_{dataset_id}")
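# remove the local datasets cache for this subset to free disk space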
cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)