init
- attach_speaker_embedding_s2s.py +15 -1
- speaker_embedding_clap.py +0 -35
- speaker_embedding_hf.py +72 -0
attach_speaker_embedding_s2s.py
CHANGED
@@ -20,6 +20,15 @@ if se_model == "metavoice":
 elif se_model == "pyannote":
     from speaker_embedding_pyannote import PyannoteSE
     speaker_embedder = PyannoteSE()
+elif se_model == "w2vbert-600m":
+    from speaker_embedding_hf import Wav2VecEmbedding
+    speaker_embedder = Wav2VecEmbedding()
+elif se_model == "xlsr-2b":
+    from speaker_embedding_hf import XLSR2BEmbedding
+    speaker_embedder = XLSR2BEmbedding()
+elif se_model == "hubert-xl":
+    from speaker_embedding_hf import HuBERTXLEmbedding
+    speaker_embedder = HuBERTXLEmbedding()
 else:
     raise ValueError(f"unknown speaker embedding: {se_model}")
 
@@ -44,9 +53,14 @@ print(f"Num examples (after filtering): {len(dataset)}")
 
 def speaker_embedding(example):
     for side in sides:
-        example[f"{side}.audio.speaker_embedding"] = speaker_embedder.get_speaker_embedding(
+        embedding = speaker_embedder.get_speaker_embedding(
             example[f"{side}.audio"]["array"], example[f"{side}.audio"]["sampling_rate"]
         )
+        if embedding.ndim == 1:
+            example[f"{side}.audio.speaker_embedding"] = embedding
+        else:
+            example[f"{side}.audio.speaker_embedding"] = embedding.mean(0)
+            example[f"{side}.audio.speaker_embedding.full"] = embedding
     return example
 
 
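The updated map function stores a pooled vector under `{side}.audio.speaker_embedding` and, for frame-level encoders, keeps the full matrix under `{side}.audio.speaker_embedding.full`. A standalone sketch of that pooling rule (not from the commit; dummy arrays stand in for real model outputs):

import numpy as np

# Dummy stand-ins for speaker_embedder.get_speaker_embedding outputs:
# frame-level HF encoders return (frames, dim), while utterance-level
# embedders (e.g. pyannote-style) return a single (dim,) vector.
for embedding in (np.random.rand(120, 1024), np.random.rand(512)):
    if embedding.ndim == 1:
        pooled, full = embedding, None               # already one vector per utterance
    else:
        pooled, full = embedding.mean(0), embedding  # mean-pool frames into one vector
    print(pooled.shape)                              # (1024,) then (512,)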
speaker_embedding_clap.py
DELETED
@@ -1,35 +0,0 @@
-"""CLAP embedding.
-- feature dimension: 512
-- source: https://huggingface.co/laion/larger_clap_music_and_speech
-"""
-from typing import Optional
-
-import torch
-import librosa
-import numpy as np
-from transformers import ClapModel, ClapProcessor
-
-
-class ClapSE:
-    def __init__(self, ckpt: str = "laion/larger_clap_music_and_speech"):
-        self.model = ClapModel.from_pretrained(ckpt)
-        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.model.to(self.device)
-        self.model.eval()
-        self.processor = ClapProcessor.from_pretrained(ckpt)
-
-    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
-        if sampling_rate != self.processor.feature_extractor.sampling_rate:
-            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.feature_extractor.sampling_rate)
-        inputs = self.processor(
-            audios=wav, sampling_rate=self.processor.feature_extractor.sampling_rate, return_tensors="pt"
-        )
-        with torch.no_grad():
-            outputs = self.model.get_audio_features(**{k: v.to(self.device) for k, v in inputs.items()})
-        return outputs.cpu().numpy()[0]
-
-
-class ClapGeneralSE(ClapSE):
-
-    def __init__(self):
-        super().__init__(ckpt="laion/larger_clap_general")
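For reference, the removed class exposed the same `get_speaker_embedding(wav, sampling_rate)` interface as the new HF embedders, so callers needed no signature change. A minimal pre-deletion usage sketch (the dummy waveform and the 48 kHz rate, CLAP's native sampling rate, are illustrative):

import numpy as np
from speaker_embedding_clap import ClapSE  # only importable before this commit

se = ClapSE()
wav = np.random.rand(3 * 48_000).astype(np.float32)  # 3 s of dummy audio
emb = se.get_speaker_embedding(wav, sampling_rate=48_000)
print(emb.shape)  # (512,), matching the docstring's feature dimension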
speaker_embedding_hf.py
ADDED
@@ -0,0 +1,72 @@
+"""Meta's w2vBERT based speaker embedding."""
+from typing import Optional
+
+import torch
+import librosa
+import numpy as np
+from transformers import AutoModel, AutoFeatureExtractor
+
+
+############
+# W2V BERT #
+############
+class W2VBERTEmbedding:
+    def __init__(self, ckpt: str = "facebook/w2v-bert-2.0"):
+        self.processor = AutoFeatureExtractor.from_pretrained(ckpt)
+        self.model = AutoModel.from_pretrained(ckpt)
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+        self.model.eval()
+
+    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+        # audio file is decoded on the fly
+        if sampling_rate != self.processor.sampling_rate:
+            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.sampling_rate)
+        inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
+        with torch.no_grad():
+            outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
+        return outputs.last_hidden_state.cpu().numpy()[0]
+
+
+##########
+# HuBERT #
+##########
+class HuBERTXLEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/hubert-xlarge-ll60k")
+
+
+class HuBERTLargeEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/hubert-large-ll60k")
+
+
+class HuBERTBaseEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/hubert-base-ls960")
+
+
+###########
+# wav2vec #
+###########
+class Wav2VecEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-large-xlsr-53")
+
+
+#########
+# XLS-R #
+#########
+class XLSR2BEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-2b")
+
+
+class XLSR1BEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-1b")
+
+
+class XLSR300MEmbedding(W2VBERTEmbedding):
+    def __init__(self):
+        super().__init__("facebook/wav2vec2-xls-r-300m")