init
- experiment_cache/.DS_Store +0 -0
- experiment_cache/figure/2d.latent_space.clap_general_se.expresso.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.clap_general_se.expresso.style.png +2 -2
- experiment_cache/figure/2d.latent_space.clap_general_se.voxceleb1-test-split.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.clap_se.expresso.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.clap_se.expresso.style.png +2 -2
- experiment_cache/figure/2d.latent_space.clap_se.voxceleb1-test-split.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.meta_voice_se.expresso.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.meta_voice_se.expresso.style.png +2 -2
- experiment_cache/figure/2d.latent_space.meta_voice_se.voxceleb1-test-split.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.pyannote_se.expresso.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.pyannote_se.expresso.style.png +2 -2
- experiment_cache/figure/2d.latent_space.pyannote_se.voxceleb1-test-split.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.w2v_bert_se.expresso.speaker_id.png +2 -2
- experiment_cache/figure/2d.latent_space.w2v_bert_se.expresso.style.png +2 -2
- experiment_cache/figure/2d.latent_space.w2v_bert_se.voxceleb1-test-split.speaker_id.png +2 -2
- experiment_speaker_verification.py +37 -31
- model_xls.py +29 -0
- test.py +20 -12
experiment_cache/.DS_Store
ADDED
Binary file (8.2 kB)
experiment_cache/figure/*.png (15 latent-space figures, listed above)
CHANGED
All 15 figures are Git LFS-tracked images; each change updates the LFS pointer (new file hash and size).
experiment_speaker_verification.py
CHANGED
@@ -18,6 +18,7 @@ from model_meta_voice import MetaVoiceSE
 from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
 from model_clap import ClapSE, ClapGeneralSE
+from model_xls import XLSRSE


 def get_embedding(model_class, model_name: str, dataset_name: str, data_split: str):
@@ -97,9 +98,9 @@ def cluster_embedding(model_name, dataset_name, label_name: str):
     plt.gca().set_aspect('equal', 'datalim')
     plt.legend(handles=scatter.legend_elements(num=len(label_type))[0],
                labels=label_type,
-               bbox_to_anchor=(1.
+               bbox_to_anchor=(1.04, 1),
                borderaxespad=0,
-               loc='
+               loc='upper left',
                ncol=3 if len(label2id) > 12 else 1)
     plt.savefig(figure_path, bbox_inches='tight', dpi=600)

@@ -115,35 +116,40 @@ def analyze_embedding(model_name: str, dataset_name: str, n_shot: int = 5, n_cro


 if __name__ == '__main__':
-    get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
-    get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
-    get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
-    get_embedding(ClapSE, "clap_se", "asahi417/voxceleb1-test-split", "test")
-    get_embedding(ClapGeneralSE, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
-
-
-    get_embedding(
-    get_embedding(
-    get_embedding(
-    get_embedding(
-
-
-
-    cluster_embedding("
-    cluster_embedding("
-    cluster_embedding("
-
-    cluster_embedding("
-    cluster_embedding("
-
-    cluster_embedding("
-    cluster_embedding("
-
-    cluster_embedding("
-    cluster_embedding("
-    cluster_embedding("
-
-    cluster_embedding("
+    # get_embedding(MetaVoiceSE, "meta_voice_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(PyannoteSE, "pyannote_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(W2VBertSE, "w2v_bert_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(ClapSE, "clap_se", "asahi417/voxceleb1-test-split", "test")
+    # get_embedding(ClapGeneralSE, "clap_general_se", "asahi417/voxceleb1-test-split", "test")
+    get_embedding(XLSRSE, "xlsr_se", "asahi417/voxceleb1-test-split", "test")
+
+    # get_embedding(MetaVoiceSE, "meta_voice_se", "ylacombe/expresso", "train")
+    # get_embedding(PyannoteSE, "pyannote_se", "ylacombe/expresso", "train")
+    # get_embedding(W2VBertSE, "w2v_bert_se", "ylacombe/expresso", "train")
+    # get_embedding(ClapSE, "clap_se", "ylacombe/expresso", "train")
+    # get_embedding(ClapGeneralSE, "clap_general_se", "ylacombe/expresso", "train")
+    get_embedding(XLSRSE, "xlsr_se", "ylacombe/expresso", "train")
+
+    # cluster_embedding("meta_voice_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    # cluster_embedding("pyannote_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    # cluster_embedding("w2v_bert_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    # cluster_embedding("clap_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    # cluster_embedding("clap_general_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    cluster_embedding("xlsr_se", "asahi417/voxceleb1-test-split", "speaker_id")
+    #
+    # cluster_embedding("meta_voice_se", "ylacombe/expresso", "speaker_id")
+    # cluster_embedding("pyannote_se", "ylacombe/expresso", "speaker_id")
+    # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "speaker_id")
+    # cluster_embedding("clap_se", "ylacombe/expresso", "speaker_id")
+    # cluster_embedding("clap_general_se", "ylacombe/expresso", "speaker_id")
+    cluster_embedding("xlsr_se", "ylacombe/expresso", "speaker_id")
+    #
+    # cluster_embedding("meta_voice_se", "ylacombe/expresso", "style")
+    # cluster_embedding("pyannote_se", "ylacombe/expresso", "style")
+    # cluster_embedding("w2v_bert_se", "ylacombe/expresso", "style")
+    # cluster_embedding("clap_se", "ylacombe/expresso", "style")
+    # cluster_embedding("clap_general_se", "ylacombe/expresso", "style")
+    cluster_embedding("xlsr_se", "ylacombe/expresso", "style")


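The legend change above pins the legend outside the plot area. A minimal, self-contained sketch of that matplotlib pattern (not part of the commit; the data and figure name here are made up):

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

points = np.random.rand(60, 2)          # stand-in for the 2-D latent space
labels = np.random.randint(0, 3, 60)    # stand-in for speaker/style labels
scatter = plt.scatter(points[:, 0], points[:, 1], c=labels)
plt.gca().set_aspect('equal', 'datalim')
# bbox_to_anchor=(1.04, 1) with loc='upper left' anchors the legend's upper-left
# corner just outside the right edge of the axes, as in cluster_embedding above.
plt.legend(handles=scatter.legend_elements(num=3)[0],
           labels=["cluster 0", "cluster 1", "cluster 2"],
           bbox_to_anchor=(1.04, 1),
           borderaxespad=0,
           loc='upper left',
           ncol=1)
# bbox_inches='tight' grows the saved canvas so the outside legend is not clipped.
plt.savefig("legend_demo.png", bbox_inches='tight', dpi=150)

Without bbox_inches='tight', a legend anchored outside the axes would be cut off in the saved PNG.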
model_xls.py
ADDED
@@ -0,0 +1,29 @@
+"""Meta's XLS-R based speaker embedding.
+- feature dimension: 768
+- source: https://huggingface.co/facebook/wav2vec2-large-xlsr-53
+"""
+from typing import Optional
+
+import torch
+import librosa
+import numpy as np
+from transformers import AutoFeatureExtractor, AutoModelForPreTraining
+
+
+class XLSRSE:
+    def __init__(self):
+        self.processor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
+        self.model = AutoModelForPreTraining.from_pretrained("facebook/wav2vec2-large-xlsr-53")
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+        self.model.eval()
+
+    def get_speaker_embedding(self, wav: np.ndarray, sampling_rate: Optional[int] = None) -> np.ndarray:
+        # audio file is decoded on the fly
+        if sampling_rate != self.processor.sampling_rate:
+            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.processor.sampling_rate)
+        inputs = self.processor(wav, sampling_rate=self.processor.sampling_rate, return_tensors="pt")
+        with torch.no_grad():
+            outputs = self.model(**{k: v.to(self.device) for k, v in inputs.items()})
+        return outputs.projected_states.mean(1).cpu().numpy()[0]
+        # return outputs.projected_quantized_states.mean(1).cpu().numpy()[0]
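A minimal usage sketch for the new XLSRSE class (the class, its get_speaker_embedding signature, and sample.wav come from this commit; the rest is illustrative):

import librosa
from model_xls import XLSRSE

model = XLSRSE()
# librosa.load resamples to 22,050 Hz by default; get_speaker_embedding then
# resamples to the feature extractor's 16 kHz before running the model.
wav, sr = librosa.load("sample.wav")
embedding = model.get_speaker_embedding(wav, sr)
print(embedding.shape)  # (768,) according to the module docstring

Mean-pooling projected_states over time yields one fixed-size utterance-level vector; the commented-out last line in model_xls.py is the quantized alternative.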
test.py
CHANGED
@@ -3,26 +3,34 @@ from model_clap import ClapSE
 from model_meta_voice import MetaVoiceSE
 from model_pyannote_embedding import PyannoteSE
 from model_w2v_bert import W2VBertSE
+from model_xls import XLSRSE


 def test():
     wav, sr = librosa.load("sample.wav")
+    print("XLS-R")
+    model = XLSRSE()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)
+    model = ClapSE()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)
     print("CLAP")
     model = ClapSE()
     v = model.get_speaker_embedding(wav, sr)
     print(v.shape)
-
-
-
-
-
-
-
-
-
-
-
-
+    print("MetaVoiceSE")
+    model = MetaVoiceSE()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)
+    print("PyannoteSE")
+    model = PyannoteSE()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)
+    print("W2VBertSE")
+    model = W2VBertSE()
+    v = model.get_speaker_embedding(wav, sr)
+    print(v.shape)


 if __name__ == '__main__':
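test.py now smoke-tests every embedder on sample.wav. As an illustration of how such embeddings are typically scored for speaker verification (this helper and the threshold are assumptions, not part of the commit):

import numpy as np
import librosa
from model_xls import XLSRSE

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # standard cosine score between two utterance-level embeddings
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

model = XLSRSE()
wav_a, sr_a = librosa.load("sample.wav")
wav_b, sr_b = librosa.load("sample.wav")  # stand-in for a second utterance
score = cosine_similarity(model.get_speaker_embedding(wav_a, sr_a),
                          model.get_speaker_embedding(wav_b, sr_b))
print(f"cosine similarity: {score:.3f}")
# A decision threshold (e.g. accept if score > 0.5) would be tuned on held-out trials.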