|
import librosa |
|
from model_clap import CLAPEmbedding |
|
from model_meta_voice import MetaVoiceEmbedding |
|
from model_pyannote_embedding import PyannoteEmbedding |
|
from model_speaker_embedding import W2VBERTEmbedding, XLSR300MEmbedding, HuBERTXLEmbedding |
|
|
|
|
|
def test():
    """Smoke-test every speaker-embedding model on one sample file.

    Loads ``sample.wav`` once, then instantiates each embedding model in
    turn, computes an embedding for the same audio, and prints the label
    followed by the embedding's shape. This is a manual sanity check --
    no assertions are made on the outputs.
    """
    # librosa.load resamples to its default sr (22050 Hz) unless told
    # otherwise; each model receives the same (wav, sr) pair.
    wav, sr = librosa.load("sample.wav")

    # (label, constructor) pairs, in the original print order. Models are
    # instantiated one at a time so only a single model is resident at once
    # (the previous one becomes collectable when `model` is rebound).
    models = [
        ("XLS-R", XLSR300MEmbedding),
        ("CLAP", CLAPEmbedding),
        ("MetaVoiceSE", MetaVoiceEmbedding),
        ("PyannoteSE", PyannoteEmbedding),
        ("W2VBertSE", W2VBERTEmbedding),
        ("huBERT", HuBERTXLEmbedding),
    ]
    for label, model_cls in models:
        print(label)
        model = model_cls()
        v = model.get_speaker_embedding(wav, sr)
        print(v.shape)
|
|
|
|
|
# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    test()
|
|