from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch

print(torch.cuda.is_available())  # Should return True if a GPU is available

synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts")

# SpeechT5 needs a speaker embedding (x-vector) to select a voice.
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
# You can replace this embedding with your own as well.

speech = synthesiser("Hello, my dog is cooler than you!", forward_params={"speaker_embeddings": speaker_embedding})
sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
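# --- Optional sketch (not part of the original snippet) ---------------------
# The comment above says you can replace the dataset embedding with your own.
# One way to do that, under the assumption that you have SpeechBrain installed
# (`pip install speechbrain`) and a 16 kHz mono recording at the hypothetical
# path "my_voice.wav", is to compute an x-vector with the
# speechbrain/spkrec-xvect-voxceleb speaker encoder. Treat this as a sketch,
# not the snippet author's method.
from speechbrain.pretrained import EncoderClassifier  # moved to speechbrain.inference in SpeechBrain >= 1.0

encoder = EncoderClassifier.from_hparams(
    source="speechbrain/spkrec-xvect-voxceleb",
    savedir="pretrained_models/spkrec-xvect-voxceleb",
)

# soundfile returns (samples, sample_rate); the recording is assumed to already
# be 16 kHz mono, otherwise resample/downmix it first.
waveform, sample_rate = sf.read("my_voice.wav")
with torch.no_grad():
    # encode_batch expects a (batch, time) tensor and returns (batch, 1, 512)
    xvector = encoder.encode_batch(torch.tensor(waveform, dtype=torch.float32).unsqueeze(0))
    xvector = torch.nn.functional.normalize(xvector, dim=2).squeeze(0)  # shape (1, 512)

# Drop-in replacement for the dataset embedding used above:
# speech = synthesiser("Hello!", forward_params={"speaker_embeddings": xvector})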