Matthijs Hollemans committed
Commit 24c15f3 • 1 Parent(s): 0ae1e38

add application
.gitignore ADDED
@@ -0,0 +1,4 @@
+ *.pyc
+ __pycache__/
+ .DS_Store
+
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
- title: Speecht5 Vc Demo
+ title: SpeechT5 Voice Conversion Demo
- emoji: 😻
+ emoji: 👩‍🎤
  colorFrom: yellow
- colorTo: gray
+ colorTo: blue
  sdk: gradio
  sdk_version: 3.17.0
  app_file: app.py
app.py ADDED
@@ -0,0 +1,130 @@
+ import gradio as gr
+ import librosa
+ import numpy as np
+ import torch
+
+ from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan
+
+ checkpoint = "Matthijs/speecht5_vc"
+ processor = SpeechT5Processor.from_pretrained(checkpoint)
+ model = SpeechT5ForSpeechToSpeech.from_pretrained(checkpoint)
+ vocoder = SpeechT5HifiGan.from_pretrained("Matthijs/speecht5_hifigan")
+
+
+ speaker_embeddings = {
+     "BDL": "spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy",
+     "CLB": "spkemb/cmu_us_clb_arctic-wav-arctic_a0144.npy",
+     "RMS": "spkemb/cmu_us_rms_arctic-wav-arctic_b0353.npy",
+     "SLT": "spkemb/cmu_us_slt_arctic-wav-arctic_a0508.npy",
+ }
+
+
+ def process_audio(sampling_rate, waveform):
+     # convert from int16 to floating point in [-1, 1]
+     waveform = waveform / 32768.0
+
+     # convert to mono if stereo
+     if len(waveform.shape) > 1:
+         waveform = librosa.to_mono(waveform.T)
+
+     # resample to 16 kHz if necessary
+     if sampling_rate != 16000:
+         waveform = librosa.resample(waveform, orig_sr=sampling_rate, target_sr=16000)
+
+     # make PyTorch tensor
+     waveform = torch.tensor(waveform)
+     return waveform
+
+
+ def predict(audio, mic_audio, speaker):
+     # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels));
+     # the microphone recording takes precedence over the uploaded file
+     if mic_audio is not None:
+         sampling_rate, waveform = mic_audio
+     elif audio is not None:
+         sampling_rate, waveform = audio
+     else:
+         # no input: return an empty 16 kHz clip
+         return (16000, np.zeros(0).astype(np.int16))
+
+     waveform = process_audio(sampling_rate, waveform)
+     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
+
+     # the radio choices look like "BDL (male)"; the first three
+     # characters are the key into the speaker_embeddings dict
+     speaker_embedding = np.load(speaker_embeddings[speaker[:3]])
+     speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
+
+     speech = model.generate_speech(inputs["input_values"], speaker_embedding, vocoder=vocoder)
+
+     # scale back to int16 for Gradio's audio output
+     speech = (speech.numpy() * 32767).astype(np.int16)
+     return (16000, speech)
+
+
+ title = "SpeechT5: Voice Conversion"
+
+ description = """
+ The <b>SpeechT5</b> model is pre-trained on text as well as speech inputs, with targets that are also a mix of text and speech.
+ By pre-training on text and speech at the same time, it learns unified representations for both, resulting in improved modeling capabilities.
+
+ SpeechT5 can be fine-tuned for different speech tasks. This space demonstrates the <b>speech-to-speech</b> checkpoint for (American) English
+ voice conversion.
+
+ See also the <a href="https://huggingface.co/spaces/Matthijs/speecht5-asr-demo">speech recognition (ASR) demo</a>
+ and the <a href="https://huggingface.co/spaces/Matthijs/speecht5-tts-demo">text-to-speech (TTS) demo</a>.
+
+ <b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
+ being passed into the model. The output is a mel spectrogram, which is converted to a mono 16 kHz waveform by the HiFi-GAN vocoder.
+ Because the model applies dropout even at inference time, each attempt will give slightly different results.
+ """
+
+ article = """
+ <div style='margin:20px auto;'>
+
+ <p>References: <a href="https://arxiv.org/abs/2110.07205">SpeechT5 paper</a> |
+ <a href="https://github.com/microsoft/SpeechT5/">original GitHub</a> |
+ <a href="https://huggingface.co/mechanicalsea/speecht5-vc">original weights</a></p>
+
+ <pre>
+ @article{Ao2021SpeechT5,
+   title={SpeechT5: Unified-Modal Encoder-Decoder Pre-training for Spoken Language Processing},
+   author={Junyi Ao and Rui Wang and Long Zhou and Chengyi Wang and Shuo Ren and Yu Wu and Shujie Liu and Tom Ko and Qing Li and Yu Zhang and Zhihua Wei and Yao Qian and Jinyu Li and Furu Wei},
+   eprint={2110.07205},
+   archivePrefix={arXiv},
+   primaryClass={eess.AS},
+   year={2021}
+ }
+ </pre>
+
+ <p>Example sound credits:</p>
+
+ <ul>
+   <li>"Hmm, I don't know" from <a href="https://freesound.org/people/InspectorJ/sounds/519189/">InspectorJ</a> (CC BY 4.0 license)</li>
+   <li>"Henry V" excerpt from <a href="https://freesound.org/people/acclivity/sounds/24096/">acclivity</a> (CC BY-NC 4.0 license)</li>
+   <li>"You can see it in the eyes" from <a href="https://freesound.org/people/JoyOhJoy/sounds/165348/">JoyOhJoy</a> (CC0 license)</li>
+   <li>"We yearn for time" from <a href="https://freesound.org/people/Sample_Me/sounds/610529/">Sample_Me</a> (CC0 license)</li>
+ </ul>
+
+ <p>Speaker embeddings were generated from <a href="http://www.festvox.org/cmu_arctic/">CMU ARCTIC</a> using <a href="https://huggingface.co/mechanicalsea/speecht5-vc/blob/main/manifest/utils/prep_cmu_arctic_spkemb.py">this script</a>.</p>
+
+ </div>
+ """
+
+ examples = [
+     ["examples/yearn_for_time.mp3", None, "BDL (male)"],
+     ["examples/henry5.mp3", None, "CLB (female)"],
+     ["examples/see_in_eyes.wav", None, "RMS (male)"],
+     ["examples/hmm_i_dont_know.wav", None, "SLT (female)"],
+ ]
+
+ gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Audio(label="Upload Speech", source="upload", type="numpy"),
+         gr.Audio(label="Record Speech", source="microphone", type="numpy"),
+         gr.Radio(label="Speaker", choices=["BDL (male)", "CLB (female)", "RMS (male)", "SLT (female)"], value="BDL (male)"),
+     ],
+     outputs=[
+         gr.Audio(label="Converted Speech", type="numpy"),
+     ],
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+ ).launch()
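
For reference, the inference path that app.py wraps can also be exercised outside of Gradio. A minimal sketch, assuming a local 16 kHz recording (`input.wav` is a hypothetical placeholder) plus the checkpoints and speaker-embedding files added in this commit:

```python
import librosa
import numpy as np
import soundfile as sf
import torch

from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan

processor = SpeechT5Processor.from_pretrained("Matthijs/speecht5_vc")
model = SpeechT5ForSpeechToSpeech.from_pretrained("Matthijs/speecht5_vc")
vocoder = SpeechT5HifiGan.from_pretrained("Matthijs/speecht5_hifigan")

# librosa downmixes to mono and resamples, returning float32 in [-1, 1]
waveform, _ = librosa.load("input.wav", sr=16000, mono=True)  # hypothetical input file
inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")

# one of the CMU ARCTIC x-vectors shipped under spkemb/
speaker_embedding = np.load("spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy")
speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)

# generate a waveform directly by passing the vocoder to generate_speech
speech = model.generate_speech(inputs["input_values"], speaker_embedding, vocoder=vocoder)
sf.write("converted.wav", speech.numpy(), samplerate=16000)
```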
examples/henry5.mp3 ADDED
Binary file (375 kB)
examples/hmm_i_dont_know.wav ADDED
Binary file (203 kB)
examples/see_in_eyes.wav ADDED
Binary file (65.2 kB)
examples/yearn_for_time.mp3 ADDED
Binary file (56.3 kB)
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ git+https://github.com/hollance/transformers.git@speecht5
+ torch
+ torchaudio
+ soundfile
+ librosa
+ samplerate
+ resampy
+ sentencepiece
spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:215326eae3a428af8934c385fbe043b36c72849ca17d1d013adeb189e6bd6962
+ size 2176
spkemb/cmu_us_clb_arctic-wav-arctic_a0144.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf67b36c47edfb1851466a1dff081b436bc6809b5ebc12811d9df0c0d0f28d0e
+ size 2176
spkemb/cmu_us_rms_arctic-wav-arctic_b0353.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a49dac3e9c3a71a4dbca4c364233c7915ae6e0cb71b2ceaed97296231b95cb50
+ size 2176
spkemb/cmu_us_slt_arctic-wav-arctic_a0508.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f71ffadda3f3a4de079740a0b34963824dc644d9d5442283bd0a2b0d4f44ff0b
+ size 2176
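
Each of these .npy files appears to hold a single 512-dimensional float32 x-vector (2176 bytes = 128-byte .npy header + 512 × 4 bytes), matching the shape that app.py loads before the `unsqueeze(0)`. A quick sanity check, run from the repository root:

```python
import numpy as np

# load one of the speaker embeddings added in this commit
emb = np.load("spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy")
print(emb.shape, emb.dtype)  # expected: (512,) float32
```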