import argparse
import numpy as np
import pydub
import torch
import commons
import utils
from models import SynthesizerTrn
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text
from text.symbols import symbols
# Current version information
latest_version = "2.0"
def get_net_g(model_path: str, device: str, hps):
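    """Build the synthesizer network and load its weights from a checkpoint."""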
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
).to(device)
_ = net_g.eval()
_ = utils.load_checkpoint(model_path, net_g, None, skip_optimizer=True)
return net_g
def get_text(text, language_str, hps, device):
    # Implements get_text for the current model version.
norm_text, phone, tone, word2ph = clean_text(text, language_str)
phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
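    # When add_blank is set, intersperse a blank token (id 0) between all
    # symbols, following the VITS convention.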
if hps.data.add_blank:
phone = commons.intersperse(phone, 0)
tone = commons.intersperse(tone, 0)
language = commons.intersperse(language, 0)
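        # Each word now spans twice as many entries because of the interspersed
        # blanks; the extra leading blank is attributed to the first word.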
for i in range(len(word2ph)):
word2ph[i] = word2ph[i] * 2
word2ph[0] += 1
bert = get_bert(norm_text, word2ph, language_str, device)
del word2ph
    assert bert.shape[-1] == len(phone), f"BERT seq len {bert.shape[-1]} != {len(phone)}"
    # Route the computed BERT features to the slot for the requested language;
    # the other two slots receive zero placeholders of the same shape.
    # (Copy the features out *before* zeroing bert, otherwise the language's
    # own slot ends up holding the zero tensor.)
    if language_str == "ZH":
        sh_bert = torch.zeros(1024, len(phone))
        en_bert = torch.zeros(1024, len(phone))
    elif language_str == "SH":
        sh_bert = bert
        bert = torch.zeros(1024, len(phone))
        en_bert = torch.zeros(1024, len(phone))
    elif language_str == "EN":
        en_bert = bert
        bert = torch.zeros(1024, len(phone))
        sh_bert = torch.zeros(1024, len(phone))
    else:
        raise ValueError("language_str should be ZH, SH or EN")
assert bert.shape[-1] == len(phone), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
phone = torch.LongTensor(phone)
tone = torch.LongTensor(tone)
language = torch.LongTensor(language)
return bert, sh_bert, en_bert, phone, tone, language
def infer(
text,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
sid,
language,
hps,
net_g,
device,
):
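    """Synthesize one utterance and return it as a float32 numpy waveform."""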
bert, sh_bert, en_bert, phones, tones, lang_ids = get_text(text, language, hps, device)
with torch.no_grad():
x_tst = phones.to(device).unsqueeze(0)
tones = tones.to(device).unsqueeze(0)
lang_ids = lang_ids.to(device).unsqueeze(0)
bert = bert.to(device).unsqueeze(0)
sh_bert = sh_bert.to(device).unsqueeze(0)
en_bert = en_bert.to(device).unsqueeze(0)
x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
del phones
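        # Map the speaker name to its trained speaker id.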
speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
audio = (
net_g.infer(
x_tst,
x_tst_lengths,
speakers,
tones,
lang_ids,
bert,
sh_bert,
en_bert,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
)[0][0, 0]
.data.cpu()
.float()
.numpy()
)
del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
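        # Release cached GPU memory between utterances (no-op on CPU).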
torch.cuda.empty_cache()
return audio
def main():
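    # Example invocation (the script filename here is illustrative):
    #   python infer.py --config configs/config.json --model_path models/G_1000.pth --device cuda:0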
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='configs/config.json')
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--model_path', type=str, default='models/G_1000.pth')
parser.add_argument('--output', type=str, default='sample')
args = parser.parse_args()
hps = utils.get_hparams_from_file(args.config)
net_g = get_net_g(args.model_path, device=args.device, hps=hps)
    # Sampling hyperparameters: sdp_ratio blends the stochastic duration
    # predictor with the deterministic one, noise_scale and noise_scale_w
    # control sampling variance, and length_scale stretches speech duration.
    sdp_ratio = 0
    noise_scale = 0.667
    noise_scale_w = 0.8
    length_scale = 0.9
    def do_sample(texts, sid, export_tag):
        # Synthesize each (text, language) chunk, concatenate the waveforms,
        # peak-normalize into the 16-bit range, and export as MP3.
        audio_data = np.array([], dtype=np.float32)
        for sub_text, language in texts:
            sub_audio_data = infer(
                sub_text, sdp_ratio, noise_scale, noise_scale_w,
                length_scale, sid, language, hps, net_g, args.device,
            )
            audio_data = np.concatenate((audio_data, sub_audio_data))
        audio_data = audio_data / np.abs(audio_data).max()
        audio_data = (audio_data * 32767).astype(np.int16)
        sound = pydub.AudioSegment(
            audio_data.tobytes(),
            frame_rate=hps.data.sampling_rate,
            sample_width=audio_data.dtype.itemsize,
            channels=1,
        )
        export_filename = args.output + export_tag + sid + '.mp3'
        sound.export(export_filename, format='mp3')
        print(export_filename)
text = [('我觉得有点贵。', 'ZH'), ('so expensive, can they?', 'EN'), ('哈巨,吃不消它。', 'SH')]
    for sid in ('小庄', '小嘟', 'Jane', '小贝', '老克勒', '美琳'):
        do_sample(text, sid, '_1_')
if __name__ == "__main__":
main()