novateur committed on
Commit 26b7fce · verified · 1 Parent(s): df714f0

Upload 2 files

Files changed (2)
  1. wavvae 2.zip +3 -0
  2. wavvae3.py +268 -0
wavvae 2.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e65acb0f186a7d25a66b8211773cf5a1030d1edd972f11882cf5ca2135521dd
+ size 43138
wavvae3.py ADDED
@@ -0,0 +1,268 @@
+ import argparse
+ import filecmp
+ import multiprocessing
+ import os
+ import subprocess
+ import librosa
+ from functools import partial
+ from multiprocessing import Pool, Process
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.distributed as dist
+ from torch.optim import AdamW
+
+ from modules.vocoder.commons.stft_loss import MultiResolutionSTFTLoss
+ from modules.vocoder.hifigan.hifigan import MultiPeriodDiscriminator, MultiScaleDiscriminator, \
+     generator_loss, feature_loss, discriminator_loss
+ from modules.vocoder.hifigan.mel_utils import mel_spectrogram
+ from modules.vocoder.univnet.mrd import MultiResolutionDiscriminator
+ from modules.tts.wavvae.decoder.wavvae_v3 import WavVAE_V3
+ from tasks.tts.utils.audio import torch_wav2spec
+ from tasks.tts.utils.audio.align import mel2token_to_dur
+ from utils.commons.ckpt_utils import load_ckpt
+ from utils.commons.hparams import hparams
+
+ from attrdict import AttrDict
+ from tasks.tts.dataset_mixin import TTSDatasetMixin
+ from utils.commons.base_task import BaseTask
+ from utils.commons.import_utils import import_module_bystr
+ from utils.nn.schedulers import WarmupSchedule, CosineSchedule
+
+
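+ # WavVAETask trains the WavVAE_V3 waveform VAE with a GAN-style objective:
+ # the generator step combines mel-reconstruction, adversarial, multi-resolution
+ # STFT and KL losses, while the discriminator step updates MPD/MSD (and optionally MRD).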
+ class WavVAETask(TTSDatasetMixin, BaseTask):
+     def __init__(self):
+         super().__init__()
+         self.dataset_cls = import_module_bystr(hparams['dataset_cls'])
+         self.val_dataset_cls = import_module_bystr(hparams['val_dataset_cls'])
+         self.processer_fn = import_module_bystr(hparams['processer_fn'])
+         self.build_fast_dataloader = import_module_bystr(hparams['build_fast_dataloader'])
+         self.hparams = hparams
+         self.config = AttrDict(hparams)
+
+         # Online mel extraction on the GPU
+         sample_rate = hparams["audio_sample_rate"]
+         fft_size = hparams["win_size"]
+         win_size = hparams["win_size"]
+         hop_size = hparams["hop_size"]
+         num_mels = hparams["audio_num_mel_bins"]
+         fmin = hparams["fmin"]
+         fmax = hparams["fmax"]
+         mel_basis = librosa.filters.mel(
+             sr=sample_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax
+         )
+         self.torch_wav2spec_ = partial(
+             torch_wav2spec, mel_basis=mel_basis, fft_size=fft_size, hop_size=hop_size, win_length=win_size,
+         )
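+         # The librosa mel filterbank is built once here; torch_wav2spec_ then maps
+         # waveform tensors to spectrograms on the fly during training.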
+
+     def build_model(self):
+         self.model_gen = WavVAE_V3(hparams=hparams)
+
+         self.model_disc = torch.nn.ModuleDict()
+         self.model_disc['mpd'] = MultiPeriodDiscriminator(hparams['mpd'], use_cond=hparams['use_cond_disc'])
+         self.model_disc['msd'] = MultiScaleDiscriminator(use_cond=hparams['use_cond_disc'])
+         if hparams['use_mrd']:
+             self.model_disc['mrd'] = MultiResolutionDiscriminator(hparams)
+         self.stft_loss = MultiResolutionSTFTLoss()
+
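+         # Warm-start from pretrained checkpoints: the encoder from an earlier WavVAE run,
+         # the decoder and discriminators from a MelGAN-NSF vocoder run (judging by the checkpoint names).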
+         load_ckpt(self.model_gen.encoder, './checkpoints/1231_megatts3_wavvae_v2_25hz', 'model.module.encoder', strict=False)
+         load_ckpt(self.model_gen.decoder, './checkpoints/1117_melgan-nsf_full_1', 'model_gen', force=True, strict=True)
+         load_ckpt(self.model_disc, './checkpoints/1117_melgan-nsf_full_1', 'model_disc', force=True, strict=True)
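+         # NOTE: the list below assumes hparams['use_mrd'] is enabled; otherwise
+         # self.model_disc has no 'mrd' entry and this raises a KeyError.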
+         return {'trainable': [self.model_gen, self.model_disc['mpd'], self.model_disc['msd'], self.model_disc['mrd']], 'others': []}
+
+     def load_model(self):
+         if hparams.get('load_ckpt', '') != '':
+             load_ckpt(self.model, hparams['load_ckpt'], 'model', strict=False)
+
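+     # Optimizer index 0 updates the generator (the VAE); index 1 updates the
+     # discriminator ensemble. _training_step is dispatched on the same index.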
+     def build_optimizer(self):
+         optimizer_gen = torch.optim.AdamW(self.model_gen.parameters(), lr=hparams['lr'],
+                                           betas=[hparams['adam_b1'], hparams['adam_b2']])
+         optimizer_disc = torch.optim.AdamW(self.model_disc.parameters(),
+                                            lr=hparams.get('disc_lr', hparams['lr']),
+                                            betas=[hparams['adam_b1'], hparams['adam_b2']])
+         return [optimizer_gen, optimizer_disc]
+
+     def build_scheduler(self, optimizer):
+         return None
+
+     def _training_step(self, sample, batch_idx, optimizer_idx):
+         log_outputs = {}
+         loss_weights = {}
+         sample['wavs'] = sample['wavs'].float()
+         # return None, {}
+
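+         # Every 100 steps, kill any stray process matching "voidgpu<idx>" on the
+         # visible devices (presumably placeholder processes that keep the GPUs busy).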
+         if self.global_step % 100 == 0:
+             devices = os.environ.get('CUDA_VISIBLE_DEVICES', '').split(",")
+             for d in devices:
+                 os.system(f'pkill -f "voidgpu{d}"')
+
+         y = sample['wavs']
+         loss_output = {}
+         if optimizer_idx == 0:
+             #######################
+             #      Generator      #
+             #######################
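+             # VAE forward pass: y_ is the reconstructed waveform and `posterior`
+             # the latent distribution used for the KL term below.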
+             y_, posterior = self.model_gen(y)
+             y = y.unsqueeze(1)
+             y_mel = mel_spectrogram(y.squeeze(1), hparams).transpose(1, 2)
+             y_hat_mel = mel_spectrogram(y_.squeeze(1), hparams).transpose(1, 2)
+             loss_output['mel'] = F.l1_loss(y_hat_mel, y_mel) * hparams['lambda_mel']
+             if self.training:
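+                 # Generator-side adversarial losses from the HiFi-GAN discriminators
+                 # (and optionally the UnivNet MRD); only computed during training.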
+                 _, y_p_hat_g, fmap_f_r, fmap_f_g = self.model_disc['mpd'](y, y_, None)
+                 _, y_s_hat_g, fmap_s_r, fmap_s_g = self.model_disc['msd'](y, y_, None)
+                 loss_output['a_p'] = generator_loss(y_p_hat_g) * hparams['lambda_adv'] * hparams.get('lambda_mpd', 1.0)
+                 loss_output['a_s'] = generator_loss(y_s_hat_g) * hparams['lambda_adv'] * hparams.get('lambda_msd', 1.0)
+                 if hparams['use_mrd']:
+                     y_r_hat_g = [x[1] for x in self.model_disc['mrd'](y_)]
+                     loss_output['a_r'] = generator_loss(y_r_hat_g) \
+                                          * hparams['lambda_adv'] * hparams.get('lambda_mrd', 1.0)
+             if hparams['use_ms_stft']:
+                 loss_output['sc'], loss_output['mag'] = self.stft_loss(y.squeeze(1), y_.squeeze(1))
+             loss_output['kl_loss'] = posterior.kl().mean() * hparams.get('lambda_kl', 1.0)
+             self.y_ = y_.detach()
+         else:
+             #######################
+             #    Discriminator    #
+             #######################
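+             # Discriminator update: real audio vs. the reconstruction cached from
+             # the most recent generator step (self.y_). Skipped outside training.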
+             if not self.training:
+                 return None
+             y = y.unsqueeze(1)
+             y_ = self.y_
+             # MPD
+             y_p_hat_r, y_p_hat_g, _, _ = self.model_disc['mpd'](y, y_.detach(), None)
+             loss_output['r_p'], loss_output['f_p'] = discriminator_loss(y_p_hat_r, y_p_hat_g)
+             # MSD
+             y_s_hat_r, y_s_hat_g, _, _ = self.model_disc['msd'](y, y_.detach(), None)
+             loss_output['r_s'], loss_output['f_s'] = discriminator_loss(y_s_hat_r, y_s_hat_g)
+             # MRD
+             if hparams['use_mrd']:
+                 y_r_hat_r = [x[1] for x in self.model_disc['mrd'](y)]
+                 y_r_hat_g = [x[1] for x in self.model_disc['mrd'](y_.detach())]
+                 loss_output['r_r'], loss_output['f_r'] = discriminator_loss(y_r_hat_r, y_r_hat_g)
+         total_loss = sum(loss_output.values())
+         loss_output['bs'] = sample['wavs'].shape[0]
+         return total_loss, loss_output
+
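+     # Validation plotting and audio logging below rely on helpers (predict_f0,
+     # plot_mel, self.vocoder, run_model) presumably provided by TTSDatasetMixin /
+     # BaseTask; they operate on mel outputs rather than raw waveforms.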
+     def save_valid_result(self, sample, batch_idx, model_out):
+         sr = hparams['audio_sample_rate']
+         mel_out = model_out.get('mel_out')
+         f0 = sample.get('f0')
+         f0_gt = sample.get('f0')
+         if f0 is not None:
+             f0_gt = f0_gt.cpu()[-1]
+         if mel_out is not None:
+             f0_pred = self.predict_f0(sample['mels'])
+             self.plot_mel(batch_idx, sample['mels'], mel_out, f0s={'f0': f0_pred, 'f0g': f0_gt})
+         # gt wav
+         if self.global_step <= hparams['valid_infer_interval']:
+             mel_gt = sample['mels'][-1].cpu()
+             f0 = self.predict_f0(sample['mels'][-1:])
+             wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0)
+             self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr)
+
+         if self.global_step >= 0:
+             # with gt duration
+             model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True)
+             # dur_info = self.get_plot_dur_info(sample, model_out)
+             # del dur_info['dur_pred']
+             dur_info = None
+
+             f0 = self.predict_f0(model_out['mel_out'])
+             wav_pred = self.vocoder.spec2wav(model_out['mel_out'][-1].cpu(), f0=f0)
+             self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr)
+             self.plot_mel(batch_idx, sample['mels'][-1:], model_out['mel_out'][-1], f'mel_gdur_{batch_idx}',
+                           dur_info=dur_info, f0s={'f0': f0, 'f0g': f0_gt})
+
+             # with pred duration
+             if not hparams['use_gt_dur'] and not hparams['use_gt_latent']:
+                 model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False)
+                 # dur_info = self.get_plot_dur_info(sample, model_out)
+                 dur_info = None
+                 f0 = self.predict_f0(model_out['mel_out'])
+                 self.plot_mel(
+                     batch_idx, sample['mels'], model_out['mel_out'][-1], f'mel_pdur_{batch_idx}',
+                     dur_info=dur_info, f0s={'f0': f0, 'f0g': f0_gt})
+                 wav_pred = self.vocoder.spec2wav(model_out['mel_out'][-1].cpu(), f0=f0)
+                 self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr)
+
+     def get_plot_dur_info(self, sample, model_out):
+         T_txt = sample['txt_tokens'].shape[1]
+         dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[-1]
+         dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
+         txt = self.token_encoder.decode(sample['txt_tokens'][-1].cpu().numpy())
+         txt = txt.split(" ")
+         return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
+
+     def on_before_optimization(self, opt_idx):
+         if opt_idx == 0:
+             nn.utils.clip_grad_norm_(self.model_gen.parameters(), hparams['generator_grad_norm'])
+         else:
+             nn.utils.clip_grad_norm_(self.model_disc.parameters(), hparams["discriminator_grad_norm"])
+
+     def to(self, device=None, dtype=None):
+         super().to(device=device, dtype=dtype)
+         # trainer doesn't move ema to device automatically, we do it manually
+         if hparams.get('use_ema', False):
+             self.ema.to(device=device, dtype=dtype)
+
+     def cuda(self, device):
+         super().cuda(device)
+         if hparams.get('use_ema', False):
+             self.ema.to(device=device)
+
+     @torch.no_grad()
+     def validation_step(self, sample, batch_idx):
+         infer_steps = self.hparams.get('infer_steps', 12)
+         outputs = self._validation_step(sample, batch_idx, infer_steps)
+         return outputs
+
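+     # _validation_step currently returns empty outputs; the commented block below
+     # looks like leftover latent/keypoint visualization code from a different task.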
+     def _validation_step(self, sample, batch_idx, infer_steps):
+         outputs = {}
+         if self.trainer.proc_rank == 0:
+             # self.vae.eval()
+             # with torch.inference_mode():
+             #     with torch.cuda.amp.autocast(dtype=torch.bfloat16, enabled=True):
+             #         lat = self.vae.get_latent(sample["mels"])
+             #         lat_lens = latent_lengths.clamp(max=lat.size(1))
+             #         mel = self.vae.decode(lat)
+             pass
+         # outputs['losses'], _ = self.run_model(sample)
+         # _, model_out = self.run_model(sample, infer=True, infer_steps=infer_steps)
+         # outputs = tensors_to_scalars(outputs)
+         # output_ldm = model_out['ldm_out']
+         # T = output_ldm.shape[1]
+         # ldm = sample['kps'][:, :T]  # [B, T, nkp, kp_dim] [0, 1]
+         # B, T, nkp, kp_dim = ldm.shape
+         # output_ldm = self.denormalize_ldm(output_ldm)
+         # recon_ldm = model_out['recon_ldm']
+         # recon_ldm = self.denormalize_ldm(recon_ldm)
+
+         # results_dir = f"{hparams['work_dir']}/results/{self.global_step}_infersteps{infer_steps}_cfg{hparams['cfg_w']}"
+         # os.makedirs(results_dir, exist_ok=True)
+         # n_ctx = model_out['ctx_mask'][0, :, 0].sum().long().item()
+         # writer_kp = imageio.get_writer(f"{results_dir}/{batch_idx:06d}_kp.sil.mp4", fps=25)
+         # writer_gt = imageio.get_writer(f"{results_dir}/{batch_idx:06d}_gt.sil.mp4", fps=25)
+         # writer_pred = imageio.get_writer(f"{results_dir}/{batch_idx:06d}_pred.sil.mp4", fps=25)
+         # for i in range(T):
+         #     img = self.draw_ldm(recon_ldm[0, i])
+         #     writer_gt.append_data(img)
+         #     img = self.draw_ldm(ldm[0, i])
+         #     writer_kp.append_data(img)
+         #     if i < n_ctx:
+         #         writer_pred.append_data(img)
+         #     else:
+         #         img = self.draw_ldm(
+         #             output_ldm[0, i], color=(255, 255, 0),
+         #         )
+         #         writer_pred.append_data(img)
+         # writer_gt.close()
+         # writer_kp.close()
+         # writer_pred.close()
+         return outputs
+
+     @torch.no_grad()
+     def test_step(self, sample, batch_idx):
+         infer_steps = hparams['infer_steps']
+         return self._validation_step(sample, batch_idx, infer_steps)