BartPoint committed on
Commit
f515bbc
1 Parent(s): 579e129

Fix this?

Files changed (1)
  1. vc_infer_pipeline.py +31 -77
vc_infer_pipeline.py CHANGED
@@ -8,12 +8,11 @@ from functools import lru_cache
 
 bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
 
-input_audio_path2wav = {}
-
+input_audio_path2wav={}
 
 @lru_cache
-def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
-    audio = input_audio_path2wav[input_audio_path]
+def cache_harvest_f0(input_audio_path,fs,f0max,f0min,frame_period):
+    audio=input_audio_path2wav[input_audio_path]
     f0, t = pyworld.harvest(
         audio,
         fs=fs,
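Note: cache_harvest_f0 is memoized with functools.lru_cache, but a numpy waveform is unhashable and cannot be part of the cache key, which is why the audio travels through the module-level input_audio_path2wav dict while only the path and scalar parameters are hashed. A minimal sketch of the same pattern (the names here are hypothetical, not from this file):

import numpy as np
from functools import lru_cache

_path2wav = {}  # hypothetical stand-in for input_audio_path2wav

@lru_cache
def cached_f0(path, fs):
    audio = _path2wav[path]         # the array is fetched by key, never hashed
    return float(audio.sum()) / fs  # placeholder for the pyworld.harvest call

_path2wav["a.wav"] = np.ones(16000)
cached_f0("a.wav", 16000)  # computed
cached_f0("a.wav", 16000)  # served from the cache

One caveat of the pattern: if a different waveform is later stored under the same path, the stale cached result is still returned.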
@@ -24,29 +23,18 @@ def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
     f0 = pyworld.stonemask(audio, f0, t, fs)
     return f0
 
-
-def change_rms(data1, sr1, data2, sr2, rate):  # 1 is the input audio, 2 is the output audio, rate is the proportion of 2
+def change_rms(data1,sr1,data2,sr2,rate):  # 1 is the input audio, 2 is the output audio, rate is the proportion of 2
     # print(data1.max(),data2.max())
-    rms1 = librosa.feature.rms(
-        y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
-    )  # one point every half second
-    rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
-    rms1 = torch.from_numpy(rms1)
-    rms1 = F.interpolate(
-        rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
-    ).squeeze()
-    rms2 = torch.from_numpy(rms2)
-    rms2 = F.interpolate(
-        rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
-    ).squeeze()
-    rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
-    data2 *= (
-        torch.pow(rms1, torch.tensor(1 - rate))
-        * torch.pow(rms2, torch.tensor(rate - 1))
-    ).numpy()
+    rms1 = librosa.feature.rms(y=data1, frame_length=sr1//2*2, hop_length=sr1//2)  # one point every half second
+    rms2 = librosa.feature.rms(y=data2, frame_length=sr2//2*2, hop_length=sr2//2)
+    rms1=torch.from_numpy(rms1)
+    rms1=F.interpolate(rms1.unsqueeze(0), size=data2.shape[0],mode='linear').squeeze()
+    rms2=torch.from_numpy(rms2)
+    rms2=F.interpolate(rms2.unsqueeze(0), size=data2.shape[0],mode='linear').squeeze()
+    rms2=torch.max(rms2,torch.zeros_like(rms2)+1e-6)
+    data2*=(torch.pow(rms1,torch.tensor(1-rate))*torch.pow(rms2,torch.tensor(rate-1))).numpy()
     return data2
 
-
 class VC(object):
     def __init__(self, tgt_sr, config):
         self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
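Note: change_rms blends the loudness envelope of the input into the converted output. Both RMS curves are interpolated to per-sample length, and the output is multiplied by rms1^(1-rate) * rms2^(rate-1), so rate=1 leaves the output untouched while rate=0 fully imposes the input's envelope. A toy check of that exponent algebra (values are hypothetical):

# rate = 0: gain = rms_in / rms_out, i.e. the output is pulled to the input's level
rms_in, rms_out, rate = 0.5, 0.1, 0.0
gain = rms_in ** (1 - rate) * rms_out ** (rate - 1)
print(gain)        # 5.0
print(0.1 * gain)  # 0.5 -- an output sample at RMS 0.1 lands at the input's RMS
# rate = 1: gain = rms_in**0 * rms_out**0 = 1, the converted audio is unchanged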
@@ -66,16 +54,7 @@ class VC(object):
         self.t_max = self.sr * self.x_max  # duration threshold below which no query is needed
         self.device = config.device
 
-    def get_f0(
-        self,
-        input_audio_path,
-        x,
-        p_len,
-        f0_up_key,
-        f0_method,
-        filter_radius,
-        inp_f0=None,
-    ):
+    def get_f0(self, input_audio_path,x, p_len, f0_up_key, f0_method,filter_radius, inp_f0=None):
         global input_audio_path2wav
         time_step = self.window / self.sr * 1000
         f0_min = 50
@@ -99,9 +78,9 @@ class VC(object):
                 f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
             )
         elif f0_method == "harvest":
-            input_audio_path2wav[input_audio_path] = x.astype(np.double)
-            f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
-            if filter_radius > 2:
+            input_audio_path2wav[input_audio_path]=x.astype(np.double)
+            f0=cache_harvest_f0(input_audio_path,self.sr,f0_max,f0_min,10)
+            if(filter_radius>2):
                 f0 = signal.medfilt(f0, 3)
         elif f0_method == "crepe":
             model = "full"
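Note: when filter_radius exceeds 2, the harvest f0 track is smoothed with a 3-tap median filter, which knocks out single-frame outliers such as octave spikes. A small scipy demo (the values are made up; signal.medfilt zero-pads, so the edges are affected):

import numpy as np
from scipy import signal

f0 = np.array([110.0, 112.0, 224.0, 113.0, 114.0])  # lone octave spike at index 2
print(signal.medfilt(f0, 3))  # [110. 112. 113. 114. 113.] -- spike removed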
@@ -146,7 +125,7 @@ class VC(object):
         ) + 1
         f0_mel[f0_mel <= 1] = 1
         f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(np.int)
+        f0_coarse = np.rint(f0_mel).astype(int)
         return f0_coarse, f0bak  # 1-0
 
     def vc(
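Note: np.int was deprecated in NumPy 1.20 and removed in 1.24, so astype(int) is the correct replacement here. For context, this hunk is the tail of the coarse-pitch quantization: f0 is mapped to the mel scale and squeezed into the integer range 1-255, with 0 Hz (unvoiced) landing on 1. A self-contained sketch, assuming this file's f0_min = 50 and f0_max = 1100:

import numpy as np

f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 110.0, 440.0])  # Hz; 0 marks unvoiced frames
f0_mel = 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > 255] = 255
f0_coarse = np.rint(f0_mel).astype(int)
print(f0_coarse)  # [  1  23 122] -- unvoiced maps to 1, voiced spreads over 2..255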
@@ -162,7 +141,6 @@ class VC(object):
         big_npy,
         index_rate,
         version,
-        protect,
     ):  # ,file_index,file_big_npy
         feats = torch.from_numpy(audio0)
         if self.is_half:
@@ -183,9 +161,8 @@ class VC(object):
         t0 = ttime()
         with torch.no_grad():
             logits = model.extract_features(**inputs)
-            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
-        if protect < 0.5:
-            feats0 = feats.clone()
+            feats = model.final_proj(logits[0])if version=="v1"else logits[0]
+
         if (
             isinstance(index, type(None)) == False
             and isinstance(big_npy, type(None)) == False
@@ -211,10 +188,6 @@ class VC(object):
             )
 
         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-        if protect < 0.5:
-            feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
-                0, 2, 1
-            )
         t1 = ttime()
         p_len = audio0.shape[0] // self.window
         if feats.shape[1] < p_len:
@@ -222,14 +195,6 @@ class VC(object):
         if pitch != None and pitchf != None:
             pitch = pitch[:, :p_len]
             pitchf = pitchf[:, :p_len]
-
-        if protect < 0.5:
-            pitchff = pitchf.clone()
-            pitchff[pitchf > 0] = 1
-            pitchff[pitchf < 1] = protect
-            pitchff = pitchff.unsqueeze(-1)
-            feats = feats * pitchff + feats0 * (1 - pitchff)
-            feats = feats.to(feats0.dtype)
         p_len = torch.tensor([p_len], device=self.device).long()
         with torch.no_grad():
             if pitch != None and pitchf != None:
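Note: this hunk, together with the signature changes elsewhere in the diff, strips out the consonant-protection feature. The removed block built a per-frame weight that is 1 on voiced frames and protect on unvoiced ones, then blended the index-retrieved features with the pre-retrieval feats0 so breaths and consonants kept more of the original timbre. A toy re-statement of that weighting with hypothetical tensors:

import torch

protect = 0.33
pitchf = torch.tensor([[0.0, 120.0, 0.0, 130.0]])  # per-frame f0; 0 = unvoiced
feats = torch.ones(1, 4, 2)    # features after index retrieval
feats0 = torch.zeros(1, 4, 2)  # features before index retrieval

pitchff = pitchf.clone()
pitchff[pitchf > 0] = 1        # voiced frames keep the retrieved features
pitchff[pitchf < 1] = protect  # unvoiced frames lean back toward feats0
pitchff = pitchff.unsqueeze(-1)
blended = feats * pitchff + feats0 * (1 - pitchff)
print(blended[0, :, 0])  # tensor([0.3300, 1.0000, 0.3300, 1.0000])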
@@ -241,7 +206,10 @@ class VC(object):
             )
         else:
             audio1 = (
-                (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
+                (net_g.infer(feats, p_len, sid)[0][0, 0])
+                .data.cpu()
+                .float()
+                .numpy()
             )
         del feats, p_len, padding_mask
         if torch.cuda.is_available():
@@ -270,7 +238,6 @@ class VC(object):
         resample_sr,
         rms_mix_rate,
         version,
-        protect,
         f0_file=None,
     ):
         if (
@@ -325,15 +292,7 @@ class VC(object):
         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
         pitch, pitchf = None, None
         if if_f0 == 1:
-            pitch, pitchf = self.get_f0(
-                input_audio_path,
-                audio_pad,
-                p_len,
-                f0_up_key,
-                f0_method,
-                filter_radius,
-                inp_f0,
-            )
+            pitch, pitchf = self.get_f0(input_audio_path,audio_pad, p_len, f0_up_key, f0_method,filter_radius, inp_f0)
             pitch = pitch[:p_len]
             pitchf = pitchf[:p_len]
             if self.device == "mps":
@@ -358,7 +317,6 @@ class VC(object):
                         big_npy,
                         index_rate,
                         version,
-                        protect,
                     )[self.t_pad_tgt : -self.t_pad_tgt]
                 )
             else:
@@ -375,7 +333,6 @@ class VC(object):
                         big_npy,
                         index_rate,
                         version,
-                        protect,
                     )[self.t_pad_tgt : -self.t_pad_tgt]
                 )
             s = t
@@ -393,7 +350,6 @@ class VC(object):
                     big_npy,
                     index_rate,
                     version,
-                    protect,
                 )[self.t_pad_tgt : -self.t_pad_tgt]
             )
         else:
@@ -410,21 +366,19 @@ class VC(object):
                     big_npy,
                     index_rate,
                     version,
-                    protect,
                 )[self.t_pad_tgt : -self.t_pad_tgt]
             )
         audio_opt = np.concatenate(audio_opt)
-        if rms_mix_rate != 1:
-            audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
-        if resample_sr >= 16000 and tgt_sr != resample_sr:
+        if(rms_mix_rate!=1):
+            audio_opt=change_rms(audio,16000,audio_opt,tgt_sr,rms_mix_rate)
+        if(resample_sr>=16000 and tgt_sr!=resample_sr):
             audio_opt = librosa.resample(
                 audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
             )
-        audio_max = np.abs(audio_opt).max() / 0.99
-        max_int16 = 32768
-        if audio_max > 1:
-            max_int16 /= audio_max
-        audio_opt = (audio_opt * max_int16).astype(np.int16)
+        audio_max=np.abs(audio_opt).max()/0.99
+        max_int16=32768
+        if(audio_max>1):max_int16/=audio_max
+        audio_opt=(audio_opt * max_int16).astype(np.int16)
         del pitch, pitchf, sid
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
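Note: the float-to-int16 conversion added at the end guards against clipping. The peak is measured against 0.99 of full scale, and the 32768 multiplier is scaled down only when the signal would otherwise wrap around. A standalone sketch of the same trick:

import numpy as np

def to_int16(audio_opt):
    audio_max = np.abs(audio_opt).max() / 0.99
    max_int16 = 32768
    if audio_max > 1:       # would clip: shrink the scale factor instead
        max_int16 /= audio_max
    return (audio_opt * max_int16).astype(np.int16)

print(to_int16(np.array([0.5, -2.0])))  # [  8110 -32440] -- no wraparound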
 