Nithya committed on
Commit
60db161
·
1 Parent(s): ee8cf1f

updated gpu decorators

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -155,7 +155,10 @@ def generate_audio(audio_model, f0s, invert_audio_fn, singers=[3], num_steps=100
155
 
156
  @spaces.GPU(duration=150)
157
  def generate(pitch, num_samples=1, num_steps=100, singers=[3], outfolder='temp', audio_seq_len=750, pitch_qt=None, type='response', invert_pitch_fn=None):
158
-
 
 
 
159
  logging.log(logging.INFO, 'Generate function')
160
  # load pitch values onto GPU
161
  pitch = torch.tensor(pitch).float().unsqueeze(0).unsqueeze(0).to(device)
@@ -173,7 +176,6 @@ def generate(pitch, num_samples=1, num_steps=100, singers=[3], outfolder='temp',
173
  # if there is no pitch quantile transformer, undo the default quantile transformation that occurs
174
  def undo_qt(x, min_clip=200):
175
  pitch= pitch_qt.inverse_transform(x.reshape(-1, 1)).reshape(1, -1)
176
- print(pitch.device)
177
  pitch = torch.round(pitch) # round to nearest integer, done in preprocessing of pitch contour fed into model
178
  pitch[pitch < 200] = np.nan
179
  return pitch
@@ -197,7 +199,7 @@ audio_model, audio_qt, audio_seq_len, invert_audio_fn = load_audio_fns(
197
  os.path.join(audio_path, 'last.ckpt'),
198
  qt_path = os.path.join(audio_path, 'qt.joblib'),
199
  config_path = os.path.join(audio_path, 'config.gin'),
200
- device = device
201
  )
202
 
203
 
@@ -209,7 +211,7 @@ def load_pitch_model(model_selection):
209
  model_type = 'diffusion', \
210
  config_path = os.path.join(pitch_path, 'config.gin'), \
211
  qt_path = os.path.join(pitch_path, 'qt.joblib'), \
212
- device = device
213
  )
214
  return pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn
215
 
 
155
 
156
  @spaces.GPU(duration=150)
157
  def generate(pitch, num_samples=1, num_steps=100, singers=[3], outfolder='temp', audio_seq_len=750, pitch_qt=None, type='response', invert_pitch_fn=None):
158
+ global pitch_model, audio_model
159
+ # move the models to device
160
+ pitch_model = pitch_model.to(device)
161
+ audio_model = audio_model.to(device)
162
  logging.log(logging.INFO, 'Generate function')
163
  # load pitch values onto GPU
164
  pitch = torch.tensor(pitch).float().unsqueeze(0).unsqueeze(0).to(device)
 
176
  # if there is no pitch quantile transformer, undo the default quantile transformation that occurs
177
  def undo_qt(x, min_clip=200):
178
  pitch= pitch_qt.inverse_transform(x.reshape(-1, 1)).reshape(1, -1)
 
179
  pitch = torch.round(pitch) # round to nearest integer, done in preprocessing of pitch contour fed into model
180
  pitch[pitch < 200] = np.nan
181
  return pitch
 
199
  os.path.join(audio_path, 'last.ckpt'),
200
  qt_path = os.path.join(audio_path, 'qt.joblib'),
201
  config_path = os.path.join(audio_path, 'config.gin'),
202
+ device = 'cpu'
203
  )
204
 
205
 
 
211
  model_type = 'diffusion', \
212
  config_path = os.path.join(pitch_path, 'config.gin'), \
213
  qt_path = os.path.join(pitch_path, 'qt.joblib'), \
214
+ device = 'cpu'
215
  )
216
  return pitch_model, pitch_qt, pitch_task_fn, invert_pitch_fn
217