Update app.py
app.py CHANGED

@@ -1,3 +1,4 @@
+import spaces
 import gradio as gr
 import torch
 import torchaudio
@@ -9,7 +10,7 @@ import numpy as np
 from pydub import AudioSegment
 
 # Load model and configuration
-device =
+device = 'cuda'
 
 dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
                                                                  "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
@@ -128,6 +129,7 @@ def crossfade(chunk1, chunk2, overlap):
 overlap_frame_len = 16
 bitrate = "320k"
 
+@spaces.GPU
 @torch.no_grad()
 @torch.inference_mode()
 def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift):
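In short, the commit wires app.py up to ZeroGPU: it adds "import spaces" at the very top of the file, pins device to 'cuda', and stacks @spaces.GPU on top of the existing @torch.no_grad() / @torch.inference_mode() decorators on voice_conversion, so the Space requests a GPU only while that call runs. Below is a minimal sketch of the same pattern, assuming the Hugging Face spaces package available on ZeroGPU Spaces; the convert function and its single-argument signature are hypothetical placeholders, not the real voice_conversion from app.py.

import spaces          # the commit adds this import at the very top of app.py
import torch
import gradio as gr

device = 'cuda'        # the commit hard-codes CUDA; ZeroGPU attaches it during @spaces.GPU calls

@spaces.GPU            # request a GPU for the duration of each call
@torch.no_grad()
@torch.inference_mode()
def convert(audio_path):
    # Hypothetical stand-in: the real app would move tensors to `device`,
    # run the Seed-VC models, and return the converted audio file.
    return audio_path

demo = gr.Interface(fn=convert,
                    inputs=gr.Audio(type="filepath"),
                    outputs=gr.Audio(type="filepath"))

if __name__ == "__main__":
    demo.launch()

The decorator order mirrors the diff: @spaces.GPU is outermost, so the GPU is allocated before the no-grad / inference-mode contexts wrap the actual inference.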