Commit 889c346 (parent: c5d2a9e)
Aitron Emper committed: Upload 50 files
rvc/configs/config.py
CHANGED
@@ -137,6 +137,7 @@ class Config:
 
         return x_pad, x_query, x_center, x_max
 
+
 def max_vram_gpu(gpu):
     if torch.cuda.is_available():
         gpu_properties = torch.cuda.get_device_properties(gpu)
@@ -144,7 +145,8 @@ def max_vram_gpu(gpu):
         return total_memory_gb
     else:
         return "0"
-
+
+
 def get_gpu_info():
     ngpu = torch.cuda.device_count()
     gpu_infos = []
@@ -159,7 +161,5 @@ def get_gpu_info():
     if len(gpu_infos) > 0:
         gpu_info = "\n".join(gpu_infos)
     else:
-        gpu_info = (
-
-        )
-    return gpu_info
+        gpu_info = "Unfortunately, there is no compatible GPU available to support your training."
+    return gpu_info
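For orientation, here is a minimal sketch of how the two touched functions in rvc/configs/config.py plausibly read after this commit. The VRAM conversion line and the per-device loop in get_gpu_info are not visible in the hunks above, so those parts are assumptions added only for illustration.

import torch


def max_vram_gpu(gpu):
    if torch.cuda.is_available():
        gpu_properties = torch.cuda.get_device_properties(gpu)
        # Assumed conversion step; the diff only shows the surrounding lines.
        total_memory_gb = round(gpu_properties.total_memory / 1024 / 1024 / 1024)
        return total_memory_gb
    else:
        return "0"


def get_gpu_info():
    ngpu = torch.cuda.device_count()
    gpu_infos = []
    # Hypothetical per-device loop; the commit does not show this part of the function.
    for i in range(ngpu):
        gpu_infos.append(f"{i}: {torch.cuda.get_device_name(i)}")
    if len(gpu_infos) > 0:
        gpu_info = "\n".join(gpu_infos)
    else:
        gpu_info = "Unfortunately, there is no compatible GPU available to support your training."
    return gpu_info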
rvc/train/extract/extract_f0_print.py
CHANGED
@@ -48,9 +48,11 @@ class FeatureInput:
         torch_device = (
             torch.device(f"cuda:{torch_device_index % torch.cuda.device_count()}")
             if torch.cuda.is_available()
-            else
-
-
+            else (
+                torch.device("mps")
+                if torch.backends.mps.is_available()
+                else torch.device("cpu")
+            )
         )
 
         audio = torch.from_numpy(x.astype(np.float32)).to(torch_device, copy=True)
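The change above folds an Apple MPS fallback into the inline device-selection expression. Below is a standalone sketch of the same cascade (CUDA, then MPS, then CPU); the helper name select_device is hypothetical and not part of the repository.

import torch


def select_device(index: int = 0) -> torch.device:
    # Prefer CUDA, then Apple-silicon MPS, then plain CPU: the same
    # fallback order the edited conditional expression encodes inline.
    if torch.cuda.is_available():
        return torch.device(f"cuda:{index % torch.cuda.device_count()}")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

Usage would mirror the original line, e.g. audio = torch.from_numpy(x.astype(np.float32)).to(select_device(), copy=True).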
rvc/train/losses.py
CHANGED
@@ -1,5 +1,6 @@
 import torch
 
+
 def feature_loss(fmap_r, fmap_g):
     loss = 0
     for dr, dg in zip(fmap_r, fmap_g):
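This hunk only adds a blank line, but for context, a function with this signature conventionally computes a HiFi-GAN-style feature-matching loss. The sketch below is that conventional formulation, assuming the usual L1 accumulation; everything past the three lines visible in the diff is an assumption, not the repository's confirmed body.

import torch


def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        # Assumed inner loop: L1 distance between real and generated feature maps.
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl - gl))
    # The factor of 2 follows the common HiFi-GAN implementation.
    return loss * 2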
rvc/train/process/change_info.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 import torch
 
+
 def change_info(path, info, name):
     try:
         ckpt = torch.load(path, map_location="cpu")
@@ -10,4 +11,4 @@ def change_info(path, info, name):
         torch.save(ckpt, f"logs/{name}/{name}")
         return "Success."
     except Exception as error:
-        print(error)
+        print(error)
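A sketch of the overall change_info flow implied by the two hunks: load a checkpoint, update it, save it under logs/{name}, and return a success message or print the error. The line writing info into the checkpoint is an assumption; the diff does not show the middle of the function.

import os  # imported in the original file
import torch


def change_info(path, info, name):
    try:
        ckpt = torch.load(path, map_location="cpu")
        # Assumed update step; not visible in the diff hunks above.
        ckpt["info"] = info
        torch.save(ckpt, f"logs/{name}/{name}")
        return "Success."
    except Exception as error:
        print(error)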