Files changed (1)
  1. lib/infer.py +1 -32
lib/infer.py CHANGED
@@ -19,38 +19,7 @@ class Configs:
         if torch.cuda.is_available():
             i_device = int(self.device.split(":")[-1])
             self.gpu_name = torch.cuda.get_device_name(i_device)
-            # if (
-            #     ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
-            #     or "P40" in self.gpu_name.upper()
-            #     or "1060" in self.gpu_name
-            #     or "1070" in self.gpu_name
-            #     or "1080" in self.gpu_name
-            # ):
-            #     print("16 series/10 series P40 forced single precision")
-            #     self.is_half = False
-            #     for config_file in ["32k.json", "40k.json", "48k.json"]:
-            #         with open(BASE_DIR / "src" / "configs" / config_file, "r") as f:
-            #             strr = f.read().replace("true", "false")
-            #         with open(BASE_DIR / "src" / "configs" / config_file, "w") as f:
-            #             f.write(strr)
-            #     with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
-            #         strr = f.read().replace("3.7", "3.0")
-            #     with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
-            #         f.write(strr)
-            # else:
-            #     self.gpu_name = None
-            # self.gpu_mem = int(
-            #     torch.cuda.get_device_properties(i_device).total_memory
-            #     / 1024
-            #     / 1024
-            #     / 1024
-            #     + 0.4
-            # )
-            # if self.gpu_mem <= 4:
-            #     with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
-            #         strr = f.read().replace("3.7", "3.0")
-            #     with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
-            #         f.write(strr)
+
         elif torch.backends.mps.is_available():
             print("No supported N-card found, use MPS for inference")
             self.device = "mps"
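Net effect of the change: the commented-out half-precision workaround for 16-series/10-series/P40 cards is dropped, and Configs keeps only the plain CUDA/MPS detection visible in the context lines. The sketch below restates that surviving logic as a standalone function for clarity; the name pick_device, the "cuda:0" default, and the final CPU fallback are illustrative assumptions (the CPU branch lies outside this hunk), not code from lib/infer.py.

import torch

def pick_device(requested: str = "cuda:0") -> str:
    # Prefer CUDA when a supported NVIDIA card is present.
    if torch.cuda.is_available():
        i_device = int(requested.split(":")[-1])  # "cuda:0" -> 0
        print("GPU:", torch.cuda.get_device_name(i_device))
        return requested
    # Otherwise fall back to Apple's MPS backend, as in the diff above.
    if torch.backends.mps.is_available():
        print("No supported N-card found, use MPS for inference")
        return "mps"
    # Assumed last resort when neither backend is available.
    return "cpu"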