Upload config.py
config.py
ADDED
import argparse
import sys
from multiprocessing import cpu_count

import torch


class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        (
            self.python_cmd,
            self.listen_port,
            self.iscolab,
            self.noparallel,
            self.noautoopen,
        ) = self.arg_parse()
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        exe = sys.executable or "python"
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=7865, help="Listen port")
        parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
        parser.add_argument("--colab", action="store_true", help="Launch in Colab")
        parser.add_argument(
            "--noparallel", action="store_true", help="Disable parallel processing"
        )
        parser.add_argument(
            "--noautoopen",
            action="store_true",
            help="Do not open in browser automatically",
        )
        cmd_opts = parser.parse_args()

        # Fall back to the default if the requested port is out of range.
        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865

        return (
            cmd_opts.pycmd,
            cmd_opts.port,
            cmd_opts.colab,
            cmd_opts.noparallel,
            cmd_opts.noautoopen,
        )

    # MPS support requires a recent PyTorch build and macOS 12.3+.
    # Guard with `getattr` and a trial tensor transfer for compatibility
    # with older PyTorch versions that lack the `mps` backend entirely.
    @staticmethod
    def has_mps() -> bool:
        mps_backend = getattr(torch.backends, "mps", None)
        if mps_backend is None or not mps_backend.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # 16-series/10-series cards and the P40 handle fp16 poorly, so force fp32.
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print(f"Found GPU {self.gpu_name}, forcing fp32")
                self.is_half = False
            else:
                print(f"Found GPU {self.gpu_name}")
            # Total VRAM in GiB; the +0.4 rounds near-integer sizes up before truncation.
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
        elif self.has_mps():
            print("No supported NVIDIA GPU found, using MPS instead")
            self.device = "mps"
            self.is_half = False
        else:
            print("No supported NVIDIA GPU found, using CPU instead")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Window sizes for about 6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Window sizes for about 5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem is not None and self.gpu_mem <= 4:
            # Tighter windows for cards with 4 GB of VRAM or less
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
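For context, a minimal usage sketch (the launcher script name and flag values below are illustrative assumptions, not part of this upload). Config() parses sys.argv on construction, so it should be built once at startup:

    from config import Config

    config = Config()  # parses --port, --pycmd, --colab, --noparallel, --noautoopen
    print("Device:", config.device)           # "cuda:0", "mps", or "cpu"
    print("Half precision:", config.is_half)  # False on MPS, CPU, and fp16-weak GPUs
    print("CPU workers:", config.n_cpu)       # defaults to cpu_count()
    print("Windows:", config.x_pad, config.x_query, config.x_center, config.x_max)

    # Example invocation: python launcher.py --port 7865 --noparallel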