Commit 01013b6
AI-C committed
Parent: c92f055

Upload config.py with huggingface_hub

Files changed (1):
  1. config.py +117 -0
config.py ADDED
@@ -0,0 +1,117 @@
+ import argparse
+ import sys
+ import torch
+ from multiprocessing import cpu_count
+
+ class Config:
+     def __init__(self):
+         self.device = "cuda:0"
+         self.is_half = True
+         self.n_cpu = 0
+         self.gpu_name = None
+         self.gpu_mem = None
+         (
+             self.python_cmd,
+             self.listen_port,
+             self.colab,
+             self.noparallel,
+             self.noautoopen,
+             self.api,
+         ) = self.arg_parse()
+         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+     @staticmethod
+     def arg_parse() -> tuple:
+         exe = sys.executable or "python"
+         parser = argparse.ArgumentParser()
+         parser.add_argument("--port", type=int, default=7865, help="Listen port")
+         parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
+         parser.add_argument("--colab", action="store_true", help="Launch in Colab")
+         parser.add_argument(
+             "--noparallel", action="store_true", help="Disable parallel processing"
+         )
+         parser.add_argument(
+             "--noautoopen",
+             action="store_true",
+             help="Do not open in browser automatically",
+         )
+         parser.add_argument("--api", action="store_true", help="Launch with API")
+         cmd_opts = parser.parse_args()
+
+         # Fall back to the default port if the requested one is out of range.
+         cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
+
+         return (
+             cmd_opts.pycmd,
+             cmd_opts.port,
+             cmd_opts.colab,
+             cmd_opts.noparallel,
+             cmd_opts.noautoopen,
+             cmd_opts.api,
+         )
+
+     # `torch.backends.mps` only exists in recent PyTorch builds (macOS 12.3+),
+     # so probe for it with `getattr` and try it for compatibility.
+     @staticmethod
+     def has_mps() -> bool:
+         if getattr(torch.backends, "mps", None) is None:
+             return False
+         if not torch.backends.mps.is_available():
+             return False
+         try:
+             # is_available() can report True on builds where the MPS backend
+             # still fails at runtime, so smoke-test it with a tiny allocation.
+             torch.zeros(1).to(torch.device("mps"))
+             return True
+         except Exception:
+             return False
+
+     def device_config(self) -> tuple:
+         if torch.cuda.is_available():
+             i_device = int(self.device.split(":")[-1])
+             self.gpu_name = torch.cuda.get_device_name(i_device)
+             # GTX 16-series, 10-series, and P40 cards have poor fp16 support,
+             # so force fp32 on them. The V100 check keeps names such as
+             # "V100-16GB" from matching the "16" substring.
+             if (
+                 ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+                 or "P40" in self.gpu_name.upper()
+                 or "1060" in self.gpu_name
+                 or "1070" in self.gpu_name
+                 or "1080" in self.gpu_name
+             ):
+                 print(f"Found GPU {self.gpu_name}, forcing fp32")
+                 self.is_half = False
+             else:
+                 print(f"Found GPU {self.gpu_name}")
+             # Total VRAM in GiB; the +0.4 nudges typical sizes (e.g. 11.7)
+             # upward before int() truncates.
+             self.gpu_mem = int(
+                 torch.cuda.get_device_properties(i_device).total_memory
+                 / 1024
+                 / 1024
+                 / 1024
+                 + 0.4
+             )
+         elif self.has_mps():
+             print("No supported NVIDIA GPU found, using MPS instead")
+             self.device = "mps"
+             self.is_half = False
+         else:
+             print("No supported NVIDIA GPU found, using CPU instead")
+             self.device = "cpu"
+             self.is_half = False
+
+         if self.n_cpu == 0:
+             self.n_cpu = cpu_count()
+
+         if self.is_half:
+             # 6 GB VRAM configuration
+             x_pad = 3
+             x_query = 10
+             x_center = 60
+             x_max = 65
+         else:
+             # 5 GB VRAM configuration
+             x_pad = 1
+             x_query = 6
+             x_center = 38
+             x_max = 41
+
+         if self.gpu_mem is not None and self.gpu_mem <= 4:
+             # Tighter limits for cards with 4 GB of VRAM or less
+             x_pad = 1
+             x_query = 5
+             x_center = 30
+             x_max = 32
+
+         return x_pad, x_query, x_center, x_max
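For context, a minimal usage sketch (illustrative, not part of this commit): Config() parses sys.argv at construction time, so CLI flags must already be in place when the object is created. The launcher name below is hypothetical, and the printed values depend on the detected hardware.

import sys

# Hypothetical launch flags; Config.arg_parse() reads sys.argv when Config() runs.
sys.argv = ["launcher.py", "--port", "7865", "--noautoopen"]

from config import Config

config = Config()
print(config.device, config.is_half)  # e.g. "cuda:0 True" on a supported NVIDIA GPU
print(config.x_pad, config.x_query, config.x_center, config.x_max)  # chunking parameters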