LaynzID12 committed on
Commit
bf97ab6
1 Parent(s): 98d8ea8

Create lib/config/config.py

Browse files
Files changed (1) hide show
  1. lib/config/config.py +99 -0
lib/config/config.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import sys
3
+ import torch
4
+ from multiprocessing import cpu_count
5
+
6
class Config:
    """Runtime configuration: CLI flags, compute device, precision, and
    memory-dependent chunking parameters."""

    def __init__(self):
        self.device = "cuda:0"      # preferred device; downgraded in device_config()
        self.is_half = True         # fp16 inference; forced off on weak/absent GPUs
        self.n_cpu = 0              # 0 means "autodetect" (filled in device_config)
        self.gpu_name = None        # CUDA device name, set only when CUDA is used
        self.gpu_mem = None         # total GPU memory in GiB, set only when CUDA is used
        (
            self.share,
            self.api,
            self.unsupported,
        ) = self.arg_parse()
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        """Parse process command-line flags.

        Returns:
            (share, api, unsupported) — three booleans from store_true flags.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("--share", action="store_true", help="Launch with public link")
        parser.add_argument("--api", action="store_true", help="Launch with api")
        parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
        cmd_opts = parser.parse_args()

        return (
            cmd_opts.share,
            cmd_opts.api,
            cmd_opts.unsupported,
        )

    # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
    # Use `getattr` so torch builds that predate the mps backend don't crash.
    @staticmethod
    def has_mps() -> bool:
        """Return True when an Apple MPS device is available and actually usable."""
        # fix: the original accessed torch.backends.mps unconditionally, which
        # raises AttributeError on older torch versions despite the comment above.
        mps_backend = getattr(torch.backends, "mps", None)
        if mps_backend is None or not mps_backend.is_available():
            return False
        try:
            # is_available() can report True while allocation still fails
            # (e.g. missing OS support) — probe with a tiny tensor transfer.
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        """Select the compute device/precision and derive processing window sizes.

        Side effects: may rewrite self.device, self.is_half, self.gpu_name,
        self.gpu_mem, and self.n_cpu based on the detected hardware.

        Returns:
            (x_pad, x_query, x_center, x_max) — chunking parameters sized to
            the available GPU memory (or the CPU/MPS fallback configuration).
        """
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # These cards have poor or absent fp16 support — force fp32.
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("INFO: Found GPU", self.gpu_name, ", force to fp32")
                self.is_half = False
            else:
                print("INFO: Found GPU", self.gpu_name)
            # total_memory is bytes; convert to GiB, +0.4 rounds near-integer sizes up.
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
        elif self.has_mps():
            print("INFO: No supported Nvidia GPU found, use MPS instead")
            self.device = "mps"
            self.is_half = False
        else:
            print("INFO: No supported Nvidia GPU found, use CPU instead")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # configuration for ~6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # configuration for ~5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        # fix: was `self.gpu_mem != None` — identity comparison belongs with `is`.
        if self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max