print device to confirm
- app-img2img.py +5 -4
- app-txt2img.py +5 -4
app-img2img.py
CHANGED
@@ -26,16 +26,17 @@ MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
 
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-
 # check if MPS is available OSX only M1/M2/M3 chips
 mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 torch_device = device
 torch_dtype = torch.float16
 
+print(f"TIMEOUT: {TIMEOUT}")
+print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
+print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
+print(f"device: {device}")
+
 if mps_available:
     device = torch.device("mps")
     torch_device = "cpu"
app-img2img.py
app-txt2img.py
CHANGED
@@ -26,16 +26,17 @@ MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
 
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-
 # check if MPS is available OSX only M1/M2/M3 chips
 mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 torch_device = device
 torch_dtype = torch.float16
 
+print(f"TIMEOUT: {TIMEOUT}")
+print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
+print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
+print(f"device: {device}")
+
 if mps_available:
     device = torch.device("mps")
     torch_device = "cpu"
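
For reference, a minimal self-contained sketch of how the affected block reads after this change (the edit is identical in both files). It assumes os and torch are imported at the top of the module and takes MAX_QUEUE_SIZE from the hunk context; note that the new print(f"device: {device}") runs before the mps_available branch, so it confirms the CUDA/CPU choice rather than the final "mps" device.

import os

import torch

MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
TIMEOUT = float(os.environ.get("TIMEOUT", 0))
SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)

# check if MPS is available OSX only M1/M2/M3 chips
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch_device = device
torch_dtype = torch.float16

print(f"TIMEOUT: {TIMEOUT}")
print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
print(f"device: {device}")  # prints "cuda" or "cpu"; the MPS override happens below

if mps_available:
    device = torch.device("mps")
    torch_device = "cpu"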