adamelliotfields committed
Commit • aafe7f2
1 Parent(s): b70fffe
Better logging
Browse files
- .gitignore +1 -0
- app.py +8 -0
- data/prompts.json +1 -1
- lib/__init__.py +3 -0
- lib/inference.py +7 -8
- lib/loader.py +35 -36
- lib/logger.py +73 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
 __pycache__/
 .venv/
 loras/
+app.log
app.py CHANGED
@@ -2,11 +2,19 @@ import argparse
 import json
 import os
 import random
+from warnings import filterwarnings
 
 import gradio as gr
+from diffusers.utils import logging as diffusers_logging
+from transformers import logging as transformers_logging
 
 from lib import Config, async_call, download_civit_file, download_repo_files, generate, read_file
 
+filterwarnings("ignore", category=FutureWarning, module="diffusers")
+filterwarnings("ignore", category=FutureWarning, module="transformers")
+diffusers_logging.set_verbosity_error()
+transformers_logging.set_verbosity_error()
+
 # the CSS `content` attribute expects a string so we need to wrap the number in quotes
 refresh_seed_js = """
 () => {
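For context, a minimal standalone sketch (not part of the commit, standard library only) of how these warning filters behave; the filter here is simplified to apply everywhere, whereas the commit scopes it to the "diffusers" and "transformers" modules via the module= regex argument:

import warnings

warnings.filterwarnings("ignore", category=FutureWarning)
warnings.warn("deprecated API", FutureWarning)  # silenced by the filter above
warnings.warn("heads up", UserWarning)          # still emitted to stderr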
data/prompts.json CHANGED
@@ -32,7 +32,7 @@
 "bowl of steaming hot ramen with a sliced egg, thin slices of meat, green onions, noodles, chopsticks, solo, minimal",
 "large pizza with melted cheese, seared pepperoni, crispy crust, solo, minimal",
 "sizzling hot sirloin steak with a perfect crust, seared to perfection, served with a side of roasted vegetables and mashed potatoes, solo, minimal",
-"
+"wedding cake, white frosting, colorful accent flowers, fresh berry garnish, tiers, layers, elegant, minimal",
 "baked salmon fillet with a perfectly crispy skin and flaky flesh, side of steamed vegetables and quinoa, healthy, fresh, solo, minimal",
 "steaming bowl of hearty chili with tender chunks of beef, rich tomato sauce, topped with grated cheddar cheese and green onions, solo, minimal",
 "platter of sushi rolls, tuna, salmon, california maki, rainbow, colorful, beautiful arrangement, solo, minimal",
lib/__init__.py CHANGED
@@ -1,17 +1,20 @@
 from .config import Config
 from .inference import generate
 from .loader import Loader
+from .logger import Logger, log_fn
 from .upscaler import RealESRGAN
 from .utils import async_call, download_civit_file, download_repo_files, load_json, read_file
 
 __all__ = [
     "Config",
     "Loader",
+    "Logger",
     "RealESRGAN",
     "async_call",
     "download_civit_file",
     "download_repo_files",
     "generate",
     "load_json",
+    "log_fn",
     "read_file",
 ]
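With the new re-exports, other code can import the logger from the package root, the same pattern app.py already uses for Config and generate. A minimal hypothetical usage sketch (not part of the commit):

from lib import Logger

log = Logger("example")  # per-name singleton re-exported by lib/__init__.py
log.info("imported from the package root")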
lib/inference.py CHANGED
@@ -15,11 +15,9 @@ from PIL import Image
 
 from .config import Config
 from .loader import Loader
+from .logger import Logger
 from .utils import load_json
 
-__import__("warnings").filterwarnings("ignore", category=FutureWarning, module="transformers")
-__import__("transformers").logging.set_verbosity_error()
-
 
 def parse_prompt_with_arrays(prompt: str) -> list[str]:
     arrays = re.findall(r"\[\[(.*?)\]\]", prompt)
@@ -170,6 +168,7 @@ def generate(
         return latents
 
     start = time.perf_counter()
+    log = Logger("generate")
     loader = Loader()
     loader.load(
         KIND,
@@ -249,10 +248,9 @@ def generate(
     images = []
    current_seed = seed
     for i in range(num_images):
-        # seeded generator for each iteration
-        generator = torch.Generator(device=pipe.device).manual_seed(current_seed)
-
         try:
+            generator = torch.Generator(device=pipe.device).manual_seed(current_seed)
+
             positive_prompts = parse_prompt_with_arrays(positive_prompt)
             index = i % len(positive_prompts)
             positive_styled, negative_styled = apply_style(
@@ -270,7 +268,6 @@ def generate(
             for embedding in embeddings:
                 negative_styled += f", <{embedding}>"
 
-            # print prompts
             positive_embeds, negative_embeds = compel.pad_conditioning_tensors_to_same_length(
                 [compel(positive_styled), compel(negative_styled)]
             )
@@ -317,6 +314,8 @@ def generate(
         CURRENT_IMAGE += 1
 
     diff = time.perf_counter() - start
+    msg = f"Generated {len(images)} image{'s' if len(images) > 1 else ''} in {diff:.2f}s"
+    log.info(msg)
     if Info:
-        Info(
+        Info(msg)
     return images
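The per-image generator is now created inside the try block. A small sketch of the seeded-generator pattern (assumes torch is installed; the CPU device and the seed + i increment are illustrative, the real code advances current_seed separately):

import torch

seed = 42
for i in range(3):
    # one deterministic generator per image: the same seed always produces the same noise
    generator = torch.Generator(device="cpu").manual_seed(seed + i)
    noise = torch.randn((2, 2), generator=generator)
    print(i, noise.sum().item())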
lib/loader.py CHANGED
@@ -1,21 +1,16 @@
 import gc
 from threading import Lock
-from warnings import filterwarnings
 
 import torch
 from DeepCache import DeepCacheSDHelper
 from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
 from diffusers.models import AutoencoderKL, AutoencoderTiny
 from diffusers.models.attention_processor import AttnProcessor2_0, IPAdapterAttnProcessor2_0
-from torch._dynamo import OptimizedModule
 
 from .config import Config
+from .logger import Logger
 from .upscaler import RealESRGAN
 
-__import__("diffusers").logging.set_verbosity_error()
-filterwarnings("ignore", category=FutureWarning, module="torch")
-filterwarnings("ignore", category=FutureWarning, module="diffusers")
-
 
 class Loader:
     _instance = None
@@ -30,10 +25,16 @@ class Loader:
             cls._instance.ip_adapter = None
             cls._instance.upscaler_2x = None
             cls._instance.upscaler_4x = None
+            cls._instance.log = Logger("Loader")
         return cls._instance
 
-    def _should_unload_ip_adapter(self, ip_adapter=""):
-
+    def _should_unload_ip_adapter(self, model="", ip_adapter=""):
+        # unload if model changed
+        if self.model and self.model.lower() != model.lower():
+            return True
+        if self.ip_adapter and not ip_adapter:
+            return True
+        return False
 
     def _should_unload_pipeline(self, kind="", model=""):
         if self.pipe is None:
@@ -48,7 +49,10 @@ class Loader:
 
     # https://github.com/huggingface/diffusers/blob/v0.28.0/src/diffusers/loaders/ip_adapter.py#L300
     def _unload_ip_adapter(self):
-
+        if self.ip_adapter is None:
+            return
+
+        self.log.info("Unloading IP-Adapter")
         if not isinstance(self.pipe, StableDiffusionImg2ImgPipeline):
             self.pipe.image_encoder = None
             self.pipe.register_to_config(image_encoder=[None, None])
@@ -72,13 +76,12 @@ class Loader:
         gc.collect()
         torch.cuda.empty_cache()
         torch.cuda.ipc_collect()
-        torch.cuda.reset_max_memory_allocated()
         torch.cuda.reset_peak_memory_stats()
         torch.cuda.synchronize()
 
     def _unload(self, kind="", model="", ip_adapter=""):
         to_unload = []
-        if self._should_unload_ip_adapter(ip_adapter):
+        if self._should_unload_ip_adapter(model, ip_adapter):
             self._unload_ip_adapter()
             to_unload.append("ip_adapter")
         if self._should_unload_pipeline(kind, model):
@@ -91,8 +94,8 @@ class Loader:
             setattr(self, component, None)
 
     def _load_ip_adapter(self, ip_adapter=""):
-        if self.ip_adapter
-
+        if not self.ip_adapter and ip_adapter:
+            self.log.info(f"Loading IP-Adapter: {ip_adapter}")
             self.pipe.load_ip_adapter(
                 "h94/IP-Adapter",
                 subfolder="models",
@@ -102,29 +105,30 @@ class Loader:
             self.pipe.set_ip_adapter_scale(0.5)
             self.ip_adapter = ip_adapter
 
+    # upscalers don't need to be unloaded
     def _load_upscaler(self, scale=1):
         if scale == 2 and self.upscaler_2x is None:
             try:
-
+                self.log.info("Loading 2x upscaler")
                 self.upscaler_2x = RealESRGAN(2, "cuda")
                 self.upscaler_2x.load_weights()
             except Exception as e:
-
+                self.log.error(f"Error loading 2x upscaler: {e}")
                 self.upscaler_2x = None
         if scale == 4 and self.upscaler_4x is None:
             try:
-
+                self.log.info("Loading 4x upscaler")
                 self.upscaler_4x = RealESRGAN(4, "cuda")
                 self.upscaler_4x.load_weights()
             except Exception as e:
-
+                self.log.error(f"Error loading 4x upscaler: {e}")
                 self.upscaler_4x = None
 
     def _load_pipeline(self, kind, model, tqdm, **kwargs):
         pipeline = Config.PIPELINES[kind]
         if self.pipe is None:
             try:
-
+                self.log.info(f"Loading {model}")
                 self.model = model
                 if model.lower() in Config.MODEL_CHECKPOINTS.keys():
                     self.pipe = pipeline.from_single_file(
@@ -134,7 +138,7 @@ class Loader:
                 else:
                     self.pipe = pipeline.from_pretrained(model, **kwargs).to("cuda")
             except Exception as e:
-
+                self.log.error(f"Error loading {model}: {e}")
                 self.model = None
                 self.pipe = None
                 return
@@ -145,38 +149,32 @@ class Loader:
 
     def _load_vae(self, taesd=False, model=""):
         vae_type = type(self.pipe.vae)
-        is_kl = issubclass(vae_type,
+        is_kl = issubclass(vae_type, AutoencoderKL)
         is_tiny = issubclass(vae_type, AutoencoderTiny)
 
         # by default all models use KL
         if is_kl and taesd:
-
+            self.log.info("Switching to Tiny VAE")
             self.pipe.vae = AutoencoderTiny.from_pretrained(
-                # can't compile tiny VAE
                 pretrained_model_name_or_path="madebyollin/taesd",
                 torch_dtype=self.pipe.dtype,
             ).to(self.pipe.device)
             return
 
         if is_tiny and not taesd:
-
+            self.log.info("Switching to KL VAE")
             if model.lower() in Config.MODEL_CHECKPOINTS.keys():
-                vae = AutoencoderKL.from_single_file(
+                self.pipe.vae = AutoencoderKL.from_single_file(
                     f"https://huggingface.co/{model}/{Config.MODEL_CHECKPOINTS[model.lower()]}",
                     torch_dtype=self.pipe.dtype,
                 ).to(self.pipe.device)
             else:
-                vae = AutoencoderKL.from_pretrained(
+                self.pipe.vae = AutoencoderKL.from_pretrained(
                     pretrained_model_name_or_path=model,
                     torch_dtype=self.pipe.dtype,
                     subfolder="vae",
                     variant="fp16",
                 ).to(self.pipe.device)
-            self.pipe.vae = torch.compile(
-                mode="reduce-overhead",
-                fullgraph=True,
-                model=vae,
-            )
 
     def _load_deepcache(self, interval=1):
         has_deepcache = hasattr(self.pipe, "deepcache")
@@ -185,6 +183,7 @@ class Loader:
         if has_deepcache:
             self.pipe.deepcache.disable()
         else:
+            self.log.info("Loading DeepCache")
            self.pipe.deepcache = DeepCacheSDHelper(pipe=self.pipe)
         self.pipe.deepcache.set_params(cache_interval=interval)
         self.pipe.deepcache.enable()
@@ -195,10 +194,10 @@ class Loader:
         attrs = ["b1", "b2", "s1", "s2"]
         has_freeu = all(getattr(block, attr, None) is not None for attr in attrs)
         if has_freeu and not freeu:
-
+            self.log.info("Disabling FreeU")
             self.pipe.disable_freeu()
         elif not has_freeu and freeu:
-
+            self.log.info("Enabling FreeU")
             self.pipe.enable_freeu(b1=1.5, b2=1.6, s1=0.9, s2=0.2)
 
     def load(
@@ -271,14 +270,14 @@ class Loader:
         # same model, different scheduler
         if self.model.lower() == model.lower():
             if not same_scheduler:
-
+                self.log.info(f"Switching to {scheduler}")
             if not same_karras:
-
+                self.log.info(f"{'Enabling' if karras else 'Disabling'} Karras sigmas")
             if not same_scheduler or not same_karras:
                 self.pipe.scheduler = Config.SCHEDULERS[scheduler](**scheduler_kwargs)
 
-        self._load_freeu(freeu)
         self._load_vae(taesd, model)
+        self._load_upscaler(scale)
+        self._load_freeu(freeu)
         self._load_deepcache(deepcache)
         self._load_ip_adapter(ip_adapter)
-        self._load_upscaler(scale)
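Loader remains a lazily created singleton (loader.py already imports Lock from threading), which is why the new Logger("Loader") is attached once in __new__ rather than on every call. A stripped-down sketch of the pattern, with names simplified and the attribute list reduced to one:

from threading import Lock


class Singleton:
    _instance = None
    _lock = Lock()

    def __new__(cls):
        # attributes are assigned only on first construction
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                cls._instance.log = None
            return cls._instance


assert Singleton() is Singleton()  # every call returns the same object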
lib/logger.py ADDED
@@ -0,0 +1,73 @@
+import logging
+from functools import wraps
+from threading import Lock
+
+
+class Logger:
+    _instances = {}
+    _lock = Lock()
+
+    def __new__(cls, name="root"):
+        with cls._lock:
+            if name not in cls._instances:
+                instance = super().__new__(cls)
+                instance._init(name)
+                cls._instances[name] = instance
+            return cls._instances[name]
+
+    def _init(self, name):
+        self.logger = logging.getLogger(name)
+        self.logger.setLevel(logging.DEBUG)
+        self.logger.propagate = False
+
+        console_handler = logging.StreamHandler()
+        console_handler.setLevel(logging.INFO)
+
+        file_handler = logging.FileHandler("app.log")
+        file_handler.setLevel(logging.DEBUG)
+
+        formatter = logging.Formatter(
+            "%(asctime)s [%(threadName)s] %(levelname)-5s %(name)s - %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",  # no milliseconds
+        )
+        console_handler.setFormatter(formatter)
+        file_handler.setFormatter(formatter)
+
+        self.logger.addHandler(console_handler)
+        self.logger.addHandler(file_handler)
+
+    def _log(self, level, message):
+        log_message = f"{message}".strip()
+        self.logger.log(level, log_message)
+
+    def debug(self, message, **kwargs):
+        self._log(logging.DEBUG, message, **kwargs)
+
+    def info(self, message, **kwargs):
+        self._log(logging.INFO, message, **kwargs)
+
+    def warning(self, message, **kwargs):
+        self._log(logging.WARNING, message, **kwargs)
+
+    def error(self, message, **kwargs):
+        self._log(logging.ERROR, message, **kwargs)
+
+    def critical(self, message, **kwargs):
+        self._log(logging.CRITICAL, message, **kwargs)
+
+
+# decorator for logging function calls
+def log_fn(name=None):
+    def decorator(fn):
+        @wraps(fn)
+        def wrapper(*args, **kwargs):
+            log = Logger(name or fn.__name__)
+            log.info("begin")
+            result = fn(*args, **kwargs)
+            log.info("end")
+
+            return result
+
+        return wrapper
+
+    return decorator
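A hypothetical usage sketch of the new module (not part of the commit). Logger is a per-name singleton that writes INFO and above to the console and DEBUG and above to app.log, and log_fn wraps a function with begin/end log lines:

from lib.logger import Logger, log_fn

log = Logger("demo")
log.debug("only written to app.log")     # below the console handler's INFO threshold
log.info("written to console and app.log")
assert Logger("demo") is log             # same name -> same instance


@log_fn()  # name defaults to the wrapped function's __name__
def add(a, b):
    return a + b


add(1, 2)  # logs "begin" and "end" under the "add" logger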