Files changed (1) hide show
  1. app.py +0 -1716
app.py DELETED
@@ -1,1716 +0,0 @@
1
- import spaces
2
- import os
3
- from stablepy import Model_Diffusers
4
- from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
5
- from stablepy.diffusers_vanilla.constants import FLUX_CN_UNION_MODES
6
- import torch
7
- import re
8
- from huggingface_hub import HfApi
9
- from stablepy import (
10
- CONTROLNET_MODEL_IDS,
11
- VALID_TASKS,
12
- T2I_PREPROCESSOR_NAME,
13
- FLASH_LORA,
14
- SCHEDULER_CONFIG_MAP,
15
- scheduler_names,
16
- IP_ADAPTER_MODELS,
17
- IP_ADAPTERS_SD,
18
- IP_ADAPTERS_SDXL,
19
- REPO_IMAGE_ENCODER,
20
- ALL_PROMPT_WEIGHT_OPTIONS,
21
- SD15_TASKS,
22
- SDXL_TASKS,
23
- )
24
- import time
25
- from PIL import ImageFile
26
- # import urllib.parse
27
-
28
# Allow PIL to open images whose files were truncated mid-write (common with
# partially-downloaded previews); without this, PIL raises on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True
print(os.getenv("SPACES_ZERO_GPU"))  # log whether the Space runs on HF ZeroGPU

# Comma-separated URL lists consumed by the download loops further below.
# - **Download SD 1.5 Models**
download_model = "https://civitai.com/api/download/models/574369, https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
# - **Download VAEs**
download_vae = "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
# - **Download LoRAs**
download_lora = "https://civitai.com/api/download/models/28907, https://huggingface.co/Leopain/color/resolve/main/Coloring_book_-_LineArt.safetensors, https://civitai.com/api/download/models/135867, https://civitai.com/api/download/models/145907, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://civitai.com/api/download/models/28609, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
37
# Hugging Face repo ids offered in the model dropdown; these are loaded in
# diffusers format directly (no single-file checkpoint download needed).
load_diffusers_format_model = [
    'stabilityai/stable-diffusion-xl-base-1.0',
    'black-forest-labs/FLUX.1-dev',
    'John6666/blue-pencil-flux1-v021-fp8-flux',
    'John6666/wai-ani-flux-v10forfp8-fp8-flux',
    'John6666/xe-anime-flux-v04-fp8-flux',
    'John6666/lyh-anime-flux-v2a1-fp8-flux',
    'John6666/carnival-unchained-v10-fp8-flux',
    'cagliostrolab/animagine-xl-3.1',
    'John6666/epicrealism-xl-v8kiss-sdxl',
    'misri/epicrealismXL_v7FinalDestination',
    'misri/juggernautXL_juggernautX',
    'misri/zavychromaxl_v80',
    'SG161222/RealVisXL_V4.0',
    'SG161222/RealVisXL_V5.0',
    'misri/newrealityxlAllInOne_Newreality40',
    'eienmojiki/Anything-XL',
    'eienmojiki/Starry-XL-v5.2',
    'gsdf/CounterfeitXL',
    'KBlueLeaf/Kohaku-XL-Zeta',
    'John6666/silvermoon-mix-01xl-v11-sdxl',
    'WhiteAiZ/autismmixSDXL_autismmixConfetti_diffusers',
    'kitty7779/ponyDiffusionV6XL',
    'GraydientPlatformAPI/aniverse-pony',
    'John6666/ras-real-anime-screencap-v1-sdxl',
    'John6666/duchaiten-pony-xl-no-score-v60-sdxl',
    'John6666/mistoon-anime-ponyalpha-sdxl',
    'John6666/3x3x3mixxl-v2-sdxl',
    'John6666/3x3x3mixxl-3dv01-sdxl',
    'John6666/ebara-mfcg-pony-mix-v12-sdxl',
    'John6666/t-ponynai3-v51-sdxl',
    'John6666/t-ponynai3-v65-sdxl',
    'John6666/prefect-pony-xl-v3-sdxl',
    'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
    'John6666/wai-real-mix-v11-sdxl',
    'John6666/wai-c-v6-sdxl',
    'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
    'John6666/photo-realistic-pony-v5-sdxl',
    'John6666/pony-realism-v21main-sdxl',
    'John6666/pony-realism-v22main-sdxl',
    'John6666/cyberrealistic-pony-v63-sdxl',
    'John6666/cyberrealistic-pony-v64-sdxl',
    'GraydientPlatformAPI/realcartoon-pony-diffusion',
    'John6666/nova-anime-xl-pony-v5-sdxl',
    'John6666/autismmix-sdxl-autismmix-pony-sdxl',
    'John6666/aimz-dream-real-pony-mix-v3-sdxl',
    'John6666/duchaiten-pony-real-v11fix-sdxl',
    'John6666/duchaiten-pony-real-v20-sdxl',
    'yodayo-ai/kivotos-xl-2.0',
    'yodayo-ai/holodayo-xl-2.1',
    'yodayo-ai/clandestine-xl-1.0',
    'digiplay/majicMIX_sombre_v2',
    'digiplay/majicMIX_realistic_v6',
    'digiplay/majicMIX_realistic_v7',
    'digiplay/DreamShaper_8',
    'digiplay/BeautifulArt_v1',
    'digiplay/DarkSushi2.5D_v1',
    'digiplay/darkphoenix3D_v1.1',
    'digiplay/BeenYouLiteL11_diffusers',
    'Yntec/RevAnimatedV2Rebirth',
    'youknownothing/cyberrealistic_v50',
    'youknownothing/deliberate-v6',
    'GraydientPlatformAPI/deliberate-cyber3',
    'GraydientPlatformAPI/picx-real',
    'GraydientPlatformAPI/perfectworld6',
    'emilianJR/epiCRealism',
    'votepurchase/counterfeitV30_v30',
    'votepurchase/ChilloutMix',
    'Meina/MeinaMix_V11',
    'Meina/MeinaUnreal_V5',
    'Meina/MeinaPastel_V7',
    'GraydientPlatformAPI/realcartoon3d-17',
    'GraydientPlatformAPI/realcartoon-pixar11',
    'GraydientPlatformAPI/realcartoon-real17',
]

# LoRAs that live on the Hub in diffusers format (appended to the local list).
DIFFUSERS_FORMAT_LORAS = [
    "nerijs/animation2k-flux",
    "XLabs-AI/flux-RealismLora",
]

# Optional credentials; missing values disable the corresponding downloads.
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
HF_TOKEN = os.environ.get("HF_READ_TOKEN")
120
-
121
# Maps each ControlNet task to the preprocessors selectable for it in the UI.
PREPROCESSOR_CONTROLNET = {
    "openpose": [
        "Openpose",
        "None",
    ],
    "scribble": [
        "HED",
        "PidiNet",
        "None",
    ],
    "softedge": [
        "PidiNet",
        "HED",
        "HED safe",
        "PidiNet safe",
        "None",
    ],
    "segmentation": [
        "UPerNet",
        "None",
    ],
    "depth": [
        "DPT",
        "Midas",
        "None",
    ],
    "normalbae": [
        "NormalBae",
        "None",
    ],
    "lineart": [
        "Lineart",
        "Lineart coarse",
        "Lineart (anime)",
        "None",
        "None (anime)",
    ],
    "lineart_anime": [
        "Lineart",
        "Lineart coarse",
        "Lineart (anime)",
        "None",
        "None (anime)",
    ],
    "shuffle": [
        "ContentShuffle",
        "None",
    ],
    "canny": [
        "Canny",
        "None",
    ],
    "mlsd": [
        "MLSD",
        "None",
    ],
    "ip2p": [
        "ip2p"
    ],
    "recolor": [
        "Recolor luminance",
        "Recolor intensity",
        "None",
    ],
    "tile": [
        "Mild Blur",
        "Moderate Blur",
        "Heavy Blur",
        "None",
    ],

}

# GUI task label -> stablepy task name.
TASK_STABLEPY = {
    'txt2img': 'txt2img',
    'img2img': 'img2img',
    'inpaint': 'inpaint',
    # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
    # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
    # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
    # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
    # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
    'openpose ControlNet': 'openpose',
    'canny ControlNet': 'canny',
    'mlsd ControlNet': 'mlsd',
    'scribble ControlNet': 'scribble',
    'softedge ControlNet': 'softedge',
    'segmentation ControlNet': 'segmentation',
    'depth ControlNet': 'depth',
    'normalbae ControlNet': 'normalbae',
    'lineart ControlNet': 'lineart',
    'lineart_anime ControlNet': 'lineart_anime',
    'shuffle ControlNet': 'shuffle',
    'ip2p ControlNet': 'ip2p',
    'optical pattern ControlNet': 'pattern',
    'recolor ControlNet': 'recolor',
    'tile ControlNet': 'tile',
}

TASK_MODEL_LIST = list(TASK_STABLEPY.keys())

# Upscaler label -> either a built-in identifier (first 9 entries) or a
# download URL for an ESRGAN-family weight file.
UPSCALER_DICT_GUI = {
    None: None,
    "Lanczos": "Lanczos",
    "Nearest": "Nearest",
    'Latent': 'Latent',
    'Latent (antialiased)': 'Latent (antialiased)',
    'Latent (bicubic)': 'Latent (bicubic)',
    'Latent (bicubic antialiased)': 'Latent (bicubic antialiased)',
    'Latent (nearest)': 'Latent (nearest)',
    'Latent (nearest-exact)': 'Latent (nearest-exact)',
    "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
    "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
    "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
    "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
    "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
    "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
    "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
    "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
    "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
    "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
    "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
    "lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
    "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
    "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
}

UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
249
-
250
-
251
def download_things(directory, url, hf_token="", civitai_api_key=""):
    """Download a model/VAE/LoRA/embedding file into *directory*.

    Dispatches on the URL's host: Google Drive links go through ``gdown``,
    Hugging Face and generic links through ``aria2c``, and Civitai links
    require an API key appended as a ``?token=`` query parameter.

    Args:
        directory: Target directory for the downloaded file.
        url: Source URL (may contain ``/blob/`` or ``?download=true`` noise).
        hf_token: Optional Hugging Face token for gated/private repos.
        civitai_api_key: Civitai API key; without it Civitai downloads are
            skipped with a console warning.

    Fix vs. original: URLs/paths are now shell-quoted before interpolation
    into ``os.system`` (URLs with ``&``, spaces, etc. previously broke the
    command or allowed injection), and the working directory is restored
    even if the gdown call fails.
    """
    import shlex  # local import: only needed for safe shell quoting

    url = url.strip()
    q_url = shlex.quote(url)
    q_dir = shlex.quote(directory)

    if "drive.google.com" in url:
        original_dir = os.getcwd()
        try:
            os.chdir(directory)
            os.system(f"gdown --fuzzy {q_url}")
        finally:
            # Always restore the CWD, even if gdown raises/fails.
            os.chdir(original_dir)
    elif "huggingface.co" in url:
        url = url.replace("?download=true", "")
        if "/blob/" in url:
            # Blob pages are HTML; the raw file lives under /resolve/.
            url = url.replace("/blob/", "/resolve/")
        q_url = shlex.quote(url)
        q_name = shlex.quote(url.split('/')[-1])
        if hf_token:
            header = shlex.quote(f"Authorization: Bearer {hf_token}")
            os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={header} -c -x 16 -k 1M -s 16 {q_url} -d {q_dir} -o {q_name}")
        else:
            os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {q_url} -d {q_dir} -o {q_name}")
    elif "civitai.com" in url:
        if "?" in url:
            url = url.split("?")[0]
        if civitai_api_key:
            # Civitai authenticates via a token query parameter.
            url = url + f"?token={civitai_api_key}"
            os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {q_dir} {shlex.quote(url)}")
        else:
            print("\033[91mYou need an API key to download Civitai models.\033[0m")
    else:
        os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {q_dir} {q_url}")
279
-
280
-
281
def get_model_list(directory_path):
    """Return full paths of model weight files found in *directory_path*.

    A file counts as a model when its extension is one of the known
    checkpoint formats. Each hit is echoed to the console in blue.
    """
    valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
    found = []
    for entry in os.listdir(directory_path):
        extension = os.path.splitext(entry)[1]
        if extension not in valid_extensions:
            continue
        full_path = os.path.join(directory_path, entry)
        found.append(full_path)
        print('\033[34mFILE: ' + full_path + '\033[0m')
    return found
293
-
294
-
295
# Create the local asset directories (idempotent).
directory_models = 'models'
os.makedirs(directory_models, exist_ok=True)
directory_loras = 'loras'
os.makedirs(directory_loras, exist_ok=True)
directory_vaes = 'vaes'
os.makedirs(directory_vaes, exist_ok=True)

# Download stuffs — each URL list is comma-separated; files already on disk
# (matched by final path segment) are skipped.
for url in [url.strip() for url in download_model.split(',')]:
    if not os.path.exists(f"./models/{url.split('/')[-1]}"):
        download_things(directory_models, url, HF_TOKEN, CIVITAI_API_KEY)
for url in [url.strip() for url in download_vae.split(',')]:
    if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
        download_things(directory_vaes, url, HF_TOKEN, CIVITAI_API_KEY)
for url in [url.strip() for url in download_lora.split(',')]:
    if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
        download_things(directory_loras, url, HF_TOKEN, CIVITAI_API_KEY)

# Download Embeddings (note: directory name 'embedings' [sic] is used
# consistently elsewhere — do not "fix" the spelling in isolation).
directory_embeds = 'embedings'
os.makedirs(directory_embeds, exist_ok=True)
download_embeds = [
    'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
    'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
    'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
]

for url_embed in download_embeds:
    if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
        download_things(directory_embeds, url_embed, HF_TOKEN, CIVITAI_API_KEY)

# Build list models — dropdown contents for the UI; "None" sentinels are
# prepended where "no selection" is a valid choice.
embed_list = get_model_list(directory_embeds)
model_list = get_model_list(directory_models)
model_list = load_diffusers_format_model + model_list
lora_model_list = get_model_list(directory_loras)
lora_model_list.insert(0, "None")
lora_model_list = lora_model_list + DIFFUSERS_FORMAT_LORAS
vae_model_list = get_model_list(directory_vaes)
vae_model_list.insert(0, "None")

print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
337
-
338
#######################
# GUI
#######################
# Late imports: the GUI stack is only pulled in after asset setup completes.
import gradio as gr
import logging
logging.getLogger("diffusers").setLevel(logging.ERROR)
import diffusers
diffusers.utils.logging.set_verbosity(40)  # 40 == ERROR
import warnings
# Silence noisy deprecation chatter from the inference libraries.
warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
from stablepy import logger

logger.setLevel(logging.DEBUG)
353
-
354
# Warning shown when the selected VAE doesn't match the model family.
msg_inc_vae = (
    "Use the right VAE for your model to maintain image quality. The wrong"
    " VAE can lead to poor results, like blurriness in the generated images."
)

# GUI task labels filtered per model family (stablepy exposes the valid
# backend task names per family).
SDXL_TASK = [k for k, v in TASK_STABLEPY.items() if v in SDXL_TASKS]
SD_TASK = [k for k, v in TASK_STABLEPY.items() if v in SD15_TASKS]
# FLUX: the three base tasks plus the ControlNet-union modes it supports.
FLUX_TASK = list(TASK_STABLEPY.keys())[:3] + [k for k, v in TASK_STABLEPY.items() if v in FLUX_CN_UNION_MODES.keys()]

MODEL_TYPE_TASK = {
    "SD 1.5": SD_TASK,
    "SDXL": SDXL_TASK,
    "FLUX": FLUX_TASK,
}

# HF Hub pipeline tag -> model family label used throughout the GUI.
MODEL_TYPE_CLASS = {
    "diffusers:StableDiffusionPipeline": "SD 1.5",
    "diffusers:StableDiffusionXLPipeline": "SDXL",
    "diffusers:FluxPipeline": "FLUX",
}

# Samplers offered for hires/post passes (last two scheduler entries excluded).
POST_PROCESSING_SAMPLER = ["Use same sampler"] + scheduler_names[:-2]

CSS = """
.contain { display: flex; flex-direction: column; }
#component-0 { height: 100%; }
#gallery { flex-grow: 1; }
"""

SUBTITLE_GUI = (
    "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
    " to perform different tasks in image generation."
)
387
-
388
-
389
def extract_parameters(input_string):
    """Parse A1111-style generation metadata into a parameter dict.

    Expected layout: ``<prompt> Negative prompt: <neg> Steps: <n>, Key: val, ...``.
    Degrades gracefully: with no markers at all the whole text becomes the
    prompt; with only a negative prompt the trailing settings are skipped.
    Note that the generic key/value pass re-stores ``Steps`` as a string,
    while ``width``/``height`` (parsed from ``Size``) stay ints.
    """
    parameters = {}
    flat = input_string.replace("\n", "")

    if "Negative prompt:" not in flat:
        if "Steps:" not in flat:
            print("Invalid metadata")
            parameters["prompt"] = flat
            return parameters
        # Insert a synthetic empty negative prompt so one code path handles both.
        flat = flat.replace("Steps:", "Negative prompt: Steps:")

    segments = flat.split("Negative prompt:")
    parameters["prompt"] = segments[0].strip()

    if "Steps:" not in segments[1]:
        print("Steps not detected")
        parameters["neg_prompt"] = segments[1].strip()
        return parameters

    tail = segments[1].split("Steps:")
    parameters["neg_prompt"] = tail[0].strip()
    flat = "Steps:" + tail[1]

    # Steps as an int (may be overwritten as a string by the generic pass below).
    steps_found = re.search(r'Steps: (\d+)', flat)
    if steps_found:
        parameters['Steps'] = int(steps_found.group(1))

    # "Size: WxH" also yields integer width/height.
    size_found = re.search(r'Size: (\d+x\d+)', flat)
    if size_found:
        parameters['Size'] = size_found.group(1)
        width, height = map(int, parameters['Size'].split('x'))
        parameters['width'] = width
        parameters['height'] = height

    # Generic "Key: value" pairs delimited by ", NextKey".
    for key, value in re.findall(r'(\w+): (.*?)(?=, \w+|$)', flat):
        parameters[key] = value.strip('"')

    return parameters
430
-
431
-
432
def get_my_lora(link_url):
    """Download user-supplied LoRA URLs and refresh the LoRA dropdowns.

    *link_url* is a comma-separated URL string; files already present in the
    loras directory are not re-downloaded. Returns one gr.update per LoRA
    selector in the UI (five in total), all sharing the refreshed choices.
    """
    for url in [u.strip() for u in link_url.split(',')]:
        if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
            download_things(directory_loras, url, HF_TOKEN, CIVITAI_API_KEY)

    refreshed = get_model_list(directory_loras)
    refreshed.insert(0, "None")
    refreshed = refreshed + DIFFUSERS_FORMAT_LORAS

    return tuple(gr.update(choices=refreshed) for _ in range(5))
451
-
452
-
453
def info_html(json_data, title, subtitle):
    """Render a small collapsible HTML info panel.

    NOTE(review): *json_data* is accepted but never used — kept for caller
    compatibility; confirm before removing.
    """
    panel = f"""
    <div style='padding: 0; border-radius: 10px;'>
        <p style='margin: 0; font-weight: bold;'>{title}</p>
        <details>
            <summary>Details</summary>
            <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
        </details>
    </div>
    """
    return panel
463
-
464
-
465
def get_model_type(repo_id: str):
    """Classify a Hub repo as "SD 1.5", "SDXL" or "FLUX" from its tags.

    Falls back to "SD 1.5" when the repo can't be queried (offline, private
    without token, timeout) or carries no recognized pipeline tag.
    """
    api = HfApi(token=os.environ.get("HF_TOKEN"))  # token for private/gated repos
    default = "SD 1.5"
    try:
        info = api.model_info(repo_id=repo_id, timeout=5.0)
        for tag in info.tags:
            if tag in MODEL_TYPE_CLASS.keys():
                return MODEL_TYPE_CLASS.get(tag, default)
    except Exception:
        return default
    return default
476
-
477
-
478
class GuiSD:
    """Owns the stablepy diffusion pipeline and drives generation for the UI."""

    def __init__(self, stream=True):
        # NOTE(review): `stream` is accepted but not referenced in this view —
        # presumably kept for caller compatibility; confirm before removing.
        self.model = None

        print("Loading model...")
        # Warm-start on CPU with a small SD 1.5 checkpoint; the real model is
        # swapped in later via load_new_model().
        self.model = Model_Diffusers(
            base_model_id="Lykon/dreamshaper-8",
            task_name="txt2img",
            vae_model=None,
            type_model_precision=torch.float16,
            retain_task_model_in_cache=False,
            device="cpu",
        )
        self.model.load_beta_styles()
492
-
493
    def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
        """Swap the underlying pipeline to *model_name*; yields status strings.

        Generator so Gradio can stream "Loading…" / "Loaded" messages to the
        UI. The model is loaded on CPU; generate_pipeline moves it to CUDA.
        """

        yield f"Loading model: {model_name}"

        vae_model = vae_model if vae_model != "None" else None
        model_type = get_model_type(model_name)

        if vae_model:
            # Heuristic family check: warn when an SDXL VAE is paired with a
            # non-SDXL model (or vice versa).
            vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
            if model_type != vae_type:
                gr.Warning(msg_inc_vae)

        self.model.device = torch.device("cpu")
        # FLUX weights are bf16; SD families use fp16.
        dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16

        self.model.load_pipe(
            model_name,
            task_name=TASK_STABLEPY[task],
            vae_model=vae_model,
            type_model_precision=dtype_model,
            retain_task_model_in_cache=False,
        )

        yield f"Model loaded: {model_name}"
517
-
518
    # @spaces.GPU(duration=59)
    @torch.inference_mode()
    def generate_pipeline(
        self,
        prompt,
        neg_prompt,
        num_images,
        steps,
        cfg,
        clip_skip,
        seed,
        lora1,
        lora_scale1,
        lora2,
        lora_scale2,
        lora3,
        lora_scale3,
        lora4,
        lora_scale4,
        lora5,
        lora_scale5,
        sampler,
        img_height,
        img_width,
        model_name,
        vae_model,
        task,
        image_control,
        preprocessor_name,
        preprocess_resolution,
        image_resolution,
        style_prompt,  # list []
        style_json_file,
        image_mask,
        strength,
        low_threshold,
        high_threshold,
        value_threshold,
        distance_threshold,
        controlnet_output_scaling_in_unet,
        controlnet_start_threshold,
        controlnet_stop_threshold,
        textual_inversion,
        syntax_weights,
        upscaler_model_path,
        upscaler_increases_size,
        esrgan_tile,
        esrgan_tile_overlap,
        hires_steps,
        hires_denoising_strength,
        hires_sampler,
        hires_prompt,
        hires_negative_prompt,
        hires_before_adetailer,
        hires_after_adetailer,
        loop_generation,
        leave_progress_bar,
        disable_progress_bar,
        image_previews,
        display_images,
        save_generated_images,
        image_storage_location,
        retain_compel_previous_load,
        retain_detailfix_model_previous_load,
        retain_hires_model_previous_load,
        t2i_adapter_preprocessor,
        t2i_adapter_conditioning_scale,
        t2i_adapter_conditioning_factor,
        xformers_memory_efficient_attention,
        freeu,
        generator_in_cpu,
        adetailer_inpaint_only,
        adetailer_verbose,
        adetailer_sampler,
        adetailer_active_a,
        prompt_ad_a,
        negative_prompt_ad_a,
        strength_ad_a,
        face_detector_ad_a,
        person_detector_ad_a,
        hand_detector_ad_a,
        mask_dilation_a,
        mask_blur_a,
        mask_padding_a,
        adetailer_active_b,
        prompt_ad_b,
        negative_prompt_ad_b,
        strength_ad_b,
        face_detector_ad_b,
        person_detector_ad_b,
        hand_detector_ad_b,
        mask_dilation_b,
        mask_blur_b,
        mask_padding_b,
        retain_task_cache_gui,
        image_ip1,
        mask_ip1,
        model_ip1,
        mode_ip1,
        scale_ip1,
        image_ip2,
        mask_ip2,
        model_ip2,
        mode_ip2,
        scale_ip2,
        pag_scale,
    ):
        """Run one generation job and stream (image, status-HTML) tuples.

        Generator: yields after each model callback so the Gradio UI can show
        progress. The enormous flat parameter list mirrors the Gradio inputs
        one-to-one — do not reorder without updating the UI wiring.

        Raises:
            ValueError: when a control/inpaint task is missing its control
                image or mask.
        """

        vae_model = vae_model if vae_model != "None" else None
        loras_list = [lora1, lora2, lora3, lora4, lora5]
        vae_msg = f"VAE: {vae_model}" if vae_model else ""
        msg_lora = ""

        print("Config model:", model_name, vae_model, loras_list)

        task = TASK_STABLEPY[task]

        # Collect only the IP-Adapter slots that actually have an image.
        params_ip_img = []
        params_ip_msk = []
        params_ip_model = []
        params_ip_mode = []
        params_ip_scale = []

        all_adapters = [
            (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
            (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
        ]

        for imgip, mskip, modelip, modeip, scaleip in all_adapters:
            if imgip:
                params_ip_img.append(imgip)
                if mskip:
                    params_ip_msk.append(mskip)
                params_ip_model.append(modelip)
                params_ip_mode.append(modeip)
                params_ip_scale.append(scaleip)

        self.model.stream_config(concurrency=5, latent_resize_by=1, vae_decoding=False)

        if task != "txt2img" and not image_control:
            raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")

        if task == "inpaint" and not image_mask:
            raise ValueError("No mask image found: Specify one in 'Image Mask'")

        # First 9 UPSCALER_KEYS are built-ins; anything else is a URL that
        # must be downloaded to ./upscalers first.
        if upscaler_model_path in UPSCALER_KEYS[:9]:
            upscaler_model = upscaler_model_path
        else:
            directory_upscalers = 'upscalers'
            os.makedirs(directory_upscalers, exist_ok=True)

            url_upscaler = UPSCALER_DICT_GUI[upscaler_model_path]

            if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
                download_things(directory_upscalers, url_upscaler, HF_TOKEN)

            upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"

        logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)

        # ADetailer pass A (face/person/hand detail fixing).
        adetailer_params_A = {
            "face_detector_ad": face_detector_ad_a,
            "person_detector_ad": person_detector_ad_a,
            "hand_detector_ad": hand_detector_ad_a,
            "prompt": prompt_ad_a,
            "negative_prompt": negative_prompt_ad_a,
            "strength": strength_ad_a,
            # "image_list_task" : None,
            "mask_dilation": mask_dilation_a,
            "mask_blur": mask_blur_a,
            "mask_padding": mask_padding_a,
            "inpaint_only": adetailer_inpaint_only,
            "sampler": adetailer_sampler,
        }

        # ADetailer pass B (no inpaint_only/sampler overrides, unlike pass A).
        adetailer_params_B = {
            "face_detector_ad": face_detector_ad_b,
            "person_detector_ad": person_detector_ad_b,
            "hand_detector_ad": hand_detector_ad_b,
            "prompt": prompt_ad_b,
            "negative_prompt": negative_prompt_ad_b,
            "strength": strength_ad_b,
            # "image_list_task" : None,
            "mask_dilation": mask_dilation_b,
            "mask_blur": mask_blur_b,
            "mask_padding": mask_padding_b,
        }
        pipe_params = {
            "prompt": prompt,
            "negative_prompt": neg_prompt,
            "img_height": img_height,
            "img_width": img_width,
            "num_images": num_images,
            "num_steps": steps,
            "guidance_scale": cfg,
            "clip_skip": clip_skip,
            "pag_scale": float(pag_scale),
            "seed": seed,
            "image": image_control,
            "preprocessor_name": preprocessor_name,
            "preprocess_resolution": preprocess_resolution,
            "image_resolution": image_resolution,
            "style_prompt": style_prompt if style_prompt else "",
            "style_json_file": "",
            "image_mask": image_mask,  # only for Inpaint
            "strength": strength,  # only for Inpaint or ...
            "low_threshold": low_threshold,
            "high_threshold": high_threshold,
            "value_threshold": value_threshold,
            "distance_threshold": distance_threshold,
            "lora_A": lora1 if lora1 != "None" else None,
            "lora_scale_A": lora_scale1,
            "lora_B": lora2 if lora2 != "None" else None,
            "lora_scale_B": lora_scale2,
            "lora_C": lora3 if lora3 != "None" else None,
            "lora_scale_C": lora_scale3,
            "lora_D": lora4 if lora4 != "None" else None,
            "lora_scale_D": lora_scale4,
            "lora_E": lora5 if lora5 != "None" else None,
            "lora_scale_E": lora_scale5,
            # Textual inversions only work on non-SDXL pipelines here.
            "textual_inversion": embed_list if textual_inversion and self.model.class_name != "StableDiffusionXLPipeline" else [],
            "syntax_weights": syntax_weights,  # "Classic"
            "sampler": sampler,
            "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
            "gui_active": True,
            "loop_generation": loop_generation,
            "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
            "control_guidance_start": float(controlnet_start_threshold),
            "control_guidance_end": float(controlnet_stop_threshold),
            "generator_in_cpu": generator_in_cpu,
            "FreeU": freeu,
            "adetailer_A": adetailer_active_a,
            "adetailer_A_params": adetailer_params_A,
            "adetailer_B": adetailer_active_b,
            "adetailer_B_params": adetailer_params_B,
            "leave_progress_bar": leave_progress_bar,
            "disable_progress_bar": disable_progress_bar,
            "image_previews": image_previews,
            "display_images": display_images,
            "save_generated_images": save_generated_images,
            "image_storage_location": image_storage_location,
            "retain_compel_previous_load": retain_compel_previous_load,
            "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
            "retain_hires_model_previous_load": retain_hires_model_previous_load,
            "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
            "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
            "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
            "upscaler_model_path": upscaler_model,
            "upscaler_increases_size": upscaler_increases_size,
            "esrgan_tile": esrgan_tile,
            "esrgan_tile_overlap": esrgan_tile_overlap,
            "hires_steps": hires_steps,
            "hires_denoising_strength": hires_denoising_strength,
            "hires_prompt": hires_prompt,
            "hires_negative_prompt": hires_negative_prompt,
            "hires_sampler": hires_sampler,
            "hires_before_adetailer": hires_before_adetailer,
            "hires_after_adetailer": hires_after_adetailer,
            "ip_adapter_image": params_ip_img,
            "ip_adapter_mask": params_ip_msk,
            "ip_adapter_model": params_ip_model,
            "ip_adapter_mode": params_ip_mode,
            "ip_adapter_scale": params_ip_scale,
        }

        # Move to GPU for the actual run (model was loaded on CPU).
        self.model.device = torch.device("cuda:0")
        # FLUX-style pipelines expose `transformer`; push it to CUDA when LoRAs
        # are in play so merging happens on-device.
        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
            self.model.pipe.transformer.to(self.model.device)
            print("transformer to cuda")

        info_state = "PROCESSING "
        # stablepy yields intermediate previews; image_path is set on the
        # final yield, at which point the full status HTML is assembled.
        for img, seed, image_path, metadata in self.model(**pipe_params):
            info_state += ">"
            if image_path:
                info_state = f"COMPLETE. Seeds: {str(seed)}"
                if vae_msg:
                    info_state = info_state + "<br>" + vae_msg

                for status, lora in zip(self.model.lora_status, self.model.lora_memory):
                    if status:
                        msg_lora += f"<br>Loaded: {lora}"
                    elif status is not None:
                        msg_lora += f"<br>Error with: {lora}"

                if msg_lora:
                    info_state += msg_lora

                info_state = info_state + "<br>" + "GENERATION DATA:<br>" + metadata[0].replace("\n", "<br>") + "<br>-------<br>"

                download_links = "<br>".join(
                    [
                        f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
                        for i, path in enumerate(image_path)
                    ]
                )
                if save_generated_images:
                    info_state += f"<br>{download_links}"

            yield img, info_state
817
-
818
-
819
def update_task_options(model_name, task_name):
    """Restrict the task dropdown to tasks the selected model supports.

    Falls back to "txt2img" when the current selection isn't valid for the
    model's family.
    """
    allowed = MODEL_TYPE_TASK[get_model_type(model_name)]
    selected = task_name if task_name in allowed else "txt2img"
    return gr.update(value=selected, choices=allowed)
826
-
827
-
828
def dynamic_gpu_duration(func, duration, *args):
    """Run generator *func(*args)* under a ZeroGPU allocation of *duration* s.

    The decorator must be applied at call time because the duration is only
    known per-request.
    """

    @spaces.GPU(duration=duration)
    def gpu_runner():
        yield from func(*args)

    return gpu_runner()
835
-
836
-
837
@spaces.GPU
def dummy_gpu():
    """No-op GPU entry point (registers a GPU function with ZeroGPU)."""
    return None
840
-
841
-
842
def sd_gen_generate_pipeline(*args):
    """Front-end wrapper: optionally merge LoRAs on CPU, then run generation
    on GPU via dynamic_gpu_duration.

    Argument layout (positional, wired from Gradio): the last three args are
    [load_lora_cpu, verbose, gpu_duration]; args[7..16] are the five
    (lora, scale) pairs; everything before the last three is forwarded
    verbatim to GuiSD.generate_pipeline.
    """

    gpu_duration_arg = int(args[-1]) if args[-1] else 59
    verbose_arg = int(args[-2])
    load_lora_cpu = args[-3]
    generation_args = args[:-3]
    # Normalize the "None" sentinel from the dropdowns to real None.
    lora_list = [
        None if item == "None" else item
        for item in [args[7], args[9], args[11], args[13], args[15]]
    ]
    lora_status = [None] * 5

    msg_load_lora = "Updating LoRAs in GPU..."
    if load_lora_cpu:
        msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."

    # Only touch LoRAs when the selection actually changed and isn't empty.
    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
        yield None, msg_load_lora

        # Load lora in CPU
        if load_lora_cpu:
            lora_status = sd_gen.model.lora_merge(
                lora_A=lora_list[0], lora_scale_A=args[8],
                lora_B=lora_list[1], lora_scale_B=args[10],
                lora_C=lora_list[2], lora_scale_C=args[12],
                lora_D=lora_list[3], lora_scale_D=args[14],
                lora_E=lora_list[4], lora_scale_E=args[16],
            )
            print(lora_status)

    if verbose_arg:
        for status, lora in zip(lora_status, lora_list):
            if status:
                gr.Info(f"LoRA loaded in CPU: {lora}")
            elif status is not None:
                gr.Warning(f"Failed to load LoRA: {lora}")

    # Nothing merged this round but LoRAs are cached from a previous run.
    if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
        lora_cache_msg = ", ".join(
            str(x) for x in sd_gen.model.lora_memory if x is not None
        )
        gr.Info(f"LoRAs in cache: {lora_cache_msg}")

    msg_request = f"Requesting {gpu_duration_arg}s. of GPU time"
    gr.Info(msg_request)
    print(msg_request)

    # yield from sd_gen.generate_pipeline(*generation_args)

    start_time = time.time()

    yield from dynamic_gpu_duration(
        sd_gen.generate_pipeline,
        gpu_duration_arg,
        *generation_args,
    )

    end_time = time.time()

    if verbose_arg:
        execution_time = end_time - start_time
        msg_task_complete = (
            f"GPU task complete in: {round(execution_time, 0) + 1} seconds"
        )
        gr.Info(msg_task_complete)
        print(msg_task_complete)
909
-
910
def extract_exif_data(image):
    """Return generation metadata stored in *image*.

    Checks the well-known metadata keys in priority order and returns the
    first match; otherwise returns the whole info mapping as text. Returns
    "" for a missing image and an error string on any failure.
    """
    if image is None:
        return ""

    try:
        info = image.info
        for candidate in ('parameters', 'metadata', 'prompt', 'Comment'):
            if candidate in info:
                return info[candidate]
        return str(info)
    except Exception as e:
        return f"Error extracting metadata: {str(e)}"
924
-
925
-
926
@spaces.GPU(duration=20)
def esrgan_upscale(image, upscaler_name, upscaler_size):
    """Upscale *image* by *upscaler_size* with the selected ESRGAN model.

    Downloads the model weights on first use and saves the result with the
    original image metadata attached. Returns the saved file path, or None
    when no image was provided.
    """
    if image is None:
        return None

    from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
    from stablepy import UpscalerESRGAN

    metadata = extract_exif_data(image)

    url_upscaler = UPSCALER_DICT_GUI[upscaler_name]
    model_path = f"./upscalers/{url_upscaler.split('/')[-1]}"
    os.makedirs('upscalers', exist_ok=True)
    if not os.path.exists(model_path):
        # Fetch the weights once; later calls reuse the cached file.
        download_things('upscalers', url_upscaler, HF_TOKEN)

    upscaled = UpscalerESRGAN(0, 0).upscale(image, upscaler_size, model_path)

    return save_pil_image_with_metadata(upscaled, f'{os.getcwd()}/up_images', metadata)
947
-
948
-
949
# NOTE(review): `zerogpu` looks like a flag read by the Hugging Face `spaces`
# runtime to mark these callables as ZeroGPU entry points — confirm.
dynamic_gpu_duration.zerogpu = True
sd_gen_generate_pipeline.zerogpu = True
# Single shared generator/state object used by all Gradio callbacks below.
sd_gen = GuiSD()
952
-
953
- with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
954
- gr.Markdown("# 🧩 DiffuseCraft")
955
- gr.Markdown(SUBTITLE_GUI)
956
- with gr.Tab("Generation"):
957
- with gr.Row():
958
-
959
- with gr.Column(scale=2):
960
-
961
- task_gui = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
962
- model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
963
- prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
964
- neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
965
- with gr.Row(equal_height=False):
966
- set_params_gui = gr.Button(value="↙️", variant="secondary", size="sm")
967
- clear_prompt_gui = gr.Button(value="🗑️", variant="secondary", size="sm")
968
- set_random_seed = gr.Button(value="🎲", variant="secondary", size="sm")
969
- generate_button = gr.Button(value="GENERATE IMAGE", variant="primary")
970
-
971
- model_name_gui.change(
972
- update_task_options,
973
- [model_name_gui, task_gui],
974
- [task_gui],
975
- )
976
-
977
- load_model_gui = gr.HTML()
978
-
979
- result_images = gr.Gallery(
980
- label="Generated images",
981
- show_label=False,
982
- elem_id="gallery",
983
- columns=[2],
984
- rows=[2],
985
- object_fit="contain",
986
- # height="auto",
987
- interactive=False,
988
- preview=False,
989
- selected_index=50,
990
- )
991
-
992
- actual_task_info = gr.HTML()
993
-
994
- with gr.Row(equal_height=False, variant="default"):
995
- gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
996
- with gr.Column():
997
- verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
998
- load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU (Save GPU time)")
999
-
1000
- with gr.Column(scale=1):
1001
- steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
1002
- cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7., label="CFG")
1003
- sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler a")
1004
- img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
1005
- img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
1006
- seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
1007
- pag_scale_gui = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
1008
- with gr.Row():
1009
- clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
1010
- free_u_gui = gr.Checkbox(value=False, label="FreeU")
1011
-
1012
- with gr.Row(equal_height=False):
1013
-
1014
def run_set_params_gui(base_prompt, name_model):
    """Parse PNG-info-style parameters embedded in *base_prompt* and map
    them onto the generation widgets.

    Returns a list of gr.update objects in the fixed order of
    ``valid_receptors`` (prompt, neg_prompt, Steps, width, height, Seed,
    Sampler, scale, skip, Model) — this order must match the outputs list
    wired to ``set_params_gui.click``.
    """
    valid_receptors = {  # default values used when a key is absent/invalid
        "prompt": gr.update(value=base_prompt),
        "neg_prompt": gr.update(value=""),
        "Steps": gr.update(value=30),
        "width": gr.update(value=1024),
        "height": gr.update(value=1024),
        "Seed": gr.update(value=-1),
        "Sampler": gr.update(value="Euler a"),
        "scale": gr.update(value=7.),  # cfg
        "skip": gr.update(value=True),
        "Model": gr.update(value=name_model),
    }
    valid_keys = list(valid_receptors.keys())

    parameters = extract_parameters(base_prompt)

    for key, val in parameters.items():
        if key in valid_keys:
            # Any per-key coercion error just keeps the default value.
            try:
                if key == "Sampler":
                    # Ignore samplers this UI does not offer.
                    if val not in scheduler_names:
                        continue
                elif key == "skip":
                    # Clip-skip arrives as a number; >= 2 means "enabled".
                    if "," in str(val):
                        val = val.replace(",", "")
                    if int(val) >= 2:
                        val = True
                if key == "prompt":
                    # Strip <lora:...> tags; LoRAs are set via their own widgets.
                    if ">" in val and "<" in val:
                        val = re.sub(r'<[^>]+>', '', val)
                        print("Removed LoRA written in the prompt")
                if key in ["prompt", "neg_prompt"]:
                    # Collapse repeated commas/whitespace left by tag removal.
                    val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
                if key in ["Steps", "width", "height", "Seed"]:
                    val = int(val)
                if key == "scale":
                    val = float(val)
                if key == "Model":
                    # Match a substring against the known model list; keep the
                    # current model when nothing matches.
                    filtered_models = [m for m in model_list if val in m]
                    if filtered_models:
                        val = filtered_models[0]
                    else:
                        val = name_model
                if key == "Seed":
                    # Deliberately ignore the parsed seed: stay at -1 (random).
                    continue
                valid_receptors[key] = gr.update(value=val)
            except Exception as e:
                print(str(e))
    return [value for value in valid_receptors.values()]
1067
-
1068
- set_params_gui.click(
1069
- run_set_params_gui, [prompt_gui, model_name_gui], [
1070
- prompt_gui,
1071
- neg_prompt_gui,
1072
- steps_gui,
1073
- img_width_gui,
1074
- img_height_gui,
1075
- seed_gui,
1076
- sampler_gui,
1077
- cfg_gui,
1078
- clip_skip_gui,
1079
- model_name_gui,
1080
- ],
1081
- )
1082
-
1083
def run_clear_prompt_gui():
    """Blank out both the prompt and the negative-prompt textboxes."""
    empty = ""
    return gr.update(value=empty), gr.update(value=empty)
1085
- clear_prompt_gui.click(
1086
- run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
1087
- )
1088
-
1089
def run_set_random_seed():
    """Reset the seed field to -1, which means "pick a random seed"."""
    return -1
1091
- set_random_seed.click(
1092
- run_set_random_seed, [], seed_gui
1093
- )
1094
-
1095
- num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
1096
- prompt_s_options = [
1097
- ("Compel format: (word)weight", "Compel"),
1098
- ("Classic format: (word:weight)", "Classic"),
1099
- ("Classic-original format: (word:weight)", "Classic-original"),
1100
- ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
1101
- ("Classic-ignore", "Classic-ignore"),
1102
- ("None", "None"),
1103
- ]
1104
- prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[1][1])
1105
- vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
1106
-
1107
- with gr.Accordion("Hires fix", open=False, visible=True):
1108
-
1109
- upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
1110
- upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
1111
- esrgan_tile_gui = gr.Slider(minimum=0, value=0, maximum=500, step=1, label="ESRGAN Tile")
1112
- esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=8, label="ESRGAN Tile Overlap")
1113
- hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
1114
- hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
1115
- hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
1116
- hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
1117
- hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
1118
-
1119
- with gr.Accordion("LoRA", open=False, visible=True):
1120
-
1121
def lora_dropdown(label):
    """Build a LoRA selector dropdown backed by the shared LoRA list."""
    return gr.Dropdown(
        label=label,
        choices=lora_model_list,
        value="None",
        allow_custom_value=True,
    )
1123
-
1124
def lora_scale_slider(label):
    """Build a LoRA weight slider spanning [-2, 2] with a 0.33 default."""
    return gr.Slider(label=label, value=0.33, minimum=-2, maximum=2, step=0.01)
1126
-
1127
- lora1_gui = lora_dropdown("Lora1")
1128
- lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
1129
- lora2_gui = lora_dropdown("Lora2")
1130
- lora_scale_2_gui = lora_scale_slider("Lora Scale 2")
1131
- lora3_gui = lora_dropdown("Lora3")
1132
- lora_scale_3_gui = lora_scale_slider("Lora Scale 3")
1133
- lora4_gui = lora_dropdown("Lora4")
1134
- lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
1135
- lora5_gui = lora_dropdown("Lora5")
1136
- lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
1137
-
1138
- with gr.Accordion("From URL", open=False, visible=True):
1139
- text_lora = gr.Textbox(label="LoRA URL", placeholder="https://civitai.com/api/download/models/28907", lines=1)
1140
- button_lora = gr.Button("Get and update lists of LoRAs")
1141
- button_lora.click(
1142
- get_my_lora,
1143
- [text_lora],
1144
- [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui]
1145
- )
1146
-
1147
- with gr.Accordion("IP-Adapter", open=False, visible=True):
1148
-
1149
- IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
1150
- MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
1151
-
1152
- with gr.Accordion("IP-Adapter 1", open=False, visible=True):
1153
- image_ip1 = gr.Image(label="IP Image", type="filepath")
1154
- mask_ip1 = gr.Image(label="IP Mask", type="filepath")
1155
- model_ip1 = gr.Dropdown(value="plus_face", label="Model", choices=IP_MODELS)
1156
- mode_ip1 = gr.Dropdown(value="original", label="Mode", choices=MODE_IP_OPTIONS)
1157
- scale_ip1 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
1158
- with gr.Accordion("IP-Adapter 2", open=False, visible=True):
1159
- image_ip2 = gr.Image(label="IP Image", type="filepath")
1160
- mask_ip2 = gr.Image(label="IP Mask (optional)", type="filepath")
1161
- model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
1162
- mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
1163
- scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
1164
-
1165
- with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
1166
- image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
1167
- image_mask_gui = gr.Image(label="Image Mask", type="filepath")
1168
- strength_gui = gr.Slider(
1169
- minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
1170
- info="This option adjusts the level of changes for img2img and inpainting."
1171
- )
1172
- image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
1173
- preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=PREPROCESSOR_CONTROLNET["canny"])
1174
-
1175
def change_preprocessor_choices(task):
    """Swap the preprocessor dropdown options to match the selected task.

    Unknown tasks fall back to the "canny" preprocessor set.
    """
    stablepy_task = TASK_STABLEPY[task]
    options = PREPROCESSOR_CONTROLNET.get(
        stablepy_task, PREPROCESSOR_CONTROLNET["canny"]
    )
    return gr.update(choices=options, value=options[0])
1182
-
1183
- task_gui.change(
1184
- change_preprocessor_choices,
1185
- [task_gui],
1186
- [preprocessor_name_gui],
1187
- )
1188
- preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocess Resolution")
1189
- low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="Canny low threshold")
1190
- high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="Canny high threshold")
1191
- value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="Hough value threshold (MLSD)")
1192
- distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="Hough distance threshold (MLSD)")
1193
- control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
1194
- control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
1195
- control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
1196
-
1197
- with gr.Accordion("T2I adapter", open=False, visible=False):
1198
- t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
1199
- adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
1200
- adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
1201
-
1202
- with gr.Accordion("Styles", open=False, visible=True):
1203
-
1204
- try:
1205
- style_names_found = sd_gen.model.STYLE_NAMES
1206
- except Exception:
1207
- style_names_found = STYLE_NAMES
1208
-
1209
- style_prompt_gui = gr.Dropdown(
1210
- style_names_found,
1211
- multiselect=True,
1212
- value=None,
1213
- label="Style Prompt",
1214
- interactive=True,
1215
- )
1216
- style_json_gui = gr.File(label="Style JSON File")
1217
- style_button = gr.Button("Load styles")
1218
-
1219
def load_json_style_file(json):
    """Load additional prompt styles from a JSON file into the active model
    and refresh the style dropdown choices."""
    model = sd_gen.model
    if model:
        model.load_style_file(json)
        gr.Info(f"{len(model.STYLE_NAMES)} styles loaded")
        return gr.update(value=None, choices=model.STYLE_NAMES)

    # No model loaded yet: keep the built-in style list.
    gr.Info("First load the model")
    return gr.update(value=None, choices=STYLE_NAMES)
1227
-
1228
- style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
1229
-
1230
- with gr.Accordion("Textual inversion", open=False, visible=False):
1231
- active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
1232
-
1233
- with gr.Accordion("Detailfix", open=False, visible=True):
1234
-
1235
- # Adetailer Inpaint Only
1236
- adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
1237
-
1238
- # Adetailer Verbose
1239
- adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
1240
-
1241
- # Adetailer Sampler
1242
- adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
1243
-
1244
- with gr.Accordion("Detailfix A", open=False, visible=True):
1245
- # Adetailer A
1246
- adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
1247
- prompt_ad_a_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
1248
- negative_prompt_ad_a_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
1249
- strength_ad_a_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
1250
- face_detector_ad_a_gui = gr.Checkbox(label="Face detector", value=True)
1251
- person_detector_ad_a_gui = gr.Checkbox(label="Person detector", value=True)
1252
- hand_detector_ad_a_gui = gr.Checkbox(label="Hand detector", value=False)
1253
- mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
1254
- mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
1255
- mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
1256
-
1257
- with gr.Accordion("Detailfix B", open=False, visible=True):
1258
- # Adetailer B
1259
- adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
1260
- prompt_ad_b_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
1261
- negative_prompt_ad_b_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
1262
- strength_ad_b_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
1263
- face_detector_ad_b_gui = gr.Checkbox(label="Face detector", value=True)
1264
- person_detector_ad_b_gui = gr.Checkbox(label="Person detector", value=True)
1265
- hand_detector_ad_b_gui = gr.Checkbox(label="Hand detector", value=False)
1266
- mask_dilation_b_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
1267
- mask_blur_b_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
1268
- mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
1269
-
1270
- with gr.Accordion("Other settings", open=False, visible=True):
1271
- save_generated_images_gui = gr.Checkbox(value=True, label="Create a download link for the images")
1272
- hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
1273
- hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
1274
- generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
1275
-
1276
- with gr.Accordion("More settings", open=False, visible=False):
1277
- loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
1278
- retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
1279
- leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
1280
- disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
1281
- display_images_gui = gr.Checkbox(value=True, label="Display Images")
1282
- image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
1283
- image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
1284
- retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1285
- retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1286
- retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1287
- xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
1288
-
1289
- with gr.Accordion("Examples and help", open=False, visible=True):
1290
- gr.Markdown(
1291
- """### Help:
1292
- - The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'.
1293
- - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
1294
- - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
1295
- """
1296
- )
1297
- gr.Markdown(
1298
- """### The following examples perform specific tasks:
1299
- 1. Generation with SDXL and upscale
1300
- 2. Generation with FLUX dev
1301
- 3. ControlNet Canny SDXL
1302
- 4. Optical pattern (Optical illusion) SDXL
1303
- 5. Convert an image to a coloring drawing
1304
- 6. ControlNet OpenPose SD 1.5 and Latent upscale
1305
-
1306
- - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
1307
- """
1308
- )
1309
- gr.Examples(
1310
- examples=[
1311
- [
1312
- "1girl, souryuu asuka langley, neon genesis evangelion, rebuild of evangelion, lance of longinus, cat hat, plugsuit, pilot suit, red bodysuit, sitting, crossed legs, black eye patch, throne, looking down, from bottom, looking at viewer, outdoors, (masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
1313
- "nfsw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, unfinished, very displeasing, oldest, early, chromatic aberration, artistic error, scan, abstract",
1314
- 28,
1315
- 7.0,
1316
- -1,
1317
- "None",
1318
- 0.33,
1319
- "Euler a",
1320
- 1152,
1321
- 896,
1322
- "cagliostrolab/animagine-xl-3.1",
1323
- "txt2img",
1324
- "image.webp", # img conttol
1325
- 1024, # img resolution
1326
- 0.35, # strength
1327
- 1.0, # cn scale
1328
- 0.0, # cn start
1329
- 1.0, # cn end
1330
- "Classic",
1331
- "Nearest",
1332
- 45,
1333
- False,
1334
- ],
1335
- [
1336
- "a digital illustration of a movie poster titled 'Finding Emo', finding nemo parody poster, featuring a depressed cartoon clownfish with black emo hair, eyeliner, and piercings, bored expression, swimming in a dark underwater scene, in the background, movie title in a dripping, grungy font, moody blue and purple color palette",
1337
- "",
1338
- 24,
1339
- 3.5,
1340
- -1,
1341
- "None",
1342
- 0.33,
1343
- "Euler a",
1344
- 1152,
1345
- 896,
1346
- "black-forest-labs/FLUX.1-dev",
1347
- "txt2img",
1348
- None, # img conttol
1349
- 1024, # img resolution
1350
- 0.35, # strength
1351
- 1.0, # cn scale
1352
- 0.0, # cn start
1353
- 1.0, # cn end
1354
- "Classic",
1355
- None,
1356
- 70,
1357
- True,
1358
- ],
1359
- [
1360
- "((masterpiece)), best quality, blonde disco girl, detailed face, realistic face, realistic hair, dynamic pose, pink pvc, intergalactic disco background, pastel lights, dynamic contrast, airbrush, fine detail, 70s vibe, midriff",
1361
- "(worst quality:1.2), (bad quality:1.2), (poor quality:1.2), (missing fingers:1.2), bad-artist-anime, bad-artist, bad-picture-chill-75v",
1362
- 48,
1363
- 3.5,
1364
- -1,
1365
- "None",
1366
- 0.33,
1367
- "DPM++ 2M SDE Lu",
1368
- 1024,
1369
- 1024,
1370
- "misri/epicrealismXL_v7FinalDestination",
1371
- "canny ControlNet",
1372
- "image.webp", # img conttol
1373
- 1024, # img resolution
1374
- 0.35, # strength
1375
- 1.0, # cn scale
1376
- 0.0, # cn start
1377
- 1.0, # cn end
1378
- "Classic",
1379
- None,
1380
- 44,
1381
- False,
1382
- ],
1383
- [
1384
- "cinematic scenery old city ruins",
1385
- "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), (illustration, 3d, 2d, painting, cartoons, sketch, blurry, film grain, noise), (low quality, worst quality:1.2)",
1386
- 50,
1387
- 4.0,
1388
- -1,
1389
- "None",
1390
- 0.33,
1391
- "Euler a",
1392
- 1024,
1393
- 1024,
1394
- "misri/juggernautXL_juggernautX",
1395
- "optical pattern ControlNet",
1396
- "spiral_no_transparent.png", # img conttol
1397
- 1024, # img resolution
1398
- 0.35, # strength
1399
- 1.0, # cn scale
1400
- 0.05, # cn start
1401
- 0.75, # cn end
1402
- "Classic",
1403
- None,
1404
- 35,
1405
- False,
1406
- ],
1407
- [
1408
- "black and white, line art, coloring drawing, clean line art, black strokes, no background, white, black, free lines, black scribbles, on paper, A blend of comic book art and lineart full of black and white color, masterpiece, high-resolution, trending on Pixiv fan box, palette knife, brush strokes, two-dimensional, planar vector, T-shirt design, stickers, and T-shirt design, vector art, fantasy art, Adobe Illustrator, hand-painted, digital painting, low polygon, soft lighting, aerial view, isometric style, retro aesthetics, 8K resolution, black sketch lines, monochrome, invert color",
1409
- "color, red, green, yellow, colored, duplicate, blurry, abstract, disfigured, deformed, animated, toy, figure, framed, 3d, bad art, poorly drawn, extra limbs, close up, b&w, weird colors, blurry, watermark, blur haze, 2 heads, long neck, watermark, elongated body, cropped image, out of frame, draft, deformed hands, twisted fingers, double image, malformed hands, multiple heads, extra limb, ugly, poorly drawn hands, missing limb, cut-off, over satured, grain, lowères, bad anatomy, poorly drawn face, mutation, mutated, floating limbs, disconnected limbs, out of focus, long body, disgusting, extra fingers, groos proportions, missing arms, mutated hands, cloned face, missing legs, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, bluelish, blue",
1410
- 20,
1411
- 4.0,
1412
- -1,
1413
- "loras/Coloring_book_-_LineArt.safetensors",
1414
- 1.0,
1415
- "DPM++ 2M SDE Karras",
1416
- 1024,
1417
- 1024,
1418
- "cagliostrolab/animagine-xl-3.1",
1419
- "lineart ControlNet",
1420
- "color_image.png", # img conttol
1421
- 896, # img resolution
1422
- 0.35, # strength
1423
- 1.0, # cn scale
1424
- 0.0, # cn start
1425
- 1.0, # cn end
1426
- "Compel",
1427
- None,
1428
- 35,
1429
- False,
1430
- ],
1431
- [
1432
- "1girl,face,curly hair,red hair,white background,",
1433
- "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
1434
- 38,
1435
- 5.0,
1436
- -1,
1437
- "None",
1438
- 0.33,
1439
- "DPM++ 2M SDE Karras",
1440
- 512,
1441
- 512,
1442
- "digiplay/majicMIX_realistic_v7",
1443
- "openpose ControlNet",
1444
- "image.webp", # img conttol
1445
- 1024, # img resolution
1446
- 0.35, # strength
1447
- 1.0, # cn scale
1448
- 0.0, # cn start
1449
- 0.9, # cn end
1450
- "Compel",
1451
- "Latent (antialiased)",
1452
- 46,
1453
- False,
1454
- ],
1455
- ],
1456
- fn=sd_gen.generate_pipeline,
1457
- inputs=[
1458
- prompt_gui,
1459
- neg_prompt_gui,
1460
- steps_gui,
1461
- cfg_gui,
1462
- seed_gui,
1463
- lora1_gui,
1464
- lora_scale_1_gui,
1465
- sampler_gui,
1466
- img_height_gui,
1467
- img_width_gui,
1468
- model_name_gui,
1469
- task_gui,
1470
- image_control,
1471
- image_resolution_gui,
1472
- strength_gui,
1473
- control_net_output_scaling_gui,
1474
- control_net_start_threshold_gui,
1475
- control_net_stop_threshold_gui,
1476
- prompt_syntax_gui,
1477
- upscaler_model_path_gui,
1478
- gpu_duration_gui,
1479
- load_lora_cpu_gui,
1480
- ],
1481
- outputs=[result_images, actual_task_info],
1482
- cache_examples=False,
1483
- )
1484
- gr.Markdown(
1485
- """### Resources
1486
- - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
1487
- - You can also try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
1488
- """
1489
- )
1490
-
1491
- with gr.Tab("Inpaint mask maker", render=True):
1492
-
1493
def create_mask_now(img, invert):
    """Turn the painted editor layer into a black/white inpaint mask.

    Returns (background_image, rgb_mask) where painted pixels are white
    (or black when *invert* is set).
    """
    import numpy as np
    import time

    # Brief pause so the editor state has settled before reading it.
    time.sleep(0.5)

    drawn_layer = img["layers"][0]

    # Painted pixels are those with a non-trivial alpha value.
    alpha = np.array(drawn_layer)[:, :, 3]
    mask_bool = alpha > 1

    if invert:
        print("Invert")
        # Flip so the drawn shape becomes black and the rest white.
        mask_bool = np.invert(mask_bool)

    # Expand to 3 channels and scale booleans to 0/255.
    mask_rgb = np.stack((mask_bool,) * 3, axis=-1).astype(np.uint8) * 255

    return img["background"], mask_rgb
1519
-
1520
- with gr.Row():
1521
- with gr.Column(scale=2):
1522
- image_base = gr.ImageEditor(
1523
- sources=["upload", "clipboard"],
1524
- # crop_size="1:1",
1525
- # enable crop (or disable it)
1526
- # transforms=["crop"],
1527
- brush=gr.Brush(
1528
- default_size="16", # or leave it as 'auto'
1529
- color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
1530
- # default_color="black", # html names are supported
1531
- colors=[
1532
- "rgba(0, 0, 0, 1)", # rgb(a)
1533
- "rgba(0, 0, 0, 0.1)",
1534
- "rgba(255, 255, 255, 0.1)",
1535
- # "hsl(360, 120, 120)" # in fact any valid colorstring
1536
- ]
1537
- ),
1538
- eraser=gr.Eraser(default_size="16")
1539
- )
1540
- invert_mask = gr.Checkbox(value=False, label="Invert mask")
1541
- btn = gr.Button("Create mask")
1542
- with gr.Column(scale=1):
1543
- img_source = gr.Image(interactive=False)
1544
- img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
1545
- btn_send = gr.Button("Send to the first tab")
1546
-
1547
- btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result])
1548
-
1549
def send_img(img_source, img_result):
    """Forward the mask-maker outputs to the generation tab inputs unchanged."""
    return img_source, img_result
1551
- btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
1552
-
1553
- with gr.Tab("PNG Info"):
1554
-
1555
- with gr.Row():
1556
- with gr.Column():
1557
- image_metadata = gr.Image(label="Image with metadata", type="pil", sources=["upload"])
1558
-
1559
- with gr.Column():
1560
- result_metadata = gr.Textbox(label="Metadata", show_label=True, show_copy_button=True, interactive=False, container=True, max_lines=99)
1561
-
1562
- image_metadata.change(
1563
- fn=extract_exif_data,
1564
- inputs=[image_metadata],
1565
- outputs=[result_metadata],
1566
- )
1567
-
1568
- with gr.Tab("Upscaler"):
1569
-
1570
- with gr.Row():
1571
- with gr.Column():
1572
- image_up_tab = gr.Image(label="Image", type="pil", sources=["upload"])
1573
- upscaler_tab = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS[9:], value=UPSCALER_KEYS[11])
1574
- upscaler_size_tab = gr.Slider(minimum=1., maximum=4., step=0.1, value=1.1, label="Upscale by")
1575
- generate_button_up_tab = gr.Button(value="START UPSCALE", variant="primary")
1576
-
1577
- with gr.Column():
1578
- result_up_tab = gr.Image(label="Result", type="pil", interactive=False, format="png")
1579
-
1580
- generate_button_up_tab.click(
1581
- fn=esrgan_upscale,
1582
- inputs=[image_up_tab, upscaler_tab, upscaler_size_tab],
1583
- outputs=[result_up_tab],
1584
- )
1585
-
# Main "Generate" wiring. Two chained events:
#   1) (re)load the requested checkpoint/VAE for the selected task;
#   2) only if the load succeeds, run the full generation pipeline.
load_model_event = generate_button.click(
    fn=sd_gen.load_new_model,
    inputs=[model_name_gui, vae_model_gui, task_gui],
    outputs=[load_model_gui],
    queue=True,
    show_progress="minimal",
)

# NOTE(review): the order of this list must match the positional parameter
# order of sd_gen_generate_pipeline exactly — do not reorder entries.
generation_inputs = [
    prompt_gui,
    neg_prompt_gui,
    num_images_gui,
    steps_gui,
    cfg_gui,
    clip_skip_gui,
    seed_gui,
    # Up to five LoRAs, each with its own weight.
    lora1_gui,
    lora_scale_1_gui,
    lora2_gui,
    lora_scale_2_gui,
    lora3_gui,
    lora_scale_3_gui,
    lora4_gui,
    lora_scale_4_gui,
    lora5_gui,
    lora_scale_5_gui,
    sampler_gui,
    img_height_gui,
    img_width_gui,
    model_name_gui,
    vae_model_gui,
    task_gui,
    # ControlNet / img2img inputs and preprocessor settings.
    image_control,
    preprocessor_name_gui,
    preprocess_resolution_gui,
    image_resolution_gui,
    style_prompt_gui,
    style_json_gui,
    image_mask_gui,
    strength_gui,
    low_threshold_gui,
    high_threshold_gui,
    value_threshold_gui,
    distance_threshold_gui,
    control_net_output_scaling_gui,
    control_net_start_threshold_gui,
    control_net_stop_threshold_gui,
    active_textual_inversion_gui,
    prompt_syntax_gui,
    # Hires-fix / upscaler settings.
    upscaler_model_path_gui,
    upscaler_increases_size_gui,
    esrgan_tile_gui,
    esrgan_tile_overlap_gui,
    hires_steps_gui,
    hires_denoising_strength_gui,
    hires_sampler_gui,
    hires_prompt_gui,
    hires_negative_prompt_gui,
    hires_before_adetailer_gui,
    hires_after_adetailer_gui,
    # Run/output behaviour toggles.
    loop_generation_gui,
    leave_progress_bar_gui,
    disable_progress_bar_gui,
    image_previews_gui,
    display_images_gui,
    save_generated_images_gui,
    image_storage_location_gui,
    retain_compel_previous_load_gui,
    retain_detailfix_model_previous_load_gui,
    retain_hires_model_previous_load_gui,
    t2i_adapter_preprocessor_gui,
    adapter_conditioning_scale_gui,
    adapter_conditioning_factor_gui,
    xformers_memory_efficient_attention_gui,
    free_u_gui,
    generator_in_cpu_gui,
    # ADetailer (two independent passes, A and B).
    adetailer_inpaint_only_gui,
    adetailer_verbose_gui,
    adetailer_sampler_gui,
    adetailer_active_a_gui,
    prompt_ad_a_gui,
    negative_prompt_ad_a_gui,
    strength_ad_a_gui,
    face_detector_ad_a_gui,
    person_detector_ad_a_gui,
    hand_detector_ad_a_gui,
    mask_dilation_a_gui,
    mask_blur_a_gui,
    mask_padding_a_gui,
    adetailer_active_b_gui,
    prompt_ad_b_gui,
    negative_prompt_ad_b_gui,
    strength_ad_b_gui,
    face_detector_ad_b_gui,
    person_detector_ad_b_gui,
    hand_detector_ad_b_gui,
    mask_dilation_b_gui,
    mask_blur_b_gui,
    mask_padding_b_gui,
    retain_task_cache_gui,
    # Two IP-Adapter slots (image, mask, model, mode, scale).
    image_ip1,
    mask_ip1,
    model_ip1,
    mode_ip1,
    scale_ip1,
    image_ip2,
    mask_ip2,
    model_ip2,
    mode_ip2,
    scale_ip2,
    pag_scale_gui,
    load_lora_cpu_gui,
    verbose_info_gui,
    gpu_duration_gui,
]

load_model_event.success(
    fn=sd_gen_generate_pipeline,  # fn=sd_gen.generate_pipeline,
    inputs=generation_inputs,
    outputs=[result_images, actual_task_info],
    queue=True,
    show_progress="minimal",
)
# Enable request queuing so concurrent users are serialized through the GPU,
# then start the Gradio server.
app.queue()

app.launch(
    show_error=True,  # surface Python exceptions in the web UI
    debug=True,  # verbose server-side logging
    allowed_paths=["./images/"],  # permit serving saved images from disk
)