nahuiyang commited on
Commit
f04d740
1 Parent(s): dd9cd2d

Upload 61 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. analGapeCreampieLora_v3.safetensors +3 -0
  2. barbarianWoman_v10.safetensors +3 -0
  3. braBeautifulRealistic_brav3.safetensors +3 -0
  4. bukkakAI.pt +3 -0
  5. creampieHairyPussy_creampieV11.safetensors +3 -0
  6. ddetailer.py +542 -0
  7. ddsd.py +604 -0
  8. hauteCouture_v11.safetensors +3 -0
  9. jkBigNippleTipLite_V01.safetensors +3 -0
  10. jkPuffyNipples_V01.safetensors +3 -0
  11. k-military2-01.safetensors +3 -0
  12. k-military2-02.safetensors +3 -0
  13. k-military2-03.safetensors +3 -0
  14. k-military2-04.safetensors +3 -0
  15. k-military2-05.safetensors +3 -0
  16. k-military2-06.safetensors +3 -0
  17. k-military2-07.safetensors +3 -0
  18. k-military2-08.safetensors +3 -0
  19. k-military2-09.safetensors +3 -0
  20. k-military2-10.safetensors +3 -0
  21. latexIDLoraLatex_loconV02.safetensors +3 -0
  22. lightAndShadow_v10.safetensors +3 -0
  23. nippleHeartPiercing_2.safetensors +3 -0
  24. nipplePiercing_v20.safetensors +3 -0
  25. nymphs_v10.safetensors +3 -0
  26. oversizedHoodie_oversizedHoodieV10.safetensors +3 -0
  27. photorealisticQueenOf_v202.safetensors +3 -0
  28. photorealisticWombTattoos_401.safetensors +3 -0
  29. povMissionaryAnal_v5.safetensors +3 -0
  30. pregnant_v10.safetensors +3 -0
  31. ridingDildoSexActLora_v11.safetensors +3 -0
  32. schoolUniform_naughty.safetensors +3 -0
  33. sexyCostume_1.safetensors +3 -0
  34. shinyOiledSkin_v1.safetensors +3 -0
  35. sittingPussyPosition_highPrecision.pt +3 -0
  36. ssb_v17-40.safetensors +3 -0
  37. ssb_v18-10.safetensors +3 -0
  38. ssb_v18-12.safetensors +3 -0
  39. ssb_v18-14.safetensors +3 -0
  40. ssb_v18-16.safetensors +3 -0
  41. ssb_v18-18.safetensors +3 -0
  42. ssb_v18-20.safetensors +3 -0
  43. ssb_v21-80.safetensors +3 -0
  44. ssb_v21-90.safetensors +3 -0
  45. ssb_v7-10.safetensors +3 -0
  46. ssb_v8-30.safetensors +3 -0
  47. ssb_v9-000001.safetensors +3 -0
  48. ssb_v9-000002.safetensors +3 -0
  49. ssb_v9-000003.safetensors +3 -0
  50. ssb_v9-000004.safetensors +3 -0
analGapeCreampieLora_v3.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aee78fef1e88080e0a8dc1d1d32dee0386cb7685d3bf3707f56b991fa7b234ed
3
+ size 151121086
barbarianWoman_v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d363c6078e23f2c7cb30b36b75c5d2af811c7a1ea22b9aa4f8d33b8aa65dc547
3
+ size 151113528
braBeautifulRealistic_brav3.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c646be99050f3a7337748c7d5078649b016b849c1d0c6339bce8e35ed0ab11c
3
+ size 2132625918
bukkakAI.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8ae6ae855ebc1b0fdefc5263a61fc35331ac4f57cbd4e3ac165f2706edf7603
3
+ size 10155
creampieHairyPussy_creampieV11.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:826d251c22ebad508061ed14bf016e90b7461dcbbc07aba734466251e3ed061f
3
+ size 37861700
ddetailer.py ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import cv2
4
+ from PIL import Image
5
+ import numpy as np
6
+ import gradio as gr
7
+
8
+ from modules import processing, images
9
+ from modules import scripts, script_callbacks, shared, devices, modelloader
10
+ from modules.processing import Processed, StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
11
+ from modules.shared import opts, cmd_opts, state
12
+ from modules.sd_models import model_hash
13
+ from modules.paths import models_path
14
+ from basicsr.utils.download_util import load_file_from_url
15
+
16
+ dd_models_path = os.path.join(models_path, "mmdet")
17
+
18
def list_models(model_path):
    """Return dropdown titles ('relative/name.pth [hash]') for every .pth model under model_path."""
    model_list = modelloader.load_models(model_path=model_path, ext_filter=[".pth"])

    def modeltitle(path, shorthash):
        # Prefer a path relative to model_path; fall back to the bare filename.
        abspath = os.path.abspath(path)
        if abspath.startswith(model_path):
            name = abspath.replace(model_path, '')
        else:
            name = os.path.basename(path)
        if name.startswith(("\\", "/")):
            name = name[1:]
        # shortname: separators flattened to '_', extension stripped (computed but unused by callers here).
        shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
        return f'{name} [{shorthash}]', shortname

    titles = []
    for filename in model_list:
        title, _shortname = modeltitle(filename, model_hash(filename))
        titles.append(title)
    return titles
43
+
44
def startup():
    """One-time extension bootstrap: install mmdet dependencies and fetch default detection models.

    Runs at import time. Installs pinned openmim/mmcv-full/mmdet via pip/mim when
    mmdet is missing, then downloads the default bbox/segm detectors (weights .pth
    plus the matching mmdet config .py) if no models exist under dd_models_path.
    """
    from launch import is_installed, run
    if not is_installed("mmdet"):
        python = sys.executable
        # Pinned versions: mmdet 2.x API is assumed by the inference code below.
        run(f'"{python}" -m pip install -U openmim==0.3.7', desc="Installing openmim", errdesc="Couldn't install openmim")
        run(f'"{python}" -m mim install mmcv-full==1.7.1', desc=f"Installing mmcv-full", errdesc=f"Couldn't install mmcv-full")
        run(f'"{python}" -m pip install mmdet==2.28.2', desc=f"Installing mmdet", errdesc=f"Couldn't install mmdet")

    if (len(list_models(dd_models_path)) == 0):
        print("No detection models found, downloading...")
        bbox_path = os.path.join(dd_models_path, "bbox")
        segm_path = os.path.join(dd_models_path, "segm")
        # Each model needs both its checkpoint (.pth) and its mmdet config (.py) side by side.
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/bbox/mmdet_anime-face_yolov3.py", bbox_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/segm/mmdet_dd-person_mask2former.pth", segm_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/segm/mmdet_dd-person_mask2former.py", segm_path)
60
# Run the bootstrap (dependency install + model download) when the extension module loads.
startup()
62
+
63
def gr_show(visible=True):
    """Build a Gradio update dict that toggles a component's visibility."""
    return {"__type__": "update", "visible": visible}
65
+
66
class DetectionDetailerScript(scripts.Script):
    """WebUI script: detect regions (faces/people) with mmdet models and re-inpaint them.

    Model A is the primary detector; model B can optionally pre-process the image
    and/or be combined bitwise (A&B / A-B) with A's masks before inpainting.
    NOTE(review): block nesting was reconstructed from a diff rendering — verify
    against the upstream ddetailer extension.
    """

    def title(self):
        # Name shown in the WebUI script dropdown.
        return "Detection Detailer"

    def show(self, is_img2img):
        # Available on both txt2img and img2img tabs.
        return True

    def ui(self, is_img2img):
        """Build the Gradio controls; returns the component list consumed by run()."""
        import modules.ui

        model_list = list_models(dd_models_path)
        model_list.insert(0, "None")  # sentinel meaning "detector disabled"
        if is_img2img:
            info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Use from inpaint tab, inpaint at full res ON, denoise <0.5</p>")
        else:
            info = gr.HTML("")
        with gr.Group():
            with gr.Row():
                dd_model_a = gr.Dropdown(label="Primary detection model (A)", choices=model_list, value="None", visible=True, type="value")

            with gr.Row():
                dd_conf_a = gr.Slider(label='Detection confidence threshold % (A)', minimum=0, maximum=100, step=1, value=30, visible=False)
                dd_dilation_factor_a = gr.Slider(label='Dilation factor (A)', minimum=0, maximum=255, step=1, value=4, visible=False)

            with gr.Row():
                dd_offset_x_a = gr.Slider(label='X offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=False)
                dd_offset_y_a = gr.Slider(label='Y offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=False)

            with gr.Row():
                dd_preprocess_b = gr.Checkbox(label='Inpaint model B detections before model A runs', value=False, visible=False)
                dd_bitwise_op = gr.Radio(label='Bitwise operation', choices=['None', 'A&B', 'A-B'], value="None", visible=False)

        br = gr.HTML("<br>")

        with gr.Group():
            with gr.Row():
                dd_model_b = gr.Dropdown(label="Secondary detection model (B) (optional)", choices=model_list, value="None", visible=False, type="value")

            with gr.Row():
                dd_conf_b = gr.Slider(label='Detection confidence threshold % (B)', minimum=0, maximum=100, step=1, value=30, visible=False)
                dd_dilation_factor_b = gr.Slider(label='Dilation factor (B)', minimum=0, maximum=255, step=1, value=4, visible=False)

            with gr.Row():
                dd_offset_x_b = gr.Slider(label='X offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=False)
                dd_offset_y_b = gr.Slider(label='Y offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=False)

        with gr.Group():
            # Inpainting controls are only shown on txt2img (img2img supplies its own).
            with gr.Row():
                dd_mask_blur = gr.Slider(label='Mask blur ', minimum=0, maximum=64, step=1, value=4, visible=(not is_img2img))
                dd_denoising_strength = gr.Slider(label='Denoising strength (Inpaint)', minimum=0.0, maximum=1.0, step=0.01, value=0.4, visible=(not is_img2img))

            with gr.Row():
                dd_inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution ', value=True, visible=(not is_img2img))
                dd_inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels ', minimum=0, maximum=256, step=4, value=32, visible=(not is_img2img))

            with gr.Row():
                # CFG scale used for the inpainting passes (see run()).
                dd_mimic_cfg = gr.Slider(label='Mimic CFG Scale', minimum=0, maximum=30, step=0.5, value=7, visible=True)

        # Reveal the A controls (and the B dropdown) once a primary model is selected.
        dd_model_a.change(
            lambda modelname: {
                dd_model_b: gr_show(modelname != "None"),
                dd_conf_a: gr_show(modelname != "None"),
                dd_dilation_factor_a: gr_show(modelname != "None"),
                dd_offset_x_a: gr_show(modelname != "None"),
                dd_offset_y_a: gr_show(modelname != "None")

            },
            inputs=[dd_model_a],
            outputs=[dd_model_b, dd_conf_a, dd_dilation_factor_a, dd_offset_x_a, dd_offset_y_a]
        )

        # Reveal the B controls once a secondary model is selected.
        dd_model_b.change(
            lambda modelname: {
                dd_preprocess_b: gr_show(modelname != "None"),
                dd_bitwise_op: gr_show(modelname != "None"),
                dd_conf_b: gr_show(modelname != "None"),
                dd_dilation_factor_b: gr_show(modelname != "None"),
                dd_offset_x_b: gr_show(modelname != "None"),
                dd_offset_y_b: gr_show(modelname != "None")
            },
            inputs=[dd_model_b],
            outputs=[dd_preprocess_b, dd_bitwise_op, dd_conf_b, dd_dilation_factor_b, dd_offset_x_b, dd_offset_y_b]
        )

        return [info,
                dd_model_a,
                dd_conf_a, dd_dilation_factor_a,
                dd_offset_x_a, dd_offset_y_a,
                dd_preprocess_b, dd_bitwise_op,
                br,
                dd_model_b,
                dd_conf_b, dd_dilation_factor_b,
                dd_offset_x_b, dd_offset_y_b,
                dd_mask_blur, dd_denoising_strength,
                dd_inpaint_full_res, dd_inpaint_full_res_padding,
                dd_mimic_cfg
                ]

    def run(self, p, info,
            dd_model_a,
            dd_conf_a, dd_dilation_factor_a,
            dd_offset_x_a, dd_offset_y_a,
            dd_preprocess_b, dd_bitwise_op,
            br,
            dd_model_b,
            dd_conf_b, dd_dilation_factor_b,
            dd_offset_x_b, dd_offset_y_b,
            dd_mask_blur, dd_denoising_strength,
            dd_inpaint_full_res, dd_inpaint_full_res_padding,
            dd_mimic_cfg):
        """Generate images, then detect+inpaint regions per the A/B configuration.

        txt2img: generate the base image with the original pipeline, then run
        detection/inpainting with a derived img2img pipeline (cfg = dd_mimic_cfg).
        img2img: use the provided init image directly. Returns a Processed with
        one final image per original n_iter.
        """
        processing.fix_seed(p)
        initial_info = None
        seed = p.seed
        # Force one image per pass; the original n_iter drives the outer loop instead.
        p.batch_size = 1
        ddetail_count = p.n_iter
        p.n_iter = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True
        is_txt2img = isinstance(p, StableDiffusionProcessingTxt2Img)
        if (not is_txt2img):
            orig_image = p.init_images[0]
        else:
            # Build the img2img (inpaint) pipeline used for the detail passes,
            # mirroring the txt2img settings but with dd_mimic_cfg as CFG scale.
            p_txt = p
            print(f"mimic_scale = {dd_mimic_cfg}")
            p = StableDiffusionProcessingImg2Img(
                init_images=None,
                resize_mode=0,
                denoising_strength=dd_denoising_strength,
                mask=None,
                mask_blur=dd_mask_blur,
                inpainting_fill=1,  # fill mode: original
                inpaint_full_res=dd_inpaint_full_res,
                inpaint_full_res_padding=dd_inpaint_full_res_padding,
                inpainting_mask_invert=0,
                sd_model=p_txt.sd_model,
                outpath_samples=p_txt.outpath_samples,
                outpath_grids=p_txt.outpath_grids,
                prompt=p_txt.prompt,
                negative_prompt=p_txt.negative_prompt,
                styles=p_txt.styles,
                seed=p_txt.seed,
                subseed=p_txt.subseed,
                subseed_strength=p_txt.subseed_strength,
                seed_resize_from_h=p_txt.seed_resize_from_h,
                seed_resize_from_w=p_txt.seed_resize_from_w,
                sampler_name=p_txt.sampler_name,
                n_iter=p_txt.n_iter,
                steps=p_txt.steps,
                cfg_scale=dd_mimic_cfg,
                width=p_txt.width,
                height=p_txt.height,
                tiling=p_txt.tiling,
            )
            p.do_not_save_grid = True
            p.do_not_save_samples = True
        output_images = []
        state.job_count = ddetail_count
        for n in range(ddetail_count):
            devices.torch_gc()
            start_seed = seed + n
            if (is_txt2img):
                print(f"Processing initial image for output generation {n + 1}.")
                p_txt.seed = start_seed
                processed = processing.process_images(p_txt)
                init_image = processed.images[0]
            else:
                init_image = orig_image

            output_images.append(init_image)
            masks_a = []
            masks_b_pre = []

            # Optional secondary pre-processing run: inpaint B's detections first.
            if (dd_model_b != "None" and dd_preprocess_b):
                label_b_pre = "B"
                results_b_pre = inference(init_image, dd_model_b, dd_conf_b / 100.0, label_b_pre)
                masks_b_pre = create_segmasks(results_b_pre)
                masks_b_pre = dilate_masks(masks_b_pre, dd_dilation_factor_b, 1)
                masks_b_pre = offset_masks(masks_b_pre, dd_offset_x_b, dd_offset_y_b)
                if (len(masks_b_pre) > 0):
                    results_b_pre = update_result_masks(results_b_pre, masks_b_pre)
                    segmask_preview_b = create_segmask_preview(results_b_pre, init_image)
                    shared.state.current_image = segmask_preview_b
                    if (opts.dd_save_previews):
                        images.save_image(segmask_preview_b, opts.outdir_ddetailer_previews, "", start_seed, p.prompt, opts.samples_format, p=p)
                    gen_count = len(masks_b_pre)
                    state.job_count += gen_count
                    print(f"Processing {gen_count} model {label_b_pre} detections for output generation {n + 1}.")
                    p.seed = start_seed
                    p.init_images = [init_image]

                    # Inpaint each B mask sequentially, feeding each result into the next pass.
                    for i in range(gen_count):
                        p.image_mask = masks_b_pre[i]
                        if (opts.dd_save_masks):
                            images.save_image(masks_b_pre[i], opts.outdir_ddetailer_masks, "", start_seed, p.prompt, opts.samples_format, p=p)
                        processed = processing.process_images(p)
                        p.seed = processed.seed + 1
                        p.init_images = processed.images

                    if (gen_count > 0):
                        output_images[n] = processed.images[0]
                        init_image = processed.images[0]

                else:
                    print(f"No model B detections for output generation {n} with current settings.")

            # Primary run: model A (optionally combined bitwise with B's masks).
            if (dd_model_a != "None"):
                label_a = "A"
                if (dd_model_b != "None" and dd_bitwise_op != "None"):
                    label_a = dd_bitwise_op
                results_a = inference(init_image, dd_model_a, dd_conf_a / 100.0, label_a)
                masks_a = create_segmasks(results_a)
                masks_a = dilate_masks(masks_a, dd_dilation_factor_a, 1)
                masks_a = offset_masks(masks_a, dd_offset_x_a, dd_offset_y_a)
                if (dd_model_b != "None" and dd_bitwise_op != "None"):
                    label_b = "B"
                    results_b = inference(init_image, dd_model_b, dd_conf_b / 100.0, label_b)
                    masks_b = create_segmasks(results_b)
                    masks_b = dilate_masks(masks_b, dd_dilation_factor_b, 1)
                    masks_b = offset_masks(masks_b, dd_offset_x_b, dd_offset_y_b)
                    if (len(masks_b) > 0):
                        combined_mask_b = combine_masks(masks_b)
                        # Iterate in reverse so deleting empty masks doesn't shift pending indices.
                        for i in reversed(range(len(masks_a))):
                            if (dd_bitwise_op == "A&B"):
                                masks_a[i] = bitwise_and_masks(masks_a[i], combined_mask_b)
                            elif (dd_bitwise_op == "A-B"):
                                masks_a[i] = subtract_masks(masks_a[i], combined_mask_b)
                            if (is_allblack(masks_a[i])):
                                del masks_a[i]
                                for result in results_a:
                                    del result[i]

                    else:
                        print("No model B detections to overlap with model A masks")
                        results_a = []
                        masks_a = []

                if (len(masks_a) > 0):
                    results_a = update_result_masks(results_a, masks_a)
                    segmask_preview_a = create_segmask_preview(results_a, init_image)
                    shared.state.current_image = segmask_preview_a
                    if (opts.dd_save_previews):
                        images.save_image(segmask_preview_a, opts.outdir_ddetailer_previews, "", start_seed, p.prompt, opts.samples_format, p=p)
                    gen_count = len(masks_a)
                    state.job_count += gen_count
                    print(f"Processing {gen_count} model {label_a} detections for output generation {n + 1}.")
                    p.seed = start_seed
                    p.init_images = [init_image]

                    for i in range(gen_count):
                        p.image_mask = masks_a[i]
                        if (opts.dd_save_masks):
                            images.save_image(masks_a[i], opts.outdir_ddetailer_masks, "", start_seed, p.prompt, opts.samples_format, p=p)

                        processed = processing.process_images(p)
                        # Keep the info string of the first successful detail pass.
                        if initial_info is None:
                            initial_info = processed.info
                        p.seed = processed.seed + 1
                        p.init_images = processed.images

                    if (gen_count > 0):
                        output_images[n] = processed.images[0]
                        if (opts.samples_save):
                            images.save_image(processed.images[0], p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p)

                else:
                    print(f"No model {label_a} detections for output generation {n} with current settings.")
            state.job = f"Generation {n + 1} out of {state.job_count}"
        if (initial_info is None):
            initial_info = "No detections found."

        return Processed(p, output_images, seed, initial_info)
340
+
341
def modeldataset(model_shortname):
    """Choose the class-name dataset for a model: 'coco' for mmdet segm checkpoints, else 'bbox'."""
    path = modelpath(model_shortname)
    return 'coco' if ("mmdet" in path and "segm" in path) else 'bbox'
348
+
349
def modelpath(model_shortname):
    """Resolve a dropdown title of the form 'name [hash]' back to its checkpoint path.

    Returns None implicitly when no model with a matching hash is found.
    """
    wanted_hash = model_shortname.split("[")[-1].split("]")[0]
    candidates = modelloader.load_models(model_path=dd_models_path, ext_filter=[".pth"])
    for candidate in candidates:
        if model_hash(candidate) == wanted_hash:
            return candidate
355
+
356
def update_result_masks(results, masks):
    """Write the (possibly dilated/offset) masks back into results[2] as boolean arrays.

    results is the [labels, bboxes, segms] triple produced by inference();
    the list is mutated in place and also returned.
    """
    for idx, mask in enumerate(masks):
        results[2][idx] = np.array(mask, dtype=bool)
    return results
361
+
362
def create_segmask_preview(results, image):
    """Render a preview: each detection tinted a random color and labeled 'name:score'.

    results is the [labels, bboxes, segms] triple from inference(); image is the
    PIL source image. Returns a PIL image (the original image unchanged when
    there are no detections).
    """
    labels = results[0]
    bboxes = results[1]
    segms = results[2]

    cv2_image = np.array(image)
    cv2_image = cv2_image[:, :, ::-1].copy()  # RGB -> BGR for OpenCV drawing

    for i in range(len(segms)):
        # Random bright tint per detection, alpha-blended over the image.
        color = np.full_like(cv2_image, np.random.randint(100, 256, (1, 3), dtype=np.uint8))
        alpha = 0.2
        color_image = cv2.addWeighted(cv2_image, alpha, color, 1-alpha, 0)
        cv2_mask = segms[i].astype(np.uint8) * 255
        cv2_mask_bool = np.array(segms[i], dtype=bool)
        # Label anchor: centroid of the mask pixels (argwhere yields (row, col)).
        # NOTE(review): an all-black mask would make this mean NaN — assumed non-empty here.
        centroid = np.mean(np.argwhere(cv2_mask_bool), axis=0)
        centroid_x, centroid_y = int(centroid[1]), int(centroid[0])

        cv2_mask_rgb = cv2.merge((cv2_mask, cv2_mask, cv2_mask))
        cv2_image = np.where(cv2_mask_rgb == 255, color_image, cv2_image)
        # Darker shade of the tint for readable text.
        text_color = tuple([int(x) for x in ( color[0][0] - 100 )])
        name = labels[i]
        score = bboxes[i][4]  # confidence is the 5th bbox component
        score = str(score)[:4]
        text = name + ":" + score
        cv2.putText(cv2_image, text, (centroid_x - 30, centroid_y), cv2.FONT_HERSHEY_DUPLEX, 0.4, text_color, 1, cv2.LINE_AA)

    if ( len(segms) > 0):
        preview_image = Image.fromarray(cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB))
    else:
        preview_image = image

    return preview_image
394
+
395
def is_allblack(mask):
    """Return True when the mask contains no nonzero (white) pixels."""
    return cv2.countNonZero(np.array(mask)) == 0
398
+
399
def bitwise_and_masks(mask1, mask2):
    """Return the pixelwise intersection (AND) of two PIL masks as a new PIL mask."""
    intersection = cv2.bitwise_and(np.array(mask1), np.array(mask2))
    return Image.fromarray(intersection)
405
+
406
def subtract_masks(mask1, mask2):
    """Return mask1 minus mask2 (saturating per-pixel subtraction) as a new PIL mask."""
    difference = cv2.subtract(np.array(mask1), np.array(mask2))
    return Image.fromarray(difference)
412
+
413
def dilate_masks(masks, dilation_factor, iter=1):
    """Dilate each PIL mask with a square kernel of size dilation_factor.

    Args:
        masks: list of PIL masks.
        dilation_factor: kernel side length in pixels; 0 returns masks unchanged.
        iter: number of dilation iterations.

    Returns:
        New list of dilated PIL masks (or the input list when dilation_factor == 0).
    """
    if dilation_factor == 0:
        return masks
    dilated_masks = []
    kernel = np.ones((dilation_factor, dilation_factor), np.uint8)
    for i in range(len(masks)):
        cv2_mask = np.array(masks[i])
        # BUG FIX: cv2.dilate's third *positional* parameter is `dst`, not
        # `iterations` — the original call silently ignored `iter`. Pass it by
        # keyword so multiple iterations actually apply.
        dilated_mask = cv2.dilate(cv2_mask, kernel, iterations=iter)
        dilated_masks.append(Image.fromarray(dilated_mask))
    return dilated_masks
423
+
424
def offset_masks(masks, offset_x, offset_y):
    """Shift each mask by (offset_x, offset_y) pixels with wrap-around (np.roll).

    Positive offset_y moves mask content up; positive offset_x moves it right.
    Returns the input list unchanged when both offsets are zero.
    """
    if offset_x == 0 and offset_y == 0:
        return masks
    shifted_masks = []
    for mask in masks:
        arr = np.array(mask)
        arr = np.roll(arr, -offset_y, axis=0)
        arr = np.roll(arr, offset_x, axis=1)
        shifted_masks.append(Image.fromarray(arr))
    return shifted_masks
436
+
437
def combine_masks(masks):
    """OR-combine a non-empty list of masks into a single PIL mask."""
    combined = np.array(masks[0])
    for mask in masks[1:]:
        combined = cv2.bitwise_or(combined, np.array(mask))
    return Image.fromarray(combined)
446
+
447
def on_ui_settings():
    """Register the extension's options in the WebUI settings page.

    Adds toggles for saving mask previews/masks and the directories they go to,
    all under the "Detection Detailer" settings section.
    """
    shared.opts.add_option("dd_save_previews", shared.OptionInfo(False, "Save mask previews", section=("ddetailer", "Detection Detailer")))
    shared.opts.add_option("outdir_ddetailer_previews", shared.OptionInfo("extensions/ddetailer/outputs/masks-previews", 'Output directory for mask previews', section=("ddetailer", "Detection Detailer")))
    shared.opts.add_option("dd_save_masks", shared.OptionInfo(False, "Save masks", section=("ddetailer", "Detection Detailer")))
    shared.opts.add_option("outdir_ddetailer_masks", shared.OptionInfo("extensions/ddetailer/outputs/masks", 'Output directory for masks', section=("ddetailer", "Detection Detailer")))
452
+
453
def create_segmasks(results):
    """Convert the boolean segmentation arrays in results[2] into 8-bit PIL masks (0/255)."""
    return [Image.fromarray(segm.astype(np.uint8) * 255) for segm in results[2]]
462
+
463
+ import mmcv
464
+ from mmdet.core import get_classes
465
+ from mmdet.apis import (inference_detector,
466
+ init_detector)
467
+
468
def get_device():
    """Return the device string for detector inference.

    Uses the WebUI --device-id when set ('cuda:<id>'), otherwise falls back
    to 'cpu'.
    """
    device_id = shared.cmd_opts.device_id
    return "cpu" if device_id is None else f"cuda:{device_id}"
475
+
476
def inference(image, modelname, conf_thres, label):
    """Dispatch detection to the bbox or segm backend based on the model's path.

    Args:
        image: PIL source image.
        modelname: dropdown title ('name [hash]') resolved via modelpath().
        conf_thres: confidence threshold in [0, 1].
        label: prefix for the returned detection labels.

    Returns:
        [labels, bboxes, segms] triple.

    Raises:
        ValueError: when the resolved path matches neither backend. (The
        original fell through to an UnboundLocalError on `results` here.)
    """
    path = modelpath(modelname)
    if ( "mmdet" in path and "bbox" in path ):
        results = inference_mmdet_bbox(image, modelname, conf_thres, label)
    elif ( "mmdet" in path and "segm" in path):
        results = inference_mmdet_segm(image, modelname, conf_thres, label)
    else:
        raise ValueError(f"Unsupported detection model path: {path}")
    return results
483
+
484
def inference_mmdet_segm(image, modelname, conf_thres, label):
    """Run an mmdet instance-segmentation model and filter detections by confidence.

    Returns the [labels, bboxes, segms] triple: label strings ('<label>-<class>'),
    bbox arrays (last component is the confidence score), and boolean masks.
    Returns [[], [], []] when the first class yields no detections.
    """
    model_checkpoint = modelpath(modelname)
    # mmdet convention: the config .py sits next to the .pth checkpoint.
    model_config = os.path.splitext(model_checkpoint)[0] + ".py"
    model_device = get_device()
    model = init_detector(model_config, model_checkpoint, device=model_device)
    mmdet_results = inference_detector(model, np.array(image))
    bbox_results, segm_results = mmdet_results
    dataset = modeldataset(modelname)
    classes = get_classes(dataset)
    # Per-class label indices, flattened below to align with the stacked bboxes.
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_results)
    ]
    n,m = bbox_results[0].shape
    # NOTE(review): only class 0 is checked for emptiness — other classes'
    # detections are dropped when class 0 has none; verify this is intended.
    if (n == 0):
        return [[],[],[]]
    labels = np.concatenate(labels)
    bboxes = np.vstack(bbox_results)
    segms = mmcv.concat_list(segm_results)
    # Keep detections whose confidence (last bbox column) exceeds the threshold.
    filter_inds = np.where(bboxes[:,-1] > conf_thres)[0]
    results = [[],[],[]]
    for i in filter_inds:
        results[0].append(label + "-" + classes[labels[i]])
        results[1].append(bboxes[i])
        results[2].append(segms[i])

    return results
511
+
512
def inference_mmdet_bbox(image, modelname, conf_thres, label):
    """Run an mmdet bbox-only model and synthesize rectangular masks for each box.

    Returns the [labels, bboxes, segms] triple, with every detection labeled
    `label` and segms being boolean masks of the filled bbox rectangles.
    Returns [[], [], []] when class 0 yields no detections.
    """
    model_checkpoint = modelpath(modelname)
    # mmdet convention: the config .py sits next to the .pth checkpoint.
    model_config = os.path.splitext(model_checkpoint)[0] + ".py"
    model_device = get_device()
    model = init_detector(model_config, model_checkpoint, device=model_device)
    results = inference_detector(model, np.array(image))
    cv2_image = np.array(image)
    cv2_image = cv2_image[:, :, ::-1].copy()  # RGB -> BGR
    cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY)  # used only for its 2-D shape

    # One filled-rectangle boolean mask per detected box (class 0 only).
    segms = []
    for (x0, y0, x1, y1, conf) in results[0]:
        cv2_mask = np.zeros((cv2_gray.shape), np.uint8)
        cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1)
        cv2_mask_bool = cv2_mask.astype(bool)
        segms.append(cv2_mask_bool)

    n,m = results[0].shape
    if (n == 0):
        return [[],[],[]]
    bboxes = np.vstack(results[0])
    # Keep detections whose confidence (last bbox column) exceeds the threshold.
    filter_inds = np.where(bboxes[:,-1] > conf_thres)[0]
    results = [[],[],[]]
    for i in filter_inds:
        results[0].append(label)
        results[1].append(bboxes[i])
        results[2].append(segms[i])

    return results
541
# Register the settings hook so on_ui_settings() runs when the WebUI builds its settings page.
script_callbacks.on_ui_settings(on_ui_settings)
ddsd.py ADDED
@@ -0,0 +1,604 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import cv2
4
+ import math
5
+ import copy
6
+
7
+ import modules.scripts as scripts
8
+ import gradio as gr
9
+ import numpy as np
10
+ from PIL import Image
11
+
12
+ from modules import processing, shared, sd_samplers, images, devices, scripts, script_callbacks, modelloader
13
+ from modules.processing import Processed, process_images, fix_seed, StableDiffusionProcessingImg2Img, StableDiffusionProcessingTxt2Img
14
+ from modules.shared import opts, cmd_opts, state
15
+
16
+ from modules.sd_models import model_hash
17
+ from modules.paths import models_path
18
+ from basicsr.utils.download_util import load_file_from_url
19
+
20
+ dd_models_path = os.path.join(models_path, "mmdet")
21
+
22
+
23
def list_models(model_path):
    """Build 'name [hash]' dropdown titles for every .pth model found under model_path."""
    model_list = modelloader.load_models(model_path=model_path, ext_filter=[".pth"])

    def modeltitle(path, shorthash):
        # Show the path relative to model_path when the file lives under it.
        abspath = os.path.abspath(path)
        name = abspath.replace(model_path, '') if abspath.startswith(model_path) else os.path.basename(path)
        if name[:1] in ("\\", "/"):
            name = name[1:]
        # shortname: separators flattened, extension stripped (unused by this caller).
        shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
        return f'{name} [{shorthash}]', shortname

    return [modeltitle(filename, model_hash(filename))[0] for filename in model_list]
48
+
49
def startup():
    """One-time extension bootstrap: install mmdet dependencies and fetch default detection models.

    Installs pinned openmim/mmcv-full/mmdet when mmdet is missing, then downloads
    the default bbox/segm detectors (weights .pth plus matching mmdet config .py)
    if no models exist under dd_models_path.
    """
    from launch import is_installed, run
    if not is_installed("mmdet"):
        python = sys.executable
        # Pinned versions: the inference code assumes the mmdet 2.x API.
        run(f'"{python}" -m pip install -U openmim==0.3.7', desc="Installing openmim", errdesc="Couldn't install openmim")
        run(f'"{python}" -m mim install mmcv-full==1.7.1', desc=f"Installing mmcv-full", errdesc=f"Couldn't install mmcv-full")
        run(f'"{python}" -m pip install mmdet==2.28.2', desc=f"Installing mmdet", errdesc=f"Couldn't install mmdet")

    if (len(list_models(dd_models_path)) == 0):
        print("No detection models found, downloading...")
        bbox_path = os.path.join(dd_models_path, "bbox")
        segm_path = os.path.join(dd_models_path, "segm")
        # Each model needs both its checkpoint (.pth) and its mmdet config (.py) side by side.
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/bbox/mmdet_anime-face_yolov3.py", bbox_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/segm/mmdet_dd-person_mask2former.pth", segm_path)
        load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/segm/mmdet_dd-person_mask2former.py", segm_path)
65
# Run the bootstrap (dependency install + model download) when the extension module loads.
startup()
67
+
68
def gr_show(visible=True):
    """Build a Gradio update dict that toggles a component's visibility."""
    return dict(visible=visible, __type__="update")
70
+
71
+ class Script(scripts.Script):
72
    def title(self):
        # Name shown in the WebUI script dropdown.
        return "ddetailer + sdupscale"
74
+
75
    def show(self, is_img2img):
        # txt2img-only script: hidden on the img2img tab.
        return not is_img2img
77
+
78
+ def ui(self, is_img2img):
79
+ import modules.ui
80
+
81
+ sample_list = [x.name for x in shared.list_samplers()]
82
+ sample_list.remove('PLMS')
83
+ sample_list.remove('UniPC')
84
+ sample_list.remove('DDIM')
85
+ sample_list.insert(0,"Original")
86
+ model_list = list_models(dd_models_path)
87
+ model_list.insert(0, "None")
88
+
89
+ enable_script_names = gr.Textbox(label="Enable Script(Extension)", elem_id="t2i_dd_prompt", value='dynamic_thresholding;dynamic_prompting',show_label=True, lines=1, placeholder="Extension python file name(ex - dynamic_thresholding;dynamic_prompting)")
90
+ scalevalue = gr.Slider(minimum=1, maximum=16, step=0.5, label='Resize', value=2)
91
+ overlap = gr.Slider(minimum=0, maximum=256, step=32, label='Tile overlap', value=32)
92
+ rewidth = gr.Slider(minimum=0, maximum=1024, step=64, label='Width', value=512)
93
+ reheight = gr.Slider(minimum=0, maximum=1024, step=64, label='Height', value=512)
94
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value='R-ESRGAN 4x+ Anime6B', type="index")
95
+ denoising_strength = gr.Slider(minimum=0, maximum=1.0, step=0.01, label='Denoising strength', value=0)
96
+ upscaler_sample = gr.Dropdown(label='Upscaler Sampling', choices=sample_list, value=sample_list[0], visible=True, type="value")
97
+ detailer_sample = gr.Dropdown(label='Detailer Sampling', choices=sample_list, value=sample_list[0], visible=True, type="value")
98
+
99
+
100
+ ret = [enable_script_names, scalevalue, upscaler_sample, detailer_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength]
101
+
102
+ with gr.Group():
103
+ if not is_img2img:
104
+ with gr.Row():
105
+ dd_prompt = gr.Textbox(label="dd_prompt", elem_id="t2i_dd_prompt", show_label=False, lines=3, placeholder="Ddetailer Prompt")
106
+
107
+ with gr.Row():
108
+ dd_neg_prompt = gr.Textbox(label="dd_neg_prompt", elem_id="t2i_dd_neg_prompt", show_label=False, lines=2, placeholder="Ddetailer Negative prompt")
109
+
110
+ with gr.Row():
111
+ dd_model_a = gr.Dropdown(label="Primary detection model (A)", choices=model_list,value = model_list[2], visible=True, type="value")
112
+
113
+ with gr.Row():
114
+ dd_conf_a = gr.Slider(label='Detection confidence threshold % (A)', minimum=0, maximum=100, step=1, value=30, visible=True)
115
+ dd_dilation_factor_a = gr.Slider(label='Dilation factor (A)', minimum=0, maximum=255, step=1, value=20, visible=True)
116
+
117
+ with gr.Row():
118
+ dd_offset_x_a = gr.Slider(label='X offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=True)
119
+ dd_offset_y_a = gr.Slider(label='Y offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=True)
120
+
121
+ with gr.Row():
122
+ dd_bitwise_op = gr.Radio(label='Bitwise operation', choices=['None', 'A&B', 'A-B'], value="A&B", visible=True)
123
+
124
+ br = gr.HTML("<br>")
125
+
126
+ with gr.Group():
127
+ with gr.Row():
128
+ dd_model_b = gr.Dropdown(label="Secondary detection model (B) (optional)", choices=model_list,value = model_list[1], visible =True, type="value")
129
+
130
+ with gr.Row():
131
+ dd_conf_b = gr.Slider(label='Detection confidence threshold % (B)', minimum=0, maximum=100, step=1, value=30, visible=True)
132
+ dd_dilation_factor_b = gr.Slider(label='Dilation factor (B)', minimum=0, maximum=255, step=1, value=10, visible=True)
133
+
134
+ with gr.Row():
135
+ dd_offset_x_b = gr.Slider(label='X offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=True)
136
+ dd_offset_y_b = gr.Slider(label='Y offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=True)
137
+
138
+ with gr.Group():
139
+ with gr.Row():
140
+ dd_mask_blur = gr.Slider(label='Mask blur ', minimum=0, maximum=64, step=1, value=4, visible=(not is_img2img))
141
+ dd_denoising_strength = gr.Slider(label='Denoising strength (Inpaint)', minimum=0.0, maximum=1.0, step=0.01, value=0.4, visible=(not is_img2img))
142
+
143
+ with gr.Row():
144
+ dd_inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution ', value=True, visible = (not is_img2img))
145
+ dd_inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels ', minimum=0, maximum=256, step=4, value=32, visible=(not is_img2img))
146
+
147
+ dd_model_a.change(
148
+ lambda modelname: {
149
+ dd_model_b:gr_show( modelname != "None" ),
150
+ dd_conf_a:gr_show( modelname != "None" ),
151
+ dd_dilation_factor_a:gr_show( modelname != "None"),
152
+ dd_offset_x_a:gr_show( modelname != "None" ),
153
+ dd_offset_y_a:gr_show( modelname != "None" )
154
+
155
+ },
156
+ inputs= [dd_model_a],
157
+ outputs =[dd_model_b, dd_conf_a, dd_dilation_factor_a, dd_offset_x_a, dd_offset_y_a]
158
+ )
159
+
160
+ dd_model_b.change(
161
+ lambda modelname: {
162
+ dd_bitwise_op:gr_show( modelname != "None" ),
163
+ dd_conf_b:gr_show( modelname != "None" ),
164
+ dd_dilation_factor_b:gr_show( modelname != "None"),
165
+ dd_offset_x_b:gr_show( modelname != "None" ),
166
+ dd_offset_y_b:gr_show( modelname != "None" )
167
+ },
168
+ inputs= [dd_model_b],
169
+ outputs =[dd_bitwise_op, dd_conf_b, dd_dilation_factor_b, dd_offset_x_b, dd_offset_y_b]
170
+ )
171
+
172
+ ret += [dd_model_a,
173
+ dd_conf_a, dd_dilation_factor_a,
174
+ dd_offset_x_a, dd_offset_y_a,
175
+ dd_bitwise_op,
176
+ br,
177
+ dd_model_b,
178
+ dd_conf_b, dd_dilation_factor_b,
179
+ dd_offset_x_b, dd_offset_y_b,
180
+ dd_mask_blur, dd_denoising_strength,
181
+ dd_inpaint_full_res, dd_inpaint_full_res_padding
182
+ ]
183
+ if not is_img2img:
184
+ ret += [dd_prompt, dd_neg_prompt]
185
+
186
+ return ret
187
+
188
+ def run(self, p, enable_script_names, scalevalue, upscaler_sample, detailer_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength,
189
+ dd_model_a,
190
+ dd_conf_a, dd_dilation_factor_a,
191
+ dd_offset_x_a, dd_offset_y_a,
192
+ dd_bitwise_op,
193
+ br,
194
+ dd_model_b,
195
+ dd_conf_b, dd_dilation_factor_b,
196
+ dd_offset_x_b, dd_offset_y_b,
197
+ dd_mask_blur, dd_denoising_strength,
198
+ dd_inpaint_full_res, dd_inpaint_full_res_padding,
199
+ dd_prompt=None, dd_neg_prompt=None):
200
+ processing.fix_seed(p)
201
+ initial_info = []
202
+ initial_prompt = []
203
+ initial_negative = []
204
+ p.batch_size = 1
205
+ ddetail_count = p.n_iter
206
+ p.n_iter = 1
207
+ p.do_not_save_grid = True
208
+ p.do_not_save_samples = True
209
+ p_txt = p
210
+ i2i_sample = ''
211
+ if detailer_sample == 'Original':
212
+ i2i_sample = 'Euler' if p_txt.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p_txt.sampler_name
213
+ else:
214
+ i2i_sample = detailer_sample
215
+ p = StableDiffusionProcessingImg2Img(
216
+ init_images = None,
217
+ resize_mode = 0,
218
+ denoising_strength = dd_denoising_strength,
219
+ mask = None,
220
+ mask_blur= dd_mask_blur,
221
+ inpainting_fill = 1,
222
+ inpaint_full_res = dd_inpaint_full_res,
223
+ inpaint_full_res_padding= dd_inpaint_full_res_padding,
224
+ inpainting_mask_invert= 0,
225
+ sd_model=p_txt.sd_model,
226
+ outpath_samples=p_txt.outpath_samples,
227
+ outpath_grids=p_txt.outpath_grids,
228
+ prompt='',
229
+ negative_prompt='',
230
+ styles=p_txt.styles,
231
+ seed=p_txt.seed,
232
+ subseed=p_txt.subseed,
233
+ subseed_strength=p_txt.subseed_strength,
234
+ seed_resize_from_h=p_txt.seed_resize_from_h,
235
+ seed_resize_from_w=p_txt.seed_resize_from_w,
236
+ sampler_name=i2i_sample,
237
+ n_iter=p_txt.n_iter,
238
+ steps=p_txt.steps,
239
+ cfg_scale=p_txt.cfg_scale,
240
+ width=p_txt.width,
241
+ height=p_txt.height,
242
+ tiling=p_txt.tiling,
243
+ )
244
+ p.do_not_save_grid = True
245
+ p.do_not_save_samples = True
246
+ p.override_settings = {}
247
+
248
+ if upscaler_sample == 'Original':
249
+ i2i_sample = 'Euler' if p_txt.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p_txt.sampler_name
250
+ else:
251
+ i2i_sample = upscaler_sample
252
+ p2 = StableDiffusionProcessingImg2Img(
253
+ sd_model=p_txt.sd_model,
254
+ outpath_samples=p_txt.outpath_samples,
255
+ outpath_grids=p_txt.outpath_grids,
256
+ prompt='',
257
+ negative_prompt='',
258
+ styles=p_txt.styles,
259
+ seed=p_txt.seed,
260
+ subseed=p_txt.subseed,
261
+ subseed_strength=p_txt.subseed_strength,
262
+ seed_resize_from_h=p_txt.seed_resize_from_h,
263
+ seed_resize_from_w=p_txt.seed_resize_from_w,
264
+ seed_enable_extras=True,
265
+ sampler_name=i2i_sample,
266
+ batch_size=1,
267
+ n_iter=1,
268
+ steps=p_txt.steps,
269
+ cfg_scale=p_txt.cfg_scale,
270
+ width=rewidth,
271
+ height=reheight,
272
+ restore_faces=p_txt.restore_faces,
273
+ tiling=p_txt.tiling,
274
+ init_images=[],
275
+ mask=None,
276
+ mask_blur=dd_mask_blur,
277
+ inpainting_fill=1,
278
+ resize_mode=0,
279
+ denoising_strength=denoising_strength,
280
+ inpaint_full_res=dd_inpaint_full_res,
281
+ inpaint_full_res_padding=dd_inpaint_full_res_padding,
282
+ inpainting_mask_invert=0,
283
+ )
284
+ p2.do_not_save_grid = True
285
+ p2.do_not_save_samples = True
286
+ p2.override_settings = {}
287
+
288
+ upscaler = shared.sd_upscalers[upscaler_index]
289
+ script_names_list = [x.strip()+'.py' for x in enable_script_names.split(';') if len(x) > 1]
290
+ processing.fix_seed(p2)
291
+ seed = p_txt.seed
292
+
293
+ p_txt.scripts.scripts = [x for x in p_txt.scripts.scripts if os.path.basename(x.filename) not in [__file__]]
294
+ t2i_scripts = p_txt.scripts.scripts.copy()
295
+ i2i_scripts = [x for x in t2i_scripts if os.path.basename(x.filename) in script_names_list]
296
+ t2i_scripts_always = p_txt.scripts.alwayson_scripts.copy()
297
+ i2i_scripts_always = [x for x in t2i_scripts_always if os.path.basename(x.filename) in script_names_list]
298
+ p.scripts = p_txt.scripts
299
+ p.script_args = p_txt.script_args
300
+ p2.scripts = p_txt.scripts
301
+ p2.script_args = p_txt.script_args
302
+
303
+ p_txt.extra_generation_params["Tile upscale value"] = scalevalue
304
+ p_txt.extra_generation_params["Tile upscale width"] = rewidth
305
+ p_txt.extra_generation_params["Tile upscale height"] = reheight
306
+ p_txt.extra_generation_params["Tile upscale overlap"] = overlap
307
+ p_txt.extra_generation_params["Tile upscale upscaler"] = upscaler.name
308
+
309
+ print(f"DDetailer {p.width}x{p.height}.")
310
+
311
+ output_images = []
312
+ result_images = []
313
+ state.job_count += ddetail_count
314
+ for n in range(ddetail_count):
315
+ devices.torch_gc()
316
+ start_seed = seed + n
317
+ print(f"Processing initial image for output generation {n + 1} (T2I).")
318
+ p_txt.seed = start_seed
319
+ p_txt.scripts.scripts = t2i_scripts
320
+ p_txt.scripts.alwayson_scripts = t2i_scripts_always
321
+ processed = processing.process_images(p_txt)
322
+ initial_info.append(processed.info)
323
+ posi, nega = processed.all_prompts[0], processed.all_negative_prompts[0]
324
+ initial_prompt.append(posi)
325
+ initial_negative.append(nega)
326
+ p.prompt = posi if not dd_prompt else dd_prompt
327
+ p.negative_prompt = nega if not dd_neg_prompt else dd_neg_prompt
328
+ init_image = processed.images[0]
329
+
330
+ output_images.append(init_image)
331
+ masks_a = []
332
+
333
+ # Primary run
334
+ if (dd_model_a != "None"):
335
+ label_a = "A"
336
+ if (dd_model_b != "None" and dd_bitwise_op != "None"):
337
+ label_a = dd_bitwise_op
338
+ results_a = inference(init_image, dd_model_a, dd_conf_a/100.0, label_a)
339
+ masks_a = create_segmasks(results_a)
340
+ masks_a = dilate_masks(masks_a, dd_dilation_factor_a, 1)
341
+ masks_a = offset_masks(masks_a,dd_offset_x_a, dd_offset_y_a)
342
+ if (dd_model_b != "None" and dd_bitwise_op != "None"):
343
+ label_b = "B"
344
+ results_b = inference(init_image, dd_model_b, dd_conf_b/100.0, label_b)
345
+ masks_b = create_segmasks(results_b)
346
+ masks_b = dilate_masks(masks_b, dd_dilation_factor_b, 1)
347
+ masks_b = offset_masks(masks_b,dd_offset_x_b, dd_offset_y_b)
348
+ if (len(masks_b) > 0):
349
+ combined_mask_b = combine_masks(masks_b)
350
+ for i in reversed(range(len(masks_a))):
351
+ if (dd_bitwise_op == "A&B"):
352
+ masks_a[i] = bitwise_and_masks(masks_a[i], combined_mask_b)
353
+ elif (dd_bitwise_op == "A-B"):
354
+ masks_a[i] = subtract_masks(masks_a[i], combined_mask_b)
355
+ if (is_allblack(masks_a[i])):
356
+ del masks_a[i]
357
+ for result in results_a:
358
+ del result[i]
359
+
360
+ else:
361
+ print("No model B detections to overlap with model A masks")
362
+ results_a = []
363
+ masks_a = []
364
+
365
+ if (len(masks_a) > 0):
366
+ results_a = update_result_masks(results_a, masks_a)
367
+ gen_count = len(masks_a)
368
+ state.job_count += gen_count
369
+ print(f"Processing {gen_count} model {label_a} detections for output generation {n + 1} (I2I).")
370
+ p.seed = start_seed
371
+ p.init_images = [init_image]
372
+
373
+ for i in range(gen_count):
374
+ p.image_mask = masks_a[i]
375
+
376
+ p.scripts.scripts = i2i_scripts
377
+ p.scripts.alwayson_scripts = i2i_scripts_always
378
+ processed = processing.process_images(p)
379
+ p.seed = processed.seed + 1
380
+ p.init_images = processed.images
381
+
382
+ if (gen_count > 0):
383
+ output_images[n] = processed.images[0]
384
+
385
+ else:
386
+ print(f"No model {label_a} detections for output generation {n} with current settings.")
387
+
388
+ state.job = f"Generation {n + 1} out of {state.job_count} DDetailer"
389
+
390
+ p2.init_images = [output_images[n]]
391
+ p2.prompt = initial_prompt[n]
392
+ p2.negative_prompt = initial_negative[n]
393
+
394
+ init_img = output_images[n]
395
+
396
+ if(upscaler.name != "None"):
397
+ img = upscaler.scaler.upscale(init_img, scalevalue, upscaler.data_path)
398
+ else:
399
+ img = init_img
400
+
401
+ devices.torch_gc()
402
+
403
+ grid = images.split_grid(img, tile_w=rewidth, tile_h=reheight, overlap=overlap)
404
+
405
+ batch_size = p2.batch_size
406
+
407
+ work = []
408
+
409
+ for y, h, row in grid.tiles:
410
+ for tiledata in row:
411
+ work.append(tiledata[2])
412
+
413
+ batch_count = math.ceil(len(work) / batch_size)
414
+ state.job_count += batch_count
415
+
416
+ print(f"Tile upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches (I2I).")
417
+
418
+ p2.seed = start_seed
419
+
420
+ work_results = []
421
+ for i in range(batch_count):
422
+ p2.batch_size = batch_size
423
+ p2.init_images = work[i*batch_size:(i+1)*batch_size]
424
+
425
+ state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
426
+ p2.scripts.scripts = i2i_scripts
427
+ p2.scripts.alwayson_scripts = i2i_scripts_always
428
+ processed = processing.process_images(p2)
429
+
430
+ p2.seed = processed.seed + 1
431
+ work_results += processed.images
432
+
433
+ image_index = 0
434
+ for y, h, row in grid.tiles:
435
+ for tiledata in row:
436
+ tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (rewidth, reheight))
437
+ image_index += 1
438
+ combined_image = images.combine_grid(grid)
439
+ result_images.append(combined_image)
440
+ images.save_image(combined_image, p.outpath_samples, "", start_seed, initial_prompt[n], opts.samples_format, info=initial_info[n], p=p_txt)
441
+
442
+ return Processed(p_txt, result_images, start_seed, initial_info[0], all_prompts=initial_prompt, all_negative_prompts=initial_negative, infotexts=initial_info)
443
+
444
def modeldataset(model_shortname):
    """Return the class-label dataset name for a detection model.

    mmdet segmentation checkpoints use COCO class names; everything else
    is treated as a plain bounding-box model.
    """
    resolved = modelpath(model_shortname)
    is_segmentation = "mmdet" in resolved and "segm" in resolved
    return 'coco' if is_segmentation else 'bbox'
451
+
452
def modelpath(model_shortname):
    """Resolve a "name [hash]" short name to its full ``.pth`` checkpoint path.

    Scans the detection-model directory and matches on the bracketed hash.
    Returns None when no checkpoint hash matches.
    """
    candidates = modelloader.load_models(model_path=dd_models_path, ext_filter=[".pth"])
    wanted_hash = model_shortname.split("[")[-1].split("]")[0]
    for candidate in candidates:
        if model_hash(candidate) == wanted_hash:
            return candidate
    return None
458
+
459
def update_result_masks(results, masks):
    """Replace the segmentation entries (``results[2]``) with boolean
    versions of *masks*, index for index. Mutates and returns *results*.
    """
    for idx, mask in enumerate(masks):
        results[2][idx] = np.array(mask, dtype=bool)
    return results
464
+
465
def is_allblack(mask):
    """Return True when *mask* contains no nonzero pixel.

    Uses numpy instead of ``cv2.countNonZero``: countNonZero only accepts
    single-channel arrays, while ``.any()`` handles masks of any shape
    (and is equivalent for the uint8 masks used here).
    """
    return not np.array(mask).any()
468
+
469
def bitwise_and_masks(mask1, mask2):
    """Return the pixelwise AND of two masks as a new PIL image."""
    arr1 = np.array(mask1)
    arr2 = np.array(mask2)
    return Image.fromarray(cv2.bitwise_and(arr1, arr2))
475
+
476
def subtract_masks(mask1, mask2):
    """Return *mask1* minus *mask2* (saturating subtraction) as a new PIL image."""
    arr1 = np.array(mask1)
    arr2 = np.array(mask2)
    return Image.fromarray(cv2.subtract(arr1, arr2))
482
+
483
def dilate_masks(masks, dilation_factor, iter=1):
    """Dilate each mask with a square ``dilation_factor`` x ``dilation_factor`` kernel.

    Returns the input list unchanged when ``dilation_factor`` is 0.

    Bug fix: the original called ``cv2.dilate(cv2_mask, kernel, iter)``,
    which passes *iter* as the positional ``dst`` output argument — the
    iteration count was silently ignored. ``iterations`` must be a keyword.
    """
    if dilation_factor == 0:
        return masks
    kernel = np.ones((dilation_factor, dilation_factor), np.uint8)
    dilated_masks = []
    for mask in masks:
        cv2_mask = np.array(mask)
        dilated = cv2.dilate(cv2_mask, kernel, iterations=iter)
        dilated_masks.append(Image.fromarray(dilated))
    return dilated_masks
493
+
494
def offset_masks(masks, offset_x, offset_y):
    """Shift each mask by (offset_x, offset_y) with wraparound.

    Positive offset_y moves content up (rows rolled by -offset_y);
    positive offset_x moves content right. Returns the input list
    unchanged when both offsets are 0.
    """
    if offset_x == 0 and offset_y == 0:
        return masks
    shifted = []
    for mask in masks:
        arr = np.array(mask)
        arr = np.roll(arr, -offset_y, axis=0)
        arr = np.roll(arr, offset_x, axis=1)
        shifted.append(Image.fromarray(arr))
    return shifted
506
+
507
def combine_masks(masks):
    """OR-combine a non-empty list of masks into a single PIL mask."""
    combined = np.array(masks[0])
    for mask in masks[1:]:
        combined = cv2.bitwise_or(combined, np.array(mask))
    return Image.fromarray(combined)
516
+
517
def create_segmasks(results):
    """Convert the boolean segmentations in ``results[2]`` into 0/255
    uint8 PIL masks, one per detection.
    """
    return [Image.fromarray(segm.astype(np.uint8) * 255) for segm in results[2]]
526
+
527
+ import mmcv
528
+ from mmdet.core import get_classes
529
+ from mmdet.apis import (inference_detector,
530
+ init_detector)
531
+
532
def get_device():
    """Pick the mmdet inference device from the ``--device-id`` launch flag.

    NOTE(review): falls back to "cpu" when no device id is given — the
    upstream ddetailer falls back to "cuda"; confirm this is intentional.
    """
    device_id = shared.cmd_opts.device_id
    if device_id is None:
        return "cpu"
    return f"cuda:{device_id}"
539
+
540
def inference(image, modelname, conf_thres, label):
    """Run detection on *image*, dispatching on the model's path.

    "bbox" mmdet checkpoints go through the bounding-box pipeline,
    "segm" ones through the segmentation pipeline. Returns
    [labels, bboxes, segms] filtered by *conf_thres*.

    Fix: the original left ``results`` unbound when the path matched
    neither pipeline, raising a confusing UnboundLocalError; raise a
    descriptive ValueError instead.
    """
    path = modelpath(modelname)
    if "mmdet" in path and "bbox" in path:
        return inference_mmdet_bbox(image, modelname, conf_thres, label)
    if "mmdet" in path and "segm" in path:
        return inference_mmdet_segm(image, modelname, conf_thres, label)
    raise ValueError(f"Unsupported detection model path: {path}")
547
+
548
def inference_mmdet_segm(image, modelname, conf_thres, label):
    """Run an mmdet instance-segmentation model on *image*.

    Returns [names, bboxes, segms] where names are "<label>-<class>",
    bboxes carry the confidence in their last column, and only detections
    with confidence > *conf_thres* are kept.
    """
    model_checkpoint = modelpath(modelname)
    # mmdet convention: the config .py sits next to the checkpoint.
    model_config = os.path.splitext(model_checkpoint)[0] + ".py"
    model_device = get_device()
    model = init_detector(model_config, model_checkpoint, device=model_device)
    mmdet_results = inference_detector(model, np.array(image))
    bbox_results, segm_results = mmdet_results
    dataset = modeldataset(modelname)
    classes = get_classes(dataset)
    # Per-class label indices, flattened below to align with the stacked bboxes.
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_results)
    ]
    # NOTE(review): only class 0's bbox array is checked for emptiness; if
    # class 0 has no detections but other classes do, they are discarded —
    # confirm this is the intended early-out.
    n,m = bbox_results[0].shape
    if (n == 0):
        return [[],[],[]]
    labels = np.concatenate(labels)
    bboxes = np.vstack(bbox_results)
    segms = mmcv.concat_list(segm_results)
    # Last bbox column is the detection confidence.
    filter_inds = np.where(bboxes[:,-1] > conf_thres)[0]
    results = [[],[],[]]
    for i in filter_inds:
        results[0].append(label + "-" + classes[labels[i]])
        results[1].append(bboxes[i])
        results[2].append(segms[i])

    return results
575
+
576
def inference_mmdet_bbox(image, modelname, conf_thres, label):
    """Run an mmdet bounding-box model on *image*.

    Synthesizes a filled-rectangle boolean mask per detection so the
    result has the same [names, bboxes, segms] shape as the segm pipeline;
    keeps only detections with confidence > *conf_thres*.
    """
    model_checkpoint = modelpath(modelname)
    # mmdet convention: the config .py sits next to the checkpoint.
    model_config = os.path.splitext(model_checkpoint)[0] + ".py"
    model_device = get_device()
    model = init_detector(model_config, model_checkpoint, device=model_device)
    results = inference_detector(model, np.array(image))
    cv2_image = np.array(image)
    # RGB -> BGR for OpenCV.
    cv2_image = cv2_image[:, :, ::-1].copy()
    cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY)

    # One filled-rectangle mask per raw detection, built BEFORE filtering
    # so segm indices line up with the stacked bbox rows below.
    segms = []
    for (x0, y0, x1, y1, conf) in results[0]:
        cv2_mask = np.zeros((cv2_gray.shape), np.uint8)
        cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1)
        cv2_mask_bool = cv2_mask.astype(bool)
        segms.append(cv2_mask_bool)

    n,m = results[0].shape
    if (n == 0):
        return [[],[],[]]
    bboxes = np.vstack(results[0])
    # Last bbox column is the detection confidence.
    filter_inds = np.where(bboxes[:,-1] > conf_thres)[0]
    # Rebind `results` from mmdet output to the [names, bboxes, segms] triple.
    results = [[],[],[]]
    for i in filter_inds:
        results[0].append(label)
        results[1].append(bboxes[i])
        results[2].append(segms[i])

    return results
hauteCouture_v11.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd2d6348ed24cba19cb606c3cd2c23ed02092f728fc499906a08c6eaa99b2d2d
3
+ size 151113439
jkBigNippleTipLite_V01.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26be3fd584511d40258d20fc8fa6c136404cbb93459d121f9608501426b0e0d0
3
+ size 37868009
jkPuffyNipples_V01.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae5398b1bf167237aaa23e04d7ca400c6cda5f4135e2275c9afac82807d24e1b
3
+ size 4834292
k-military2-01.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cba07ad23b0a9f16da6c696a3121b90980f002ca73018999ccaa62f06b72c19
3
+ size 37866495
k-military2-02.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1265bc09daa7818529c78fee931d10c674cab56eef2774c787400ca81af1b8f0
3
+ size 37866493
k-military2-03.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e49356652784ac07ff09bd0a4fff4f20a75237db24aa047ef4f015d211281c05
3
+ size 37866495
k-military2-04.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83374a9d5e1cfa5af68448cf7b2ccb8798a3e86bd616b4e752b20e29435cff33
3
+ size 37866495
k-military2-05.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c880ca5eeaf41af306cccf7a1c72f9d44cebede47212b338c6a9850fcde0ba9
3
+ size 37866495
k-military2-06.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4036b60f5d4b489eabaffeeea2cdf13c01bf3f520cff78876c475aa9d7c96c55
3
+ size 37866494
k-military2-07.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77e908e9340dc506e3db67e87d585fcc5d970d4744c5872d12038749fe9972e0
3
+ size 37866495
k-military2-08.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb88cb5eb73e32e98621a2379a515f21951208a14fdd4e05810785198eea551c
3
+ size 37866494
k-military2-09.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c60d12c4deb1c33d3ad8c35d0d9113a27803447afd8cbe718b9bbfceda78bead
3
+ size 37866494
k-military2-10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:373318e7d075b08af0dfda04ff5b718ca91e4e7f186a242487353f517d0cfc52
3
+ size 37866496
latexIDLoraLatex_loconV02.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a2ac8f0aa468c231c26e5b5b98dc1f09c278a7dff5031e88fc11362e6878acf
3
+ size 11997284
lightAndShadow_v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15a630ab2255460b2e99b1e5a6765a1cedcd210e2ffca32474bd02df24232624
3
+ size 151110122
nippleHeartPiercing_2.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:041b3839f66e4949538e4c9eb87be1102994cdb4c56db3e816e05ade42b7c33f
3
+ size 37866880
nipplePiercing_v20.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c45e83d7b6b35f9153cddd1012269313977b28cb395b7a8e31556018a3b4edf
3
+ size 151111469
nymphs_v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fcb6d23a534a58bfa6d6626246f6a9e093a2032c425e65e7e4b41c524869dfb
3
+ size 151112325
oversizedHoodie_oversizedHoodieV10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fef13d4fab0763a07972b73b872317836bc54aa55a68f479100f22995c6eaee0
3
+ size 37868596
photorealisticQueenOf_v202.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f99a692c9776ca9aef81242664e735941668145b3155294cf1c647ab5f7f0dc9
3
+ size 75612193
photorealisticWombTattoos_401.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79aff61f8d2265653cb958bc2c18f89ebcde838e6705c06861c525cd2197d91b
3
+ size 37864053
povMissionaryAnal_v5.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ff7df71766918601b3cf64ced49838f784b3d499a07e8778fb54c7c015c787e
3
+ size 151114260
pregnant_v10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e2168832614e4e8191a2fcd1b5526bc630ba0332aaffdefff5c01437d719b43
3
+ size 172345376
ridingDildoSexActLora_v11.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4352bb7a1c87684d662b212bf7578c54641daef7c1b30064ad8b64a48901bb83
3
+ size 9618189
schoolUniform_naughty.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9928f7a675b11b0c43c36e9f5cf4f847ad95ef98d6f8f17af3dc5571e0469b9
3
+ size 56749594
sexyCostume_1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1628891bf53cc19a5ead96c277392d0c3880b41457d07d46f1fa2b0d2393559d
3
+ size 113391649
shinyOiledSkin_v1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddfd403ac5d244c48531973130c7416d2e345a17af53e843a8e31d9adafbe861
3
+ size 75613273
sittingPussyPosition_highPrecision.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7ffb3e5218eb04d98ae2c661eb4371bde62f76487f2c36612a5f7309a03c489
3
+ size 377832529
ssb_v17-40.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63cb1b8aa403e8241a0c5aa4644db7b8c51a3fa9c43b4a3794b75460a2edf970
3
+ size 37865658
ssb_v18-10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f09b518a94753981e472df5c0984705ac0cbad24c267805910cea051fa7e3cea
3
+ size 37865658
ssb_v18-12.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0c6537adef43a5e4f730e422b7adc89adb6ab3a36a2fa7488d4d84133ab3a4e
3
+ size 37865656
ssb_v18-14.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaf8f94d2458e257ca3fddb209b985ae8c044f7d07d95cf28087c81ba4d82c18
3
+ size 37865657
ssb_v18-16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3881676645abd7dd337c37e845b2445e4b73434ad8b3edfe3f8046ae947d15a0
3
+ size 37865657
ssb_v18-18.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edd663f69e68b7c89928f742b79bfa751339ae92a5120a57b8a04c0c5e7c0f9d
3
+ size 37865657
ssb_v18-20.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df8e2370136354472c4288ba71dca28389991bac6196448909c47ab98f4b6744
3
+ size 37865658
ssb_v21-80.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89fa5e6b88e438754394c4fc6a3ddf4c55d13a79f8af58da68394fdaa516f211
3
+ size 37865655
ssb_v21-90.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7971b8cb99d8695acac2ad68304d82d9f278561aeb9d97a2a4159f84088fd0b
3
+ size 37865655
ssb_v7-10.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5410c50847b6fd2ec3519efbb75e835b630ead361a9db67ec3f07df789dbeb93
3
+ size 18988594
ssb_v8-30.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07891e6094480fe3c928ec20d5310b2c42892154e470ebca5723b4dcac36d494
3
+ size 18988595
ssb_v9-000001.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c157d67bd945614e9ee705a05dd9279cc203d0a0de0c6bc89065dfe3a17997a0
3
+ size 75613498
ssb_v9-000002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:602be6fdddcf29dcd6fc5b21b3e8e019ac3a65c3eb234d37bfe5c5563145de17
3
+ size 75613499
ssb_v9-000003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:776b416f4f6e8cfb5fa51e4fb9ec43c4737797b544fdd40eacbf4ed82cd2ae01
3
+ size 75613499
ssb_v9-000004.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:743d1c663b18e70db261706d1503a05f57d5904b1d6b6e8f3dd248363af5bfbf
3
+ size 75613499