Upload 31 files
- .gitattributes +3 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/__init__.py +3 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename.json +194 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_and_counter.json +485 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_in_divided_folders.json +536 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_text_nodes_example.json +822 -0
- ComfyUI/custom_nodes/ComfyUI_yanc/yanc.py +1594 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/__init__.py +9 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/__pycache__/__init__.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/pyproject.toml +15 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/requirements.txt +6 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__init__.py +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/__init__.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/blip_img2txt.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/img2txt_node.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/img_tensor_utils.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/llava_img2txt.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/mini_cpm_img2txt.cpython-310.pyc +0 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/blip_img2txt.py +81 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/description_classifier.py +8 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/img2txt_node.py +209 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/img_tensor_utils.py +129 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/keyword_extract.py +114 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/llava_img2txt.py +131 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/mini_cpm_img2txt.py +53 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/web/show-output-text.js +51 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_001.png +3 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_002.png +3 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_003.png +3 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/img2img.json +523 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/inpaint.json +705 -0
- ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/txt2img.json +498 -0
.gitattributes
CHANGED
@@ -65,3 +65,6 @@ ComfyUI/custom_nodes/ComfyUI-BrushNet/example/sleeping_cat_inpaint3.png filter=lfs diff=lfs merge=lfs -text
 ComfyUI/custom_nodes/ComfyUI-BrushNet/example/sleeping_cat_inpaint5.png filter=lfs diff=lfs merge=lfs -text
 ComfyUI/custom_nodes/ComfyUI-BrushNet/example/sleeping_cat_inpaint6.png filter=lfs diff=lfs merge=lfs -text
 ComfyUI/custom_nodes/ComfyUI-BrushNet/example/test_image3.png filter=lfs diff=lfs merge=lfs -text
+ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_001.png filter=lfs diff=lfs merge=lfs -text
+ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_002.png filter=lfs diff=lfs merge=lfs -text
+ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_003.png filter=lfs diff=lfs merge=lfs -text
ComfyUI/custom_nodes/ComfyUI_yanc/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .yanc import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS

__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
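This is the standard entry point ComfyUI looks for in every custom_nodes package: the module only has to expose a NODE_CLASS_MAPPINGS dict (internal node name to node class) and, optionally, NODE_DISPLAY_NAME_MAPPINGS (internal name to menu label). As a rough sketch of the contract that yanc.py fulfils -- the class and node name below are invented for illustration, not taken from this commit:

# Hypothetical minimal node module illustrating what __init__.py re-exports.
class ExampleUppercase:
    @classmethod
    def INPUT_TYPES(cls):
        # Called by ComfyUI to build the node's input sockets/widgets.
        return {"required": {"text": ("STRING", {"default": ""})}}

    RETURN_TYPES = ("STRING",)  # one STRING output socket
    FUNCTION = "run"            # method invoked when the node executes
    CATEGORY = "YANC/😼 Text"   # menu placement, matching yanc's categories

    def run(self, text):
        return (text.upper(),)  # outputs must be returned as a tuple


# ComfyUI scans custom_nodes/*/__init__.py for these two dictionaries.
NODE_CLASS_MAPPINGS = {"> Example Uppercase": ExampleUppercase}
NODE_DISPLAY_NAME_MAPPINGS = {"> Example Uppercase": "Example Uppercase"}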
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename.json
ADDED
@@ -0,0 +1,194 @@
{
  "last_node_id": 5,
  "last_link_id": 5,
  "nodes": [
    {
      "id": 3, "type": "PreviewImage", "pos": [442, 250], "size": [300, 246],
      "flags": {}, "order": 2, "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 2}],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 4, "type": "> Save Image", "pos": [820, 100], "size": [315, 338],
      "flags": {}, "order": 3, "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 3},
        {"name": "filename_opt", "type": "STRING", "link": 4, "widget": {"name": "filename_opt"}}
      ],
      "properties": {"Node name for S&R": "> Save Image"},
      "widgets_values": ["ComfyUI", "myoutputs", true, ""]
    },
    {
      "id": 1, "type": "> Load Image From Folder", "pos": [440, 100], "size": {"0": 315, "1": 102},
      "flags": {}, "order": 1, "mode": 0,
      "inputs": [{"name": "index", "type": "INT", "link": 5, "widget": {"name": "index"}}],
      "outputs": [
        {"name": "image", "type": "IMAGE", "links": [2, 3], "shape": 3, "slot_index": 0},
        {"name": "file_name", "type": "STRING", "links": [4], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "> Load Image From Folder"},
      "widgets_values": ["myinputs", -1]
    },
    {
      "id": 5, "type": "> Int", "pos": [53, 103], "size": {"0": 315, "1": 82},
      "flags": {}, "order": 0, "mode": 0,
      "outputs": [{"name": "int", "type": "INT", "links": [5], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Int"},
      "widgets_values": [0, "increment"]
    }
  ],
  "links": [
    [2, 1, 0, 3, 0, "IMAGE"],
    [3, 1, 0, 4, 0, "IMAGE"],
    [4, 1, 1, 4, 1, "STRING"],
    [5, 5, 0, 1, 0, "INT"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
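These workflow files are plain LiteGraph serializations, so they can be inspected without launching ComfyUI. A minimal sketch (not part of the commit) that summarizes the graph above, assuming it is run from the repository root:

import json

# Each node records its type and execution order; each link is
# [link_id, src_node, src_slot, dst_node, dst_slot, type].
with open("ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename.json") as f:
    wf = json.load(f)

for node in sorted(wf["nodes"], key=lambda n: n["order"]):
    print(f"node {node['id']:>2}  order {node['order']}  {node['type']}")
for link_id, src, src_slot, dst, dst_slot, typ in wf["links"]:
    print(f"link {link_id}: {src}[{src_slot}] -> {dst}[{dst_slot}]  ({typ})")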
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_and_counter.json
ADDED
@@ -0,0 +1,485 @@
{
  "last_node_id": 10,
  "last_link_id": 13,
  "nodes": [
    {
      "id": 3, "type": "PreviewImage", "pos": [442, 250], "size": [300, 246],
      "flags": {}, "order": 4, "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 2}],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 4, "type": "> Save Image", "pos": [1580, 100], "size": [315, 338],
      "flags": {}, "order": 8, "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 3},
        {"name": "filename_opt", "type": "STRING", "link": 7, "widget": {"name": "filename_opt"}}
      ],
      "properties": {"Node name for S&R": "> Save Image"},
      "widgets_values": ["ComfyUI", "myoutputs", true, ""]
    },
    {
      "id": 1, "type": "> Load Image From Folder", "pos": [440, 100], "size": {"0": 315, "1": 102},
      "flags": {}, "order": 2, "mode": 0,
      "inputs": [{"name": "index", "type": "INT", "link": 5, "widget": {"name": "index"}}],
      "outputs": [
        {"name": "image", "type": "IMAGE", "links": [2, 3], "shape": 3, "slot_index": 0},
        {"name": "file_name", "type": "STRING", "links": [6], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "> Load Image From Folder"},
      "widgets_values": ["myinputs", -1]
    },
    {
      "id": 6, "type": "> Text Combine", "pos": [1220, 160], "size": [315, 130],
      "flags": {}, "order": 7, "mode": 0,
      "inputs": [
        {"name": "text", "type": "STRING", "link": 6, "widget": {"name": "text"}},
        {"name": "text_append", "type": "STRING", "link": 8, "widget": {"name": "text_append"}}
      ],
      "outputs": [{"name": "text", "type": "STRING", "links": [7], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Combine"},
      "widgets_values": ["", "", "_", false]
    },
    {
      "id": 7, "type": "> Int to Text", "pos": [860, 240], "size": [315, 106],
      "flags": {}, "order": 6, "mode": 0,
      "inputs": [{"name": "int", "type": "INT", "link": 13, "widget": {"name": "int"}}],
      "outputs": [{"name": "text", "type": "STRING", "links": [8], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Int to Text"},
      "widgets_values": [0, true, 5]
    },
    {
      "id": 5, "type": "> Int", "pos": [53, 103], "size": {"0": 315, "1": 82},
      "flags": {}, "order": 0, "mode": 0,
      "outputs": [{"name": "int", "type": "INT", "links": [5, 10], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Int"},
      "widgets_values": [124, "increment"]
    },
    {
      "id": 10, "type": "> Float to Int", "pos": [780, 580], "size": [315, 82],
      "flags": {}, "order": 5, "mode": 0,
      "inputs": [{"name": "float", "type": "FLOAT", "link": 12, "widget": {"name": "float"}}],
      "outputs": [{"name": "int", "type": "INT", "links": [13], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Float to Int"},
      "widgets_values": [0, "floor"]
    },
    {
      "id": 8, "type": "SimpleMath+", "pos": [420, 560], "size": {"0": 315, "1": 78},
      "flags": {}, "order": 3, "mode": 0,
      "inputs": [
        {"name": "a", "type": "INT,FLOAT", "link": 10},
        {"name": "b", "type": "INT,FLOAT", "link": 11}
      ],
      "outputs": [
        {"name": "INT", "type": "INT", "links": null, "shape": 3, "slot_index": 0},
        {"name": "FLOAT", "type": "FLOAT", "links": [12], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "SimpleMath+"},
      "widgets_values": ["a/b"]
    },
    {
      "id": 9, "type": "SimpleMath+", "pos": [60, 560], "size": {"0": 315, "1": 78},
      "flags": {}, "order": 1, "mode": 0,
      "inputs": [
        {"name": "a", "type": "INT,FLOAT", "link": null},
        {"name": "b", "type": "INT,FLOAT", "link": null}
      ],
      "outputs": [
        {"name": "INT", "type": "INT", "links": [11], "shape": 3, "slot_index": 0},
        {"name": "FLOAT", "type": "FLOAT", "links": null, "shape": 3}
      ],
      "title": "Amount of Images in Input Folder",
      "properties": {"Node name for S&R": "SimpleMath+"},
      "widgets_values": ["62"]
    }
  ],
  "links": [
    [2, 1, 0, 3, 0, "IMAGE"],
    [3, 1, 0, 4, 0, "IMAGE"],
    [5, 5, 0, 1, 0, "INT"],
    [6, 1, 1, 6, 0, "STRING"],
    [7, 6, 0, 4, 1, "STRING"],
    [8, 7, 0, 6, 1, "STRING"],
    [10, 5, 0, 8, 0, "INT,FLOAT"],
    [11, 9, 0, 8, 1, "INT,FLOAT"],
    [12, 8, 1, 10, 0, "FLOAT"],
    [13, 10, 0, 7, 0, "INT"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
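Read as a pipeline, the graph above builds a zero-padded counter for the output file name: "> Int" supplies a running index, SimpleMath+ divides it by the number of images in the input folder, "> Float to Int" floors the result, "> Int to Text" pads it to five digits, and "> Text Combine" appends it to the source file name with a "_" delimiter. A plain-Python trace of that arithmetic (a sketch of what the graph presumably computes, not yanc code):

def output_name(file_name, index, images_in_folder=62, digits=5):
    batch = int(index / images_in_folder)  # SimpleMath+ "a/b", then "> Float to Int" (floor)
    counter = str(batch).zfill(digits)     # "> Int to Text" with leading zeros, length 5
    return file_name + "_" + counter       # "> Text Combine" with the "_" delimiter

print(output_name("cat", 124))  # -> cat_00002, matching the widget values above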
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_in_divided_folders.json
ADDED
@@ -0,0 +1,536 @@
{
  "last_node_id": 11,
  "last_link_id": 17,
  "nodes": [
    {
      "id": 3, "type": "PreviewImage", "pos": [442, 250], "size": [300, 246],
      "flags": {}, "order": 5, "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 2}],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 10, "type": "> Float to Int", "pos": [780, 580], "size": [315, 82],
      "flags": {}, "order": 6, "mode": 0,
      "inputs": [{"name": "float", "type": "FLOAT", "link": 12, "widget": {"name": "float"}}],
      "outputs": [{"name": "int", "type": "INT", "links": [13], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Float to Int"},
      "widgets_values": [0, "floor"]
    },
    {
      "id": 8, "type": "SimpleMath+", "pos": [420, 560], "size": {"0": 315, "1": 78},
      "flags": {}, "order": 4, "mode": 0,
      "inputs": [
        {"name": "a", "type": "INT,FLOAT", "link": 10},
        {"name": "b", "type": "INT,FLOAT", "link": 11}
      ],
      "outputs": [
        {"name": "INT", "type": "INT", "links": null, "shape": 3, "slot_index": 0},
        {"name": "FLOAT", "type": "FLOAT", "links": [12], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "SimpleMath+"},
      "widgets_values": ["a/b"]
    },
    {
      "id": 9, "type": "SimpleMath+", "pos": [60, 560], "size": {"0": 315, "1": 78},
      "flags": {}, "order": 0, "mode": 0,
      "inputs": [
        {"name": "a", "type": "INT,FLOAT", "link": null},
        {"name": "b", "type": "INT,FLOAT", "link": null}
      ],
      "outputs": [
        {"name": "INT", "type": "INT", "links": [11], "shape": 3, "slot_index": 0},
        {"name": "FLOAT", "type": "FLOAT", "links": null, "shape": 3}
      ],
      "title": "Amount of Images in Input Folder",
      "properties": {"Node name for S&R": "SimpleMath+"},
      "widgets_values": ["62"]
    },
    {
      "id": 4, "type": "> Save Image", "pos": [1580, 100], "size": [315, 338],
      "flags": {}, "order": 9, "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 3},
        {"name": "filename_opt", "type": "STRING", "link": 14, "widget": {"name": "filename_opt"}},
        {"name": "folder", "type": "STRING", "link": 15, "widget": {"name": "folder"}}
      ],
      "properties": {"Node name for S&R": "> Save Image"},
      "widgets_values": ["ComfyUI", "myoutputs", true, ""]
    },
    {
      "id": 1, "type": "> Load Image From Folder", "pos": [440, 100], "size": {"0": 315, "1": 102},
      "flags": {}, "order": 3, "mode": 0,
      "inputs": [{"name": "index", "type": "INT", "link": 5, "widget": {"name": "index"}}],
      "outputs": [
        {"name": "image", "type": "IMAGE", "links": [2, 3], "shape": 3, "slot_index": 0},
        {"name": "file_name", "type": "STRING", "links": [14], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "> Load Image From Folder"},
      "widgets_values": ["myinputs", -1]
    },
    {
      "id": 7, "type": "> Int to Text", "pos": [855, 325], "size": [315, 106],
      "flags": {}, "order": 7, "mode": 0,
      "inputs": [{"name": "int", "type": "INT", "link": 13, "widget": {"name": "int"}}],
      "outputs": [{"name": "text", "type": "STRING", "links": [8], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Int to Text"},
      "widgets_values": [0, true, 5]
    },
    {
      "id": 6, "type": "> Text Combine", "pos": [1220, 220], "size": [315, 130],
      "flags": {}, "order": 8, "mode": 0,
      "inputs": [
        {"name": "text", "type": "STRING", "link": 17, "widget": {"name": "text"}, "slot_index": 0},
        {"name": "text_append", "type": "STRING", "link": 8, "widget": {"name": "text_append"}}
      ],
      "outputs": [{"name": "text", "type": "STRING", "links": [15], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Combine"},
      "widgets_values": ["myoutputs", "", "_", false]
    },
    {
      "id": 11, "type": "PrimitiveNode", "pos": [850, 215], "size": [320, 60],
      "flags": {}, "order": 1, "mode": 0,
      "outputs": [{"name": "STRING", "type": "STRING", "links": [17], "widget": {"name": "text"}}],
      "title": "text",
      "properties": {"Run widget replace on values": false},
      "widgets_values": ["myoutputs"]
    },
    {
      "id": 5, "type": "> Int", "pos": [53, 103], "size": {"0": 315, "1": 82},
      "flags": {}, "order": 2, "mode": 0,
      "outputs": [{"name": "int", "type": "INT", "links": [5, 10], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Int"},
      "widgets_values": [0, "increment"]
    }
  ],
  "links": [
    [2, 1, 0, 3, 0, "IMAGE"],
    [3, 1, 0, 4, 0, "IMAGE"],
    [5, 5, 0, 1, 0, "INT"],
    [8, 7, 0, 6, 1, "STRING"],
    [10, 5, 0, 8, 0, "INT,FLOAT"],
    [11, 9, 0, 8, 1, "INT,FLOAT"],
    [12, 8, 1, 10, 0, "FLOAT"],
    [13, 10, 0, 7, 0, "INT"],
    [14, 1, 1, 4, 1, "STRING"],
    [15, 6, 0, 4, 2, "STRING"],
    [17, 11, 0, 6, 0, "STRING"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_text_nodes_example.json
ADDED
@@ -0,0 +1,822 @@
{
  "last_node_id": 35,
  "last_link_id": 64,
  "nodes": [
    {
      "id": 8, "type": "VAEDecode", "pos": [2040, 700], "size": {"0": 210, "1": 46},
      "flags": {}, "order": 14, "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 7},
        {"name": "vae", "type": "VAE", "link": 8}
      ],
      "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [34], "slot_index": 0}],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 27, "type": "> Save Image", "pos": [2300, 700], "size": [480, 620],
      "flags": {}, "order": 15, "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 34},
        {"name": "filename_opt", "type": "STRING", "link": null, "widget": {"name": "filename_opt"}}
      ],
      "properties": {"Node name for S&R": "> Save Image"},
      "widgets_values": ["ComfyUI", "yanc_demo", true, ""]
    },
    {
      "id": 29, "type": "> Text Pick Random Line", "pos": [300, 560], "size": [315, 106],
      "flags": {}, "order": 5, "mode": 0,
      "inputs": [{"name": "text", "type": "STRING", "link": 39, "widget": {"name": "text"}}],
      "outputs": [{"name": "text", "type": "STRING", "links": [42], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Pick Random Line"},
      "widgets_values": ["", 529102614921446, "randomize"]
    },
    {
      "id": 24, "type": "> Text Combine", "pos": [980, 540], "size": [210, 102],
      "flags": {}, "order": 10, "mode": 0,
      "inputs": [
        {"name": "text", "type": "STRING", "link": 38, "widget": {"name": "text"}},
        {"name": "text_append", "type": "STRING", "link": 54, "widget": {"name": "text_append"}}
      ],
      "outputs": [{"name": "text", "type": "STRING", "links": [52, 53], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Combine"},
      "widgets_values": ["", "", "", true]
    },
    {
      "id": 23, "type": "> Clear Text", "pos": [640, 800], "size": [320, 60],
      "flags": {}, "order": 9, "mode": 0,
      "inputs": [{"name": "text", "type": "STRING", "link": 27, "widget": {"name": "text"}}],
      "outputs": [{"name": "text", "type": "STRING", "links": [54], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Clear Text"},
      "widgets_values": ["", 0.25]
    },
    {
      "id": 7, "type": "CLIPTextEncode", "pos": [1240, 900], "size": {"0": 425.27801513671875, "1": 180.6060791015625},
      "flags": {}, "order": 6, "mode": 0,
      "inputs": [{"name": "clip", "type": "CLIP", "link": 5}],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [6], "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["text, watermark, author, signature, blurry, horror"]
    },
    {
      "id": 6, "type": "CLIPTextEncode", "pos": [1240, 700], "size": [220, 60],
      "flags": {}, "order": 12, "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 3},
        {"name": "text", "type": "STRING", "link": 53, "widget": {"name": "text"}}
      ],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [4], "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"]
    },
    {
      "id": 25, "type": "ConsoleDebug+", "pos": [1260, 540], "size": {"0": 315, "1": 58},
      "flags": {}, "order": 11, "mode": 0,
      "inputs": [{"name": "value", "type": "*", "link": 52}],
      "properties": {"Node name for S&R": "ConsoleDebug+"},
      "widgets_values": ["Generated Prompt: "]
    },
    {
      "id": 5, "type": "EmptyLatentImage", "pos": [1300, 1120], "size": {"0": 315, "1": 106},
      "flags": {}, "order": 0, "mode": 0,
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [2], "slot_index": 0}],
      "properties": {"Node name for S&R": "EmptyLatentImage"},
      "widgets_values": [512, 512, 1]
    },
    {
      "id": 18, "type": "> Text Replace", "pos": [640, 540], "size": [315, 106],
      "flags": {}, "order": 8, "mode": 0,
      "inputs": [
        {"name": "text", "type": "STRING", "link": 64, "widget": {"name": "text"}},
        {"name": "replace", "type": "STRING", "link": 42, "widget": {"name": "replace"}}
      ],
      "outputs": [{"name": "text", "type": "STRING", "links": [38], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Replace"},
      "widgets_values": ["", "_accessory_", ""]
    },
    {
      "id": 28, "type": "> Text", "pos": [-120, 560], "size": {"0": 400, "1": 200},
      "flags": {}, "order": 1, "mode": 0,
      "outputs": [{"name": "text", "type": "STRING", "links": [39], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text"},
      "widgets_values": ["a hat\na necklace\nglasses\na shirt"]
    },
    {
      "id": 22, "type": "> Text Random Weights", "pos": [300, 800], "size": [315, 154],
      "flags": {}, "order": 7, "mode": 0,
      "inputs": [{"name": "text", "type": "STRING", "link": 58, "widget": {"name": "text"}}],
      "outputs": [{"name": "text", "type": "STRING", "links": [27], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text Random Weights"},
      "widgets_values": ["", 0, 1.2000000000000002, 784130165638034, "randomize"]
    },
    {
      "id": 4, "type": "CheckpointLoaderSimple", "pos": [840, 980], "size": {"0": 315, "1": 98},
      "flags": {}, "order": 2, "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [3, 5], "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [8], "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["15\\epicrealism_pureEvolutionV5.safetensors"]
    },
    {
      "id": 31, "type": "> Text", "pos": [-120, 800], "size": {"0": 400, "1": 200},
      "flags": {}, "order": 3, "mode": 0,
      "outputs": [{"name": "text", "type": "STRING", "links": [58], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text"},
      "widgets_values": ["oil painting\npencil sketch\ncoal sketch\ncomic strip\nmonochrome"]
    },
    {
      "id": 3, "type": "KSampler", "pos": [1680, 700], "size": {"0": 315, "1": 262},
      "flags": {}, "order": 13, "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 1},
        {"name": "positive", "type": "CONDITIONING", "link": 4},
        {"name": "negative", "type": "CONDITIONING", "link": 6},
        {"name": "latent_image", "type": "LATENT", "link": 2}
      ],
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [7], "slot_index": 0}],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [0, "fixed", 20, 6, "dpmpp_2m", "karras", 1]
    },
    {
      "id": 33, "type": "> Text", "pos": [-120, 320], "size": {"0": 400, "1": 200},
      "flags": {}, "order": 4, "mode": 0,
      "outputs": [{"name": "text", "type": "STRING", "links": [64], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "> Text"},
      "widgets_values": ["painting of a cat wearing _accessory_\n\nhigh detail"]
    }
  ],
  "links": [
    [1, 4, 0, 3, 0, "MODEL"],
    [2, 5, 0, 3, 3, "LATENT"],
    [3, 4, 1, 6, 0, "CLIP"],
    [4, 6, 0, 3, 1, "CONDITIONING"],
    [5, 4, 1, 7, 0, "CLIP"],
    [6, 7, 0, 3, 2, "CONDITIONING"],
    [7, 3, 0, 8, 0, "LATENT"],
    [8, 4, 2, 8, 1, "VAE"],
    [27, 22, 0, 23, 0, "STRING"],
    [34, 8, 0, 27, 0, "IMAGE"],
    [38, 18, 0, 24, 0, "STRING"],
    [39, 28, 0, 29, 0, "STRING"],
    [42, 29, 0, 18, 1, "STRING"],
    [52, 24, 0, 25, 0, "*"],
    [53, 24, 0, 6, 1, "STRING"],
    [54, 23, 0, 24, 1, "STRING"],
    [58, 31, 0, 22, 0, "STRING"],
    [64, 33, 0, 18, 0, "STRING"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
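This last example chains the yanc text nodes into a prompt builder: a template from "> Text" has its _accessory_ placeholder filled by "> Text Pick Random Line", while a second list of styles passes through "> Text Random Weights" and "> Clear Text" before being appended and fed to CLIPTextEncode. A rough plain-Python rendition -- the node semantics here are inferred from names and widget values, so treat it as a sketch, not as yanc's implementation:

import random

template = "painting of a cat wearing _accessory_\n\nhigh detail"
accessories = ["a hat", "a necklace", "glasses", "a shirt"]
styles = ["oil painting", "pencil sketch", "coal sketch", "comic strip", "monochrome"]

prompt = template.replace("_accessory_", random.choice(accessories))  # > Text Replace
# "> Text Random Weights" presumably assigns each line a weight in [0, 1.2];
# "(text:weight)" is the usual ComfyUI prompt-weight syntax.
styled = ", ".join(f"({s}:{random.uniform(0.0, 1.2):.2f})" for s in styles)
if random.random() < 0.25:  # "> Clear Text" with its 0.25 widget (assumed: chance to blank)
    styled = ""
print((prompt + ", " + styled) if styled else prompt)  # > Text Combine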
ComfyUI/custom_nodes/ComfyUI_yanc/yanc.py
ADDED
@@ -0,0 +1,1594 @@
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import torch.nn.functional as NNF
from PIL import Image, ImageSequence, ImageOps
from PIL.PngImagePlugin import PngInfo
import random
import folder_paths
import hashlib
import numpy as np
import os
from pathlib import Path
from comfy.cli_args import args
from comfy_extras import nodes_mask as masks
import comfy.utils
import nodes
import json
import math
import datetime

yanc_root_name = "YANC"
yanc_sub_image = "/😼 Image"
yanc_sub_text = "/😼 Text"
yanc_sub_basics = "/😼 Basics"
yanc_sub_nik = "/😼 Noise Injection Sampler"
yanc_sub_masking = "/😼 Masking"
yanc_sub_utils = "/😼 Utils"
yanc_sub_experimental = "/😼 Experimental"

# ------------------------------------------------------------------------------------------------------------------ #
#                                                      Functions                                                      #
# ------------------------------------------------------------------------------------------------------------------ #

def permute_to_image(image):
    image = T.ToTensor()(image).unsqueeze(0)
    return image.permute([0, 2, 3, 1])[:, :, :, :3]


def to_binary_mask(image):
    images_sum = image.sum(axis=3)
    return torch.where(images_sum > 0, 1.0, 0.)


def print_brown(text):
    print("\033[33m" + text + "\033[0m")


def print_cyan(text):
    print("\033[96m" + text + "\033[0m")


def print_green(text):
    print("\033[92m" + text + "\033[0m")

def get_common_aspect_ratios():
    return [
        (4, 3),
        (3, 2),
        (16, 9),
        (1, 1),
        (21, 9),
        (9, 16),
        (3, 4),
        (2, 3),
        (5, 8)
    ]


def get_sdxl_resolutions():
    return [
        ("1:1", (1024, 1024)),
        ("3:4", (896, 1152)),
        ("5:8", (832, 1216)),
        ("9:16", (768, 1344)),
        ("9:21", (640, 1536)),
        ("4:3", (1152, 896)),
        ("3:2", (1216, 832)),
        ("16:9", (1344, 768)),
        ("21:9", (1536, 640))
    ]


def get_15_resolutions():
    return [
        ("1:1", (512, 512)),
        ("2:3", (512, 768)),
        ("3:4", (512, 682)),
        ("3:2", (768, 512)),
        ("16:9", (910, 512)),
        ("1.85:1", (952, 512)),
        ("2:1", (1024, 512)),
        ("2.39:1", (1224, 512))
    ]

def replace_dt_placeholders(string):
    dt = datetime.datetime.now()

    # Supported strftime placeholders (a set; only membership is needed).
    format_mapping = {
        "%d",  # Day
        "%m",  # Month
        "%Y",  # Year long
        "%y",  # Year short
        "%H",  # Hour 00 - 23
        "%I",  # Hour 00 - 12
        "%p",  # AM/PM
        "%M",  # Minute
        "%S"   # Second
    }

    for placeholder in format_mapping:
        if placeholder in string:
            string = string.replace(placeholder, dt.strftime(placeholder))

    return string

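# Illustrative example for the helper above: replace_dt_placeholders("img_%Y-%m-%d")
# could return "img_2024-06-01", depending on the current date and time.
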
def patch(model, multiplier):  # RescaleCFG functionality from the ComfyUI nodes
    def rescale_cfg(args):
        cond = args["cond"]
        uncond = args["uncond"]
        cond_scale = args["cond_scale"]
        sigma = args["sigma"]
        sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
        x_orig = args["input"]

        # rescale cfg has to be done on v-pred model output
        x = x_orig / (sigma * sigma + 1.0)
        cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
        uncond = ((x - (x_orig - uncond)) *
                  (sigma ** 2 + 1.0) ** 0.5) / (sigma)

        # rescalecfg
        x_cfg = uncond + cond_scale * (cond - uncond)
        ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
        ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)

        x_rescaled = x_cfg * (ro_pos / ro_cfg)
        x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg

        return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)

    m = model.clone()
    m.set_model_sampler_cfg_function(rescale_cfg)
    return (m, )

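# Note on `patch` above: `multiplier` blends between the plain CFG output (0.0)
# and the std-rescaled output (1.0); the NIKSampler below calls it with 0.65.
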
def blend_images(image1, image2, blend_mode, blend_rate):
    if blend_mode == 'multiply':
        return (1 - blend_rate) * image1 + blend_rate * (image1 * image2)
    elif blend_mode == 'add':
        return (1 - blend_rate) * image1 + blend_rate * (image1 + image2)
    elif blend_mode == 'overlay':
        blended_image = torch.where(
            image1 < 0.5, 2 * image1 * image2, 1 - 2 * (1 - image1) * (1 - image2))
        return (1 - blend_rate) * image1 + blend_rate * blended_image
    elif blend_mode == 'soft light':
        return (1 - blend_rate) * image1 + blend_rate * (soft_light_blend(image1, image2))
    elif blend_mode == 'hard light':
        return (1 - blend_rate) * image1 + blend_rate * (hard_light_blend(image1, image2))
    elif blend_mode == 'lighten':
        return (1 - blend_rate) * image1 + blend_rate * (lighten_blend(image1, image2))
    elif blend_mode == 'darken':
        return (1 - blend_rate) * image1 + blend_rate * (darken_blend(image1, image2))
    else:
        raise ValueError("Unsupported blend mode")


def soft_light_blend(base, blend):
    return 2 * base * blend + base**2 * (1 - 2 * blend)


def hard_light_blend(base, blend):
    return 2 * base * blend + (1 - 2 * base) * (1 - blend)


def lighten_blend(base, blend):
    return torch.max(base, blend)


def darken_blend(base, blend):
    return torch.min(base, blend)

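# Illustrative example for the blend helpers above (img_a/img_b are any image
# tensors): blend_images(img_a, img_b, 'overlay', 0.25) returns
# 0.75 * img_a + 0.25 * overlay(img_a, img_b) — blend_rate linearly
# interpolates between the first image and the fully blended result.
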
# ------------------------------------------------------------------------------------------------------------------ #
#                                                    Comfy classes                                                    #
# ------------------------------------------------------------------------------------------------------------------ #
class YANCRotateImage:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "rotation_angle": ("INT", {
                    "default": 0,
                    "min": -359,
                    "max": 359,
                    "step": 1,
                    "display": "number"})
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("image", "mask")

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_image

    def do_it(self, image, rotation_angle):
        samples = image.movedim(-1, 1)
        # F.get_image_size returns (width, height)
        width, height = F.get_image_size(samples)

        rotation_angle = rotation_angle * -1
        rotated_image = F.rotate(samples, angle=rotation_angle, expand=True)

        # Rotate a white canvas the same way; the expanded corners become the mask.
        empty_mask = Image.new('RGBA', (width, height), color=(255, 255, 255))
        rotated_mask = F.rotate(empty_mask, angle=rotation_angle, expand=True)

        img_out = rotated_image.movedim(1, -1)
        mask_out = to_binary_mask(permute_to_image(rotated_mask))

        return (img_out, mask_out)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCText:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {
                    "multiline": True,
                    "default": "",
                    "dynamicPrompts": True
                }),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_text

    def do_it(self, text):
        return (text,)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCTextCombine:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"forceInput": True}),
                "text_append": ("STRING", {"forceInput": True}),
                "delimiter": ("STRING", {"multiline": False, "default": ", "}),
                "add_empty_line": ("BOOLEAN", {"default": False})
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_text

    def do_it(self, text, text_append, delimiter, add_empty_line):
        if text_append.strip() == "":
            delimiter = ""

        str_list = [text, text_append]

        if add_empty_line:
            str_list = [text, "\n\n", text_append]

        return (delimiter.join(str_list),)

# ------------------------------------------------------------------------------------------------------------------ #

+
class YANCTextPickRandomLine:
|
300 |
+
def __init__(self):
|
301 |
+
pass
|
302 |
+
|
303 |
+
@classmethod
|
304 |
+
def INPUT_TYPES(s):
|
305 |
+
return {
|
306 |
+
"required": {
|
307 |
+
"text": ("STRING", {"forceInput": True}),
|
308 |
+
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
|
309 |
+
},
|
310 |
+
}
|
311 |
+
|
312 |
+
RETURN_TYPES = ("STRING",)
|
313 |
+
RETURN_NAMES = ("text",)
|
314 |
+
|
315 |
+
FUNCTION = "do_it"
|
316 |
+
|
317 |
+
CATEGORY = yanc_root_name + yanc_sub_text
|
318 |
+
|
319 |
+
def do_it(self, text, seed):
|
320 |
+
lines = text.splitlines()
|
321 |
+
random.seed(seed)
|
322 |
+
line = random.choice(lines)
|
323 |
+
|
324 |
+
return (line,)
|
325 |
+
|
326 |
+
# ------------------------------------------------------------------------------------------------------------------ #
|
327 |
+
|
328 |
+
|
class YANCClearText:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"forceInput": True}),
                "chance": ("FLOAT", {
                    "default": 0.0,
                    "min": 0.0,
                    "max": 1.0,
                    "step": 0.01,
                    "round": 0.001,
                    "display": "number"}),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_text

    def do_it(self, text, chance):
        dice = random.uniform(0, 1)

        if chance > dice:
            text = ""

        return (text,)

    @classmethod
    def IS_CHANGED(s, text, chance):
        return s.do_it(s, text, chance)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCTextReplace:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"forceInput": True}),
                "find": ("STRING", {
                    "multiline": False,
                    "default": "find"
                }),
                "replace": ("STRING", {
                    "multiline": False,
                    "default": "replace"
                }),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_text

    def do_it(self, text, find, replace):
        text = text.replace(find, replace)

        return (text,)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCTextRandomWeights:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"forceInput": True}),
                "min": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.0,
                    "max": 10.0,
                    "step": 0.1,
                    "round": 0.1,
                    "display": "number"}),
                "max": ("FLOAT", {
                    "default": 1.0,
                    "min": 0.0,
                    "max": 10.0,
                    "step": 0.1,
                    "round": 0.1,
                    "display": "number"}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_text

    def do_it(self, text, min, max, seed):
        lines = text.splitlines()
        count = 0
        out = ""

        random.seed(seed)

        for line in lines:
            count += 1
            out += "({}:{})".format(line, round(random.uniform(min, max), 1)
                                    ) + (", " if count < len(lines) else "")

        return (out,)

# ------------------------------------------------------------------------------------------------------------------ #

+
class YANCLoadImageAndFilename:
|
457 |
+
@classmethod
|
458 |
+
def INPUT_TYPES(s):
|
459 |
+
input_dir = folder_paths.get_input_directory()
|
460 |
+
# files = [f for f in os.listdir(input_dir) if os.path.isfile(
|
461 |
+
# os.path.join(input_dir, f))]
|
462 |
+
|
463 |
+
files = []
|
464 |
+
for root, dirs, filenames in os.walk(input_dir):
|
465 |
+
for filename in filenames:
|
466 |
+
full_path = os.path.join(root, filename)
|
467 |
+
relative_path = os.path.relpath(full_path, input_dir)
|
468 |
+
relative_path = relative_path.replace("\\", "/")
|
469 |
+
files.append(relative_path)
|
470 |
+
|
471 |
+
return {"required":
|
472 |
+
{"image": (sorted(files), {"image_upload": True}),
|
473 |
+
"strip_extension": ("BOOLEAN", {"default": True})}
|
474 |
+
}
|
475 |
+
|
476 |
+
CATEGORY = yanc_root_name + yanc_sub_image
|
477 |
+
|
478 |
+
RETURN_TYPES = ("IMAGE", "MASK", "STRING")
|
479 |
+
RETURN_NAMES = ("IMAGE", "MASK", "FILENAME")
|
480 |
+
|
481 |
+
FUNCTION = "do_it"
|
482 |
+
|
483 |
+
def do_it(self, image, strip_extension):
|
484 |
+
image_path = folder_paths.get_annotated_filepath(image)
|
485 |
+
img = Image.open(image_path)
|
486 |
+
output_images = []
|
487 |
+
output_masks = []
|
488 |
+
for i in ImageSequence.Iterator(img):
|
489 |
+
i = ImageOps.exif_transpose(i)
|
490 |
+
if i.mode == 'I':
|
491 |
+
i = i.point(lambda i: i * (1 / 255))
|
492 |
+
image = i.convert("RGB")
|
493 |
+
image = np.array(image).astype(np.float32) / 255.0
|
494 |
+
image = torch.from_numpy(image)[None,]
|
495 |
+
if 'A' in i.getbands():
|
496 |
+
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
|
497 |
+
mask = 1. - torch.from_numpy(mask)
|
498 |
+
else:
|
499 |
+
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
|
500 |
+
output_images.append(image)
|
501 |
+
output_masks.append(mask.unsqueeze(0))
|
502 |
+
|
503 |
+
if len(output_images) > 1:
|
504 |
+
output_image = torch.cat(output_images, dim=0)
|
505 |
+
output_mask = torch.cat(output_masks, dim=0)
|
506 |
+
else:
|
507 |
+
output_image = output_images[0]
|
508 |
+
output_mask = output_masks[0]
|
509 |
+
|
510 |
+
if strip_extension:
|
511 |
+
filename = Path(image_path).stem
|
512 |
+
else:
|
513 |
+
filename = Path(image_path).name
|
514 |
+
|
515 |
+
return (output_image, output_mask, filename,)
|
516 |
+
|
517 |
+
@classmethod
|
518 |
+
def IS_CHANGED(s, image, strip_extension):
|
519 |
+
image_path = folder_paths.get_annotated_filepath(image)
|
520 |
+
m = hashlib.sha256()
|
521 |
+
with open(image_path, 'rb') as f:
|
522 |
+
m.update(f.read())
|
523 |
+
return m.digest().hex()
|
524 |
+
|
525 |
+
@classmethod
|
526 |
+
def VALIDATE_INPUTS(s, image, strip_extension):
|
527 |
+
if not folder_paths.exists_annotated_filepath(image):
|
528 |
+
return "Invalid image file: {}".format(image)
|
529 |
+
|
530 |
+
return True
|
531 |
+
|
532 |
+
# ------------------------------------------------------------------------------------------------------------------ #
|
533 |
+
|
534 |
+
|
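# YANCSaveImage below extends the stock SaveImage node: optional subfolder,
# strftime placeholders in the prefix (via replace_dt_placeholders), an
# optional explicit filename input, and png or jpg output with optional
# workflow metadata.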
class YANCSaveImage:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"images": ("IMAGE", ),
                 "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                 "folder": ("STRING", {"default": ""}),
                 "overwrite_warning": ("BOOLEAN", {"default": False}),
                 "include_metadata": ("BOOLEAN", {"default": True}),
                 "extension": (["png", "jpg"],),
                 "quality": ("INT", {"default": 95, "min": 0, "max": 100}),
                 },
                "optional":
                {"filename_opt": ("STRING", {"forceInput": True})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ()
    FUNCTION = "do_it"

    OUTPUT_NODE = True

    CATEGORY = yanc_root_name + yanc_sub_image

    def do_it(self, images, overwrite_warning, include_metadata, extension, quality, filename_opt=None, folder=None, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None,):

        if folder:
            filename_prefix += self.prefix_append
            filename_prefix = os.sep.join([folder, filename_prefix])
        else:
            filename_prefix += self.prefix_append

        if "%" in filename_prefix:
            filename_prefix = replace_dt_placeholders(filename_prefix)

        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])

        results = list()
        for (batch_number, image) in enumerate(images):
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = None

            if not filename_opt:

                filename_with_batch_num = filename.replace(
                    "%batch_num%", str(batch_number))

                counter = 1

                if os.path.exists(full_output_folder) and os.listdir(full_output_folder):
                    # Continue numbering from the highest existing counter
                    # for this batch's filename.
                    filtered_filenames = list(filter(
                        lambda filename: filename.startswith(
                            filename_with_batch_num + "_")
                        and filename[len(filename_with_batch_num) + 1:-4].isdigit(),
                        os.listdir(full_output_folder)
                    ))

                    if filtered_filenames:
                        max_counter = max(
                            int(filename[len(filename_with_batch_num) + 1:-4])
                            for filename in filtered_filenames
                        )
                        counter = max_counter + 1

                file = f"{filename_with_batch_num}_{counter:05}.{extension}"
            else:
                if len(images) == 1:
                    file = f"{filename_opt}.{extension}"
                else:
                    raise Exception(
                        "Multiple images and filename detected: Images will overwrite themselves!")

            save_path = os.path.join(full_output_folder, file)

            if os.path.exists(save_path) and overwrite_warning:
                raise Exception("Filename already exists.")
            else:
                if extension == "png":
                    if not args.disable_metadata and include_metadata:
                        metadata = PngInfo()
                        if prompt is not None:
                            metadata.add_text("prompt", json.dumps(prompt))
                        if extra_pnginfo is not None:
                            for x in extra_pnginfo:
                                metadata.add_text(x, json.dumps(extra_pnginfo[x]))

                    img.save(save_path, pnginfo=metadata,
                             compress_level=self.compress_level)
                elif extension == "jpg":
                    if not args.disable_metadata and include_metadata:
                        metadata = {}

                        if prompt is not None:
                            metadata["prompt"] = prompt
                        if extra_pnginfo is not None:
                            for key, value in extra_pnginfo.items():
                                metadata[key] = value

                        metadata_json = json.dumps(metadata)
                        img.info["comment"] = metadata_json

                    img.save(save_path, quality=quality)

            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })

        return {"ui": {"images": results}}

# ------------------------------------------------------------------------------------------------------------------ #

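# YANCLoadImageFromFolder below picks an image from a folder under the input
# directory; a connected index wraps around via modulo, otherwise a random
# image is chosen.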
class YANCLoadImageFromFolder:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"image_folder": ("STRING", {"default": ""})
                 },
                "optional":
                {"index": ("INT",
                           {"default": -1,
                            "min": -1,
                            "max": 0xffffffffffffffff,
                            "forceInput": True})}
                }

    CATEGORY = yanc_root_name + yanc_sub_image

    RETURN_TYPES = ("IMAGE", "STRING")
    RETURN_NAMES = ("image", "file_name")
    FUNCTION = "do_it"

    def do_it(self, image_folder, index=-1):

        image_path = os.path.join(
            folder_paths.get_input_directory(), image_folder)

        # Get all files in the directory
        files = os.listdir(image_path)

        # Filter out only image files
        image_files = [file for file in files if file.endswith(
            ('.jpg', '.jpeg', '.png', '.webp'))]

        if index != -1:
            print_green("INFO: Index connected.")

            if index > len(image_files) - 1:
                index = index % len(image_files)
                print_green(
                    "INFO: Index too high, falling back to: " + str(index))

            image_file = image_files[index]
        else:
            print_green("INFO: Picking a random image.")
            image_file = random.choice(image_files)

        filename = Path(image_file).stem

        img_path = os.path.join(image_path, image_file)

        img = Image.open(img_path)
        img = ImageOps.exif_transpose(img)
        if img.mode == 'I':
            img = img.point(lambda i: i * (1 / 255))
        output_image = img.convert("RGB")
        output_image = np.array(output_image).astype(np.float32) / 255.0
        output_image = torch.from_numpy(output_image)[None,]

        return (output_image, filename)

    @classmethod
    def IS_CHANGED(s, image_folder, index):
        # Hash the folder listing so the node re-runs when the folder's
        # contents change (the folder path itself cannot be opened as a file).
        image_path = os.path.join(
            folder_paths.get_input_directory(), image_folder)
        m = hashlib.sha256()
        for name in sorted(os.listdir(image_path)):
            m.update(name.encode("utf-8"))
        return m.digest().hex()

# ------------------------------------------------------------------------------------------------------------------ #

class YANCIntToText:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"int": ("INT",
                         {"default": 0,
                          "min": 0,
                          "max": 0xffffffffffffffff,
                          "forceInput": True}),
                 "leading_zeros": ("BOOLEAN", {"default": False}),
                 "length": ("INT",
                            {"default": 5,
                             "min": 0,
                             "max": 5})
                 }
                }

    CATEGORY = yanc_root_name + yanc_sub_basics

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)
    FUNCTION = "do_it"

    def do_it(self, int, leading_zeros, length):

        text = str(int)

        if leading_zeros:
            text = text.zfill(length)

        return (text,)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCInt:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"seed": ("INT", {"default": 0, "min": 0,
                                  "max": 0xffffffffffffffff}), }
                }

    CATEGORY = yanc_root_name + yanc_sub_basics

    RETURN_TYPES = ("INT",)
    RETURN_NAMES = ("int",)
    FUNCTION = "do_it"

    def do_it(self, seed):

        return (seed,)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCFloatToInt:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"float": ("FLOAT", {"forceInput": True}),
                 "function": (["round", "floor", "ceil"],)
                 }
                }

    CATEGORY = yanc_root_name + yanc_sub_basics

    RETURN_TYPES = ("INT",)
    RETURN_NAMES = ("int",)
    FUNCTION = "do_it"

    def do_it(self, float, function):

        result = round(float)

        if function == "floor":
            result = math.floor(float)
        elif function == "ceil":
            result = math.ceil(float)

        return (int(result),)

# ------------------------------------------------------------------------------------------------------------------ #

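# YANCScaleImageToSide below scales so the chosen side hits `scale_to`,
# optionally snapping width and height down to a multiple of `modulo`; note
# the returned scale_ratio is the inverse factor, handy for scaling back up
# later in the workflow.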
class YANCScaleImageToSide:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "image": ("IMAGE",),
                    "scale_to": ("INT", {"default": 512}),
                    "side": (["shortest", "longest", "width", "height"],),
                    "interpolation": (["lanczos", "nearest", "bilinear", "bicubic", "area", "nearest-exact"],),
                    "modulo": ("INT", {"default": 0})
                },
                "optional":
                {
                    "mask_opt": ("MASK",),
                }
                }

    CATEGORY = yanc_root_name + yanc_sub_image

    RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "FLOAT",)
    RETURN_NAMES = ("image", "mask", "width", "height", "scale_ratio",)
    FUNCTION = "do_it"

    def do_it(self, image, scale_to, side, interpolation, modulo, mask_opt=None):

        image = image.movedim(-1, 1)

        image_height, image_width = image.shape[-2:]

        longer_side = "height" if image_height > image_width else "width"
        shorter_side = "height" if image_height < image_width else "width"

        new_height, new_width, scale_ratio = 0, 0, 0

        if side == "shortest":
            side = shorter_side
        elif side == "longest":
            side = longer_side

        if side == "width":
            scale_ratio = scale_to / image_width
        elif side == "height":
            scale_ratio = scale_to / image_height

        new_height = image_height * scale_ratio
        new_width = image_width * scale_ratio

        if modulo != 0:
            new_height = new_height - (new_height % modulo)
            new_width = new_width - (new_width % modulo)

        new_width = int(new_width)
        new_height = int(new_height)

        image = comfy.utils.common_upscale(image,
                                           new_width, new_height, interpolation, "center")

        if mask_opt is not None:
            mask_opt = mask_opt.unsqueeze(0)
            mask_opt = NNF.interpolate(mask_opt, size=(
                new_height, new_width), mode='bilinear', align_corners=False)

            mask_opt = mask_opt.squeeze(0)
            mask_opt = mask_opt.squeeze(0)

        image = image.movedim(1, -1)

        return (image, mask_opt, new_width, new_height, 1.0/scale_ratio)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCResolutionByAspectRatio:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "stable_diffusion": (["1.5", "SDXL"],),
                    "image": ("IMAGE",),
                },
                }

    CATEGORY = yanc_root_name + yanc_sub_image

    RETURN_TYPES = ("INT", "INT")
    RETURN_NAMES = ("width", "height",)
    FUNCTION = "do_it"

    def do_it(self, stable_diffusion, image):

        common_ratios = get_common_aspect_ratios()
        resolutionsSDXL = get_sdxl_resolutions()
        resolutions15 = get_15_resolutions()

        resolution = resolutions15 if stable_diffusion == "1.5" else resolutionsSDXL

        image = image.movedim(-1, 1)
        image_height, image_width = image.shape[-2:]

        gcd = math.gcd(image_width, image_height)
        aspect_ratio = image_width // gcd, image_height // gcd

        closest_ratio = min(common_ratios, key=lambda x: abs(
            x[1] / x[0] - aspect_ratio[1] / aspect_ratio[0]))

        closest_resolution = min(resolution, key=lambda res: abs(
            res[1][0] * aspect_ratio[1] - res[1][1] * aspect_ratio[0]))

        height, width = closest_resolution[1][1], closest_resolution[1][0]
        sd_version = stable_diffusion if stable_diffusion == "SDXL" else "SD 1.5"

        print_cyan(
            f"Orig. Resolution: {image_width}x{image_height}, Aspect Ratio: {closest_ratio[0]}:{closest_ratio[1]}, Picked resolution: {width}x{height} for {sd_version}")

        return (width, height,)

# ------------------------------------------------------------------------------------------------------------------ #

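# YANCNIKSampler (noise injection) below runs the first `inject_time` share of
# the steps normally, blends the intermediate latent with `latent_noise` at
# `noise_strength`, then finishes sampling from the injection step onward.
# With a mask, the result is composited back onto the unmasked base latent.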
class YANCNIKSampler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {"model": ("MODEL",),
                 "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                 "steps": ("INT", {"default": 30, "min": 1, "max": 10000}),
                 "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
                 "cfg_noise": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
                 "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                 "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                 "positive": ("CONDITIONING", ),
                 "negative": ("CONDITIONING", ),
                 "latent_image": ("LATENT", ),
                 "noise_strength": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1, "round": 0.01}),
                 },
                "optional":
                {
                    "latent_noise": ("LATENT", ),
                    "mask": ("MASK",)
                }
                }

    RETURN_TYPES = ("LATENT",)
    RETURN_NAMES = ("latent",)
    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_nik

    def do_it(self, model, seed, steps, cfg, cfg_noise, sampler_name, scheduler, positive, negative, latent_image, noise_strength, latent_noise=None, inject_time=0.5, denoise=1.0, mask=None):

        if latent_noise is None:
            raise ValueError(
                "NIKSampler needs a latent_noise input, e.g. from the Noise From Image node.")

        inject_at_step = round(steps * inject_time)
        print("Inject at step: " + str(inject_at_step))

        # An "empty" latent is one that is all zeros.
        empty_latent = not torch.any(latent_image["samples"])

        print_cyan("Sampling first step image.")
        samples_base_sampler = nodes.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                                     denoise=denoise, disable_noise=False, start_step=0, last_step=inject_at_step, force_full_denoise=True)

        if mask is not None and empty_latent:
            print_cyan(
                "Sampling full image for unmasked areas. You can avoid this step by providing a non empty latent.")
            samples_base_sampler2 = nodes.common_ksampler(
                model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0)

        samples_base_sampler = samples_base_sampler[0]

        if mask is not None and not empty_latent:
            samples_base_sampler = latent_image.copy()
            samples_base_sampler["samples"] = latent_image["samples"].clone()

        samples_out = latent_image.copy()
        samples_out["samples"] = latent_image["samples"].clone()

        samples_noise = latent_noise["samples"].clone()

        if samples_base_sampler["samples"].shape != samples_noise.shape:
            samples_noise = comfy.utils.common_upscale(
                samples_noise, samples_base_sampler["samples"].shape[3], samples_base_sampler["samples"].shape[2], 'bicubic', crop='center')

        # Blend the partially denoised latent with the injected noise latent.
        samples_o = samples_base_sampler["samples"] * (1 - noise_strength)
        samples_n = samples_noise * noise_strength

        samples_out["samples"] = samples_o + samples_n

        patched_model = patch(model=model, multiplier=0.65)[
            0] if round(cfg_noise, 1) > 8.0 else model

        print_cyan("Applying noise.")
        result = nodes.common_ksampler(patched_model, seed, steps, cfg_noise, sampler_name, scheduler, positive, negative, samples_out,
                                       denoise=denoise, disable_noise=False, start_step=inject_at_step, last_step=steps, force_full_denoise=False)[0]

        if mask is not None:
            print_cyan("Composing...")
            destination = latent_image["samples"].clone(
            ) if not empty_latent else samples_base_sampler2[0]["samples"].clone()
            source = result["samples"]
            result["samples"] = masks.composite(
                destination, source, 0, 0, mask, 8)

        return (result,)

# ------------------------------------------------------------------------------------------------------------------ #

class YANCNoiseFromImage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "image": ("IMAGE",),
                    "magnitude": ("FLOAT", {"default": 210.0, "min": 0.0, "max": 250.0, "step": 0.5, "round": 0.1}),
                    "smoothness": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.5, "round": 0.1}),
                    "noise_intensity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
                    "noise_resize_factor": ("INT", {"default": 2, "min": 0, "max": 5}),
                    "noise_blend_rate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.005, "round": 0.005}),
                    "saturation_correction": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.5, "step": 0.1, "round": 0.1}),
                    "blend_mode": (["off", "multiply", "add", "overlay", "soft light", "hard light", "lighten", "darken"],),
                    "blend_rate": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
                },
                "optional":
                {
                    "vae_opt": ("VAE", ),
                }
                }

    CATEGORY = yanc_root_name + yanc_sub_nik

    RETURN_TYPES = ("IMAGE", "LATENT")
    RETURN_NAMES = ("image", "latent")
    FUNCTION = "do_it"

    def do_it(self, image, magnitude, smoothness, noise_intensity, noise_resize_factor, noise_blend_rate, saturation_correction, blend_mode, blend_rate, vae_opt=None):

        # magnitude: The alpha for the elastic transform. Magnitude of displacements.
        # smoothness: The sigma for the elastic transform. Smoothness of displacements.
        # noise_intensity: Multiplier for the torch noise.
        # noise_resize_factor: Multiplier to enlarge the generated noise.
        # noise_blend_rate: Blend rate between the elastic and the noise.
        # saturation_correction: Well, for saturation correction.
        # blend_mode: Different blending modes to blend over batched images.
        # blend_rate: The strength of the blending.

        noise_blend_rate = noise_blend_rate / 2.25

        if blend_mode != "off":
            blended_image = image[0:1]

            for i in range(1, image.size(0)):
                blended_image = blend_images(
                    blended_image, image[i:i+1], blend_mode=blend_mode, blend_rate=blend_rate)

            max_value = torch.max(blended_image)
            blended_image /= max_value

            image = blended_image

        noisy_image = torch.randn_like(image) * noise_intensity
        noisy_image = noisy_image.movedim(-1, 1)

        image = image.movedim(-1, 1)
        image_height, image_width = image.shape[-2:]

        r_mean = torch.mean(image[:, 0, :, :])
        g_mean = torch.mean(image[:, 1, :, :])
        b_mean = torch.mean(image[:, 2, :, :])

        fill_value = (r_mean.item(), g_mean.item(), b_mean.item())

        elastic_transformer = T.ElasticTransform(
            alpha=float(magnitude), sigma=float(smoothness), fill=fill_value)
        transformed_img = elastic_transformer(image)

        if saturation_correction != 1.0:
            transformed_img = F.adjust_saturation(
                transformed_img, saturation_factor=saturation_correction)

        if noise_resize_factor > 0:
            resize_cropper = T.RandomResizedCrop(
                size=(image_height // noise_resize_factor, image_width // noise_resize_factor))

            resized_crop = resize_cropper(noisy_image)

            resized_img = T.Resize(
                size=(image_height, image_width))(resized_crop)
            resized_img = resized_img.movedim(1, -1)
        else:
            resized_img = noisy_image.movedim(1, -1)

        if image.size(0) == 1:
            result = transformed_img.squeeze(0).permute(
                1, 2, 0) + (resized_img * noise_blend_rate)
        else:
            result = transformed_img.squeeze(0).permute(
                [0, 2, 3, 1])[:, :, :, :3] + (resized_img * noise_blend_rate)

        latent = None

        if vae_opt is not None:
            latent = vae_opt.encode(result[:, :, :, :3])

        return (result, {"samples": latent})

# ------------------------------------------------------------------------------------------------------------------ #

class YANCMaskCurves:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "mask": ("MASK",),
                    "low_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
                    "mid_low_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
                    "mid_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
                    "high_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
                    "brightness": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
                },
                }

    CATEGORY = yanc_root_name + yanc_sub_masking

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "do_it"

    def do_it(self, mask, low_value_factor, mid_low_value_factor, mid_value_factor, high_value_factor, brightness):

        low_mask = (mask < 0.25).float()
        mid_low_mask = ((mask >= 0.25) & (mask < 0.5)).float()
        mid_mask = ((mask >= 0.5) & (mask < 0.75)).float()
        high_mask = (mask >= 0.75).float()

        low_mask = low_mask * (mask * low_value_factor)
        mid_low_mask = mid_low_mask * (mask * mid_low_value_factor)
        mid_mask = mid_mask * (mask * mid_value_factor)
        high_mask = high_mask * (mask * high_value_factor)

        final_mask = low_mask + mid_low_mask + mid_mask + high_mask
        final_mask = final_mask * brightness
        final_mask = torch.clamp(final_mask, 0, 1)

        return (final_mask,)


# ------------------------------------------------------------------------------------------------------------------ #

class YANCLightSourceMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "image": ("IMAGE",),
                    "threshold": ("FLOAT", {"default": 0.33, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
                },
                }

    CATEGORY = yanc_root_name + yanc_sub_masking

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "do_it"

    def do_it(self, image, threshold):
        batch_size, height, width, _ = image.shape

        kernel_size = max(33, int(0.05 * min(height, width)))
        kernel_size = kernel_size if kernel_size % 2 == 1 else kernel_size + 1
        sigma = max(1.0, kernel_size / 5.0)

        masks = []

        for i in range(batch_size):
            mask = image[i].permute(2, 0, 1)
            mask = torch.mean(mask, dim=0)

            mask = torch.where(mask > threshold, mask * 3.0,
                               torch.tensor(0.0, device=mask.device))
            mask.clamp_(min=0.0, max=1.0)

            mask = mask.unsqueeze(0).unsqueeze(0)

            blur = T.GaussianBlur(kernel_size=(
                kernel_size, kernel_size), sigma=(sigma, sigma))
            mask = blur(mask)

            mask = mask.squeeze(0).squeeze(0)
            masks.append(mask)

        masks = torch.stack(masks)

        return (masks,)


# ------------------------------------------------------------------------------------------------------------------ #

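# YANCNormalMapLighting below relights a diffuse map with a normal map using a
# Blinn-Phong-style model: Lambertian diffuse from N·L plus a specular term
# from the half vector between the light and camera directions.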
class YANCNormalMapLighting:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "diffuse_map": ("IMAGE",),
                "normal_map": ("IMAGE",),
                "specular_map": ("IMAGE",),
                "light_yaw": ("FLOAT", {"default": 45, "min": -180, "max": 180, "step": 1}),
                "light_pitch": ("FLOAT", {"default": 30, "min": -90, "max": 90, "step": 1}),
                "specular_power": ("FLOAT", {"default": 32, "min": 1, "max": 200, "step": 1}),
                "ambient_light": ("FLOAT", {"default": 0.50, "min": 0, "max": 1, "step": 0.01}),
                "NormalDiffuseStrength": ("FLOAT", {"default": 1.00, "min": 0, "max": 5.0, "step": 0.01}),
                "SpecularHighlightsStrength": ("FLOAT", {"default": 1.00, "min": 0, "max": 5.0, "step": 0.01}),
                "TotalGain": ("FLOAT", {"default": 1.00, "min": 0, "max": 2.0, "step": 0.01}),
                "color": ("INT", {"default": 0xFFFFFF, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE",)

    FUNCTION = "do_it"

    CATEGORY = yanc_root_name + yanc_sub_image

    def resize_tensor(self, tensor, size):
        return torch.nn.functional.interpolate(tensor, size=size, mode='bilinear', align_corners=False)

    def do_it(self, diffuse_map, normal_map, specular_map, light_yaw, light_pitch, specular_power, ambient_light, NormalDiffuseStrength, SpecularHighlightsStrength, TotalGain, color, mask=None,):
        if mask is None:
            mask = torch.ones_like(diffuse_map[:, :, :, 0])

        diffuse_tensor = diffuse_map.permute(
            0, 3, 1, 2)
        # Normal maps store directions in [0, 1]; remap to vectors in [-1, 1].
        normal_tensor = normal_map.permute(
            0, 3, 1, 2) * 2.0 - 1.0
        specular_tensor = specular_map.permute(
            0, 3, 1, 2)
        mask_tensor = mask.unsqueeze(1)
        mask_tensor = mask_tensor.expand(-1, 3, -1, -1)

        target_size = (diffuse_tensor.shape[2], diffuse_tensor.shape[3])
        normal_tensor = self.resize_tensor(normal_tensor, target_size)
        specular_tensor = self.resize_tensor(specular_tensor, target_size)
        mask_tensor = self.resize_tensor(mask_tensor, target_size)

        normal_tensor = torch.nn.functional.normalize(normal_tensor, dim=1)

        light_direction = self.euler_to_vector(light_yaw, light_pitch, 0)
        light_direction = light_direction.view(1, 3, 1, 1)

        camera_direction = self.euler_to_vector(0, 0, 0)
        camera_direction = camera_direction.view(1, 3, 1, 1)

        light_color = self.int_to_rgb(color)
        light_color_tensor = torch.tensor(
            light_color).view(1, 3, 1, 1)

        diffuse = torch.sum(normal_tensor * light_direction,
                            dim=1, keepdim=True)
        diffuse = torch.clamp(diffuse, 0, 1)
        diffuse = diffuse * light_color_tensor

        half_vector = torch.nn.functional.normalize(
            light_direction + camera_direction, dim=1)
        specular = torch.sum(normal_tensor * half_vector, dim=1, keepdim=True)
        specular = torch.pow(torch.clamp(specular, 0, 1), specular_power)

        specular = specular * light_color_tensor

        # Compare spatial dimensions only, not the full 4-D shape.
        if diffuse.shape[2:] != target_size:
            diffuse = self.resize_tensor(diffuse, target_size)
        if specular.shape[2:] != target_size:
            specular = self.resize_tensor(specular, target_size)

        output_tensor = (diffuse_tensor * (ambient_light + diffuse * NormalDiffuseStrength) +
                         specular_tensor * specular * SpecularHighlightsStrength) * TotalGain

        output_tensor = output_tensor * mask_tensor + \
            diffuse_tensor * (1 - mask_tensor)

        output_tensor = output_tensor.permute(
            0, 2, 3, 1)

        return (output_tensor,)

    def euler_to_vector(self, yaw, pitch, roll):
        yaw_rad = np.radians(yaw)
        pitch_rad = np.radians(pitch)
        roll_rad = np.radians(roll)

        cos_pitch = np.cos(pitch_rad)
        sin_pitch = np.sin(pitch_rad)
        cos_yaw = np.cos(yaw_rad)
        sin_yaw = np.sin(yaw_rad)

        direction = np.array([
            sin_yaw * cos_pitch,
            sin_pitch,
            cos_pitch * cos_yaw
        ])

        return torch.from_numpy(direction).float()

    def int_to_rgb(self, color_int):
        r = (color_int >> 16) & 0xFF
        g = (color_int >> 8) & 0xFF
        b = color_int & 0xFF

        return (r / 255.0, g / 255.0, b / 255.0)


# ------------------------------------------------------------------------------------------------------------------ #

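# YANCRGBColor below brightens or darkens a color while roughly preserving its
# hue: `plus_minus` is distributed over the channels in proportion to their
# share of the total, clamped so no channel leaves the 0-255 range.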
class YANCRGBColor:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "red": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                    "green": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                    "blue": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                    "plus_minus": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
                },
                }

    CATEGORY = yanc_root_name + yanc_sub_utils

    RETURN_TYPES = ("INT", "INT", "INT", "INT", "STRING",)
    RETURN_NAMES = ("int", "red", "green", "blue", "hex",)
    FUNCTION = "do_it"

    def do_it(self, red, green, blue, plus_minus):
        total = red + green + blue

        r_ratio = red / total if total != 0 else 0
        g_ratio = green / total if total != 0 else 0
        b_ratio = blue / total if total != 0 else 0

        if plus_minus > 0:
            max_plus_minus = min((255 - red) / r_ratio if r_ratio > 0 else float('inf'),
                                 (255 - green) / g_ratio if g_ratio > 0 else float('inf'),
                                 (255 - blue) / b_ratio if b_ratio > 0 else float('inf'))
            effective_plus_minus = min(plus_minus, max_plus_minus)
        else:
            max_plus_minus = min(red / r_ratio if r_ratio > 0 else float('inf'),
                                 green / g_ratio if g_ratio > 0 else float('inf'),
                                 blue / b_ratio if b_ratio > 0 else float('inf'))
            effective_plus_minus = max(plus_minus, -max_plus_minus)

        new_r = red + effective_plus_minus * r_ratio
        new_g = green + effective_plus_minus * g_ratio
        new_b = blue + effective_plus_minus * b_ratio

        new_r = max(0, min(255, round(new_r)))
        new_g = max(0, min(255, round(new_g)))
        new_b = max(0, min(255, round(new_b)))

        color = (new_r << 16) | (new_g << 8) | new_b

        hex_color = "#{:02x}{:02x}{:02x}".format(
            int(new_r), int(new_g), int(new_b)).upper()

        return (color, new_r, new_g, new_b, hex_color)


# ------------------------------------------------------------------------------------------------------------------ #

class YANCGetMeanColor:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "image": ("IMAGE",),
                    "amplify": ("BOOLEAN", {"default": False})
                },
                "optional":
                {
                    "mask_opt": ("MASK",),
                },
                }

    CATEGORY = yanc_root_name + yanc_sub_utils

    RETURN_TYPES = ("INT", "INT", "INT", "INT", "STRING")
    RETURN_NAMES = ("int", "red", "green", "blue", "hex")
    FUNCTION = "do_it"

    def do_it(self, image, amplify, mask_opt=None):
        masked_image = image.clone()

        if mask_opt is not None:
            if mask_opt.shape[1:3] != image.shape[1:3]:
                raise ValueError(
                    "Mask and image spatial dimensions must match.")

            mask_opt = mask_opt.unsqueeze(-1)
            masked_image = masked_image * mask_opt

            num_masked_pixels = torch.sum(mask_opt)
            if num_masked_pixels == 0:
                raise ValueError(
                    "No masked pixels found in the image. Please set a mask.")

            sum_r = torch.sum(masked_image[:, :, :, 0])
            sum_g = torch.sum(masked_image[:, :, :, 1])
            sum_b = torch.sum(masked_image[:, :, :, 2])

            r_mean = sum_r / num_masked_pixels
            g_mean = sum_g / num_masked_pixels
            b_mean = sum_b / num_masked_pixels
        else:
            r_mean = torch.mean(masked_image[:, :, :, 0])
            g_mean = torch.mean(masked_image[:, :, :, 1])
            b_mean = torch.mean(masked_image[:, :, :, 2])

        r_mean_255 = r_mean.item() * 255.0
        g_mean_255 = g_mean.item() * 255.0
        b_mean_255 = b_mean.item() * 255.0

        if amplify:
            highest_value = max(r_mean_255, g_mean_255, b_mean_255)
            diff_to_max = 255.0 - highest_value

            amp_factor = 1.0

            r_mean_255 += diff_to_max * amp_factor * \
                (r_mean_255 / highest_value)
            g_mean_255 += diff_to_max * amp_factor * \
                (g_mean_255 / highest_value)
            b_mean_255 += diff_to_max * amp_factor * \
                (b_mean_255 / highest_value)

        r_mean_255 = min(max(r_mean_255, 0), 255)
        g_mean_255 = min(max(g_mean_255, 0), 255)
        b_mean_255 = min(max(b_mean_255, 0), 255)

        fill_value = (int(r_mean_255) << 16) + \
            (int(g_mean_255) << 8) + int(b_mean_255)

        hex_color = "#{:02x}{:02x}{:02x}".format(
            int(r_mean_255), int(g_mean_255), int(b_mean_255)).upper()

        return (fill_value, int(r_mean_255), int(g_mean_255), int(b_mean_255), hex_color,)


# ------------------------------------------------------------------------------------------------------------------ #

class YANCLayerWeights:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                {
                    "layer_0": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_1": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_2": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_3": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_4": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_5": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_6": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_7": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_8": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_9": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_10": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                    "layer_11": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
                }
                }

    CATEGORY = yanc_root_name + yanc_sub_experimental

    RETURN_TYPES = ("STRING", "STRING")
    RETURN_NAMES = ("layer_weights", "help")
    FUNCTION = "do_it"

    def do_it(self, layer_0, layer_1, layer_2, layer_3, layer_4, layer_5, layer_6, layer_7, layer_8, layer_9, layer_10, layer_11,):
        result = f"0:{layer_0:g}, 1:{layer_1:g}, 2:{layer_2:g}, 3:{layer_3:g}, 4:{layer_4:g}, 5:{layer_5:g}, 6:{layer_6:g}, 7:{layer_7:g}, 8:{layer_8:g}, 9:{layer_9:g}, 10:{layer_10:g}, 11:{layer_11:g}"

        help = """layer_3: Composition
layer_6: Style
"""

        return (result, help)


# ------------------------------------------------------------------------------------------------------------------ #
1517 |
+
NODE_CLASS_MAPPINGS = {
|
1518 |
+
# Image
|
1519 |
+
"> Rotate Image": YANCRotateImage,
|
1520 |
+
"> Scale Image to Side": YANCScaleImageToSide,
|
1521 |
+
"> Resolution by Aspect Ratio": YANCResolutionByAspectRatio,
|
1522 |
+
"> Load Image": YANCLoadImageAndFilename,
|
1523 |
+
"> Save Image": YANCSaveImage,
|
1524 |
+
"> Load Image From Folder": YANCLoadImageFromFolder,
|
1525 |
+
"> Normal Map Lighting": YANCNormalMapLighting,
|
1526 |
+
|
1527 |
+
# Text
|
1528 |
+
"> Text": YANCText,
|
1529 |
+
"> Text Combine": YANCTextCombine,
|
1530 |
+
"> Text Pick Random Line": YANCTextPickRandomLine,
|
1531 |
+
"> Clear Text": YANCClearText,
|
1532 |
+
"> Text Replace": YANCTextReplace,
|
1533 |
+
"> Text Random Weights": YANCTextRandomWeights,
|
1534 |
+
|
1535 |
+
# Basics
|
1536 |
+
"> Int to Text": YANCIntToText,
|
1537 |
+
"> Int": YANCInt,
|
1538 |
+
"> Float to Int": YANCFloatToInt,
|
1539 |
+
|
1540 |
+
# Noise Injection Sampler
|
1541 |
+
"> NIKSampler": YANCNIKSampler,
|
1542 |
+
"> Noise From Image": YANCNoiseFromImage,
|
1543 |
+
|
1544 |
+
# Masking
|
1545 |
+
"> Mask Curves": YANCMaskCurves,
|
1546 |
+
"> Light Source Mask": YANCLightSourceMask,
|
1547 |
+
|
1548 |
+
# Utils
|
1549 |
+
"> Get Mean Color": YANCGetMeanColor,
|
1550 |
+
"> RGB Color": YANCRGBColor,
|
1551 |
+
|
1552 |
+
# Experimental
|
1553 |
+
"> Layer Weights (for IPAMS)": YANCLayerWeights,
|
1554 |
+
}
|
1555 |
+
|
1556 |
+
# A dictionary that contains the friendly/humanly readable titles for the nodes
|
1557 |
+
NODE_DISPLAY_NAME_MAPPINGS = {
|
1558 |
+
# Image
|
1559 |
+
"> Rotate Image": "😼> Rotate Image",
|
1560 |
+
"> Scale Image to Side": "😼> Scale Image to Side",
|
1561 |
+
"> Resolution by Aspect Ratio": "😼> Resolution by Aspect Ratio",
|
1562 |
+
"> Load Image": "😼> Load Image",
|
1563 |
+
"> Save Image": "😼> Save Image",
|
1564 |
+
"> Load Image From Folder": "😼> Load Image From Folder",
|
1565 |
+
"> Normal Map Lighting": "😼> Normal Map Lighting",
|
1566 |
+
|
1567 |
+
# Text
|
1568 |
+
"> Text": "😼> Text",
|
1569 |
+
"> Text Combine": "😼> Text Combine",
|
1570 |
+
"> Text Pick Random Line": "😼> Text Pick Random Line",
|
1571 |
+
"> Clear Text": "😼> Clear Text",
|
1572 |
+
"> Text Replace": "😼> Text Replace",
|
1573 |
+
"> Text Random Weights": "😼> Text Random Weights",
|
1574 |
+
|
1575 |
+
# Basics
|
1576 |
+
"> Int to Text": "😼> Int to Text",
|
1577 |
+
"> Int": "😼> Int",
|
1578 |
+
"> Float to Int": "😼> Float to Int",
|
1579 |
+
|
1580 |
+
# Noise Injection Sampler
|
1581 |
+
"> NIKSampler": "😼> NIKSampler",
|
1582 |
+
"> Noise From Image": "😼> Noise From Image",
|
1583 |
+
|
1584 |
+
# Masking
|
1585 |
+
"> Mask Curves": "😼> Mask Curves",
|
1586 |
+
"> Light Source Mask": "😼> Light Source Mask",
|
1587 |
+
|
1588 |
+
# Utils
|
1589 |
+
"> Get Mean Color": "😼> Get Mean Color",
|
1590 |
+
"> RGB Color": "😼> RGB Color",
|
1591 |
+
|
1592 |
+
# Experimental
|
1593 |
+
"> Layer Weights (for IPAMS)": "😼> Layer Weights (for IPAMS)",
|
1594 |
+
}
|
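
The weight string this node emits is easy to predict: `:g` formatting drops trailing zeros, so 1.0 renders as 1. A minimal standalone sketch (outside ComfyUI, with hypothetical values) of the same formatting:

# Hypothetical standalone rendition of the string YANCLayerWeights.do_it builds.
weights = {f"layer_{i}": 0.0 for i in range(12)}
weights["layer_3"] = 1.0   # composition, per the node's help text
weights["layer_6"] = 0.5   # style, per the node's help text
result = ", ".join(f"{i}:{weights[f'layer_{i}']:g}" for i in range(12))
print(result)  # 0:0, 1:0, 2:0, 3:1, 4:0, 5:0, 6:0.5, 7:0, 8:0, 9:0, 10:0, 11:0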
ComfyUI/custom_nodes/img2txt-comfyui-nodes/__init__.py
ADDED
@@ -0,0 +1,9 @@
from .src.img2txt_node import Img2TxtNode

NODE_CLASS_MAPPINGS = {
    "img2txt BLIP/Llava Multimodel Tagger": Img2TxtNode,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "img2txt BLIP/Llava Multimodel Tagger": "Image to Text - Auto Caption"
}
WEB_DIRECTORY = "./web"
ComfyUI/custom_nodes/img2txt-comfyui-nodes/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (400 Bytes).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/pyproject.toml
ADDED
@@ -0,0 +1,15 @@
[project]
name = "img2txt-comfyui-nodes"
description = "Get general description or specify questions to ask about images (medium, art style, background, etc.). Supports Chinese 🇨🇳 questions via MiniCPM model."
version = "1.1.4"
license = "LICENSE"
dependencies = ["transformers>=4.36.0", "bitsandbytes>=0.43.0", "timm>=1.0.7", "sentencepiece==0.1.99", "accelerate>=0.3.0", "deepspeed"]

[project.urls]
Repository = "https://github.com/christian-byrne/img2txt-comfyui-nodes"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "christian-byrne"
DisplayName = "Img2txt - Auto Caption"
Icon = "https://img.icons8.com/?size=100&id=49374&format=png&color=000000"
ComfyUI/custom_nodes/img2txt-comfyui-nodes/requirements.txt
ADDED
@@ -0,0 +1,6 @@
transformers>=4.36.0
bitsandbytes>=0.43.0
timm>=1.0.7
sentencepiece
accelerate>=0.3.0
deepspeed
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__init__.py
ADDED
File without changes
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/blip_img2txt.cpython-310.pyc
ADDED
Binary file (2.04 kB).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/img2txt_node.cpython-310.pyc
ADDED
Binary file (5.52 kB).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/img_tensor_utils.cpython-310.pyc
ADDED
Binary file (4.43 kB).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/llava_img2txt.cpython-310.pyc
ADDED
Binary file (5.12 kB).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/__pycache__/mini_cpm_img2txt.cpython-310.pyc
ADDED
Binary file (1.8 kB).
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/blip_img2txt.py
ADDED
@@ -0,0 +1,81 @@
from PIL import Image
from transformers import (
    BlipProcessor,
    BlipForConditionalGeneration,
    BlipConfig,
    BlipTextConfig,
    BlipVisionConfig,
)

import torch
import model_management


class BLIPImg2Txt:
    def __init__(
        self,
        conditional_caption: str,
        min_words: int,
        max_words: int,
        temperature: float,
        repetition_penalty: float,
        search_beams: int,
        model_id: str = "Salesforce/blip-image-captioning-large",
    ):
        self.conditional_caption = conditional_caption
        self.model_id = model_id

        # Determine do_sample and num_beams
        if temperature > 1.1 or temperature < 0.90:
            do_sample = True
            num_beams = 1  # Sampling does not use beam search
        else:
            do_sample = False
            num_beams = (
                search_beams if search_beams > 1 else 1
            )  # Use beam search if num_beams > 1

        # Initialize text config kwargs
        self.text_config_kwargs = {
            "do_sample": do_sample,
            "max_length": max_words,
            "min_length": min_words,
            "repetition_penalty": repetition_penalty,
            "padding": "max_length",
        }
        if not do_sample:
            self.text_config_kwargs["temperature"] = temperature
            self.text_config_kwargs["num_beams"] = num_beams

    def generate_caption(self, image: Image.Image) -> str:
        if image.mode != "RGB":
            image = image.convert("RGB")

        processor = BlipProcessor.from_pretrained(self.model_id)

        # Update and apply configurations
        config_text = BlipTextConfig.from_pretrained(self.model_id)
        config_text.update(self.text_config_kwargs)
        config_vision = BlipVisionConfig.from_pretrained(self.model_id)
        config = BlipConfig.from_text_vision_configs(config_text, config_vision)

        model = BlipForConditionalGeneration.from_pretrained(
            self.model_id,
            config=config,
            torch_dtype=torch.float16,
        ).to(model_management.get_torch_device())

        inputs = processor(
            image,
            self.conditional_caption,
            return_tensors="pt",
        ).to(model_management.get_torch_device(), torch.float16)

        with torch.no_grad():
            out = model.generate(**inputs)
            ret = processor.decode(out[0], skip_special_tokens=True)

        del model
        torch.cuda.empty_cache()

        return ret
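
The constructor's branching is worth spelling out: temperatures close to 1.0 are treated as "no sampling intended" and routed to (optional) beam search, while anything outside [0.90, 1.1] switches to stochastic sampling. A minimal standalone sketch of just that rule:

# Illustration of the do_sample rule in BLIPImg2Txt.__init__ (values are arbitrary).
for temperature in (0.7, 1.0, 1.5):
    do_sample = temperature > 1.1 or temperature < 0.90
    print(temperature, "-> sampling" if do_sample else "-> beam search")
# 0.7 -> sampling, 1.0 -> beam search, 1.5 -> sampling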
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/description_classifier.py
ADDED
@@ -0,0 +1,8 @@
#!pip install transformers[sentencepiece]
# from transformers import pipeline
# text = "Angela Merkel is a politician in Germany and leader of the CDU"
# hypothesis_template = "This text is about {}"
# classes_verbalized = ["politics", "economy", "entertainment", "environment"]
# zeroshot_classifier = pipeline("zero-shot-classification", model="MoritzLaurer/deberta-v3-large-zeroshot-v2.0") # change the model identifier here
# output = zeroshot_classifier(text, classes_verbalized, hypothesis_template=hypothesis_template, multi_label=False)
# print(output)
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/img2txt_node.py
ADDED
@@ -0,0 +1,209 @@
"""
@author: christian-byrne
@title: Img2Txt auto captioning. Choose from models: BLIP, Llava, MiniCPM, MS-GIT. Use model combos and merge results. Specify questions to ask about images (medium, art style, background). Supports Chinese 🇨🇳 questions via MiniCPM.
@nickname: Image to Text - Auto Caption
"""

import torch
from torchvision import transforms

from .img_tensor_utils import TensorImgUtils
from .llava_img2txt import LlavaImg2Txt
from .blip_img2txt import BLIPImg2Txt
from .mini_cpm_img2txt import MiniPCMImg2Txt

from typing import Tuple


class Img2TxtNode:
    CATEGORY = "img2txt"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_image": ("IMAGE",),
            },
            "optional": {
                "use_blip_model": (
                    "BOOLEAN",
                    {
                        "default": True,
                        "label_on": "Use BLIP (Requires 2Gb Disk)",
                        "label_off": "Don't use BLIP",
                    },
                ),
                "use_llava_model": (
                    "BOOLEAN",
                    {
                        "default": False,
                        "label_on": "Use Llava (Requires 15Gb Disk)",
                        "label_off": "Don't use Llava",
                    },
                ),
                "use_mini_pcm_model": (
                    "BOOLEAN",
                    {
                        "default": False,
                        "label_on": "Use MiniCPM (Requires 6Gb Disk)",
                        "label_off": "Don't use MiniCPM",
                    },
                ),
                "use_all_models": (
                    "BOOLEAN",
                    {
                        "default": False,
                        "label_on": "Use all models and combine outputs (Total Size: 20+Gb)",
                        "label_off": "Use selected models only",
                    },
                ),
                "blip_caption_prefix": (
                    "STRING",
                    {
                        "default": "a photograph of",
                    },
                ),
                "prompt_questions": (
                    "STRING",
                    {
                        "default": "What is the subject of this image?\nWhat are the mediums used to make this?\nWhat are the artistic styles this is reminiscent of?\nWhich famous artists is this reminiscent of?\nHow sharp or detailed is this image?\nWhat is the environment and background of this image?\nWhat are the objects in this image?\nWhat is the composition of this image?\nWhat is the color palette in this image?\nWhat is the lighting in this image?",
                        "multiline": True,
                    },
                ),
                "temperature": (
                    "FLOAT",
                    {
                        "default": 0.8,
                        "min": 0.1,
                        "max": 2.0,
                        "step": 0.01,
                        "display": "slider",
                    },
                ),
                "repetition_penalty": (
                    "FLOAT",
                    {
                        "default": 1.2,
                        "min": 0.1,
                        "max": 2.0,
                        "step": 0.01,
                        "display": "slider",
                    },
                ),
                "min_words": ("INT", {"default": 36}),
                "max_words": ("INT", {"default": 128}),
                "search_beams": ("INT", {"default": 5}),
                "exclude_terms": (
                    "STRING",
                    {
                        "default": "watermark, text, writing",
                    },
                ),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
                "extra_pnginfo": "EXTRA_PNGINFO",
                "output_text": (
                    "STRING",
                    {
                        "default": "",
                    },
                ),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("caption",)
    FUNCTION = "main"
    OUTPUT_NODE = True

    def main(
        self,
        input_image: torch.Tensor,  # [Batch_n, H, W, 3-channel]
        use_blip_model: bool,
        use_llava_model: bool,
        use_all_models: bool,
        use_mini_pcm_model: bool,
        blip_caption_prefix: str,
        prompt_questions: str,
        temperature: float,
        repetition_penalty: float,
        min_words: int,
        max_words: int,
        search_beams: int,
        exclude_terms: str,
        output_text: str = "",
        unique_id=None,
        extra_pnginfo=None,
    ) -> Tuple[str, ...]:
        raw_image = transforms.ToPILImage()(
            TensorImgUtils.convert_to_type(input_image, "CHW")
        ).convert("RGB")

        if blip_caption_prefix == "":
            blip_caption_prefix = "a photograph of"

        captions = []
        if use_all_models or use_blip_model:
            blip = BLIPImg2Txt(
                conditional_caption=blip_caption_prefix,
                min_words=min_words,
                max_words=max_words,
                temperature=temperature,
                repetition_penalty=repetition_penalty,
                search_beams=search_beams,
            )
            captions.append(blip.generate_caption(raw_image))

        if use_all_models or use_llava_model:
            llava_questions = prompt_questions.split("\n")
            llava_questions = [
                q
                for q in llava_questions
                if q != "" and q != " " and q != "\n" and q != "\n\n"
            ]
            if len(llava_questions) > 0:
                llava = LlavaImg2Txt(
                    question_list=llava_questions,
                    model_id="llava-hf/llava-1.5-7b-hf",
                    use_4bit_quantization=True,
                    use_low_cpu_mem=True,
                    use_flash2_attention=False,
                    max_tokens_per_chunk=300,
                )
                captions.append(llava.generate_caption(raw_image))

        if use_all_models or use_mini_pcm_model:
            mini_pcm = MiniPCMImg2Txt(
                question_list=prompt_questions.split("\n"),
                temperature=temperature,
            )
            captions.append(mini_pcm.generate_captions(raw_image))

        out_string = self.exclude(exclude_terms, self.merge_captions(captions))

        return {"ui": {"text": out_string}, "result": (out_string,)}

    def merge_captions(self, captions: list) -> str:
        """Merge captions from multiple models into one string.
        Necessary because we can expect the generated captions will generally
        be comma-separated fragments ordered by relevance - so combine
        fragments in an alternating order."""
        merged_caption = ""
        captions = [c.split(",") for c in captions]
        for i in range(max(len(c) for c in captions)):
            for j in range(len(captions)):
                if i < len(captions[j]) and captions[j][i].strip() != "":
                    merged_caption += captions[j][i].strip() + ", "
        return merged_caption

    def exclude(self, exclude_terms: str, out_string: str) -> str:
        # https://huggingface.co/Salesforce/blip-image-captioning-large/discussions/20
        exclude_terms = "arafed," + exclude_terms
        exclude_terms = [
            term.strip().lower() for term in exclude_terms.split(",") if term != ""
        ]
        for term in exclude_terms:
            out_string = out_string.replace(term, "")

        return out_string
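
merge_captions interleaves fragments round-robin across models rather than concatenating whole captions, so the most relevant fragment from each model lands early in the merged string. A standalone sketch of that behavior with two made-up captions:

# Standalone illustration of the round-robin merge used by merge_captions.
captions = ["a photo of a cat, on a sofa", "oil painting, warm palette, soft light"]
parts = [c.split(",") for c in captions]
merged = ""
for i in range(max(len(p) for p in parts)):
    for p in parts:
        if i < len(p) and p[i].strip() != "":
            merged += p[i].strip() + ", "
print(merged)  # a photo of a cat, oil painting, on a sofa, warm palette, soft light,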
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/img_tensor_utils.py
ADDED
@@ -0,0 +1,129 @@
import torch
from typing import Tuple


class TensorImgUtils:
    @staticmethod
    def from_to(from_type: list[str], to_type: list[str]):
        """Return a function that converts a tensor from one type to another. Args can be lists of strings or just strings (e.g., ["C", "H", "W"] or just "CHW")."""
        if isinstance(from_type, list):
            from_type = "".join(from_type)
        if isinstance(to_type, list):
            to_type = "".join(to_type)

        permute_arg = [from_type.index(c) for c in to_type]

        def convert(tensor: torch.Tensor) -> torch.Tensor:
            return tensor.permute(permute_arg)

        return convert

    @staticmethod
    def convert_to_type(tensor: torch.Tensor, to_type: str) -> torch.Tensor:
        """Convert a tensor to a specific type."""
        from_type = TensorImgUtils.identify_type(tensor)[0]
        if from_type == list(to_type):
            return tensor

        if len(from_type) == 4 and len(to_type) == 3:
            # If converting from a batched tensor to a non-batched tensor, squeeze the batch dimension
            tensor = tensor.squeeze(0)
            from_type = from_type[1:]
        if len(from_type) == 3 and len(to_type) == 4:
            # If converting from a non-batched tensor to a batched tensor, unsqueeze the batch dimension
            tensor = tensor.unsqueeze(0)
            from_type = ["B"] + from_type

        return TensorImgUtils.from_to(from_type, list(to_type))(tensor)

    @staticmethod
    def identify_type(tensor: torch.Tensor) -> Tuple[list[str], str]:
        """Identify the type of image tensor. Doesn't currently check for BHW. Returns one of the following:"""
        dim_n = tensor.dim()
        if dim_n == 2:
            return (["H", "W"], "HW")
        elif dim_n == 3:  # HWA, AHW, HWC, or CHW
            if tensor.size(2) == 3:
                return (["H", "W", "C"], "HWRGB")
            elif tensor.size(2) == 4:
                return (["H", "W", "C"], "HWRGBA")
            elif tensor.size(0) == 3:
                return (["C", "H", "W"], "RGBHW")
            elif tensor.size(0) == 4:
                return (["C", "H", "W"], "RGBAHW")
            elif tensor.size(2) == 1:
                return (["H", "W", "C"], "HWA")
            elif tensor.size(0) == 1:
                return (["C", "H", "W"], "AHW")
        elif dim_n == 4:  # BHWC or BCHW
            if tensor.size(3) >= 3:  # BHWRGB or BHWRGBA
                if tensor.size(3) == 3:
                    return (["B", "H", "W", "C"], "BHWRGB")
                elif tensor.size(3) == 4:
                    return (["B", "H", "W", "C"], "BHWRGBA")

            elif tensor.size(1) >= 3:
                if tensor.size(1) == 3:
                    return (["B", "C", "H", "W"], "BRGBHW")
                elif tensor.size(1) == 4:
                    return (["B", "C", "H", "W"], "BRGBAHW")

        else:
            raise ValueError(
                f"{dim_n} dimensions is not a valid number of dimensions for an image tensor."
            )

        raise ValueError(
            f"Could not determine shape of Tensor with {dim_n} dimensions and {tensor.shape} shape."
        )

    @staticmethod
    def test_squeeze_batch(tensor: torch.Tensor, strict=False) -> torch.Tensor:
        # Check if the tensor has a batch dimension (size 4)
        if tensor.dim() == 4:
            if tensor.size(0) == 1 or not strict:
                # If it has a batch dimension with size 1, remove it. It represents a single image.
                return tensor.squeeze(0)
            else:
                raise ValueError(
                    f"This is not a single image. It's a batch of {tensor.size(0)} images."
                )
        else:
            # Otherwise, it doesn't have a batch dimension, so just return the tensor as is.
            return tensor

    @staticmethod
    def test_unsqueeze_batch(tensor: torch.Tensor) -> torch.Tensor:
        # Check if the tensor has a batch dimension (size 4)
        if tensor.dim() == 3:
            # If it doesn't have a batch dimension, add one. It represents a single image.
            return tensor.unsqueeze(0)
        else:
            # Otherwise, it already has a batch dimension, so just return the tensor as is.
            return tensor

    @staticmethod
    def most_pixels(img_tensors: list[torch.Tensor]) -> torch.Tensor:
        sizes = [
            TensorImgUtils.height_width(img)[0] * TensorImgUtils.height_width(img)[1]
            for img in img_tensors
        ]
        return img_tensors[sizes.index(max(sizes))]

    @staticmethod
    def height_width(image: torch.Tensor) -> Tuple[int, int]:
        """Like torchvision.transforms methods, this method assumes Tensor to
        have [..., H, W] shape, where ... means an arbitrary number of leading
        dimensions
        """
        return image.shape[-2:]

    @staticmethod
    def smaller_axis(image: torch.Tensor) -> int:
        h, w = TensorImgUtils.height_width(image)
        return 2 if h < w else 3

    @staticmethod
    def larger_axis(image: torch.Tensor) -> int:
        h, w = TensorImgUtils.height_width(image)
        return 2 if h > w else 3
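
To make the dimension juggling concrete: this is what Img2TxtNode.main relies on when it converts a ComfyUI-style batched BHWC image to CHW before handing it to ToPILImage. A small sketch, assuming the class above is importable:

import torch

batched = torch.rand(1, 512, 768, 3)             # ComfyUI IMAGE: [B, H, W, C]
chw = TensorImgUtils.convert_to_type(batched, "CHW")
print(chw.shape)                                 # torch.Size([3, 512, 768])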
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/keyword_extract.py
ADDED
@@ -0,0 +1,114 @@
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk import pos_tag
import nltk


def nltk_speach_tag(sentence):
    nltk.download("punkt")
    nltk.download("averaged_perceptron_tagger")
    nltk.download("stopwords")

    # Tokenize the sentence
    tokens = word_tokenize(sentence)

    # Filter out stopwords and punctuation
    stop_words = set(stopwords.words("english"))
    filtered_tokens = [
        word for word in tokens if word.lower() not in stop_words and word.isalnum()
    ]

    # Perform Part-of-Speech tagging
    tagged_tokens = pos_tag(filtered_tokens)

    # Extract nouns and proper nouns
    salient_tokens = [
        token
        for token, pos in tagged_tokens
        if pos in ["NN", "NNP", "NNS", "NNPS", "ADJ", "JJ", "FW"]
    ]
    salient_tokens = list(set(salient_tokens))

    # Re-add commas or periods relative to the original sentence
    comma_period_indices = [i for i, char in enumerate(sentence) if char in [",", "."]]
    salient_tokens_indices = [sentence.index(token) for token in salient_tokens]

    # Add commas or periods between words if there was one in the original sentence
    out = ""
    for i, index in enumerate(salient_tokens_indices):
        out += salient_tokens[i]
        distance_between_next = (
            salient_tokens_indices[i + 1] - index
            if i + 1 < len(salient_tokens_indices)
            else None
        )

        puncuated = False
        if not distance_between_next:
            puncuated = True
        else:
            for i in range(index, index + distance_between_next):
                if i in comma_period_indices:
                    puncuated = True
                    break

        if not puncuated:
            # IF the previous word was an adjective, and current is a noun, add a space
            if (
                i > 0
                and tagged_tokens[i - 1][1] in ["JJ", "ADJ"]
                and tagged_tokens[i][1] in ["NN", "NNP", "NNS", "NNPS"]
            ):
                out += " "
            else:
                out += ", "
        else:
            out += ". "

    # Add the last token
    out += sentence[-1]

    # Return the salient tokens
    return out.strip().strip(",").strip(".").strip()


def extract_keywords(text: str) -> str:
    """Return keywords from text using a BERT model trained for keyword extraction as
    a comma-separated string."""
    tokenizer = AutoTokenizer.from_pretrained("yanekyuk/bert-keyword-extractor")
    model = AutoModelForTokenClassification.from_pretrained(
        "yanekyuk/bert-keyword-extractor"
    )
    print(f"Extracting keywords from text: {text}")

    for char in ["\n", "\t", "\r"]:
        text = text.replace(char, " ")

    sentences = text.split(".")
    result = ""

    for sentence in sentences:
        print(f"Extracting keywords from sentence: {sentence}")
        inputs = tokenizer(sentence, return_tensors="pt", padding=True, truncation=True)
        with torch.no_grad():
            logits = model(**inputs).logits

        predicted_token_class_ids = logits.argmax(dim=-1)

        predicted_keywords = []
        for token_id, token in zip(
            predicted_token_class_ids[0],
            tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]),
        ):
            if token_id == 1:
                predicted_keywords.append(token)

        print(f"Extracted keywords: {predicted_keywords}")
        result += ", ".join(predicted_keywords) + ", "

    print(f"All Keywords: {result}")
    return result
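
The core of nltk_speach_tag is the POS whitelist: only nouns, adjectives, and foreign words survive. A minimal sketch of just that filter, with a hand-tagged token list standing in for nltk.pos_tag output (no downloads needed):

# Hand-tagged tokens standing in for pos_tag(filtered_tokens) above.
tagged_tokens = [("girl", "NN"), ("holds", "VBZ"), ("red", "JJ"), ("umbrella", "NN")]
salient = [tok for tok, pos in tagged_tokens
           if pos in ["NN", "NNP", "NNS", "NNPS", "ADJ", "JJ", "FW"]]
print(salient)  # ['girl', 'red', 'umbrella']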
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/llava_img2txt.py
ADDED
@@ -0,0 +1,131 @@
from PIL import Image
import torch
import model_management
from transformers import AutoProcessor, LlavaForConditionalGeneration, BitsAndBytesConfig


class LlavaImg2Txt:
    """
    A class to generate text captions for images using the Llava model.

    Args:
        question_list (list[str]): A list of questions to ask the model about the image.
        model_id (str): The model's name in the Hugging Face model hub.
        use_4bit_quantization (bool): Whether to use 4-bit quantization to reduce memory usage. 4-bit quantization reduces the precision of model parameters, potentially affecting the quality of generated outputs. Use if VRAM is limited. Default is True.
        use_low_cpu_mem (bool): In low_cpu_mem_usage mode, the model is initialized with optimizations aimed at reducing CPU memory consumption. This can be beneficial when working with large models or limited computational resources. Default is True.
        use_flash2_attention (bool): Whether to use Flash-Attention 2. Flash-Attention 2 focuses on optimizing attention mechanisms, which are crucial for the model's performance during generation. Use if computational resources are abundant. Default is False.
        max_tokens_per_chunk (int): The maximum number of tokens to generate per prompt chunk. Default is 300.
    """

    def __init__(
        self,
        question_list,
        model_id: str = "llava-hf/llava-1.5-7b-hf",
        use_4bit_quantization: bool = True,
        use_low_cpu_mem: bool = True,
        use_flash2_attention: bool = False,
        max_tokens_per_chunk: int = 300,
    ):
        self.question_list = question_list
        self.model_id = model_id
        self.use_4bit = use_4bit_quantization
        self.use_flash2 = use_flash2_attention
        self.use_low_cpu_mem = use_low_cpu_mem
        self.max_tokens_per_chunk = max_tokens_per_chunk

    def generate_caption(
        self,
        raw_image: Image.Image,
    ) -> str:
        """
        Generate a caption for an image using the Llava model.

        Args:
            raw_image (Image): Image to generate caption for
        """
        # Convert Image to RGB first
        if raw_image.mode != "RGB":
            raw_image = raw_image.convert("RGB")

        dtype = torch.float16
        quant_config = BitsAndBytesConfig(
            load_in_4bit=self.use_4bit,
            bnb_4bit_compute_dtype=dtype,
            bnb_4bit_quant_type="fp4"
        )

        model = LlavaForConditionalGeneration.from_pretrained(
            self.model_id,
            torch_dtype=dtype,
            low_cpu_mem_usage=self.use_low_cpu_mem,
            use_flash_attention_2=self.use_flash2,
            quantization_config=quant_config,
        )

        # model.to() is not supported for 4-bit or 8-bit bitsandbytes models. With 4-bit quantization, use the model as it is, since the model will already be set to the correct devices and casted to the correct `dtype`.
        if torch.cuda.is_available() and not self.use_4bit:
            model = model.to(model_management.get_torch_device(), torch.float16)

        processor = AutoProcessor.from_pretrained(self.model_id)
        prompt_chunks = self.__get_prompt_chunks(chunk_size=4)

        caption = ""
        with torch.no_grad():
            for prompt_list in prompt_chunks:
                prompt = self.__get_single_answer_prompt(prompt_list)
                inputs = processor(prompt, raw_image, return_tensors="pt").to(
                    model_management.get_torch_device(), torch.float16
                )
                output = model.generate(
                    **inputs, max_new_tokens=self.max_tokens_per_chunk, do_sample=False
                )
                decoded = processor.decode(output[0][2:])
                cleaned = self.clean_output(decoded)
                caption += cleaned

        del model
        torch.cuda.empty_cache()

        return caption

    def clean_output(self, decoded_output, delimiter=","):
        output_only = decoded_output.split("ASSISTANT: ")[1]
        lines = output_only.split("\n")
        cleaned_output = ""
        for line in lines:
            cleaned_output += self.__replace_delimiter(line, ".", delimiter)

        return cleaned_output

    def __get_single_answer_prompt(self, questions):
        """
        For multiple turns conversation:
        "USER: <image>\n<prompt1> ASSISTANT: <answer1></s>USER: <prompt2> ASSISTANT: <answer2></s>USER: <prompt3> ASSISTANT:"
        From: https://huggingface.co/docs/transformers/en/model_doc/llava#usage-tips
        Not sure how the formatting works for multi-turn but those are the docs.
        """
        prompt = "USER: <image>\n"
        for index, question in enumerate(questions):
            if index != 0:
                prompt += "USER: "
            prompt += f"{question} </s >"
        prompt += "ASSISTANT: "

        return prompt

    def __replace_delimiter(self, text: str, old, new=","):
        """Replace only the LAST instance of old with new"""
        if old not in text:
            return text.strip() + " "
        last_old_index = text.rindex(old)
        replaced = text[:last_old_index] + new + text[last_old_index + len(old) :]
        return replaced.strip() + " "

    def __get_prompt_chunks(self, chunk_size=4):
        prompt_chunks = []
        for index, feature in enumerate(self.question_list):
            if index % chunk_size == 0:
                prompt_chunks.append([feature])
            else:
                prompt_chunks[-1].append(feature)
        return prompt_chunks
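
The private chunker simply buckets questions four at a time so each generate() call stays within the per-chunk token budget. A standalone sketch of the same grouping logic:

# Standalone illustration of the chunk_size=4 grouping in __get_prompt_chunks.
questions = [f"q{n}" for n in range(1, 10)]
chunks = []
for index, q in enumerate(questions):
    if index % 4 == 0:
        chunks.append([q])
    else:
        chunks[-1].append(q)
print(chunks)  # [['q1', 'q2', 'q3', 'q4'], ['q5', 'q6', 'q7', 'q8'], ['q9']]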
ComfyUI/custom_nodes/img2txt-comfyui-nodes/src/mini_cpm_img2txt.py
ADDED
@@ -0,0 +1,53 @@
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

import model_management


class MiniPCMImg2Txt:
    def __init__(self, question_list: list[str], temperature: float = 0.7):
        self.model_id = "openbmb/MiniCPM-V-2"
        self.question_list = question_list
        self.question_list = self.__create_question_list()
        self.temperature = temperature

    def __create_question_list(self) -> list:
        ret = []
        for q in self.question_list:
            ret.append({"role": "user", "content": q})
        return ret

    def generate_captions(self, raw_image: Image.Image) -> str:
        device = model_management.get_torch_device()

        # Use bfloat16 on NVIDIA GPUs that support BF16 (e.g. A100, H100, RTX 3090);
        # fall back to float16 on GPUs that do not (e.g. V100, T4, RTX 2080).
        torch_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

        model = AutoModel.from_pretrained(
            "openbmb/MiniCPM-V-2", trust_remote_code=True, torch_dtype=torch_dtype
        )
        model = model.to(device=device, dtype=torch_dtype)

        tokenizer = AutoTokenizer.from_pretrained(
            self.model_id, trust_remote_code=True
        )
        model.eval()

        if raw_image.mode != "RGB":
            raw_image = raw_image.convert("RGB")

        with torch.no_grad():
            res, _, _ = model.chat(
                image=raw_image,
                msgs=self.question_list,
                context=None,
                tokenizer=tokenizer,
                sampling=True,
                temperature=self.temperature,
            )

        del model
        torch.cuda.empty_cache()

        return res
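
MiniCPM's chat API takes the questions as role-tagged messages rather than a flat prompt, which is also what enables the Chinese-language questions the project advertises. A standalone sketch of the list __create_question_list builds:

# Hypothetical question list, including a Chinese question, as MiniCPM receives it.
questions = ["What is the subject of this image?", "这张图片的配色是什么样的？"]
msgs = [{"role": "user", "content": q} for q in questions]
print(msgs[0])  # {'role': 'user', 'content': 'What is the subject of this image?'}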
ComfyUI/custom_nodes/img2txt-comfyui-nodes/web/show-output-text.js
ADDED
@@ -0,0 +1,51 @@
import { app } from "../../../scripts/app.js";
import { ComfyWidgets } from "../../../scripts/widgets.js";

// Displays output caption text
app.registerExtension({
  name: "Img2TxtNode",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    if (nodeData.name === "img2txt BLIP/Llava Multimodel Tagger") {
      function populate(message) {
        console.log("message", message);
        console.log("message.text", message.text);

        const insertIndex = this.widgets.findIndex((w) => w.name === "output_text");
        if (insertIndex !== -1) {
          for (let i = insertIndex; i < this.widgets.length; i++) {
            this.widgets[i].onRemove?.();
          }
          this.widgets.length = insertIndex;
        }

        const outputWidget = ComfyWidgets["STRING"](
          this,
          "output_text",
          ["STRING", { multiline: true }],
          app
        ).widget;
        outputWidget.inputEl.readOnly = true;
        outputWidget.inputEl.style.opacity = 0.6;
        outputWidget.value = message.text.join("");

        requestAnimationFrame(() => {
          const size_ = this.computeSize();
          if (size_[0] < this.size[0]) {
            size_[0] = this.size[0];
          }
          if (size_[1] < this.size[1]) {
            size_[1] = this.size[1];
          }
          this.onResize?.(size_);
          app.graph.setDirtyCanvas(true, false);
        });
      }

      const onExecuted = nodeType.prototype.onExecuted;
      nodeType.prototype.onExecuted = function (message) {
        onExecuted?.apply(this, arguments);
        populate.call(this, message);
      };
    }
  },
});
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_001.png
ADDED
Git LFS Details
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_002.png
ADDED
Git LFS Details
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/demo-pics/Selection_003.png
ADDED
Git LFS Details
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/img2img.json
ADDED
@@ -0,0 +1,523 @@
{
  "last_node_id": 51,
  "last_link_id": 60,
  "nodes": [
    {
      "id": 41, "type": "CLIPTextEncode", "pos": [1055, 571],
      "size": {"0": 348.9403381347656, "1": 56.439388275146484},
      "flags": {}, "order": 5, "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 50},
        {"name": "text", "type": "STRING", "link": 60, "widget": {"name": "text"}}
      ],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [44], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": [""]
    },
    {
      "id": 39, "type": "KSampler", "pos": [1587, 982], "size": {"0": 315, "1": 262},
      "flags": {}, "order": 6, "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 42},
        {"name": "positive", "type": "CONDITIONING", "link": 44},
        {"name": "negative", "type": "CONDITIONING", "link": 45},
        {"name": "latent_image", "type": "LATENT", "link": 58}
      ],
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [48], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [290872458059323, "randomize", 20, 8, "euler", "normal", 1]
    },
    {
      "id": 45, "type": "VAEDecode", "pos": [1998, 1018], "size": {"0": 210, "1": 46},
      "flags": {}, "order": 7, "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 48},
        {"name": "vae", "type": "VAE", "link": 49}
      ],
      "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [55], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 48, "type": "PreviewImage", "pos": [2039, 1262], "size": {"0": 210, "1": 246},
      "flags": {}, "order": 8, "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 55}],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 42, "type": "CLIPTextEncode", "pos": [1056, 683],
      "size": {"0": 352.9139404296875, "1": 113.16606140136719},
      "flags": {}, "order": 3, "mode": 0,
      "inputs": [{"name": "clip", "type": "CLIP", "link": 51}],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [45], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["text, watermark"]
    },
    {
      "id": 50, "type": "VAEEncode", "pos": [1119, 1329],
      "size": {"0": 201.4841766357422, "1": 55.59581756591797},
      "flags": {}, "order": 4, "mode": 0,
      "inputs": [
        {"name": "pixels", "type": "IMAGE", "link": 56},
        {"name": "vae", "type": "VAE", "link": 57}
      ],
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [58], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "VAEEncode"}
    },
    {
      "id": 11, "type": "LoadImage", "pos": [-135, 907], "size": {"0": 670, "1": 460},
      "flags": {}, "order": 0, "mode": 0,
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [56, 59], "shape": 3, "slot_index": 0},
        {"name": "MASK", "type": "MASK", "links": [], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "LoadImage"},
      "widgets_values": ["example.png", "image"]
    },
    {
      "id": 40, "type": "CheckpointLoaderSimple", "pos": [1124, 1019], "size": {"0": 315, "1": 98},
      "flags": {}, "order": 1, "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [42], "shape": 3, "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [50, 51], "shape": 3, "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [49, 57], "shape": 3, "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["dreamshaper_8.safetensors"]
    },
    {
      "id": 51, "type": "img2txt BLIP/Llava Multimodel Tagger", "pos": [605, 881],
      "size": {"0": 427.2057800292969, "1": 476.26934814453125},
      "flags": {}, "order": 2, "mode": 0,
      "inputs": [{"name": "input_image", "type": "IMAGE", "link": 59}],
      "outputs": [{"name": "caption", "type": "STRING", "links": [60], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "img2txt BLIP/Llava Multimodel Tagger"},
      "widgets_values": [true, false, false, false, "a photograph of", "What is the subject and background of this image?", 0.7000000000000001, 1.26, 36, 128, 5, "watermark, text, writing", "a photograph of a girl dressed up, in pink dress and bright blue eyes poses in the grass with arms spread out in front of her face, holding an umbrella on a sky, "],
      "color": "#322", "bgcolor": "#533"
    }
  ],
  "links": [
    [42, 40, 0, 39, 0, "MODEL"],
    [44, 41, 0, 39, 1, "CONDITIONING"],
    [45, 42, 0, 39, 2, "CONDITIONING"],
    [48, 39, 0, 45, 0, "LATENT"],
    [49, 40, 2, 45, 1, "VAE"],
    [50, 40, 1, 41, 0, "CLIP"],
    [51, 40, 1, 42, 0, "CLIP"],
    [55, 45, 0, 48, 0, "IMAGE"],
    [56, 11, 0, 50, 0, "IMAGE"],
    [57, 40, 2, 50, 1, "VAE"],
    [58, 50, 0, 39, 3, "LATENT"],
    [59, 11, 0, 51, 0, "IMAGE"],
    [60, 51, 0, 41, 1, "STRING"]
  ],
  "groups": [],
  "config": {},
  "extra": {"ds": {"scale": 0.9090909090909091, "offset": {"0": 304.575645264068, "1": -258.56908735931404}}},
  "version": 0.4
}
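
These workflow files are plain graph descriptions, so they can be inspected or patched programmatically before queueing. A minimal sketch (the local file path is assumed) that lists the node types and shows link 60 carrying the caption STRING from the tagger (node 51) into the positive CLIPTextEncode (node 41):

import json

with open("img2img.json") as f:  # hypothetical local copy of the workflow above
    workflow = json.load(f)

print([node["type"] for node in workflow["nodes"]])
print([link for link in workflow["links"] if link[0] == 60])
# [[60, 51, 0, 41, 1, 'STRING']]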
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/inpaint.json
ADDED
@@ -0,0 +1,705 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{
  "last_node_id": 61,
  "last_link_id": 80,
  "nodes": [
    {
      "id": 45,
      "type": "VAEDecode",
      "pos": [1998, 1018],
      "size": {"0": 210, "1": 46},
      "flags": {},
      "order": 10,
      "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 71},
        {"name": "vae", "type": "VAE", "link": 49}
      ],
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [55], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 42,
      "type": "CLIPTextEncode",
      "pos": [1056, 683],
      "size": {"0": 352.9139404296875, "1": 113.16606140136719},
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 51}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [63], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["text, watermark"]
    },
    {
      "id": 41,
      "type": "CLIPTextEncode",
      "pos": [1055, 571],
      "size": {"0": 348.9403381347656, "1": 56.439388275146484},
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 50},
        {"name": "text", "type": "STRING", "link": 80, "widget": {"name": "text"}}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [64], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": [""]
    },
    {
      "id": 58,
      "type": "PreviewImage",
      "pos": [616, 1631],
      "size": {"0": 401.17840576171875, "1": 246},
      "flags": {},
      "order": 7,
      "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 73}
      ],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 57,
      "type": "MaskToImage",
      "pos": [617, 1543],
      "size": {"0": 210, "1": 26},
      "flags": {},
      "order": 5,
      "mode": 0,
      "inputs": [
        {"name": "mask", "type": "MASK", "link": 78}
      ],
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [73], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "MaskToImage"}
    },
    {
      "id": 40,
      "type": "CheckpointLoaderSimple",
      "pos": [1044, 1032],
      "size": {"0": 315, "1": 98},
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [68], "shape": 3, "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [50, 51], "shape": 3, "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [49, 69], "shape": 3, "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["experience_70-inpainting.safetensors"]
    },
    {
      "id": 48,
      "type": "PreviewImage",
      "pos": [2039, 1262],
      "size": {"0": 295.2332458496094, "1": 293.2945251464844},
      "flags": {},
      "order": 11,
      "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 55}
      ],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 56,
      "type": "KSampler",
      "pos": [1642, 820],
      "size": {"0": 315, "1": 262},
      "flags": {},
      "order": 9,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 68},
        {"name": "positive", "type": "CONDITIONING", "link": 66},
        {"name": "negative", "type": "CONDITIONING", "link": 67},
        {"name": "latent_image", "type": "LATENT", "link": 65}
      ],
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [71], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [492464952856155, "randomize", 30, 7, "dpmpp_2m_sde_gpu", "normal", 0.8]
    },
    {
      "id": 55,
      "type": "ImageColorToMask",
      "pos": [610, 1425],
      "size": {"0": 315, "1": 58},
      "flags": {},
      "order": 3,
      "mode": 0,
      "inputs": [
        {"name": "image", "type": "IMAGE", "link": 61}
      ],
      "outputs": [
        {"name": "MASK", "type": "MASK", "links": [77, 78], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "ImageColorToMask"},
      "widgets_values": [6198527]
    },
    {
      "id": 54,
      "type": "InpaintModelConditioning",
      "pos": [1289, 1377],
      "size": {"0": 216.59999084472656, "1": 106},
      "flags": {},
      "order": 8,
      "mode": 0,
      "inputs": [
        {"name": "positive", "type": "CONDITIONING", "link": 64},
        {"name": "negative", "type": "CONDITIONING", "link": 63},
        {"name": "vae", "type": "VAE", "link": 69},
        {"name": "pixels", "type": "IMAGE", "link": 70},
        {"name": "mask", "type": "MASK", "link": 77}
      ],
      "outputs": [
        {"name": "positive", "type": "CONDITIONING", "links": [66], "shape": 3, "slot_index": 0},
        {"name": "negative", "type": "CONDITIONING", "links": [67], "shape": 3, "slot_index": 1},
        {"name": "latent", "type": "LATENT", "links": [65], "shape": 3, "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "InpaintModelConditioning"}
    },
    {
      "id": 11,
      "type": "LoadImage",
      "pos": [-135, 907],
      "size": {"0": 670, "1": 460},
      "flags": {},
      "order": 1,
      "mode": 0,
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [61, 70, 79], "shape": 3, "slot_index": 0},
        {"name": "MASK", "type": "MASK", "links": [], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "LoadImage"},
      "widgets_values": ["example.png", "image"]
    },
    {
      "id": 61,
      "type": "img2txt BLIP/Llava Multimodel Tagger",
      "pos": [599, 886],
      "size": [414.8329491017887, 453.3791344354013],
      "flags": {},
      "order": 4,
      "mode": 0,
      "inputs": [
        {"name": "input_image", "type": "IMAGE", "link": 79}
      ],
      "outputs": [
        {"name": "caption", "type": "STRING", "links": [80], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "img2txt BLIP/Llava Multimodel Tagger"},
      "widgets_values": [true, false, false, false, "a photograph of", "What is the subject of this image?\n", 0.8, 1.2, 36, 128, 5, "watermark, text, writing"],
      "color": "#322",
      "bgcolor": "#533"
    }
  ],
  "links": [
    [49, 40, 2, 45, 1, "VAE"],
    [50, 40, 1, 41, 0, "CLIP"],
    [51, 40, 1, 42, 0, "CLIP"],
    [55, 45, 0, 48, 0, "IMAGE"],
    [61, 11, 0, 55, 0, "IMAGE"],
    [63, 42, 0, 54, 1, "CONDITIONING"],
    [64, 41, 0, 54, 0, "CONDITIONING"],
    [65, 54, 2, 56, 3, "LATENT"],
    [66, 54, 0, 56, 1, "CONDITIONING"],
    [67, 54, 1, 56, 2, "CONDITIONING"],
    [68, 40, 0, 56, 0, "MODEL"],
    [69, 40, 2, 54, 2, "VAE"],
    [70, 11, 0, 54, 3, "IMAGE"],
    [71, 56, 0, 45, 0, "LATENT"],
    [73, 57, 0, 58, 0, "IMAGE"],
    [77, 55, 0, 54, 4, "MASK"],
    [78, 55, 0, 57, 0, "MASK"],
    [79, 11, 0, 61, 0, "IMAGE"],
    [80, 61, 0, 41, 1, "STRING"]
  ],
  "groups": [],
  "config": {},
  "extra": {
    "ds": {
      "scale": 0.8264462809917354,
      "offset": {"0": 478.9515963527572, "1": -472.76124333876595}
    }
  },
  "version": 0.4
}
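Aside: the workflow files in this commit use ComfyUI's LiteGraph serialization, where each entry in the top-level "links" array is a six-element tuple [link_id, source_node_id, source_slot, target_node_id, target_slot, type]. A minimal Python sketch for inspecting those connections follows; the file path is an assumption, so point it at a local copy of the workflow JSON.

import json

# Minimal sketch: print every edge in a ComfyUI workflow export.
# The path below is an assumption -- adjust to wherever the file lives.
with open("wiki/workflow-examples/inpaint.json") as f:
    workflow = json.load(f)

nodes = {node["id"]: node for node in workflow["nodes"]}

# Each links entry is [link_id, src_node, src_slot, dst_node, dst_slot, type].
for link_id, src_id, src_slot, dst_id, dst_slot, link_type in workflow["links"]:
    src_type = nodes[src_id]["type"]
    dst_type = nodes[dst_id]["type"]
    print(f"link {link_id}: {src_type}[{src_slot}] --{link_type}--> {dst_type}[{dst_slot}]")

Run against the inpaint workflow above, this prints one line per edge, e.g. the CheckpointLoaderSimple VAE output feeding both VAEDecode and InpaintModelConditioning.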
ComfyUI/custom_nodes/img2txt-comfyui-nodes/wiki/workflow-examples/txt2img.json
ADDED
@@ -0,0 +1,498 @@
{
  "last_node_id": 53,
  "last_link_id": 61,
  "nodes": [
    {
      "id": 41,
      "type": "CLIPTextEncode",
      "pos": [1055, 571],
      "size": {"0": 348.9403381347656, "1": 56.439388275146484},
      "flags": {},
      "order": 5,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 50},
        {"name": "text", "type": "STRING", "link": 61, "widget": {"name": "text"}}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [44], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": [""]
    },
    {
      "id": 39,
      "type": "KSampler",
      "pos": [1587, 982],
      "size": {"0": 315, "1": 262},
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 42},
        {"name": "positive", "type": "CONDITIONING", "link": 44},
        {"name": "negative", "type": "CONDITIONING", "link": 45},
        {"name": "latent_image", "type": "LATENT", "link": 59}
      ],
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [48], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [438454791536393, "randomize", 20, 8, "euler", "normal", 1]
    },
    {
      "id": 45,
      "type": "VAEDecode",
      "pos": [1998, 1018],
      "size": {"0": 210, "1": 46},
      "flags": {},
      "order": 7,
      "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 48},
        {"name": "vae", "type": "VAE", "link": 49}
      ],
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [55], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 48,
      "type": "PreviewImage",
      "pos": [2039, 1262],
      "size": {"0": 210, "1": 246},
      "flags": {},
      "order": 8,
      "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 55}
      ],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 42,
      "type": "CLIPTextEncode",
      "pos": [1056, 683],
      "size": {"0": 352.9139404296875, "1": 113.16606140136719},
      "flags": {},
      "order": 4,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 51}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [45], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["text, watermark"]
    },
    {
      "id": 52,
      "type": "EmptyLatentImage",
      "pos": [1126, 1189],
      "size": {"0": 315, "1": 106},
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [59], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "EmptyLatentImage"},
      "widgets_values": [512, 512, 1]
    },
    {
      "id": 11,
      "type": "LoadImage",
      "pos": [-135, 907],
      "size": {"0": 670, "1": 460},
      "flags": {},
      "order": 1,
      "mode": 0,
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [60], "shape": 3, "slot_index": 0},
        {"name": "MASK", "type": "MASK", "links": [], "shape": 3, "slot_index": 1}
      ],
      "properties": {"Node name for S&R": "LoadImage"},
      "widgets_values": ["example.png", "image"]
    },
    {
      "id": 40,
      "type": "CheckpointLoaderSimple",
      "pos": [1124, 1019],
      "size": {"0": 315, "1": 98},
      "flags": {},
      "order": 2,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [42], "shape": 3, "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [50, 51], "shape": 3, "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [49], "shape": 3, "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["dreamshaper_8.safetensors"]
    },
    {
      "id": 53,
      "type": "img2txt BLIP/Llava Multimodel Tagger",
      "pos": [584, 865],
      "size": [462.2727684830322, 532.8236759410865],
      "flags": {},
      "order": 3,
      "mode": 0,
      "inputs": [
        {"name": "input_image", "type": "IMAGE", "link": 60}
      ],
      "outputs": [
        {"name": "caption", "type": "STRING", "links": [61], "shape": 3, "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "img2txt BLIP/Llava Multimodel Tagger"},
      "widgets_values": [false, false, true, false, "a photograph of", "What is a detailed description of this image?\nWhat is the background of this image?", 0.8, 1.2, 36, 128, 5, "watermark, text, writing", "The image features a cartoon character standing against an abstract background consisting of green, blue, and white elements. The main focus is on the woman with bright yellow wings wearing pink attire while smiling at something off-frame in front of her that seems to be representing \"clouds\" or possibly another object within view but not clearly visible due to its distance from us as viewers., "],
      "color": "#322",
      "bgcolor": "#533"
    }
  ],
  "links": [
    [42, 40, 0, 39, 0, "MODEL"],
    [44, 41, 0, 39, 1, "CONDITIONING"],
    [45, 42, 0, 39, 2, "CONDITIONING"],
    [48, 39, 0, 45, 0, "LATENT"],
    [49, 40, 2, 45, 1, "VAE"],
    [50, 40, 1, 41, 0, "CLIP"],
    [51, 40, 1, 42, 0, "CLIP"],
    [55, 45, 0, 48, 0, "IMAGE"],
    [59, 52, 0, 39, 3, "LATENT"],
    [60, 11, 0, 53, 0, "IMAGE"],
    [61, 53, 0, 41, 1, "STRING"]
  ],
  "groups": [],
  "config": {},
  "extra": {
    "ds": {
      "scale": 0.9090909090909091,
      "offset": {"0": 278.52736579431155, "1": -323.6237095104226}
    }
  },
  "version": 0.4
}
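A companion sketch, under the same assumptions about the serialization format, cross-checks a workflow file's referential integrity: every link should resolve to a real node on both ends, with matching slot types. The check_links helper and the path it is called with are illustrative, not part of this repository.

import json

def check_links(path):
    """Sketch: verify each link's endpoints exist and agree on type."""
    with open(path) as f:
        workflow = json.load(f)
    nodes = {node["id"]: node for node in workflow["nodes"]}
    for link_id, src_id, src_slot, dst_id, dst_slot, link_type in workflow["links"]:
        out_slot = nodes[src_id]["outputs"][src_slot]
        in_slot = nodes[dst_id]["inputs"][dst_slot]
        # Source output, destination input, and the link record must agree.
        assert out_slot["type"] == link_type == in_slot["type"], f"type mismatch on link {link_id}"
        assert in_slot["link"] == link_id and link_id in out_slot["links"], f"dangling link {link_id}"
    print(f"{path}: {len(workflow['links'])} links consistent")

check_links("wiki/workflow-examples/txt2img.json")  # path is an assumption

Both example workflows above pass this check: in txt2img.json, for instance, link 61 carries the tagger's STRING caption output (node 53, slot 0) into the positive CLIPTextEncode text input (node 41, slot 1).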