codeShare committed on
Commit be82bb1 • 1 Parent(s): 4c87349

Upload fusion_t2i_CLIP_interrogator.ipynb

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb ADDED
@@ -0,0 +1,597 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "UEYEdzjgOEOE"
+ },
+ "outputs": [],
+ "source": [
+ "# @title ✳️ Load/initialize values\n",
+ "#Imports\n",
+ "#!pip install safetensors\n",
+ "from safetensors.torch import load_file\n",
+ "import json , os , shelve , torch\n",
+ "import pandas as pd\n",
+ "#----#\n",
+ "\n",
+ "def my_mkdirs(folder):\n",
+ " if not os.path.exists(folder):\n",
+ "  os.makedirs(folder)\n",
+ "\n",
+ "def fix_bad_symbols(txt):\n",
+ " result = txt\n",
+ " for symbol in ['^', '}', '{' , ')', '(', '[' , ']' , ':' , '=' ]:\n",
+ "  result = result.replace(symbol,'\\\\' + symbol)\n",
+ " #------#\n",
+ " return result\n",
+ "\n",
+ "\n",
+ "def getPrompts(_path, separator):\n",
+ "\n",
+ " path = _path + '/text'\n",
+ " path_enc = _path + '/text_encodings'\n",
+ " #-----#\n",
+ " index = 0\n",
+ " file_index = 0\n",
+ " prompts = {}\n",
+ " text_encodings = {}\n",
+ " _text_encodings = {}\n",
+ " #-----#\n",
+ " for filename in os.listdir(f'{path}'):\n",
+ "\n",
+ "  print(f'reading {filename}....')\n",
+ "  _index = 0\n",
+ "  %cd {path}\n",
+ "  with open(f'{filename}', 'r') as f:\n",
+ "   data = json.load(f)\n",
+ "  #------#\n",
+ "  _df = pd.DataFrame({'count': data})['count']\n",
+ "  _prompts = {\n",
+ "   key : value for key, value in _df.items()\n",
+ "  }\n",
+ "  for key in _prompts:\n",
+ "   _index = int(key)\n",
+ "   value = _prompts[key]\n",
+ "\n",
+ "   #Read the 'header' file in the JSON\n",
+ "   if _index <= 0 :\n",
+ "    _NUM_ITEMS = int(value)\n",
+ "    prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
+ "    index = index + 1\n",
+ "    continue\n",
+ "   if _index <= 1 :\n",
+ "    _file_name = f'{value}'\n",
+ "    %cd {path_enc}\n",
+ "    _text_encodings = load_file(f'{_file_name}.safetensors')\n",
+ "    #Store text_encodings for the header items\n",
+ "    text_encodings[f'{index-1}'] = _text_encodings[f'{_index-1}']\n",
+ "    text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
+ "    #------#\n",
+ "    prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
+ "    index = index + 1\n",
+ "    continue\n",
+ "   #------#\n",
+ "   #Read the text_encodings + prompts\n",
+ "   text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
+ "   prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
+ "   index = index + 1\n",
+ "   continue\n",
+ "  #-------#\n",
+ "  #--------#\n",
+ "  #_text_encodings.close() #close the text_encodings file\n",
+ "  file_index = file_index + 1\n",
+ " #----------#\n",
+ " NUM_ITEMS = index -1\n",
+ " return prompts , text_encodings , NUM_ITEMS\n",
+ "#--------#\n",
+ "\n",
+ "def append_from_url(dictA, tensA , nA , url , separator):\n",
+ " dictB , tensB, nB = getPrompts(url, separator)\n",
+ " dictAB = dictA\n",
+ " tensAB = tensA\n",
+ " nAB = nA\n",
+ " for key in dictB:\n",
+ "  nAB = nAB + 1\n",
+ "  dictAB[f'{nA + int(key)}'] = dictB[key]\n",
+ "  tensAB[f'{nA + int(key)}'] = tensB[key]\n",
+ " #-----#\n",
+ " return dictAB, tensAB , nAB-1\n",
+ "#-------#\n",
+ "\n",
+ "home_directory = '/content/'\n",
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+ "%cd {home_directory}\n",
+ "\n",
+ "#🔸🔹\n",
+ "# Load the data if not already loaded\n",
+ "try:\n",
+ " loaded\n",
+ "except:\n",
+ " %cd {home_directory}\n",
+ " !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+ " loaded = True\n",
+ "#--------#\n",
+ "\n",
+ "#default NEG values\n",
+ "try: name_NEG\n",
+ "except: name_NEG = ''\n",
+ "try: image_NEG\n",
+ "except: image_NEG = ''\n",
+ "try: strength_image_NEG\n",
+ "except: strength_image_NEG = 1\n",
+ "try: strength_NEG\n",
+ "except: strength_NEG = 1\n",
+ "try: NUM_VOCAB_ITEMS\n",
+ "except: NUM_VOCAB_ITEMS = 0\n",
+ "try: using_NEG\n",
+ "except: using_NEG = False\n",
+ "try: using_image_NEG\n",
+ "except: using_image_NEG = False\n",
+ "#------#\n",
+ "\n",
+ "def getJSON(path , filename):\n",
+ " %cd {path}\n",
+ " with open(f'{filename}', 'r') as f:\n",
+ "  data = json.load(f)\n",
+ "  #------#\n",
+ " print(f'reading {filename}....')\n",
+ " _df = pd.DataFrame({'count': data})['count']\n",
+ " _prompts = {\n",
+ "  key : value for key, value in _df.items()\n",
+ " }\n",
+ " return _prompts\n",
+ "\n",
+ "#----#\n",
+ "\n",
+ "def getPromptsAndLinks(_path):\n",
+ " path = _path + '/text'\n",
+ " path_enc = _path + '/text_encodings'\n",
+ " #-----#\n",
+ " path_images = _path + '/images'\n",
+ " path_enc_images = _path + '/image_encodings'\n",
+ " #----#\n",
+ " _file_name = ''\n",
+ " _file_name_images = ''\n",
+ " #-----#\n",
+ " index = 0\n",
+ " prompts = {}\n",
+ " _prompts = {}\n",
+ " #-------#\n",
+ " urls = {}\n",
+ " _urls = {}\n",
+ " #------#\n",
+ " text_encodings = {}\n",
+ " _text_encodings = {}\n",
+ " image_encodings = {}\n",
+ " _image_encodings = {}\n",
+ " #-----#\n",
+ " for filename in os.listdir(f'{path}'):\n",
+ "\n",
+ "  print(f'reading {filename}...')\n",
+ "  _index = 0\n",
+ "  %cd {path}\n",
+ "  with open(f'{filename}', 'r') as f:\n",
+ "   data = json.load(f)\n",
+ "   _df = pd.DataFrame({'count': data})['count']\n",
+ "   _prompts = {\n",
+ "    key : value for key, value in _df.items()\n",
+ "   }\n",
+ "\n",
+ "  for key in _prompts:\n",
+ "   _index = int(key)\n",
+ "   value = _prompts[key]\n",
+ "   if _index<=0: continue\n",
+ "   if _index<=1:\n",
+ "    _file_name = f'{value}'\n",
+ "    _file_name_images = _prompts[f'{0}']\n",
+ "    #-------#\n",
+ "    print(f'reading {_file_name_images}.json..')\n",
+ "    %cd {path_images}\n",
+ "    with open(f'{_file_name_images}.json', 'r') as f:\n",
+ "     data = json.load(f)\n",
+ "     _df = pd.DataFrame({'count': data})['count']\n",
+ "     _urls = {\n",
+ "      key : value for key, value in _df.items()\n",
+ "     }\n",
+ "    #--------#\n",
+ "    %cd {path_enc}\n",
+ "    _text_encodings = load_file(f'{_file_name}.safetensors')\n",
+ "    text_encodings[f'{index-1}'] = _text_encodings[f'{_index-1}']\n",
+ "    text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
+ "    #-------#\n",
+ "    %cd {path_enc_images}\n",
+ "    _image_encodings = load_file(f'{_file_name_images}.safetensors')\n",
+ "    image_encodings[f'{index-1}'] = _image_encodings[f'{_index-1}']\n",
+ "    image_encodings[f'{index}'] = _image_encodings[f'{_index}']\n",
+ "    #-------#\n",
+ "    prompts[f'{index-1}'] = _prompts[f'{_index-1}']\n",
+ "    urls[f'{index-1}'] = _urls[f'{_index-1}']\n",
+ "    prompts[f'{index}'] = _prompts[f'{_index}']\n",
+ "    urls[f'{index}'] = _urls[f'{_index}']\n",
+ "    #-------#\n",
+ "    index = index + 1\n",
+ "    continue\n",
+ "   #--------#\n",
+ "   #Read the text_encodings + prompts\n",
+ "   text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
+ "   image_encodings[f'{index}'] = _image_encodings[f'{_index}']\n",
+ "   prompts[f'{index}'] = _prompts[f'{_index}']\n",
+ "   urls[f'{index}'] = _urls[f'{_index}']\n",
+ "   index = index + 1\n",
+ "   continue\n",
+ "  #-------#\n",
+ " #--------#\n",
+ " #----------#\n",
+ " NUM_ITEMS = index -1\n",
+ " return prompts , text_encodings , urls , image_encodings , NUM_ITEMS\n",
+ "#--------#\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title 📚 Select items to sample from\n",
+ "\n",
+ "prompt_features = True # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
+ "civitai_blue_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"📘\"}\n",
+ "suffix = True # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
+ "prefix = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
+ "emojis = True # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+ "#------#\n",
+ "\n",
+ "first_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔹\"}\n",
+ "last_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"🔸\"}\n",
+ "full_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+ "celebs = False # @param {\"type\":\"boolean\",\"placeholder\":\"🆔👨\"}\n",
+ "#-------#\n",
+ "danbooru_tags = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎀\"}\n",
+ "lyrics = False # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
+ "tripple_nouns = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
+ "#-----#\n",
+ "female_fullnames = False # @param {\"type\":\"boolean\",\"placeholder\":\"😃\"}\n",
+ "debug = False\n",
+ "#------#\n",
+ "prompts = {}\n",
+ "text_encodings = {}\n",
+ "nA = 0\n",
+ "#--------#\n",
+ "\n",
+ "\n",
+ "if tripple_nouns:\n",
+ " url = '/content/text-to-image-prompts/nouns'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "\n",
+ "if lyrics:\n",
+ " url = '/content/text-to-image-prompts/lyrics'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "\n",
+ "if danbooru_tags:\n",
+ " url = '/content/text-to-image-prompts/danbooru'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if first_names:\n",
+ " url = '/content/text-to-image-prompts/names/firstnames'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if last_names:\n",
+ " url = '/content/text-to-image-prompts/names/lastnames'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if full_names:\n",
+ " url = '/content/text-to-image-prompts/names/fullnames'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if celebs:\n",
+ " url = '/content/text-to-image-prompts/names/celebs/mixed'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "celebs_young = False # toggle for the 'young celebs' subset\n",
+ "if celebs_young :\n",
+ " url = '/content/text-to-image-prompts/names/celebs/young'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if female_fullnames:\n",
+ " url = '/content/text-to-image-prompts/names/fullnames'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "\n",
+ "if prompt_features:\n",
+ " url = '/content/text-to-image-prompts/civitai-prompts/green'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "\n",
+ "if emojis:\n",
+ " url = '/content/text-to-image-prompts/vocab/text_encodings/emoji'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "\n",
+ "if civitai_blue_set:\n",
+ " url = '/content/text-to-image-prompts/civitai-prompts/blue'\n",
+ " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#--------#\n",
+ "\n",
+ "if suffix :\n",
+ " tmp = '/content/text-to-image-prompts/vocab/text_encodings/suffix/'\n",
+ " for item in ['common','average','rare','weird','exotic'] :\n",
+ "  url = tmp + item\n",
+ "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
+ "#------#\n",
+ "\n",
+ "if prefix :\n",
+ " tmp = '/content/text-to-image-prompts/vocab/text_encodings/prefix/'\n",
+ " for item in ['common','average','rare','weird','exotic'] :\n",
+ "  url = tmp + item\n",
+ "  prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '-')\n",
+ "#------#\n",
+ "\n",
+ "if debug:\n",
+ " index = 0\n",
+ " for key in prompts: index = index + 1\n",
+ " print(index)\n",
+ " index = 0\n",
+ " for key in text_encodings : index = index + 1\n",
+ " print(index)\n",
+ "#------#\n",
+ "\n",
+ "NUM_VOCAB_ITEMS = nA\n",
+ "text_tensor = torch.zeros(NUM_VOCAB_ITEMS,768)\n",
+ "for index in range(NUM_VOCAB_ITEMS):\n",
+ " text_tensor[index] = text_encodings[f'{index}']\n",
+ "#---------#\n"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "CF53WIAKObg3"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title \t⚄ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
+ "\n",
+ "\n",
+ "#image_index = 0 # @param {type:'number'}\n",
+ "# @markdown 📥 Load the data (only required one time)\n",
+ "load_the_data = False # @param {type:\"boolean\"}\n",
+ "\n",
+ "# @markdown 🖼️ Choose a pre-encoded reference\n",
+ "index = 829 # @param {type:\"slider\", min:0, max:1668, step:1}\n",
+ "\n",
+ "# @markdown ⚖️ Set the value for C in the reference <br> <br> sim = C*text_enc + image_enc*(1-C) <br><br>\n",
+ "\n",
+ "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
+ "\n",
+ "# @markdown 🚫 Penalize similarity to this prompt (optional)\n",
+ "\n",
+ "NEG = '' # @param {type:'string'}\n",
+ "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
+ "\n",
+ "# @markdown Calculate the most similar items using the above settings?\n",
+ "enable = True # @param {type:\"boolean\"}\n",
+ "\n",
+ "if (load_the_data):\n",
+ " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
+ " from transformers import AutoTokenizer\n",
+ " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
+ " from transformers import CLIPProcessor, CLIPModel\n",
+ " processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
+ " model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
+ " logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
+ "\n",
+ "from PIL import Image\n",
+ "import requests\n",
+ "prompt = target_prompts[f'{index}']\n",
+ "url = urls[f'{index}']\n",
+ "if url.find('perchance')>-1:\n",
+ " image = Image.open(requests.get(url, stream=True).raw)\n",
+ "else: image = None; print(\"(No image for this ID)\")\n",
+ "\n",
+ "print(\"\")\n",
+ "print(f\"'{prompt}'\")\n",
+ "print(\"\")\n",
+ "\n",
+ "if(enable):\n",
+ " text_features_A = target_text_encodings[f'{index}']\n",
+ " image_features_A = target_image_encodings[f'{index}']\n",
+ "\n",
+ " # text-similarity\n",
+ " sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
+ "\n",
+ " neg_sims = 0*sims\n",
+ " if(NEG != ''):\n",
+ "\n",
+ "  # Get text features for user input\n",
+ "  inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
+ "  text_features_NEG = model.get_text_features(**inputs)\n",
+ "  text_features_NEG = text_features_NEG/text_features_NEG.norm(p=2, dim=-1, keepdim=True)\n",
+ "\n",
+ "  # text-similarity\n",
+ "  neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
+ " #------#\n",
+ "\n",
+ " # plus image-similarity\n",
+ " sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
+ "\n",
+ "\n",
+ " # minus NEG-similarity\n",
+ " sims = sims - neg_sims\n",
+ "\n",
+ " # Sort the items\n",
+ " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
+ "\n",
+ " # @title ⚙️📝 Print the results (Advanced)\n",
+ " list_size = 1000 # param {type:'number'}\n",
+ " start_at_index = 0 # param {type:'number'}\n",
+ " print_Similarity = True # param {type:\"boolean\"}\n",
+ " print_Prompts = True # param {type:\"boolean\"}\n",
+ " print_Prefix = True # param {type:\"boolean\"}\n",
+ " print_Descriptions = True # param {type:\"boolean\"}\n",
+ " compact_Output = True # param {type:\"boolean\"}\n",
+ "\n",
+ " # @markdown -----------\n",
+ " # @markdown ⚙️📝 Printing options\n",
+ " newline_Separator = True # @param {type:\"boolean\"}\n",
+ "\n",
+ " import random\n",
+ " list_size2 = 1000 # param {type:'number'}\n",
+ " start_at_index2 = 10000 # param {type:'number'}\n",
+ " rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
+ "\n",
+ " # @markdown Repeat output N times\n",
+ " N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
+ "\n",
+ " # title Show the 100 most similar suffix and prefix text-encodings to the text encoding\n",
+ " RANGE = list_size\n",
+ " separator = '|'\n",
+ " if newline_Separator : separator = separator + '\\n'\n",
+ "\n",
+ " _prompts = ''\n",
+ " _sims = ''\n",
+ " for _index in range(start_at_index + RANGE):\n",
+ "  if _index < start_at_index : continue\n",
+ "  index = indices[_index].item()\n",
+ "\n",
+ "  prompt = prompts[f'{index}']\n",
+ "  if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
+ "\n",
+ "  #Remove duplicates\n",
+ "  if _prompts.find(prompt + separator)<=-1:\n",
+ "   _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
+ "   #-------#\n",
+ "   _prompts = _prompts.replace(prompt + separator,'')\n",
+ "   _prompts = _prompts + prompt + separator\n",
+ "  #------#\n",
+ " #------#\n",
+ " _prompts = fix_bad_symbols(_prompts)\n",
+ " __prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
+ " __sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
+ " #------#\n",
+ "\n",
+ " if(not print_Prompts): __prompts = ''\n",
+ " if(not print_Similarity): __sims = ''\n",
+ "\n",
+ " if(not compact_Output):\n",
+ "  if(print_Descriptions):\n",
+ "   print(f'The {start_at_index}-{start_at_index + RANGE} most similar items to prompt : \\n\\n ')\n",
+ "   for i in range(N) : print(__prompts)\n",
+ "   print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
+ "   print('')\n",
+ "  else:\n",
+ "   for i in range(N) : print(__prompts)\n",
+ " else:\n",
+ "  for i in range(N) : print(__prompts)\n",
+ " #-------#\n",
+ " #-------#\n",
+ "#-------#\n",
+ "image\n"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "XW3914T8O2uf"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title ⚙️📝 Print the results (Advanced)\n",
+ "list_size = 1000 # @param {type:'number'}\n",
+ "start_at_index = 0 # @param {type:'number'}\n",
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
+ "print_Prompts = True # @param {type:\"boolean\"}\n",
+ "print_Descriptions = True # @param {type:\"boolean\"}\n",
+ "compact_Output = True # @param {type:\"boolean\"}\n",
+ "newline_Separator = False # @param {type:\"boolean\"}\n",
+ "\n",
+ "import random\n",
+ "# @markdown -----------\n",
+ "# @markdown Mix with...\n",
+ "list_size2 = 1000 # @param {type:'number'}\n",
+ "start_at_index2 = 10000 # @param {type:'number'}\n",
+ "rate_percent = 0 # @param {type:\"slider\", min:0, max:100, step:1}\n",
+ "\n",
+ "# @markdown -----------\n",
+ "# @markdown Repeat output N times\n",
+ "N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
+ "\n",
+ "# title Show the 100 most similar suffix and prefix text-encodings to the text encoding\n",
+ "RANGE = list_size\n",
+ "separator = '|'\n",
+ "if newline_Separator : separator = separator + '\\n'\n",
+ "\n",
+ "_prompts = ''\n",
+ "_sims = ''\n",
+ "for _index in range(start_at_index + RANGE):\n",
+ " if _index < start_at_index : continue\n",
+ " index = indices[_index].item()\n",
+ "\n",
+ " prompt = prompts[f'{index}']\n",
+ " if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
+ "\n",
+ " #Remove duplicates\n",
+ " if _prompts.find(prompt + separator)<=-1:\n",
+ "  _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
+ "  #-------#\n",
+ "  _prompts = _prompts.replace(prompt + separator,'')\n",
+ "  _prompts = _prompts + prompt + separator\n",
+ " #------#\n",
+ "#------#\n",
+ "_prompts = fix_bad_symbols(_prompts)\n",
+ "__prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
+ "__sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
+ "#------#\n",
+ "\n",
+ "if(not print_Prompts): __prompts = ''\n",
+ "if(not print_Similarity): __sims = ''\n",
+ "\n",
+ "if(not compact_Output):\n",
+ " if(print_Descriptions):\n",
+ "  print(f'The {start_at_index}-{start_at_index + RANGE} most similar items to prompt : \\n\\n ')\n",
+ "  for i in range(N) : print(__prompts)\n",
+ "  print(f'The {start_at_index}-{start_at_index + RANGE} similarity % for items : \\n\\n' + __sims)\n",
+ "  print('')\n",
+ " else:\n",
+ "  for i in range(N) : print(__prompts)\n",
+ "else:\n",
+ " for i in range(N) : print(__prompts)\n",
+ "#-------#"
+ ],
+ "metadata": {
+ "cellView": "form",
+ "id": "EdBiAguJO9aX"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
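
Below is a minimal, self-contained sketch of the ranking step that the "⚄ Use a pre-encoded prompt + image pair" cell above performs, for anyone who wants to try the sim = C*text_enc + image_enc*(1-C) scoring outside the notebook. The random tensors are stand-ins for the values the notebook loads from the codeShare/text-to-image-prompts dataset (text_tensor, text_features_A, image_features_A), and the logit_scale constant mirrors the CLIP value noted in that cell; only torch is assumed.

import torch

# Stand-ins for the pre-encoded 768-dim CLIP ViT-L/14 vectors the notebook loads
NUM_VOCAB_ITEMS = 1000
text_tensor = torch.nn.functional.normalize(torch.randn(NUM_VOCAB_ITEMS, 768), dim=-1)
text_features_A = torch.nn.functional.normalize(torch.randn(768), dim=-1)   # reference prompt encoding
image_features_A = torch.nn.functional.normalize(torch.randn(768), dim=-1)  # reference image encoding

C = 0.5              # text/image mixing weight, as set by the cell's slider
logit_scale = 100.0  # CLIP logit scale, applied to the image term as in the cell

# sim = C*text_enc + image_enc*(1-C), evaluated against every vocab item
sims = C * (text_tensor @ text_features_A)
sims = sims + (1 - C) * (text_tensor @ image_features_A) * logit_scale

# Rank the vocab items by similarity and show the ten best-matching indices
sorted_sims, indices = torch.sort(sims, descending=True)
print(indices[:10].tolist())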