codeShare committed
Commit
3d64a03
1 Parent(s): 45a2684

Upload token_vectors_math.ipynb

Google Colab Notebooks/token_vectors_math.ipynb ADDED
@@ -0,0 +1,508 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "code",
19
+ "source": [
20
+ "# NOTE : although they have 1x768 dimension , these are not text_encodings , but token vectors\n",
21
+ "import json\n",
22
+ "import pandas as pd\n",
23
+ "import os\n",
24
+ "import shelve\n",
25
+ "import torch\n",
26
+ "from safetensors.torch import save_file , load_file\n",
27
+ "import json\n",
28
+ "\n",
29
+ "home_directory = '/content/'\n",
30
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
31
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
32
+ "%cd {home_directory}\n",
33
+ "#-------#\n",
34
+ "\n",
35
+ "# Load the data if not already loaded\n",
36
+ "try:\n",
37
+ " loaded\n",
38
+ "except:\n",
39
+ " %cd {home_directory}\n",
40
+ " !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
41
+ " loaded = True\n",
42
+ "#--------#\n",
43
+ "\n",
44
+ "def getPrompts(_path, separator):\n",
45
+ " path = _path + '/text'\n",
46
+ " path_vec = _path + '/token_vectors'\n",
47
+ " _file_name = 'vocab'\n",
48
+ " #-----#\n",
49
+ " index = 0\n",
50
+ " file_index = 0\n",
51
+ " prompts = {}\n",
52
+ " text_encodings = {}\n",
53
+ " _text_encodings = {}\n",
54
+ " #-----#\n",
55
+ " for filename in os.listdir(f'{path}'):\n",
56
+ " print(f'reading {filename}....')\n",
57
+ " _index = 0\n",
58
+ " %cd {path}\n",
59
+ " with open(f'{filename}', 'r') as f:\n",
60
+ " data = json.load(f)\n",
61
+ " #------#\n",
62
+ " _df = pd.DataFrame({'count': data})['count']\n",
63
+ " _prompts = {\n",
64
+ " key : value for key, value in _df.items()\n",
65
+ " }\n",
66
+ " #-------#\n",
67
+ " %cd {path_vec}\n",
68
+ " _text_encodings = load_file(f'{_file_name}.safetensors')\n",
69
+ "\n",
70
+ " for key in _prompts:\n",
71
+ " _index = int(key)\n",
72
+ " value = _prompts[key]\n",
73
+ " #------#\n",
74
+ " #Read the text_encodings + prompts\n",
75
+ " text_encodings[f'{index}'] = _text_encodings[f'{_index}']\n",
76
+ " prompts[f'{index}'] = _prompts[f'{_index}'] + separator\n",
77
+ " index = index + 1\n",
78
+ " continue\n",
79
+ " #-------#\n",
80
+ " #--------#\n",
81
+ " #_text_encodings.close() #close the text_encodings file\n",
82
+ " file_index = file_index + 1\n",
83
+ " #----------#\n",
84
+ " NUM_ITEMS = index -1\n",
85
+ " return prompts , text_encodings , NUM_ITEMS\n",
86
+ "#--------#\n",
87
+ "\n",
88
+ "def append_from_url(dictA, tensA , nA , url , separator):\n",
89
+ " dictB , tensB, nB = getPrompts(url, separator)\n",
90
+ " dictAB = dictA\n",
91
+ " tensAB = tensA\n",
92
+ " nAB = nA\n",
93
+ " for key in dictB:\n",
94
+ " nAB = nAB + 1\n",
95
+ " dictAB[f'{nA + int(key)}'] = dictB[key]\n",
96
+ " tensAB[f'{nA + int(key)}'] = tensB[key]\n",
97
+ " #-----#\n",
98
+ " return dictAB, tensAB , nAB-1\n",
99
+ "#-------#"
100
+ ],
101
+ "metadata": {
102
+ "colab": {
103
+ "base_uri": "https://localhost:8080/"
104
+ },
105
+ "id": "V-1DrszLqEVj",
106
+ "outputId": "9b894182-a7e0-436e-9bf1-5a7d3d920ac7"
107
+ },
108
+ "execution_count": 5,
109
+ "outputs": [
110
+ {
111
+ "output_type": "stream",
112
+ "name": "stdout",
113
+ "text": [
114
+ "/content\n"
115
+ ]
116
+ }
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "source": [
122
+ "# @title Fetch the json + .safetensor pair\n",
123
+ "\n",
124
+ "#------#\n",
125
+ "vocab = {}\n",
126
+ "tokens = {}\n",
127
+ "nA = 0\n",
128
+ "#--------#\n",
129
+ "\n",
130
+ "if True:\n",
131
+ " url = '/content/text-to-image-prompts/vocab'\n",
132
+ " vocab , tokens, nA = append_from_url(vocab , tokens, nA , url , '')\n",
133
+ "#-------#\n",
134
+ "NUM_TOKENS = nA # NUM_TOKENS = 49407\n",
135
+ "#--------#\n",
136
+ "\n",
137
+ "print(NUM_TOKENS)"
138
+ ],
139
+ "metadata": {
140
+ "colab": {
141
+ "base_uri": "https://localhost:8080/"
142
+ },
143
+ "id": "EDCd1IGEqj3-",
144
+ "outputId": "bbaab5ab-4bd3-4766-ad44-f139a0ec7a02"
145
+ },
146
+ "execution_count": 12,
147
+ "outputs": [
148
+ {
149
+ "output_type": "stream",
150
+ "name": "stdout",
151
+ "text": [
152
+ "reading vocab.json....\n",
153
+ "/content/text-to-image-prompts/vocab/text\n",
154
+ "/content/text-to-image-prompts/vocab/token_vectors\n",
155
+ "49407\n"
156
+ ]
157
+ }
158
+ ]
159
+ },
160
+ {
161
+ "cell_type": "code",
162
+ "source": [
163
+ "vocab[f'{8922}']"
164
+ ],
165
+ "metadata": {
166
+ "colab": {
167
+ "base_uri": "https://localhost:8080/",
168
+ "height": 35
169
+ },
170
+ "id": "o9AfUKkvwUdG",
171
+ "outputId": "029e1148-056b-4040-da23-7ed6caaca878"
172
+ },
173
+ "execution_count": 19,
174
+ "outputs": [
175
+ {
176
+ "output_type": "execute_result",
177
+ "data": {
178
+ "text/plain": [
179
+ "'benedict</w>'"
180
+ ],
181
+ "application/vnd.google.colaboratory.intrinsic+json": {
182
+ "type": "string"
183
+ }
184
+ },
185
+ "metadata": {},
186
+ "execution_count": 19
187
+ }
188
+ ]
189
+ },
190
+ {
191
+ "cell_type": "code",
192
+ "source": [
193
+ "# @title Compare similiarity between tokens\n",
194
+ "\n",
195
+ "import torch\n",
196
+ "from transformers import AutoTokenizer\n",
197
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
198
+ "\n",
199
+ "# @markdown Write name of token to match against\n",
200
+ "token_name = \"banana\" # @param {type:'string',\"placeholder\":\"leave empty for random value token\"}\n",
201
+ "\n",
202
+ "prompt = token_name\n",
203
+ "# @markdown (optional) Mix the token with something else\n",
204
+ "mix_with = \"\" # @param {\"type\":\"string\",\"placeholder\":\"leave empty for random value token\"}\n",
205
+ "mix_method = \"None\" # @param [\"None\" , \"Average\", \"Subtract\"] {allow-input: true}\n",
206
+ "w = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
207
+ "# @markdown Limit char size of included token\n",
208
+ "\n",
209
+ "min_char_size = 0 # param {type:\"slider\", min:0, max: 50, step:1}\n",
210
+ "char_range = 50 # param {type:\"slider\", min:0, max: 50, step:1}\n",
211
+ "\n",
212
+ "tokenizer_output = tokenizer(text = prompt)\n",
213
+ "input_ids = tokenizer_output['input_ids']\n",
214
+ "id_A = input_ids[1]\n",
215
+ "A = torch.tensor(tokens[f'{id_A}'])\n",
216
+ "A = A/A.norm(p=2, dim=-1, keepdim=True)\n",
217
+ "#-----#\n",
218
+ "tokenizer_output = tokenizer(text = mix_with)\n",
219
+ "input_ids = tokenizer_output['input_ids']\n",
220
+ "id_C = input_ids[1]\n",
221
+ "C = torch.tensor(tokens[f'{id_C}'])\n",
222
+ "C = C/C.norm(p=2, dim=-1, keepdim=True)\n",
223
+ "#-----#\n",
224
+ "sim_AC = torch.dot(A,C)\n",
225
+ "#-----#\n",
226
+ "print(input_ids)\n",
227
+ "#-----#\n",
228
+ "\n",
229
+ "#if no imput exists we just randomize the entire thing\n",
230
+ "if (prompt == \"\"):\n",
231
+ " id_A = -1\n",
232
+ " print(\"Tokenized prompt tensor A is a random valued tensor with no ID\")\n",
233
+ " R = torch.rand(A.shape)\n",
234
+ " R = R/R.norm(p=2, dim=-1, keepdim=True)\n",
235
+ " A = R\n",
236
+ " name_A = 'random_A'\n",
237
+ "\n",
238
+ "#if no imput exists we just randomize the entire thing\n",
239
+ "if (mix_with == \"\"):\n",
240
+ " id_C = -1\n",
241
+ " print(\"Tokenized prompt 'mix_with' tensor C is a random valued tensor with no ID\")\n",
242
+ " R = torch.rand(A.shape)\n",
243
+ " R = R/R.norm(p=2, dim=-1, keepdim=True)\n",
244
+ " C = R\n",
245
+ " name_C = 'random_C'\n",
246
+ "\n",
247
+ "name_A = \"A of random type\"\n",
248
+ "if (id_A>-1):\n",
249
+ " name_A = vocab[f'{id_A}']\n",
250
+ "\n",
251
+ "name_C = \"token C of random type\"\n",
252
+ "if (id_C>-1):\n",
253
+ " name_C = vocab[f'{id_C}']\n",
254
+ "\n",
255
+ "print(f\"The similarity between A '{name_A}' and C '{name_C}' is {round(sim_AC.item()*100,2)} %\")\n",
256
+ "\n",
257
+ "if (mix_method == \"None\"):\n",
258
+ " print(\"No operation\")\n",
259
+ "\n",
260
+ "if (mix_method == \"Average\"):\n",
261
+ " A = w*A + (1-w)*C\n",
262
+ " _A = A.norm(p=2, dim=-1, keepdim=True)\n",
263
+ " print(f\"Tokenized prompt tensor A '{name_A}' token has been recalculated as A = w*A + (1-w)*C , where C is '{name_C}' token , for w = {w} \")\n",
264
+ "\n",
265
+ "if (mix_method == \"Subtract\"):\n",
266
+ " tmp = w*A - (1-w)*C\n",
267
+ " tmp = tmp/tmp.norm(p=2, dim=-1, keepdim=True)\n",
268
+ " A = tmp\n",
269
+ " #//---//\n",
270
+ " print(f\"Tokenized prompt tensor A '{name_A}' token has been recalculated as A = _A*norm(w*A - (1-w)*C) , where C is '{name_C}' token , for w = {w} \")\n",
271
+ "\n",
272
+ "#OPTIONAL : Add/subtract + normalize above result with another token. Leave field empty to get a random value tensor\n",
273
+ "\n",
274
+ "dots = torch.zeros(NUM_TOKENS)\n",
275
+ "for index in range(NUM_TOKENS):\n",
276
+ " id_B = index\n",
277
+ " B = torch.tensor(tokens[f'{id_B}'])\n",
278
+ " B = B/B.norm(p=2, dim=-1, keepdim=True)\n",
279
+ " sim_AB = torch.dot(A,B)\n",
280
+ " dots[index] = sim_AB\n",
281
+ "\n",
282
+ "\n",
283
+ "sorted, indices = torch.sort(dots,dim=0 , descending=True)\n",
284
+ "#----#\n",
285
+ "if (mix_method == \"Average\"):\n",
286
+ " print(f'Calculated all cosine-similarities between the average of token {name_A} and {name_C} with Id_A = {id_A} and mixed Id_C = {id_C} as a 1x{sorted.shape[0]} tensor')\n",
287
+ "if (mix_method == \"Subtract\"):\n",
288
+ " print(f'Calculated all cosine-similarities between the subtract of token {name_A} and {name_C} with Id_A = {id_A} and mixed Id_C = {id_C} as a 1x{sorted.shape[0]} tensor')\n",
289
+ "if (mix_method == \"None\"):\n",
290
+ " print(f'Calculated all cosine-similarities between the token {name_A} with Id_A = {id_A} with the the rest of the {NUM_TOKENS} tokens as a 1x{sorted.shape[0]} tensor')\n",
291
+ "\n",
292
+ "#Produce a list id IDs that are most similiar to the prompt ID at positiion 1 based on above result\n",
293
+ "\n",
294
+ "# @markdown Set print options\n",
295
+ "list_size = 100 # @param {type:'number'}\n",
296
+ "print_ID = False # @param {type:\"boolean\"}\n",
297
+ "print_Similarity = True # @param {type:\"boolean\"}\n",
298
+ "print_Name = True # @param {type:\"boolean\"}\n",
299
+ "print_Divider = True # @param {type:\"boolean\"}\n",
300
+ "\n",
301
+ "\n",
302
+ "if (print_Divider):\n",
303
+ " print('//---//')\n",
304
+ "\n",
305
+ "print('')\n",
306
+ "print('Here is the result : ')\n",
307
+ "print('')\n",
308
+ "\n",
309
+ "for index in range(list_size):\n",
310
+ " id = indices[index].item()\n",
311
+ " if (print_Name):\n",
312
+ " print(vocab[f'{id}']) # vocab item\n",
313
+ " if (print_ID):\n",
314
+ " print(f'ID = {id}') # IDs\n",
315
+ " if (print_Similarity):\n",
316
+ " print(f'similiarity = {round(sorted[index].item()*100,2)} %')\n",
317
+ " if (print_Divider):\n",
318
+ " print('--------')\n",
319
+ "\n",
320
+ "#Print the sorted list from above result\n",
321
+ "\n",
322
+ "#The prompt will be enclosed with the <|start-of-text|> and <|end-of-text|> tokens, which is why output will be [49406, ... , 49407].\n",
323
+ "\n",
324
+ "#You can leave the 'prompt' field empty to get a random value tensor. Since the tensor is random value, it will not correspond to any tensor in the vocab.json list , and this it will have no ID.\n",
325
+ "\n",
326
+ "# Save results as .db file\n",
327
+ "import shelve\n",
328
+ "VOCAB_FILENAME = 'tokens_most_similiar_to_' + name_A.replace('</w>','').strip()\n",
329
+ "d = shelve.open(VOCAB_FILENAME)\n",
330
+ "#NUM TOKENS == 49407\n",
331
+ "for index in range(NUM_TOKENS):\n",
332
+ " #print(d[f'{index}']) #<-----Use this to read values from the .db file\n",
333
+ " d[f'{index}']= vocab[f'{indices[index].item()}'] #<---- write values to .db file\n",
334
+ "#----#\n",
335
+ "d.close() #close the file\n",
336
+ "# See this link for additional stuff to do with shelve: https://docs.python.org/3/library/shelve.html"
337
+ ],
338
+ "metadata": {
339
+ "id": "ZwGqg9R5s1QS"
340
+ },
341
+ "execution_count": null,
342
+ "outputs": []
343
+ },
344
+ {
345
+ "cell_type": "markdown",
346
+ "source": [
347
+ "Below is code used to create the .safetensor + json files for the notebook"
348
+ ],
349
+ "metadata": {
350
+ "id": "dGb1KgP_p4_w"
351
+ }
352
+ },
353
+ {
354
+ "cell_type": "code",
355
+ "execution_count": 1,
356
+ "metadata": {
357
+ "colab": {
358
+ "base_uri": "https://localhost:8080/",
359
+ "height": 599
360
+ },
361
+ "id": "AyhYBlP2pYyI",
362
+ "outputId": "0168beb3-428c-4886-f159-adc479b9da4b"
363
+ },
364
+ "outputs": [
365
+ {
366
+ "output_type": "stream",
367
+ "name": "stdout",
368
+ "text": [
369
+ "/content\n",
370
+ "/content\n",
371
+ "Cloning into 'text-to-image-prompts'...\n",
372
+ "remote: Enumerating objects: 1552, done.\u001b[K\n",
373
+ "remote: Counting objects: 100% (1549/1549), done.\u001b[K\n",
374
+ "remote: Compressing objects: 100% (1506/1506), done.\u001b[K\n",
375
+ "remote: Total 1552 (delta 190), reused 0 (delta 0), pack-reused 3 (from 1)\u001b[K\n",
376
+ "Receiving objects: 100% (1552/1552), 9.09 MiB | 6.30 MiB/s, done.\n",
377
+ "Resolving deltas: 100% (190/190), done.\n",
378
+ "Updating files: 100% (906/906), done.\n",
379
+ "Filtering content: 100% (438/438), 1.49 GiB | 56.42 MiB/s, done.\n",
380
+ "/content\n",
381
+ "/content/text-to-image-prompts/vocab/raw\n",
382
+ "/content/text-to-image-prompts/vocab/raw\n"
383
+ ]
384
+ },
385
+ {
386
+ "output_type": "error",
387
+ "ename": "JSONDecodeError",
388
+ "evalue": "Expecting ':' delimiter: line 28 column 7 (char 569)",
389
+ "traceback": [
390
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
391
+ "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)",
392
+ "\u001b[0;32m<ipython-input-1-542fe0f58fcc>\u001b[0m in \u001b[0;36m<cell line: 56>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'cd'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'{target_raw}'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'{root_filename}.json'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 57\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mjson\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 58\u001b[0m \u001b[0m_df\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'count'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m \u001b[0;31m#reverse key and value in the dict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
393
+ "\u001b[0;32m/usr/lib/python3.10/json/__init__.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(fp, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 291\u001b[0m \u001b[0mkwarg\u001b[0m\u001b[0;34m;\u001b[0m \u001b[0motherwise\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mJSONDecoder\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mused\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 292\u001b[0m \"\"\"\n\u001b[0;32m--> 293\u001b[0;31m return loads(fp.read(),\n\u001b[0m\u001b[1;32m 294\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcls\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobject_hook\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobject_hook\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0mparse_float\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparse_float\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparse_int\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparse_int\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
394
+ "\u001b[0;32m/usr/lib/python3.10/json/__init__.py\u001b[0m in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0mparse_int\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mparse_float\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 345\u001b[0m parse_constant is None and object_pairs_hook is None and not kw):\n\u001b[0;32m--> 346\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_default_decoder\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdecode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 347\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcls\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 348\u001b[0m \u001b[0mcls\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mJSONDecoder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
395
+ "\u001b[0;32m/usr/lib/python3.10/json/decoder.py\u001b[0m in \u001b[0;36mdecode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 335\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 336\u001b[0m \"\"\"\n\u001b[0;32m--> 337\u001b[0;31m \u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraw_decode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m_w\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 338\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_w\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
396
+ "\u001b[0;32m/usr/lib/python3.10/json/decoder.py\u001b[0m in \u001b[0;36mraw_decode\u001b[0;34m(self, s, idx)\u001b[0m\n\u001b[1;32m 351\u001b[0m \"\"\"\n\u001b[1;32m 352\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 353\u001b[0;31m \u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mscan_once\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 354\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mJSONDecodeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Expecting value\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
397
+ "\u001b[0;31mJSONDecodeError\u001b[0m: Expecting ':' delimiter: line 28 column 7 (char 569)"
398
+ ]
399
+ }
400
+ ],
401
+ "source": [
402
+ "# @title Process the raw vocab into json + .safetensor pair\n",
403
+ "\n",
404
+ "# NOTE : although they have 1x768 dimension , these are not text_encodings , but token vectors\n",
405
+ "import json\n",
406
+ "import pandas as pd\n",
407
+ "import os\n",
408
+ "import shelve\n",
409
+ "import torch\n",
410
+ "from safetensors.torch import save_file , load_file\n",
411
+ "import json\n",
412
+ "\n",
413
+ "home_directory = '/content/'\n",
414
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
415
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
416
+ "%cd {home_directory}\n",
417
+ "#-------#\n",
418
+ "\n",
419
+ "# Load the data if not already loaded\n",
420
+ "try:\n",
421
+ " loaded\n",
422
+ "except:\n",
423
+ " %cd {home_directory}\n",
424
+ " !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
425
+ " loaded = True\n",
426
+ "#--------#\n",
427
+ "\n",
428
+ "# User input\n",
429
+ "target = home_directory + 'text-to-image-prompts/vocab/'\n",
430
+ "root_output_folder = home_directory + 'output/'\n",
431
+ "output_folder = root_output_folder + 'vocab/'\n",
432
+ "root_filename = 'vocab'\n",
433
+ "NUM_FILES = 1\n",
434
+ "#--------#\n",
435
+ "\n",
436
+ "# Setup environment\n",
437
+ "def my_mkdirs(folder):\n",
438
+ " if os.path.exists(folder)==False:\n",
439
+ " os.makedirs(folder)\n",
440
+ "#--------#\n",
441
+ "output_folder_text = output_folder + 'text/'\n",
443
+ "output_folder_token_vectors = output_folder + 'token_vectors/'\n",
444
+ "target_raw = target + 'raw/'\n",
445
+ "%cd {home_directory}\n",
446
+ "my_mkdirs(output_folder)\n",
447
+ "my_mkdirs(output_folder_text)\n",
448
+ "my_mkdirs(output_folder_token_vectors)\n",
449
+ "#-------#\n",
450
+ "\n",
451
+ "%cd {target_raw}\n",
452
+ "model = torch.load(f'{root_filename}.pt' , weights_only=True)\n",
453
+ "tokens = model.clone().detach()\n",
454
+ "\n",
455
+ "\n",
456
+ "%cd {target_raw}\n",
457
+ "with open(f'{root_filename}.json', 'r') as f:\n",
458
+ " data = json.load(f)\n",
459
+ "_df = pd.DataFrame({'count': data})['count']\n",
460
+ "#reverse key and value in the dict\n",
461
+ "vocab = {\n",
462
+ " value : key for key, value in _df.items()\n",
463
+ "}\n",
464
+ "#------#\n",
465
+ "\n",
466
+ "\n",
467
+ "tensors = {}\n",
468
+ "for key in vocab:\n",
469
+ " name = vocab[key]\n",
470
+ " token = tokens[int(key)]\n",
471
+ " tensors[key] = token\n",
472
+ "#-----#\n",
473
+ "\n",
474
+ "%cd {output_folder_token_vectors}\n",
475
+ "save_file(tensors, \"vocab.safetensors\")\n",
476
+ "\n",
477
+ "%cd {output_folder_text}\n",
478
+ "with open('vocab.json', 'w') as f:\n",
479
+ " json.dump(vocab, f)\n"
480
+ ]
481
+ },
482
+ {
483
+ "cell_type": "code",
484
+ "source": [
485
+ "# Determine if this notebook is running on Colab or Kaggle\n",
486
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
487
+ "home_directory = '/content/'\n",
488
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
489
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
490
+ "%cd {home_directory}\n",
491
+ "#-------#\n",
492
+ "\n",
493
+ "# @title Download the vocab as .zip\n",
494
+ "import os\n",
495
+ "%cd {home_directory}\n",
496
+ "#os.remove(f'{home_directory}results.zip')\n",
497
+ "root_output_folder = home_directory + 'output/'\n",
498
+ "zip_dest = f'{home_directory}results.zip'\n",
499
+ "!zip -r {zip_dest} '/content/text-to-image-prompts/tokens'"
500
+ ],
501
+ "metadata": {
502
+ "id": "9uIDf9IUpzh2"
503
+ },
504
+ "execution_count": null,
505
+ "outputs": []
506
+ }
507
+ ]
508
+ }
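
For a quick sanity check of the exported pair, here is a minimal sketch (assuming, as the loader cell above does, that vocab.json maps token-id strings to token names and vocab.safetensors stores one 1x768 token vector per id string; the id 8922 -> 'benedict</w>' lookup comes from the output shown above, the second id is an arbitrary example):

import json
import torch
from safetensors.torch import load_file

text_dir = '/content/text-to-image-prompts/vocab/text'
vec_dir = '/content/text-to-image-prompts/vocab/token_vectors'

# Load the id -> token-name map and the id -> 1x768 vector map
with open(f'{text_dir}/vocab.json', 'r') as f:
    vocab = json.load(f)
vectors = load_file(f'{vec_dir}/vocab.safetensors')

def cosine_similarity(id_a, id_b):
    # Cosine similarity between two token vectors, looked up by id
    a = vectors[f'{id_a}']
    b = vectors[f'{id_b}']
    a = a / a.norm(p=2, dim=-1, keepdim=True)
    b = b / b.norm(p=2, dim=-1, keepdim=True)
    return torch.dot(a, b).item()

# Example: compare two vocab entries by id
print(vocab['8922'], vocab['8923'], f'{round(cosine_similarity(8922, 8923) * 100, 2)} %')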