Upload indexed_text_encoding_converter.ipynb
Google Colab Notebooks/indexed_text_encoding_converter.ipynb
CHANGED
@@ -16,13 +16,55 @@
     "accelerator": "GPU"
   },
   "cells": [
+    {
+      "cell_type": "markdown",
+      "source": [
+        "This notebook processes a JSON file of N items into chunks of 1000 items. The items are stored as a JSON + Safetensor pair. The name of the safestensor file is written within the JSON file at index 1"
+      ],
+      "metadata": {
+        "id": "T7pqzVAFcPoK"
+      }
+    },
     {
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
-        "id": "cskYkw0zXHEm"
+        "id": "cskYkw0zXHEm",
+        "outputId": "40cd8c83-6619-4acf-f720-c4126b9d99e5",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        }
       },
-      "outputs": [
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "/content\n",
+            "/content\n",
+            "/content/text-to-image-prompts/suffix_pairs/raw\n",
+            "0\n",
+            "100\n",
+            "200\n",
+            "300\n",
+            "400\n",
+            "500\n",
+            "600\n",
+            "700\n",
+            "800\n",
+            "900\n",
+            "/content/output/suffix_pairs/text\n",
+            "Saving segment suffix_pairs-1.json to /content/output/suffix_pairs/text/...\n",
+            "/content/output/suffix_pairs/text_encodings\n",
+            "Saving segment suffix_pairs-1.safetensors to /content/output/suffix_pairs/text_encodings/...\n",
+            "0\n",
+            "100\n",
+            "200\n",
+            "300\n",
+            "400\n"
+          ]
+        }
+      ],
       "source": [
         "# @title Make your own text_encodings .safetensor file for later use (using GPU is recommended to speed things up)\n",
         "\n",
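The new markdown cell documents the on-disk layout: each 1000-item segment is saved as a JSON/safetensors pair, and the JSON entry at index 1 names its companion safetensors file (the cell output above shows suffix_pairs-1.json and suffix_pairs-1.safetensors being written). Below is a minimal sketch of reading such a pair back; the helper name, the exact key layout, and whether the stored name already carries the .safetensors extension are assumptions, not part of the notebook.

```python
# Sketch: read back one segment pair produced by this notebook.
# Assumption: the segment JSON maps string indices to items, and index "1"
# holds the name of the matching .safetensors file (per the markdown cell).
import json
from safetensors.torch import load_file

def load_segment(json_path, encodings_folder):
    with open(json_path, 'r', encoding='utf-8') as f:
        items = json.load(f)
    # Index "1" is documented to name the companion safetensors file;
    # whether it already includes the .safetensors extension is an assumption.
    name = items['1']
    if not name.endswith('.safetensors'):
        name = name + '.safetensors'
    encodings = load_file(f'{encodings_folder}/{name}')
    return items, encodings

# Example, using the output folders printed by the cell above:
# items, encodings = load_segment(
#     '/content/output/suffix_pairs/text/suffix_pairs-1.json',
#     '/content/output/suffix_pairs/text_encodings')
```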
@@ -43,10 +85,10 @@
         "#-------#\n",
         "\n",
         "# User input\n",
-        "target = home_directory + 'text-to-image-prompts/
-        "output_folder = home_directory + 'output/
-        "root_filename = '
-        "NUM_FILES =
+        "target = home_directory + 'text-to-image-prompts/suffix_pairs/'\n",
+        "output_folder = home_directory + 'output/suffix_pairs/'\n",
+        "root_filename = 'suffix_pairs'\n",
+        "NUM_FILES = 1\n",
         "#--------#\n",
         "\n",
         "# Setup environment\n",
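These four user inputs drive the rest of the cell. Going by the directories printed in the output (.../suffix_pairs/raw for input, .../output/suffix_pairs/text and .../text_encodings for results), the derived paths look roughly like the sketch below; only target_raw is visible later in the diff, and home_directory plus the other derived names are assumptions.

```python
import os

# Hypothetical reconstruction of how the user inputs expand into working paths.
# home_directory and the derived folder names (other than target_raw, which
# appears later in the diff) are assumptions based on the printed output.
home_directory = '/content/'
target = home_directory + 'text-to-image-prompts/suffix_pairs/'
output_folder = home_directory + 'output/suffix_pairs/'
root_filename = 'suffix_pairs'
NUM_FILES = 1

target_raw = target + 'raw/'                          # input JSON is read from here
output_text = output_folder + 'text/'                 # segment JSON files are written here
output_encodings = output_folder + 'text_encodings/'  # segment .safetensors files are written here

for folder in (output_text, output_encodings):
    os.makedirs(folder, exist_ok=True)
```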
@@ -82,6 +124,7 @@
         "for file_index in range(NUM_FILES + 1):\n",
         " if (file_index < 1): continue\n",
         " filename = f'{root_filename}-{file_index}'\n",
+        " if (NUM_FILES == 1) : filename = f'{root_filename}'\n",
         "\n",
         " # Read {filename}.json\n",
         " %cd {target_raw}\n",
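The line added in this hunk changes how input filenames are resolved: segments are normally read as {root_filename}-{file_index}.json, but when NUM_FILES is 1 the bare root name is used instead. A standalone sketch of that selection logic, with the helper name being an illustrative invention:

```python
# Sketch of the filename selection introduced by the added line: numbered
# segment names for multi-file inputs, the bare root name when NUM_FILES == 1.
def input_filenames(root_filename, num_files):
    names = []
    for file_index in range(num_files + 1):
        if file_index < 1:
            continue
        filename = f'{root_filename}-{file_index}'
        if num_files == 1:
            filename = f'{root_filename}'
        names.append(filename)
    return names

print(input_filenames('suffix_pairs', 1))  # ['suffix_pairs']
print(input_filenames('suffix_pairs', 3))  # ['suffix_pairs-1', 'suffix_pairs-2', 'suffix_pairs-3']
```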