#!/usr/bin/env python
"""
(SDXL counterpart of "cliptextmodel-generate-embeddings.py".
Not following that name, because we dont use "cliptextmodel")
Take filenames of an SDXL clip-g type text_encoder2 and config file
Read in a wordlist from "dictionary"
Generate the official "embedding" tensor for each one.
Save the result set to "{outputfile}"
Defaults to loading openai/clip-vit-large-patch14 from huggingface hub,
for purposes of tokenizer, since thats what sdxl does anyway
RULES of the loader:
1. The text_encoder2 model file must appear to be either
in current directory or one down. So, do NOT use
badpath1=some/directory/tree/file.here
badpath2=/absolutepath
2. Yes, you MUST have a matching config.json file
3. if you have no safetensor alternative, you can get away with using pytorch_model.bin
Sample location for such things that you can download:
https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/
If there is a .safetensors AND a .bin file, ignore the .bin file
Alternatively, you can also convert a singlefile model, such as is downloaded from civitai,
by using the utility at
https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py
Args should look like
convert_original_stable_diffusion_to_diffusers.py \
--checkpoint_file somemodel.safetensors \
--dump_path extractdir --to_safetensors --from_safetensors
"""
outputfile="embeddingsXL.temp.safetensors"
import sys
import torch
from safetensors.torch import save_file
from transformers import CLIPProcessor, CLIPTextModelWithProjection
processor=None
tmodel2=None
model_path2=None
model_config2=None
if len(sys.argv) == 3:
    model_path2 = sys.argv[1]
    model_config2 = sys.argv[2]
else:
    print("You have to give the names of the model file and the config file")
    sys.exit(1)
device=torch.device("cuda")
def initXLCLIPmodel(model_path, model_config):
    global tmodel2, processor
    # yes, oddly they all use the same one, basically
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    print("loading", model_path)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(
        model_path, config=model_config, local_files_only=True, use_safetensors=True
    )
    tmodel2.to(device)
def embed_from_text2(text):
    global processor, tmodel2
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings
# "inputs" == magic pre-embedding format
def embed_from_inputs(inputs):
global processor,tmodel2
with torch.no_grad():
outputs = tmodel2(**inputs)
embedding = outputs.text_embeds
return embedding
initXLCLIPmodel(model_path2,model_config2)
# Template inputs; only used by the alternative token-id loop commented out below
inputs = processor(text="dummy", return_tensors="pt")
inputs.to(device)
with open("dictionary", "r") as f:
    tokendict = f.readlines()
tokendict = [token.strip() for token in tokendict]  # Remove trailing newlines
count=1
all_embeddings = []
for word in tokendict:
    emb = embed_from_text2(word)
    # emb = emb.unsqueeze(0)  # matrix magic to make torch.cat work
    all_embeddings.append(emb)
    count += 1
    if (count % 100) == 0:
        print(count)
"""
for id in range(49405):
inputs.input_ids[0][1]=id
emb=embed_from_inputs(inputs)
all_embeddings.append(emb)
if (id %100) ==0:
print(id)
"""
embs = torch.cat(all_embeddings,dim=0)
print("Shape of result = ",embs.shape)
if len(embs.shape) != 2:
    print("Sanity check: result is the wrong shape: it won't work")
print(f"Saving the calculatiuons to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
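# A minimal sketch of reading the result back later, assuming only the layout written
# above (a single "embeddings" key holding the (num_words, projection_dim) tensor):
#
#   from safetensors.torch import load_file
#   embs = load_file(outputfile)["embeddings"]
#   print(embs.shape)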