#!/usr/bin/env python
""" Work in progress
Plan:
Read in "dictionary" for list of works and token
Generate "proper" embedding for each token, and store in tensor file
Generate a tensor array of distance to every other token/embedding
Save it out to "embeddings.safetensors"
"""
import sys
import torch
import open_clip
from safetensors.torch import save_file
outfile="out.safetensors"
"""
REMEMBER!!!
You MUST use the same settings when you READ from the output file as well!!
"""
#mtype='ViT-B-32'
#mname='laion2b_s34b_b79k'
#mtype='ViT-g-14'
#mname='laion2b_s12b_b42k'
#mtype='ViT-H-14'
#mname='laion2b_s32b_b79k'
mtype='ViT-L-14'
mname='laion2b_s32b_b82k'
#### Warning: the quickgelu set below requires more than 4GB VRAM
#mtype='ViT-H-14-quickgelu'
#mname='dfn5b'
# May also be able to use the syntax
#   hf-hub:hf-internal-testing/tiny-open-clip-model
# for mname
print("Loading",mtype,mname)
cmodel, _, preprocess = open_clip.create_model_and_transforms(
mtype,
pretrained=mname)
tokenizer = open_clip.get_tokenizer(mtype)
# Try the GPU first, and fall back to CPU if the model does not fit.
# OOM here is very rare... unless you are trying to load the quickgelu sets
# on a 4GB card. Or maybe have 2 things running.
device = torch.device("cuda")
try:
    cmodel.to(device)
except torch.cuda.OutOfMemoryError as e:
    print(f"FALLING BACK TO CPU!! \n {e}")
    device = torch.device("cpu")
    cmodel.to(device)
def standard_embed_calc(text):
    """Return the text-encoder embedding for a single word/phrase."""
    with torch.no_grad():
        ttext = tokenizer(text).to(device)
        text_features = cmodel.encode_text(ttext)
    # encode_text returns a batch; we tokenized a single string, so take row 0.
    # For ViT-B the expected shape is [512]; for ViT-L it is [768].
    embedding = text_features[0]
    return embedding
# "dictionary" is expected to be a plain text file, one word/token per line
with open("dictionary", "r") as f:
    tokendict = [line.strip() for line in f]  # strip trailing newlines
print("generate embeddings for each now",file=sys.stderr)
all_embeddings = []
for count, word in enumerate(tokendict, start=1):
    emb = standard_embed_calc(word)
    emb = emb.unsqueeze(0)  # add a batch dim so torch.cat can stack to [N, dim]
    all_embeddings.append(emb)
    if count % 100 == 0:
        print(count)  # progress indicator
embs = torch.cat(all_embeddings,dim=0)
print("Shape of result = ",embs.shape)
print("Saving all the things...")
save_file({"embeddings": embs}, outfile)
print("calculate distances now")