""" Work in progress |
|
Plan: |
|
Read in "dictionary" for list of works and token |
|
Generate "proper" embedding for each token, and store in tensor file |
|
Generate a tensor array of distance to every other token/embedding |
|
Save it out to "embeddings.safetensors" |
|
""" |
|
|
|
|
|
import sys |
|
import torch
import open_clip
from safetensors.torch import save_file

outfile="out.safetensors"
|
""" |
|
REMEMBER!!! |
|
You MUST use the same settings when you READ from the output file as well!! |
|
""" |
|
|
mtype='ViT-L-14'
mname='laion2b_s32b_b82k'

print("Loading",mtype,mname)
|
cmodel, _, preprocess = open_clip.create_model_and_transforms(
    mtype,
    pretrained=mname)
tokenizer = open_clip.get_tokenizer(mtype)

device=torch.device("cuda")

# Try the GPU first; fall back to CPU if the model does not fit
try:
    cmodel.to(device)
except torch.cuda.OutOfMemoryError as e:
    print(f"FALLING BACK TO CPU!! \n {e}")
    device=torch.device("cpu")
    cmodel.to(device)
|
def standard_embed_calc(text):
    # Tokenize the string and run it through the CLIP text encoder;
    # encode_text already returns its output on `device`
    with torch.no_grad():
        ttext = tokenizer(text).to(device)
        text_features = cmodel.encode_text(ttext)
    embedding = text_features[0]
    return embedding
|
with open("dictionary","r") as f: |
|
tokendict = f.readlines() |
|
tokendict = [token.strip() for token in tokendict] |
|
|
|
print("generate embeddings for each now",file=sys.stderr) |
|
count=1 |
|
all_embeddings = [] |
|
for word in tokendict: |
|
emb = standard_embed_calc(word) |
|
emb=emb.unsqueeze(0) |
|
all_embeddings.append(emb) |
|
count+=1 |
|
if (count %100) ==0: |
|
print(count) |
|
|
|
embs = torch.cat(all_embeddings,dim=0) |
|
print("Shape of result = ",embs.shape) |
|
print("Saving all the things...") |
|
save_file({"embeddings": embs}, outfile) |
|
|
|
|
|
print("calculate distances now") |
|
|
|
|
|
|