""" |
|
(T5 counterpart of "generate-dict-embeddingsXL.py". |
|
""" |
|
|
|
outputfile="embeddingsT5.temp.safetensors" |
|
|
|
import sys |
|
import torch |
|
from safetensors.torch import save_file |
|
from transformers import T5Tokenizer,T5EncoderModel |
|
|
|
processor=None |
|
tmodel=None |
|
|
|
device=torch.device("cuda") |
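
# Usage sketch (assumes a CUDA-capable GPU with enough VRAM for the
# T5-XXL encoder, and a "dictionary" file in the working directory;
# the script filename below is an assumption, not from the source):
#
#   python generate-dict-embeddingsT5.py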

def initT5model():
    # Load the tokenizer and the encoder-only T5-XXL model once,
    # keeping them in module-level globals so they can be reused
    # for every word in the dictionary.
    global processor, tmodel
    T = "mcmonkey/google_t5-v1_1-xxl_encoderonly"
    processor = T5Tokenizer.from_pretrained(T)
    tmodel = T5EncoderModel.from_pretrained(T).to(device)

def embed_from_text(text):
    global processor, tmodel

    tokens = processor(text, return_tensors="pt")
    tokens = tokens.to(device)

    # T5 tokenizes a single word as [word_id, EOS], so more than two
    # ids in the sequence means the word was split into sub-word pieces.
    if tokens.input_ids.shape[1] > 2:
        print("ERROR: expected single token per word")
        print(text)
        sys.exit(1)

    with torch.no_grad():
        outputs = tmodel(tokens.input_ids)

    # Hidden state of the first (word) token of the single batch entry.
    embedding = outputs.last_hidden_state[0][0]

    return embedding
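
# For a word that survives as a single token, embed_from_text returns
# a 1-D tensor of length d_model (4096 for T5-XXL), e.g.:
#
#   emb = embed_from_text("example")   # torch.Size([4096])
#
# ("example" is an illustrative input, not from the original script.)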

initT5model()

print("Reading in 'dictionary'")
with open("dictionary", "r") as f:
    tokendict = f.readlines()
tokendict = [token.strip() for token in tokendict]
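
# The "dictionary" file is expected to hold one word per line, e.g.:
#
#   cat
#   dog
#   house
#
# (Illustrative entries only; the actual file contents are not shown here.)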

count = 0
all_embeddings = []

for word in tokendict:
    emb = embed_from_text(word)
    emb = emb.unsqueeze(0)  # add a leading batch dim so rows can be concatenated
    all_embeddings.append(emb)
    count += 1
    if (count % 100) == 0:
        print(count)

embs = torch.cat(all_embeddings, dim=0)
print("Shape of result =", embs.shape)

if len(embs.shape) != 2:
    print("Sanity check: result is wrong shape: it won't work")
    sys.exit(1)

print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
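
# Optional round-trip check, a sketch added beyond the original flow:
# reload the file just written and confirm the tensor shape survives.
from safetensors.torch import load_file

reloaded = load_file(outputfile)["embeddings"]
print("Reloaded tensor shape:", reloaded.shape)  # should match embs.shape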