#!/usr/bin/env python
"""
T5 counterpart of "generate-dict-embeddingsXL.py".

Reads single-token words from a file named "dictionary" and saves one
T5 encoder embedding per word to a safetensors file.
"""

outputfile = "embeddingsT5.temp.safetensors"

import sys
import torch
from safetensors.torch import save_file
from transformers import T5Tokenizer, T5EncoderModel

processor = None
tmodel = None
device = torch.device("cuda")


def initT5model():
    global processor, tmodel
    T = "mcmonkey/google_t5-v1_1-xxl_encoderonly"
    processor = T5Tokenizer.from_pretrained(T)
    tmodel = T5EncoderModel.from_pretrained(T).to(device)


def embed_from_text(text):
    global processor, tmodel
    # print("Word:" + text)
    tokens = processor(text, return_tensors="pt").to(device)

    # The tokenizer appends an end-of-sequence token, so a single-token word
    # yields input_ids of shape (1, 2). Anything longer means the word was
    # split into multiple tokens.
    if tokens.input_ids.shape[1] > 2:
        print("ERROR: expected single token per word")
        print(text)
        sys.exit(1)
    # We can only accept single-token words, because we want our output
    # to be a single embedding per word, and we don't have an official
    # way to merge multiple T5 embeddings into one, like CLIP does.

    with torch.no_grad():
        outputs = tmodel(tokens.input_ids)
    # last_hidden_state has shape (1, 2, 4096); take the embedding at
    # position 0 (the word token), giving a tensor of shape (4096,).
    embedding = outputs.last_hidden_state[0][0]
    return embedding


initT5model()

print("Reading in 'dictionary'")
with open("dictionary", "r") as f:
    tokendict = f.readlines()
tokendict = [token.strip() for token in tokendict]  # Remove trailing newlines

count = 0
all_embeddings = []
for word in tokendict:
    emb = embed_from_text(word)
    emb = emb.unsqueeze(0)  # add a batch dimension so torch.cat can stack them
    all_embeddings.append(emb)
    count += 1
    if (count % 100) == 0:
        print(count)

embs = torch.cat(all_embeddings, dim=0)
print("Shape of result = ", embs.shape)
if len(embs.shape) != 2:
    print("Sanity check: result is wrong shape: it won't work")

print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
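

# Illustrative sketch, not called by this script: one way downstream code
# might load the embeddings saved above. The function name and default path
# argument are assumptions for illustration; only the "embeddings" key is
# taken from the save_file() call above.
def load_dict_embeddings(path=outputfile):
    from safetensors.torch import load_file
    data = load_file(path)
    return data["embeddings"]  # expected shape: (num_words, 4096)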