# tokenspace / generate-allid-toptokens.py
# Author: ppbrown  (commit 195f981)
# Util to find the top token in each dimension
#!/bin/env python
"""
CONCEPT:
Load in a precalculated embeddings file of all the tokenids (0-49405)
(see "generate-allid-embeddings[XL].py")
For each dimension, calculate which tokenid has the highest value.
Print out list, keyed by dimension.
In theory, this should auto-adjust, whether the embeddings file
is SD, or SDXL (clip_l or clip_g)
"""
import sys
import json
import torch
from safetensors import safe_open
# Usage: generate-allid-toptokens.py <embeddings.safetensors> <vocab.json>
if len(sys.argv) < 3:
    print("Usage: generate-allid-toptokens.py <embeddings.safetensors> <vocab.json>",
          file=sys.stderr)
    sys.exit(1)

file1 = sys.argv[1]  # safetensors file containing an "embeddings" tensor
file2 = sys.argv[2]  # JSON vocab file mapping token-string -> token-id

print(f"reading in json from {file2} now", file=sys.stderr)
with open(file2, "r") as file:
    json_data = json.load(file)

# Invert the vocab so we can look up a token string by its numeric id.
token_names = {v: k for k, v in json_data.items()}
#print(token_names)

# Fall back to CPU when no GPU is present (original hard-coded "cuda",
# which crashes on CPU-only machines).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print(f"reading {file1} embeddings now", file=sys.stderr)
model = safe_open(file1, framework="pt", device=str(device))
embs1 = model.get_tensor("embeddings")
# BUG FIX: Tensor.to() returns a new tensor; the original discarded
# the result, making the transfer a no-op.
embs1 = embs1.to(device)
print("Shape of loaded embeds =", embs1.shape)

print("calculating distances...", file=sys.stderr)
# For each embedding dimension (column), find the token id (row) with
# the highest value.  Works for any embedding width, so it should
# auto-adjust between SD (clip_l) and SDXL (clip_g) files.
indices = torch.argmax(embs1, dim=0)
print("Shape of results=", indices.shape, file=sys.stderr)

# Print "<dimension> <token-name>" for every dimension.
# .get() returns None for ids absent from the vocab rather than raising.
for counter, token_num in enumerate(indices.tolist()):
    #print("num:",token_num)
    print(counter, token_names.get(token_num))