#!/bin/env python
"""
CONCEPT:
Load in a precalculated embeddings file of all the tokenids (0-49405)
(see "generate-allid-embeddings[XL].py")
For each dimension, calculate which tokenid has the highest value.
Print out list, keyed by dimension.
In theory, this should auto-adjust, whether the embeddings file
is SD, or SDXL (clip_l or clip_g)
"""
import sys
import json
import torch
from safetensors import safe_open


def _load_token_names(vocab_path):
    """Return {token_id: token_string} loaded from a vocab JSON file.

    The JSON maps token string -> id; we invert it so results can be
    printed by id.
    """
    with open(vocab_path, "r") as file:
        json_data = json.load(file)
    return {v: k for k, v in json_data.items()}


def main():
    """For each embedding dimension, print the id of that dimension and the
    token whose embedding has the highest value in it.

    Usage: script.py <embeddings.safetensors> <vocab.json>
    """
    if len(sys.argv) != 3:
        # Original crashed with IndexError on missing args; fail cleanly instead.
        print(f"Usage: {sys.argv[0]} <embeddings.safetensors> <vocab.json>",
              file=sys.stderr)
        sys.exit(1)
    file1 = sys.argv[1]
    file2 = sys.argv[2]

    print(f"reading in json from {file2} now", file=sys.stderr)
    token_names = _load_token_names(file2)

    # Fall back to CPU so the script still runs on machines without CUDA
    # (the original hard-required a GPU).
    device = "cuda" if torch.cuda.is_available() else "cpu"

    print(f"reading {file1} embeddings now", file=sys.stderr)
    model = safe_open(file1, framework="pt", device=device)
    # BUGFIX: Tensor.to() is not in-place; the original discarded its result.
    # safe_open already loads onto `device`, but keep the explicit move bound.
    embs1 = model.get_tensor("embeddings").to(device)
    print("Shape of loaded embeds =", embs1.shape)

    print("calculating distances...", file=sys.stderr)
    # argmax over dim 0 (the token axis): for each embedding dimension,
    # the id of the token with the largest value in that dimension.
    indices = torch.argmax(embs1, dim=0)
    print("Shape of results=", indices.shape, file=sys.stderr)

    for counter, token_num in enumerate(indices.tolist()):
        # .get() preserves the original behavior of printing None for
        # ids absent from the vocab file.
        print(counter, token_names.get(token_num))


if __name__ == "__main__":
    main()