#!/usr/bin/env python

"""
Purpose:
   Read in "dictionary" for list of works and token
   Generate "proper" embedding for each token, and store in tensor file
   Generate a tensor array of distance to every other token/embedding
   Save it out to "[email protected]"
Warning:
    Some models require more VRAM than others.
    Some require more RAM than others.
"""


import sys
import argparse

import torch
import open_clip
from safetensors.torch import save_file



"""
    REMEMBER!!!
    You MUST use the same settings when you READ from the output file as well!!
"""

# See "list_models.txt" for full combination sets

#mtype='ViT-L-14-336'
mtype='ViT-L-14'
mname='openai'

parser = argparse.ArgumentParser(
                    prog='generate-embeddings',
                    epilog=f"defaults: mtype={mtype}, mname={mname}",
                    description='Read in "dictionary" wordlist and generate calculated embeddings')
parser.add_argument('--mtype',default=mtype)
parser.add_argument('--mname',default=mname)
args = parser.parse_args()

mtype=args.mtype
mname=args.mname
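
# Example invocation (hypothetical script name and model pair; see list_models.txt):
#   python generate-embeddings.py --mtype ViT-B-32 --mname laion2b_s34b_b79k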

#### Warning: this combination requires more than 4GB of VRAM
#mtype='ViT-H-14-quickgelu'
#mname='dfn5b'

# You may also be able to use the syntax
#   'hf-hub:hf-internal-testing/tiny-open-clip-model'
# for mname

outfile=f"{mtype}@{mname}.safetensors"
print("Will save to:")
print("  ",outfile)
print("Loading",mtype,mname)

cmodel, _, preprocess = open_clip.create_model_and_transforms(
        mtype,
        pretrained=mname)
tokenizer = open_clip.get_tokenizer(mtype)

# Prefer CUDA, but fall back to CPU if it is unavailable or we run out of memory.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

try:
    cmodel.to(device)
except torch.cuda.OutOfMemoryError as e:
    print(f"FALLING BACK TO CPU!! \n  {e}")
    device = torch.device("cpu")
    cmodel.to(device)
    # This is very rare... unless you are trying to load the quickgelu sets
    # on a 4GB card, or have two things running at once.


def standard_embed_calc(text):
    """Tokenize a single string and return its text embedding."""
    with torch.no_grad():
        ttext = tokenizer(text).to(device)
        text_features = cmodel.encode_text(ttext)

    # encode_text returns a batch of embeddings; we passed in a single
    # string, so take row 0.
    embedding = text_features[0]
    # For ViT-B the expected shape is [512]; for ViT-L it is [768].
    return embedding


# The "dictionary" file is expected to contain one word/token per line.
with open("dictionary", "r") as f:
    tokendict = [token.strip() for token in f.readlines()]  # remove trailing newlines

print("generate embeddings for each now",file=sys.stderr)
count=1
all_embeddings = []
for word in tokendict:
    emb = standard_embed_calc(word)
    emb=emb.unsqueeze(0) # stupid matrix magic to make the cat work
    all_embeddings.append(emb)
    count+=1
    if (count %100) ==0:
        print(count)

embs = torch.cat(all_embeddings,dim=0)
print("Shape of result = ",embs.shape)
print("Saving to ",outfile)
save_file({"embeddings": embs}, outfile)


print("calculate distances now")