ppbrown committed
Commit b02c223
1 Parent(s): 9428212

Upload 5 files


Tools to generate a set of comparison embedding files

openclip/calculate-distances-open.py ADDED
@@ -0,0 +1,94 @@
+ #!/bin/env python
+
+ """
+ Plan:
+ Read in "dictionary" for list of words
+ Read in pre-calculated "proper" embedding for each word from safetensors file
+ Prompt user for a word from the list
+ Generate a tensor array of distances to all the other known words
+ Print out the 20 closest ones
+ """
+
+
+ import sys
+ import torch
+ import open_clip
+
+ from safetensors import safe_open
+
+ #from transformers import CLIPProcessor,CLIPModel
+
+ device=torch.device("cuda")
+
+ mtype='ViT-B-32'
+ mname='laion2b_s34b_b79k'
+
+ print("Loading",mtype,mname)
+
+ cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+                                                               pretrained=mname)
+ tokenizer = open_clip.get_tokenizer(mtype)
+
+ ## model = model.to(device)
+
+
+ #embed_file="embeddings.safetensors"
+ embed_file=sys.argv[1]
+ dictionary=sys.argv[2]
+
+
+ print(f"read in words from {dictionary} now",file=sys.stderr)
+ with open(dictionary,"r") as f:
+     tokendict = f.readlines()
+ wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
+ print(len(wordlist),"lines read")
+
+ print(f"read in {embed_file} now",file=sys.stderr)
+ emodel = safe_open(embed_file,framework="pt",device="cuda")
+ embs = emodel.get_tensor("embeddings")
+ embs = embs.to(device)  # .to() returns a new tensor, so assign the result back
+ print("Shape of loaded embeds =",embs.shape)
+
+ # Encode a single word with the (CPU) text encoder and return its embedding.
+ def standard_embed_calc(text):
+     with torch.no_grad():
+         ttext = tokenizer(text)
+         text_features = cmodel.encode_text(ttext)
+         embedding = text_features[0]
+         #print("shape of text is",ttext.shape)
+     return embedding
+
+
+ # Print the 20 cached words whose embeddings are closest (L2) to targetemb.
+ def print_distances(targetemb):
+     targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
+
+     print("shape of distances...",targetdistances.shape)
+
+     smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
+
+     smallest_distances = smallest_distances.tolist()
+     smallest_indices = smallest_indices.tolist()
+     for d,i in zip(smallest_distances,smallest_indices):
+         print(wordlist[i],"(",d,")")
+
+
+
+ # Find the 20 closest tokens to targetword.
+ # Will include the word itself
+ def find_closest(targetword):
+     try:
+         targetindex = wordlist.index(targetword)
+         targetemb = embs[targetindex]
+         print_distances(targetemb)
+         return
+     except ValueError:
+         print(targetword,"not found in cache")
+
+     print("Now doing with full calc embed")
+     # The freshly computed embedding lives on the CPU, so move it to the same
+     # device as the cached embeddings before measuring distances.
+     targetemb = standard_embed_calc(targetword).to(device)
+     print_distances(targetemb)
+
+
+ while True:
+     input_text = input("Input a word now:")
+     find_closest(input_text)
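The core of the lookup above is a single torch.cdist call followed by torch.topk. A minimal, self-contained sketch of that pattern on toy tensors (the sizes and variable names here are illustrative only, not taken from the script):

import torch

# Toy stand-ins: 5 cached word embeddings of width 512, plus one query embedding.
embs = torch.randn(5, 512)
query = torch.randn(512)

# Euclidean (p=2) distance from the query to every cached embedding -> shape [1, 5]
distances = torch.cdist(query.unsqueeze(0), embs, p=2)

# The k smallest distances and the indices of the rows they belong to
values, indices = torch.topk(distances[0], k=3, largest=False)
for d, i in zip(values.tolist(), indices.tolist()):
    print(i, round(d, 4))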
openclip/datafiles.txt ADDED
@@ -0,0 +1,7 @@
+
+ laion/CLIP-ViT-B-32-laion2B-s34B-b79K
+ laion/CLIP-ViT-L-14-laion2B-s32B-b82K
+ laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+ laion/CLIP-ViT-g-14-laion2B-s12B-b42K
+ laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
+
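These appear to be the Hugging Face repositories for the same LAION checkpoints that the scripts reference through their mtype/mname pairs. If you prefer to load one of them straight from the hub, open_clip's hf-hub: prefix should work; a sketch using the first entry (not part of the committed scripts):

import open_clip

# Assumes open_clip's hf-hub: loader; the repo name is taken from datafiles.txt.
model, _, preprocess = open_clip.create_model_and_transforms(
    "hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
tokenizer = open_clip.get_tokenizer("hf-hub:laion/CLIP-ViT-B-32-laion2B-s34B-b79K")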
openclip/dictionary ADDED
The diff for this file is too large to render. See raw diff
 
openclip/generate-embeddings-open.py ADDED
@@ -0,0 +1,68 @@
+ #!/bin/env python
+
+ """ Work in progress
+ Plan:
+ Read in "dictionary" for list of words and tokens
+ Generate "proper" embedding for each token, and store in tensor file
+ Generate a tensor array of distance to every other token/embedding
+ Save it out to "embeddings.safetensors"
+ """
+
+
+ import sys
+ import torch
+ import open_clip
+ from safetensors.torch import save_file
+
+ #mtype='ViT-B-32'
+ #mname='laion2b_s34b_b79k'
+ #mtype='ViT-g-14'
+ #mname='laion2b_s12b_b42k'
+ mtype='ViT-H-14'
+ mname='laion2b_s32b_b79k'
+
+ print("Loading",mtype,mname)
+
+ cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+                                                               pretrained=mname)
+ tokenizer = open_clip.get_tokenizer(mtype)
+
+
+
+ # Encode a single word with the text encoder and return its embedding.
+ def standard_embed_calc(text):
+     with torch.no_grad():
+         ttext = tokenizer(text)
+         text_features = cmodel.encode_text(ttext)
+         #print("shape of text is",ttext.shape)
+
+         embedding = text_features[0]
+
+         #print("shape of embedding is",embedding.shape)
+         # For ViT-B, expected is [512]
+     return embedding
+
+
+ with open("dictionary","r") as f:
+     tokendict = f.readlines()
+ tokendict = [token.strip() for token in tokendict]  # Remove trailing newlines
+
+ print("generate embeddings for each now",file=sys.stderr)
+ count=1
+ all_embeddings = []
+ for word in tokendict:
+     emb = standard_embed_calc(word)
+     emb = emb.unsqueeze(0)  # add a leading batch dim so torch.cat can stack them
+     all_embeddings.append(emb)
+     count+=1
+     if (count % 100) == 0:
+         print(count)
+
+ embs = torch.cat(all_embeddings,dim=0)
+ print("Shape of result = ",embs.shape)
+ print("Saving all the things...")
+ save_file({"embeddings": embs}, "embeddings.safetensors")
+
+
+ print("calculate distances now")
+
+
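A quick sanity check of the embeddings.safetensors file this script writes, assuming it is run from the same directory (the companion calculate-distances-open.py above does the full nearest-word lookup):

from safetensors import safe_open

with safe_open("embeddings.safetensors", framework="pt", device="cpu") as f:
    embs = f.get_tensor("embeddings")
print(embs.shape)  # one row per dictionary word; width 1024 for ViT-H-14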
openclip/modeltypes.txt ADDED
@@ -0,0 +1,2 @@
+ ['coca_base', 'coca_roberta-ViT-B-32', 'coca_ViT-B-32', 'coca_ViT-L-14', 'convnext_base', 'convnext_base_w', 'convnext_base_w_320', 'convnext_large', 'convnext_large_d', 'convnext_large_d_320', 'convnext_small', 'convnext_tiny', 'convnext_xlarge', 'convnext_xxlarge', 'convnext_xxlarge_320', 'EVA01-g-14', 'EVA01-g-14-plus', 'EVA02-B-16', 'EVA02-E-14', 'EVA02-E-14-plus', 'EVA02-L-14', 'EVA02-L-14-336', 'mt5-base-ViT-B-32', 'mt5-xl-ViT-H-14', 'nllb-clip-base', 'nllb-clip-base-siglip', 'nllb-clip-large', 'nllb-clip-large-siglip', 'RN50', 'RN50-quickgelu', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'RN101-quickgelu', 'roberta-ViT-B-32', 'swin_base_patch4_window7_224', 'ViT-B-16', 'ViT-B-16-plus', 'ViT-B-16-plus-240', 'ViT-B-16-quickgelu', 'ViT-B-16-SigLIP', 'ViT-B-16-SigLIP-256', 'ViT-B-16-SigLIP-384', 'ViT-B-16-SigLIP-512', 'ViT-B-16-SigLIP-i18n-256', 'ViT-B-32', 'ViT-B-32-256', 'ViT-B-32-plus-256', 'ViT-B-32-quickgelu', 'ViT-bigG-14', 'ViT-bigG-14-CLIPA', 'ViT-bigG-14-CLIPA-336', 'ViT-e-14', 'ViT-g-14', 'ViT-H-14', 'ViT-H-14-378-quickgelu', 'ViT-H-14-CLIPA', 'ViT-H-14-CLIPA-336', 'ViT-H-14-quickgelu', 'ViT-H-16', 'ViT-L-14', 'ViT-L-14-280', 'ViT-L-14-336', 'ViT-L-14-CLIPA', 'ViT-L-14-CLIPA-336', 'ViT-L-14-quickgelu', 'ViT-L-16', 'ViT-L-16-320', 'ViT-L-16-SigLIP-256', 'ViT-L-16-SigLIP-384', 'ViT-M-16', 'ViT-M-16-alt', 'ViT-M-32', 'ViT-M-32-alt', 'ViT-S-16', 'ViT-S-16-alt', 'ViT-S-32', 'ViT-S-32-alt', 'ViT-SO400M-14-SigLIP', 'ViT-SO400M-14-SigLIP-384', 'vit_medium_patch16_gap_256', 'vit_relpos_medium_patch16_cls_224', 'xlm-roberta-base-ViT-B-32', 'xlm-roberta-large-ViT-H-14']
+
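This list reads like the output of open_clip.list_models(). To regenerate it, or to see which pretrained tags exist for each architecture, something along these lines should work (a sketch):

import open_clip

print(open_clip.list_models())      # all architecture names known to the installed open_clip
print(open_clip.list_pretrained())  # (architecture, pretrained_tag) pairs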