ppbrown committed on
Commit 7bc3de2
1 Parent(s): a689605

Upload 7 files

Browse files
openclip/calculate-vitb.py ADDED
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+"""
+Plan:
+Read in "dictionary" for list of words
+Read in pre-calculated "proper" embedding for each word from safetensor file
+Prompt user for a word from the list
+Generate a tensor array of distances to all the other known words
+Print out the 20 closest ones
+"""
+
+
+import sys
+import torch
+import open_clip
+
+from safetensors import safe_open
+
+#from transformers import CLIPProcessor,CLIPModel
+
+device=torch.device("cuda")
+
+mtype='ViT-B-32'
+mname='laion2b_s34b_b79k'
+
+print("Loading",mtype,mname)
+
+cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+        pretrained=mname)
+tokenizer = open_clip.get_tokenizer(mtype)
+
+## cmodel = cmodel.to(device)
+
+
+#embed_file="embeddings.safetensors"
+embed_file=sys.argv[1]
+dictionary=sys.argv[2]
+
+
+print(f"read in words from {dictionary} now",file=sys.stderr)
+with open(dictionary,"r") as f:
+    tokendict = f.readlines()
+wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
+print(len(wordlist),"lines read")
+
+print(f"read in {embed_file} now",file=sys.stderr)
+emodel = safe_open(embed_file,framework="pt",device="cuda")
+embs = emodel.get_tensor("embeddings")
+embs = embs.to(device)
+print("Shape of loaded embeds =",embs.shape)
+
+# Encode a word from scratch (used when it is not in the cached embeddings)
+def standard_embed_calc(text):
+    with torch.no_grad():
+        ttext = tokenizer(text)
+        text_features = cmodel.encode_text(ttext)
+        embedding = text_features[0].to(device)  # match device of cached embs
+        #print("shape of text is",ttext.shape)
+        return embedding
+
+
+# Print the 20 nearest cached words to targetemb (euclidean distance)
+def print_distances(targetemb):
+    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
+
+    print("shape of distances...",targetdistances.shape)
+
+    smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
+
+    smallest_distances = smallest_distances.tolist()
+    smallest_indices = smallest_indices.tolist()
+    for d,i in zip(smallest_distances,smallest_indices):
+        print(wordlist[i],"(",d,")")
+
+
+# Find the 20 closest tokens to targetword.
+# Will include the word itself
+def find_closest(targetword):
+    try:
+        targetindex = wordlist.index(targetword)
+        targetemb = embs[targetindex]
+        print_distances(targetemb)
+        return
+    except ValueError:
+        print(targetword,"not found in cache")
+
+        print("Now doing with full calc embed")
+        targetemb = standard_embed_calc(targetword)
+        print_distances(targetemb)
+
+
+while True:
+    input_text = input("Input a word now:")
+    find_closest(input_text)
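
The calculate-* scripts in this commit differ only in the model they load; the core of each is the torch.cdist / torch.topk nearest-neighbour lookup over the cached embeddings, with the embedding file and dictionary passed as sys.argv[1] and sys.argv[2]. A minimal standalone sketch of that lookup on toy data (sizes and names here are illustrative, not part of the committed files):

import torch

embs = torch.randn(1000, 512)   # stand-in for the cached per-word embeddings
target = embs[42]               # stand-in for the query word's embedding

# Euclidean distance from the query to every cached embedding -> shape [1, 1000]
distances = torch.cdist(target.unsqueeze(0), embs, p=2)

# The 20 smallest distances and their row indices; the query's own index
# will be among them, as the scripts' comments note.
vals, idxs = torch.topk(distances[0], 20, largest=False)
print(list(zip(idxs.tolist(), vals.tolist())))
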
openclip/calculate-vitg.py ADDED
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+"""
+Plan:
+Read in "dictionary" for list of words
+Read in pre-calculated "proper" embedding for each word from safetensor file
+Prompt user for a word from the list
+Generate a tensor array of distances to all the other known words
+Print out the 20 closest ones
+"""
+
+
+import sys
+import torch
+import open_clip
+
+from safetensors import safe_open
+
+#from transformers import CLIPProcessor,CLIPModel
+
+device=torch.device("cuda")
+
+mtype='ViT-g-14'
+mname='laion2b_s12b_b42k'
+
+print("Loading",mtype,mname)
+
+cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+        pretrained=mname)
+tokenizer = open_clip.get_tokenizer(mtype)
+
+## cmodel = cmodel.to(device)
+
+
+#embed_file="embeddings.safetensors"
+embed_file=sys.argv[1]
+dictionary=sys.argv[2]
+
+
+print(f"read in words from {dictionary} now",file=sys.stderr)
+with open(dictionary,"r") as f:
+    tokendict = f.readlines()
+wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
+print(len(wordlist),"lines read")
+
+print(f"read in {embed_file} now",file=sys.stderr)
+emodel = safe_open(embed_file,framework="pt",device="cuda")
+embs = emodel.get_tensor("embeddings")
+embs = embs.to(device)
+print("Shape of loaded embeds =",embs.shape)
+
+# Encode a word from scratch (used when it is not in the cached embeddings)
+def standard_embed_calc(text):
+    with torch.no_grad():
+        ttext = tokenizer(text)
+        text_features = cmodel.encode_text(ttext)
+        embedding = text_features[0].to(device)  # match device of cached embs
+        #print("shape of text is",ttext.shape)
+        return embedding
+
+
+# Print the 20 nearest cached words to targetemb (euclidean distance)
+def print_distances(targetemb):
+    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
+
+    print("shape of distances...",targetdistances.shape)
+
+    smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
+
+    smallest_distances = smallest_distances.tolist()
+    smallest_indices = smallest_indices.tolist()
+    for d,i in zip(smallest_distances,smallest_indices):
+        print(wordlist[i],"(",d,")")
+
+
+# Find the 20 closest tokens to targetword.
+# Will include the word itself
+def find_closest(targetword):
+    try:
+        targetindex = wordlist.index(targetword)
+        targetemb = embs[targetindex]
+        print_distances(targetemb)
+        return
+    except ValueError:
+        print(targetword,"not found in cache")
+
+        print("Now doing with full calc embed")
+        targetemb = standard_embed_calc(targetword)
+        print_distances(targetemb)
+
+
+while True:
+    input_text = input("Input a word now:")
+    find_closest(input_text)
openclip/calculate-vith.py ADDED
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+"""
+Plan:
+Read in "dictionary" for list of words
+Read in pre-calculated "proper" embedding for each word from safetensor file
+Prompt user for a word from the list
+Generate a tensor array of distances to all the other known words
+Print out the 20 closest ones
+"""
+
+
+import sys
+import torch
+import open_clip
+
+from safetensors import safe_open
+
+#from transformers import CLIPProcessor,CLIPModel
+
+device=torch.device("cuda")
+
+mtype='ViT-H-14'
+mname='laion2b_s32b_b79k'
+
+print("Loading",mtype,mname)
+
+cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+        pretrained=mname)
+tokenizer = open_clip.get_tokenizer(mtype)
+
+## cmodel = cmodel.to(device)
+
+
+#embed_file="embeddings.safetensors"
+embed_file=sys.argv[1]
+dictionary=sys.argv[2]
+
+
+print(f"read in words from {dictionary} now",file=sys.stderr)
+with open(dictionary,"r") as f:
+    tokendict = f.readlines()
+wordlist = [token.strip() for token in tokendict]  # Remove trailing newlines
+print(len(wordlist),"lines read")
+
+print(f"read in {embed_file} now",file=sys.stderr)
+emodel = safe_open(embed_file,framework="pt",device="cuda")
+embs = emodel.get_tensor("embeddings")
+embs = embs.to(device)
+print("Shape of loaded embeds =",embs.shape)
+
+# Encode a word from scratch (used when it is not in the cached embeddings)
+def standard_embed_calc(text):
+    with torch.no_grad():
+        ttext = tokenizer(text)
+        text_features = cmodel.encode_text(ttext)
+        embedding = text_features[0].to(device)  # match device of cached embs
+        #print("shape of text is",ttext.shape)
+        return embedding
+
+
+# Print the 20 nearest cached words to targetemb (euclidean distance)
+def print_distances(targetemb):
+    targetdistances = torch.cdist(targetemb.unsqueeze(0), embs, p=2)
+
+    print("shape of distances...",targetdistances.shape)
+
+    smallest_distances, smallest_indices = torch.topk(targetdistances[0], 20, largest=False)
+
+    smallest_distances = smallest_distances.tolist()
+    smallest_indices = smallest_indices.tolist()
+    for d,i in zip(smallest_distances,smallest_indices):
+        print(wordlist[i],"(",d,")")
+
+
+# Find the 20 closest tokens to targetword.
+# Will include the word itself
+def find_closest(targetword):
+    try:
+        targetindex = wordlist.index(targetword)
+        targetemb = embs[targetindex]
+        print_distances(targetemb)
+        return
+    except ValueError:
+        print(targetword,"not found in cache")
+
+        print("Now doing with full calc embed")
+        targetemb = standard_embed_calc(targetword)
+        print_distances(targetemb)
+
+
+while True:
+    input_text = input("Input a word now:")
+    find_closest(input_text)
openclip/embeddings.vith14-quickgelu.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8014c16a1b2b2a971c5d8d8aaf5662d7fa0269d32a46497f5769ae3718f02cc6
+size 134885464
openclip/embeddings.vith14.dictionary.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c55fe6c6300500d9304343b19efb43cb1ccb170270dc8c854def0bedfa576413
+size 134885464
openclip/embeddings.vitl.dictionary.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:240da92c0376aa71385c02b8edcb91ddc75c935d99ae42164ebf3fdab3d6499c
+size 101164120
openclip/generate-embeddings-open.py CHANGED
@@ -14,25 +14,54 @@ import torch
 import open_clip
 from safetensors.torch import save_file
 
+outfile="out.safetensors"
+
+
+"""
+REMEMBER!!!
+You MUST use the same settings when you READ from the output file as well!!
+"""
+
 #mtype='ViT-B-32'
 #mname='laion2b_s34b_b79k'
 #mtype='ViT-g-14'
 #mname='laion2b_s12b_b42k'
-mtype='ViT-H-14'
-mname='laion2b_s32b_b79k'
+#mtype='ViT-H-14'
+#mname='laion2b_s32b_b79k'
+mtype='ViT-L-14'
+mname='laion2b_s32b_b82k'
+#### Warning, this requires more than 4GB vram
+#mtype='ViT-H-14-quickgelu'
+#mname='dfn5b'
+
+# May also be able to use syntax of
+# hf-hub:hf-internal-testing/tiny-open-clip-model
+# for mname
 
 print("Loading",mtype,mname)
 
-cmodel, _, preprocess = open_clip.create_model_and_transforms(mtype,
+cmodel, _, preprocess = open_clip.create_model_and_transforms(
+        mtype,
         pretrained=mname)
 tokenizer = open_clip.get_tokenizer(mtype)
 
+device=torch.device("cuda")
+
+try:
+    cmodel.to(device)
+except torch.cuda.OutOfMemoryError as e:
+    print(f"FALLING BACK TO CPU!! \n {e}")
+    device=torch.device("cpu")
+    cmodel.to(device)
+    # This is very rare... unless you are trying to load the quickgelu sets
+    # on a 4GB card. Or maybe have two things running.
 
 
 def standard_embed_calc(text):
     with torch.no_grad():
-        ttext = tokenizer(text)
+        ttext = tokenizer(text).to(device)
         text_features = cmodel.encode_text(ttext)
+        text_features = text_features.to(device)
         #print("shape of text is",ttext.shape)
 
         embedding = text_features[0]
@@ -60,7 +89,7 @@ for word in tokendict:
 embs = torch.cat(all_embeddings,dim=0)
 print("Shape of result = ",embs.shape)
 print("Saving all the things...")
-save_file({"embeddings": embs}, "embeddings.safetensors")
+save_file({"embeddings": embs}, outfile)
 
 
 print("calculate distances now")