ppbrown committed on
Commit
bb625b5
1 Parent(s): 507c646

Upload graph-embeddings-frommodel.py

Files changed (1)
  1. graph-embeddings-frommodel.py +110 -0
graph-embeddings-frommodel.py ADDED
@@ -0,0 +1,110 @@
+ #!/usr/bin/env python
+
+ """ Demo script that explores the difference between embeddings from
+ stock CLIPModel weights vs. the text encoder embedded in a full SD model.
+ Input a single word, and it will graph each version.
+
+ You will usually want to zoom in to actually see the differences.
+
+ Required data file: "text_encoder.bin"
+
+ Find the "diffusers format" version of the model you are interested in,
+ and take the text encoder weights from that.
+ e.g. grab
+     stablediffusionapi/ghostmix/text_encoder/pytorch_model.bin
+
+ and download it, renamed to
+     "text_encoder.bin"
+ """
+
+
+ import sys
+ import torch
+ from transformers import CLIPProcessor, CLIPModel
+ import logging
+ # Turn off noisy warning messages from CLIPModel loading
+ logging.disable(logging.WARNING)
+
+ import PyQt5          # make sure the Qt5 backend is actually available
+ import matplotlib
+ matplotlib.use('Qt5Agg')   # Set the backend to Qt5Agg
+
+ import matplotlib.pyplot as plt
+
+
+ clipsrc = "openai/clip-vit-large-patch14"
+
+ overlaymodel = "text_encoder.bin"
+
+ processor = None
+ model = None
+
+ device = torch.device("cuda")
+
+
+ # Load the stock CLIP processor and model, and move the model to the GPU
+ def init():
+     global processor
+     global model
+     print("loading processor from " + clipsrc, file=sys.stderr)
+     processor = CLIPProcessor.from_pretrained(clipsrc)
+     print("done", file=sys.stderr)
+     print("loading model from " + clipsrc, file=sys.stderr)
+     model = CLIPModel.from_pretrained(clipsrc)
+     print("done", file=sys.stderr)
+     model = model.to(device)
+
+
+ # Merge the downloaded text_encoder weights over the stock CLIP state dict
+ def load_overlay():
+     global model
+     print("loading overlay", overlaymodel)
+     overlay = torch.load(overlaymodel)
+     if "state_dict" in overlay:
+         print("dereferencing state_dict")
+         overlay = overlay["state_dict"]
+
+     print("Attempting to update old from new")
+     sd = model.state_dict()
+     sd.update(overlay)
+     print("Reloading merged data")
+     model.load_state_dict(sd)
+     model = model.to(device)
+
+
+ # Expects a SINGLE WORD (or short prompt)
+ def standard_embed_calc(text):
+     inputs = processor(text=text, return_tensors="pt")
+     inputs = inputs.to(device)
+     with torch.no_grad():
+         text_features = model.get_text_features(**inputs)
+     embedding = text_features[0]
+     return embedding
+
+
+ init()
+
+ fig, ax = plt.subplots()
+
+ text1 = input("First word or prompt: ")
+
+ print("generating embeddings for each now")
+ emb1 = standard_embed_calc(text1)
+ graph1 = emb1.tolist()
+ ax.plot(graph1, label=text1[:20])
+
+ load_overlay()
+ emb2 = standard_embed_calc(text1)
+ graph2 = emb2.tolist()
+ ax.plot(graph2, label="overlay data")
+
+ # Add labels, title, and legend
+ # ax.set_xlabel('Index')
+ ax.set_ylabel('Values')
+ ax.set_title('Graph embedding from standard vs MERGED dict')
+ ax.legend()
+
+ # Display the graph
+ print("Pulling up the graph")
+ plt.show()
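
The docstring's step of fetching text_encoder/pytorch_model.bin and renaming it to "text_encoder.bin" can also be scripted. Below is a minimal sketch, assuming huggingface_hub is installed; the repo_id is just the stablediffusionapi/ghostmix example from the docstring, so swap in whichever diffusers-format model you actually care about. It is an illustration, not part of the committed script.

# Fetch the text encoder weights from a diffusers-format repo and save them
# locally as "text_encoder.bin", the filename graph-embeddings-frommodel.py expects.
# NOTE: repo_id below is only the example model from the docstring; change it
# to the model you want to inspect.
import shutil
from huggingface_hub import hf_hub_download

cached_path = hf_hub_download(
    repo_id="stablediffusionapi/ghostmix",
    subfolder="text_encoder",
    filename="pytorch_model.bin",
)
shutil.copy(cached_path, "text_encoder.bin")

With that file in place, running the script and entering a single word should show two overlaid curves: one from the stock CLIP weights and one from the merged SD text encoder.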