#!/usr/bin/env python

""" Demo source that explores difference between embeddings from 
    stock CLIPModel data, vs one embedded in a full SD model.
    Input a single word, and it will graph each version.

    You will want to zoom in to actually see the differences, usually

Required data file: "text_encoder.bin"

    Find the "diffusers format" version of the model you are interested in,
    and steal from that.
    eg: grab
    stablediffusionapi/ghostmix/text_encoder/pytorch_model.bin

    and download it, renamed to 
        "text_encoder.bin"

"""


import sys
import json
import torch
from transformers import CLIPProcessor, CLIPModel
import logging
# Turn off the noisy warning messages from CLIPModel loading
logging.disable(logging.WARNING)

import PyQt5
import matplotlib
matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg

import matplotlib.pyplot as plt


clipsrc="openai/clip-vit-large-patch14"

overlaymodel="text_encoder.bin"

processor=None
model=None

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def init():
    global processor
    global model
    # Load the processor and model
    print("loading processor from "+clipsrc,file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done",file=sys.stderr)
    print("loading model from "+clipsrc,file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done",file=sys.stderr)
    model = model.to(device)

def load_overlay():
    global model
    print("loading overlay",overlaymodel)
    # map_location keeps the load working even if the checkpoint was saved on GPU
    overlay = torch.load(overlaymodel, map_location="cpu")
    if "state_dict" in overlay:
        print("dereferencing state_dict")
        overlay=overlay["state_dict"]

    print("Attempting to update old from new")
    sd=model.state_dict()
    sd.update(overlay)
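
    # Optional sanity check (a sketch): count how many overlay keys actually
    # name parameters in the stock CLIPModel. Zero matches would suggest the
    # file is not a diffusers-format text encoder.
    #   matched = sum(1 for k in overlay if k in model.state_dict())
    #   print(f"{matched} of {len(overlay)} overlay keys matched")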
    
    # Surprisingly, CLIPModel doesn't use, or want, this key,
    # so it has to be removed.
    if "text_model.embeddings.position_ids" in sd:
        print("Removing key text_model.embeddings.position_ids")
        sd.pop("text_model.embeddings.position_ids")

    print("Reloading merged data")
    model.load_state_dict(sd)
    model = model.to(device)

# Expects a short prompt; a SINGLE WORD works best
def standard_embed_calc(text):
    # truncation guards against prompts longer than CLIP's 77-token limit
    inputs = processor(text=text, return_tensors="pt", truncation=True)
    inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding


init()

fig, ax = plt.subplots()


text1 = input("First word or prompt: ")


print("generating embeddings for each now")
emb1 = standard_embed_calc(text1)
graph1=emb1.tolist()
ax.plot(graph1, label=text1[:20])

load_overlay()
emb2 = standard_embed_calc(text1)
graph2=emb2.tolist()
ax.plot(graph2, label="overlay data")

# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title('Graph embedding from standard vs MERGED dict')
ax.legend()

# Display the graph
print("Pulling up the graph")
plt.show()
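
# Optional: quantify the difference instead of just eyeballing the plot.
# A small sketch, reusing the two embeddings computed above:
#
#   diff = (emb1 - emb2).abs()
#   cos = torch.nn.functional.cosine_similarity(emb1, emb2, dim=0)
#   print("max abs difference:", diff.max().item())
#   print("cosine similarity :", cos.item())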