#!/usr/bin/env python
"""
Input a single word, and it will graph it,
as embedded by CLIPModel vs CLIPTextModel
It will then print out the "distance" between the two,
and then show you a coordinate graph
You will want to zoom in to actually see the differences, usually
"""
import sys
import json
import torch
from transformers import CLIPProcessor,CLIPModel,CLIPTextModel
import logging
# Turn off noisy messages from the CLIP model loading
logging.disable(logging.WARNING)
import PyQt5
import matplotlib
matplotlib.use('Qt5Agg') # Set the backend to Qt5Agg (needs PyQt5)
import matplotlib.pyplot as plt
clipsrc="openai/clip-vit-large-patch14"
overlaymodel="text_encoder.bin"
overlaymodel2="text_encoder2.bin"
processor=None
clipmodel=None
cliptextmodel=None
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("loading processor from "+clipsrc,file=sys.stderr)
processor = CLIPProcessor.from_pretrained(clipsrc)
print("done",file=sys.stderr)
def clipmodel_one_time(text):
    global clipmodel
    if clipmodel is None:
        print("loading CLIPModel from "+clipsrc,file=sys.stderr)
        clipmodel = CLIPModel.from_pretrained(clipsrc)
        clipmodel = clipmodel.to(device)
        print("done",file=sys.stderr)
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = clipmodel.get_text_features(**inputs)
    return text_features
#shape = (1,768)
def cliptextmodel_one_time(text):
    global cliptextmodel
    if cliptextmodel is None:
        print("loading CLIPTextModel from "+clipsrc,file=sys.stderr)
        cliptextmodel = CLIPTextModel.from_pretrained(clipsrc)
        cliptextmodel = cliptextmodel.to(device)
        print("done",file=sys.stderr)
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        outputs = cliptextmodel(**inputs)
    embeddings = outputs.pooler_output
    return embeddings
# shape is (1,768)
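# The helper below is not part of the original script. It sketches, under my
# reading of the transformers CLIP implementation (treat this as an assumption),
# why the two embeddings differ: CLIPModel.get_text_features() runs the pooled
# text-encoder output through the model's text_projection layer, while
# CLIPTextModel's pooler_output is that same pooled state before projection.
def projected_cliptextmodel_embedding(text):
    # Hypothetical helper: push the CLIPTextModel pooled output through
    # CLIPModel's projection layer. If the note above is right, the result
    # should closely match clipmodel_one_time(text).
    clipmodel_one_time(text)  # make sure clipmodel is loaded
    pooled = cliptextmodel_one_time(text)
    with torch.no_grad():
        return clipmodel.text_projection(pooled)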
def print_distance(emb1,emb2):
    targetdistance = torch.norm(emb1 - emb2)
    print("DISTANCE:",targetdistance)
def prompt_for_word():
    fig, ax = plt.subplots()
    text1 = input("Word or prompt: ")
    if text1 == "q":
        exit(0)
    print("generating embeddings for each now")
    emb1 = clipmodel_one_time(text1)[0]
    graph1 = emb1.tolist()
    ax.plot(graph1, label="clipmodel")
    emb2 = cliptextmodel_one_time(text1)[0]
    graph2 = emb2.tolist()
    ax.plot(graph2, label="cliptextmodel")
    print_distance(emb1,emb2)
    # Add labels, title, and legend
    #ax.set_xlabel('Index')
    ax.set_ylabel('Values')
    ax.set_title('Graph embedding from std libs')
    ax.legend()
    # Display the graph
    print("Pulling up the graph. To calculate more distances, close the graph window")
    plt.show()
# Don't know why plt.show() only works once!
while True:
    prompt_for_word()