#!/usr/bin/env python3
""" Work in progress
Plan:
Modded version of graph-embeddings.py
Just to see if using different CLIP module changes values significantly
(It does not)
It does have the small bonus feature of being able to accept a purely
numerical tokenid in liu of a number, if you use the syntax,
"#345".
You can input a text string, or a single numeric code, per input
This code requires
pip install git+https://github.com/openai/CLIP.git
"""
import sys
import json
import torch
import clip
import PyQt5  # not used directly, but must be installed for the Qt5Agg backend
import matplotlib
matplotlib.use('QT5Agg')  # Set the backend to Qt5Agg
import matplotlib.pyplot as plt
### The Stable Diffusion standard model is ViT-L/14
#CLIPname= "ViT-B/16"
CLIPname= "ViT-L/14"
#CLIPname= "ViT-L/14@336px"
# Available models:
# 'RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("loading CLIP model", CLIPname)
model, preprocess = clip.load(CLIPname, device=device)
model.eval()
print("done")
def embed_from_tokenid(num):
    # A bit sleazy, but, eh: tokenize a throwaway prompt, then overwrite
    # the slot at position 1 (between the start-of-text and end-of-text
    # markers) with the requested raw token id.
    tokens = clip.tokenize("dummy").to(device)
    tokens[0][1] = num
    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed
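# Alternative sketch (an assumption, not part of the original): build the
# token tensor directly instead of patching a dummy prompt. For these CLIP
# models the context length is 77, <|startoftext|> is id 49406, and
# <|endoftext|> is id 49407. Note that encode_text locates the end-of-text
# slot by argmax over the token ids, so either approach misbehaves for
# num >= 49407.
#   tokens = torch.zeros(1, 77, dtype=torch.long, device=device)
#   tokens[0][0] = 49406  # <|startoftext|>
#   tokens[0][1] = num    # the raw token id of interest
#   tokens[0][2] = 49407  # <|endoftext|>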
def embed_from_text(text):
    # "#345" syntax: treat the rest of the string as a raw token id.
    if text.startswith("#"):
        print("Converting string to number")
        return embed_from_tokenid(int(text[1:]))
    tokens = clip.tokenize(text).to(device)
    print("Tokens for", text, "=", tokens)
    with torch.no_grad():
        embed = model.encode_text(tokens)
    return embed
fig, ax = plt.subplots()
text1 = input("First prompt or #tokenid: ")
text2 = input("Second prompt (or leave blank): ")
print("generating embeddings for each now")
emb1 = embed_from_text(text1)
print("shape of emb1:", emb1.shape)
graph1 = emb1[0].tolist()
ax.plot(graph1, label=text1[:20])
if len(text2) > 0:
    emb2 = embed_from_text(text2)
    graph2 = emb2[0].tolist()
    ax.plot(graph2, label=text2[:20])
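    # Optional sketch (an assumption, not in the original): print a single
    # cosine-similarity number alongside the graph when two inputs are given.
    # sim = torch.nn.functional.cosine_similarity(emb1, emb2).item()
    # print("cosine similarity of emb1 and emb2:", sim)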
# Add labels, title, and legend
#ax.set_xlabel('Index')
ax.set_ylabel('Values')
ax.set_title(f"Graph of Embeddings in {CLIPname}")
ax.legend()
# Display the graph
print("Pulling up the graph")
plt.show()