File size: 2,603 Bytes
7a9b925
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#!/bin/env python

""" 
Plan:
   Read in "dictionary" for list of words
   Read in pre-calculated "proper" embedding for each word from
     safetensor file named "embeddings.safetensors"
   Prompt user for two words from the list
   (but may also be off the list, or a phrase)
   Print out Euclidean distance between the two
 
  (the point of the dictionary is that it can make loading super fast for known words)
  
"""


import sys
import json
import torch
from safetensors import safe_open

import numpy

from transformers import CLIPProcessor,CLIPModel

# HuggingFace model id for the CLIP variant whose text encoder produces the embeddings.
clipsrc="openai/clip-vit-large-patch14"
# Lazily populated by init() on first full-calculation request:
# a CLIPProcessor and a CLIPModel respectively.
processor=None
model=None

# NOTE(review): hard-codes CUDA; this script fails on CPU-only machines — confirm intended.
device=torch.device("cuda")


def init():
    """Load the CLIP processor and model from `clipsrc` into the
    module-level globals, then move the model onto `device`.

    Called lazily by standard_embed_calc() so that cached-only lookups
    never pay the model-loading cost.
    """
    global processor
    global model

    print("loading processor from " + clipsrc, file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    print("loading model from " + clipsrc, file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done", file=sys.stderr)

    model = model.to(device)



embed_file = "embeddings.safetensors"

device = torch.device("cuda")

# Load the word list; each line of "dictionary" is one word/token.
print("read in words from dictionary now", file=sys.stderr)
with open("dictionary", "r") as f:
    wordlist = [line.strip() for line in f]  # strip trailing newlines
print(len(wordlist), "lines read")

# Load the precomputed embeddings matrix (row i corresponds to wordlist[i]).
# FIX: the original bound the safetensors handle to `model`, clobbering the
# global that init()/standard_embed_calc() use for the CLIP model; use a
# distinct name and a context manager so the file handle is released.
print("read in embeddings now", file=sys.stderr)
with safe_open(embed_file, framework="pt", device="cuda") as st:
    embs = st.get_tensor("embeddings")
# FIX: Tensor.to() is not in-place — the original discarded its result.
embs = embs.to(device)
print("Shape of loaded embeds =", embs.shape)

def standard_embed_calc(text):
    """Compute a CLIP text embedding for arbitrary text (slow path).

    Lazily loads the processor/model on first use via init(). Note that
    init() rebinds the module-level `model` global, which is also used
    below for get_text_features().

    :param text: a word or phrase to embed.
    :return: 1-D tensor — first row of the model's text features.
    """
    # FIX: identity comparison with None per PEP 8 (was `== None`).
    if processor is None:
        init()

    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    # Single prompt -> the batch has exactly one row.
    embedding = text_features[0]
    return embedding


def print_distance(emb1, emb2):
    """Print the Euclidean (L2) distance between two embedding tensors.

    :param emb1: first embedding tensor.
    :param emb2: second embedding tensor (same shape as emb1).
    :return: the distance as a 0-dim tensor. (Backward-compatible
        generalization: the original returned None; existing callers
        ignore the return value.)
    """
    targetdistance = torch.norm(emb1 - emb2)
    print("DISTANCE:", targetdistance)
    return targetdistance


# Return the embedding of targetword.
# Fast path: pull the precomputed row from the cached `embs` tensor when the
# word appears in `wordlist`; otherwise fall back to a full CLIP forward pass.
def find_word(targetword):
    try:
        targetindex = wordlist.index(targetword)
        # FIX: removed an unreachable bare `return` that followed this one.
        return embs[targetindex]
    except ValueError:
        # list.index raises ValueError when the word is not cached.
        print(targetword, "not found in cache")
    print("Now doing lookup with full calc embed")
    return standard_embed_calc(targetword)


# Interactive loop: repeatedly ask for two words (or phrases) and report
# the Euclidean distance between their embeddings. Exit with Ctrl-C/Ctrl-D.
while True:
    first = input("Input a word1(or phrase) now:")
    second = input("Input word2 now:")
    print_distance(find_word(first), find_word(second))