#!/usr/bin/env python

""" Work in progress
NB: This is COMPLETELY DIFFERENT from "generate-embeddings.py"!!!


Plan:
   Take input for a single word or phrase.
   Generate a embedding file, "generated.safetensors"
   Save it out, to "generated.safetensors"

   Note that you can generate an embedding from two words, or even more
   
   Note also that apparently there are multiple file formats for embeddings.
   I only use the simplest of them, in the simplest way.
"""


import sys
import torch
from safetensors.torch import save_file
from transformers import CLIPProcessor, CLIPModel

import logging
# Silence the verbose warning messages emitted when loading CLIPModel
logging.disable(logging.WARNING)

clipsrc = "openai/clip-vit-large-patch14"
processor = None
model = None

# Prefer the GPU when available, but fall back to CPU so the script still runs without one
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def init():
    global processor
    global model
    # Load the processor and model
    print("loading processor from "+clipsrc,file=sys.stderr)
    processor = CLIPProcessor.from_pretrained(clipsrc)
    print("done",file=sys.stderr)
    print("loading model from "+clipsrc,file=sys.stderr)
    model = CLIPModel.from_pretrained(clipsrc)
    print("done",file=sys.stderr)

    model = model.to(device)

def standard_embed_calc(text):
    """Return the CLIP text-feature vector for a word or phrase (a 1-D tensor)."""
    inputs = processor(text=text, return_tensors="pt")
    inputs = inputs.to(device)
    with torch.no_grad():
        text_features = model.get_text_features(**inputs)
    embedding = text_features[0]
    return embedding


init()


word = input("type a phrase to generate an embedding for: ")

emb = standard_embed_calc(word)
embs = emb.unsqueeze(0)  # add a leading batch dimension so the saved tensor has shape (1, dim)

print("Shape of result =", embs.shape)
output = "generated.safetensors"
print(f"Saving to {output}...")
save_file({"emb_params": embs}, output)
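
# Optional sanity check (a minimal sketch, not part of the plan above): reload the
# file we just wrote and confirm the "emb_params" tensor round-trips with the
# expected shape. load_file() is safetensors' standard counterpart to save_file().
from safetensors.torch import load_file

reloaded = load_file(output)
print("Reloaded keys:", list(reloaded.keys()))
print("Reloaded shape:", reloaded["emb_params"].shape)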