# Basic OpenCLIP usage: load a pretrained CLIP model and embed a text prompt.
import torch
from PIL import Image
import open_clip

# Model architecture and pretrained-weight tag from the OpenCLIP model zoo.
model_name = 'ViT-B-32'
pretrained = 'laion2b_s34b_b79k'

print("Loading", model_name, pretrained)

model, _, preprocess = open_clip.create_model_and_transforms(
    model_name, pretrained=pretrained)
tokenizer = open_clip.get_tokenizer(model_name)
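
# Aside (an addition, not in the original script): open_clip.list_pretrained()
# returns the available (architecture, pretrained-tag) pairs, which is a
# convenient way to discover other checkpoints.
# print(open_clip.list_pretrained())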

# image = preprocess(Image.open("CLIP.png")).unsqueeze(0)  # optional image branch
# Tokenize a single prompt; passing a list of strings yields a batch instead.
# The result is a LongTensor of token ids, shape (batch, 77) for CLIP models.
text = tokenizer("cat")

with torch.no_grad(), torch.cuda.amp.autocast():
    # image_features = model.encode_image(image)  # optional image branch
    text_features = model.encode_text(text)
    embedding = text_features[0]  # embedding vector for the single prompt
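
# A minimal follow-on sketch (an addition, not part of the original script):
# CLIP embeddings are usually L2-normalized before comparison, so cosine
# similarity reduces to a plain dot product. The prompt strings below are
# illustrative placeholders.
prompts = tokenizer(["a photo of a cat", "a photo of a dog"])
with torch.no_grad(), torch.cuda.amp.autocast():
    prompt_features = model.encode_text(prompts)
prompt_features = prompt_features / prompt_features.norm(dim=-1, keepdim=True)
query = embedding / embedding.norm()
print(prompt_features @ query)  # cosine similarity of each prompt to "cat"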