import gradio as gr
import torch
import clip
from PIL import Image

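# Pick the GPU when available, then load CLIP ViT-B/32 and its image
# preprocessing transform once at startup.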
print("Getting device...")
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Loading model...")
model, preprocess = clip.load("ViT-B/32", device=device)
print("Loaded model.")


def process(image, prompt):
    """Score a PIL image against newline-separated text prompts with CLIP."""
    print("Inferring...")
    # One prompt per non-empty line of the textbox.
    prompts = [line.strip() for line in prompt.split("\n") if line.strip()]
    if not prompts:
        return {}

    # Apply CLIP's preprocessing and add a batch dimension.
    image = preprocess(image).unsqueeze(0).to(device)
    text = clip.tokenize(prompts).to(device)

    with torch.no_grad():
        # Similarity logits between the image and each prompt.
        logits_per_image, logits_per_text = model(image, text)
        # Softmax over prompts yields one probability per prompt.
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        print(probs)

    # Map each prompt to its probability for the Label output.
    return dict(zip(prompts, probs[0].tolist()))


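# Gradio UI: a PIL image and newline-separated prompts in, a Label of
# per-prompt probabilities out.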
iface = gr.Interface(
    fn=process,
    inputs=[
        gr.Image(type="pil"),
        gr.Textbox(lines=5, label="Prompts (newline-separated)"),
    ],
    outputs="label",
)
iface.launch()