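# Minimal Gradio demo: rank newline-separated text prompts against an uploaded
# image with OpenAI CLIP (ViT-B/32) and show the per-prompt probabilities.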
import gradio as gr
import torch
import clip
from PIL import Image


def process(device, image, prompt):
    print("Inferring...")
    image = preprocess(image).unsqueeze(0).to(device)
    print(image)

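    # One prompt per textbox line; tokenize them all as a single batch.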
    prompts = [line.strip() for line in prompt.split("\n") if line.strip()]
    text = clip.tokenize(prompts).to(device)
    print(text)

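    # Score the image against every prompt; softmax over the prompt axis turns
    # the logits into probabilities that sum to 1.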
    with torch.no_grad():
        logits_per_image, logits_per_text = model(image, text)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        print(probs)

        return {p: float(prob) for p, prob in zip(prompts, probs[0])}


if __name__ == "__main__":
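    # Pick the device and load CLIP once at startup; `model` and `preprocess`
    # end up as module-level globals that process() relies on.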
    print("Getting device...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Loading model...")
    model, preprocess = clip.load("ViT-B/32", device=device)
    print("Loaded model.")

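    # Wire the model into a simple web UI: an image input, a multi-line prompt
    # box, and a Label output showing the probability assigned to each prompt.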
    iface = gr.Interface(
        fn=lambda i, p: process(device, i, p),
        inputs=[
            gr.Image(type="pil"),
            gr.Textbox(lines=5, label="Prompts (newline-separated)"),
        ],
        outputs="label",
    )
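    # Start the local web server (Gradio serves on http://127.0.0.1:7860 by default).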
    iface.launch()