File size: 1,142 Bytes
e6ad7fc
47c3daf
e6ad7fc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47c3daf
7075496
47c3daf
e6ad7fc
 
 
 
 
 
 
 
 
 
c6c8cd6
e6ad7fc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Legacy implementation: remote inference via the HF Inference API.
# Kept for reference; superseded by the local CLIP pipeline below.
# import requests

# API_URL = "https://api-inference.huggingface.co/models/trpakov/vit-face-expression"
# headers = {"Authorization": "Bearer <REDACTED_HF_TOKEN>"}  # NOTE(review): a real org token was committed here — revoke it and load from an env var (e.g. os.environ["HF_TOKEN"]) if this path is ever revived.


# def query(filename):
#     with open(filename, "rb") as f:
#         data = f.read()
#     response = requests.post(API_URL, headers=headers, data=data)
#     return response.json()


from PIL import Image
from transformers import CLIPProcessor, CLIPModel

# Load the CLIP model and its matching processor once at import time.
# NOTE: this downloads/loads ~1.7 GB of weights on first use and blocks
# module import until loading completes — presumably acceptable for this
# script's usage; confirm before importing this module in a hot path.
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")


def query(filename):
    """Classify the facial expression in an image via zero-shot CLIP.

    Scores the image against a fixed set of seven emotion prompts and
    returns softmax probabilities over them.

    Parameters
    ----------
    filename : str or path-like
        Path to the image file to classify.

    Returns
    -------
    list[dict]
        One ``{"label": str, "score": float}`` entry per emotion; the
        scores are probabilities summing to 1.
    """
    # Single source of truth for the label set — the original duplicated
    # this literal in both the processor call and the output zip, which
    # risked the two copies drifting apart.
    labels = ["Happy", "Sad", "Surprised", "Angry", "Disgusted", "Neutral", "Fearful"]

    # Image.open is lazy and holds the file handle open; the context
    # manager guarantees it is closed (the original leaked it).
    with Image.open(filename) as image:
        inputs = processor(
            text=labels,
            images=image,
            return_tensors="pt",
            padding=True,
        )
        outputs = model(**inputs)

    # logits_per_image has shape (1, len(labels)): image-text similarity
    # scores; softmax turns them into a probability distribution.
    probs = outputs.logits_per_image.softmax(dim=1)
    return [
        {"label": label, "score": float(score)}
        for label, score in zip(labels, probs[0])
    ]