# NOTE: Hugging Face web-UI residue from the original page capture, preserved
# as a comment so the file is valid Python:
#   author: chandralegend — commit: "added dedicated model" (e6ad7fc)
#   page links: raw / history / blame — file size: 1.18 kB
# Legacy implementation using the hosted Inference API, kept for reference:
# import requests
# API_URL = "https://api-inference.huggingface.co/models/trpakov/vit-face-expression"
# headers = {"Authorization": "Bearer <HF_API_TOKEN>"}  # SECURITY: a real org token was committed here — revoke it and read the token from an env var instead
# def query(filename):
#     with open(filename, "rb") as f:
#         data = f.read()
#     response = requests.post(API_URL, headers=headers, data=data)
#     return response.json()
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor
# Load CLIP and its paired processor once at import time so every query() call
# reuses the same weights (first run downloads the checkpoint from the HF Hub).
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
def query(filename, labels=("Happy", "Sad", "Surprised", "Angry", "Disgusted", "Neutral", "Fearful")):
    """Zero-shot facial-expression classification of an image via CLIP.

    Args:
        filename: Path to the image file to classify.
        labels: Candidate emotion labels scored against the image. Defaults
            to the seven basic expressions the original script hard-coded.

    Returns:
        dict mapping each label to its softmax probability (floats, ~sum to 1).
    """
    # Single source of truth for the label order used both for the text
    # prompts and for pairing with the probability row below (the original
    # duplicated the list in two places, which could silently desync).
    labels = list(labels)
    # Context manager releases the underlying file handle when done.
    with Image.open(filename) as image:
        inputs = processor(
            text=labels,
            images=image,
            return_tensors="pt",
            padding=True,
        )
        # Inference only — skip building the autograd graph.
        with torch.no_grad():
            outputs = model(**inputs)
    # One row per image; softmax over the text candidates gives a
    # probability per label.
    probs = outputs.logits_per_image.softmax(dim=1)
    return {label: prob.item() for label, prob in zip(labels, probs[0])}