# Importing some modules
import gradio as gr
from transformers import pipeline
import torch
# Loading the age and emotion classification pipelines (GPU if available, otherwise CPU)
MODEL_AGE = pipeline('image-classification', model='nateraw/vit-age-classifier',
                     device=0 if torch.cuda.is_available() else -1)
MODEL_EMOTION = pipeline('image-classification', model='dennisjooo/emotion_classification',
                         device=0 if torch.cuda.is_available() else -1)
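# Each pipeline call returns a list of dicts of the form {'label': ..., 'score': ...},
# sorted by descending confidence; classify_image() below reshapes that into the
# {label: score} dictionaries that gr.Label expects.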
def classify_image(image, top_k):
    """Guesses the age bracket and the emotion in the picture and returns both for Gradio."""
    # Guarding against zero or negative guess counts from the Number box
    top_k = max(int(top_k), 1)

    # Getting the classification results; the pipelines only return their top 5 guesses
    # by default, so top_k is passed through explicitly
    age_result = MODEL_AGE(image, top_k=min(top_k, 8))
    emotion_result = MODEL_EMOTION(image, top_k=min(top_k, 7))

    # Reformatting the classification results into {label: score} dicts for gr.Label
    age_result = {result['label']: result['score'] for result in age_result}
    emotion_result = {result['label']: result['score'] for result in emotion_result}

    # Adding a snarky text comment based on the top age prediction
    comment = text_comment(list(age_result.keys())[0])

    # Returning the classification results
    return age_result, comment, emotion_result
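# For reference, calling the classifier outside of Gradio would look roughly like this
# ("my_photo.jpg" is just a placeholder path for any local portrait):
#
#   from PIL import Image
#   ages, quip, emotions = classify_image(Image.open("my_photo.jpg"), top_k=3)
#   # `ages` and `emotions` map each predicted label to its confidence score,
#   # and `quip` is the one-liner picked by text_comment() for the top age bracket.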
# Snarky comment based on age
def text_comment(pred_class):
    """Returns a snarky comment for the predicted age bracket."""
    match pred_class:
        case "3-9":
            return "Lost your way to the playground?"
        case "10-19":
            return "But Mom, I'm not a kid anymore!"
        case "20-29":
            return "You're in your prime!"
        case "30-39":
            return "Oof, watch out for those wrinkles!"
        case "40-49":
            return "You're still young at heart!"
        case "50-59":
            return "Retirement is just around the corner!"
        case "60-69":
            return "You're a senior citizen now!"
        case "more than 70":
            return "Hey Siri, play 'My Way' by Frank Sinatra"
        case _:
            # Fallback for any bracket without its own quip, so the function never returns
            # None; an empty string lets the textbox fall back to its placeholder
            return ""
if __name__ == "__main__":
    # Defining the title of the interface
    title_text = """
# I will guess your age and mood based on your picture!
---
Totally not creepy, I promise :)

Made by [Dennis Jonathan](https://dennisjooo.github.io). A project for the REA Mastering AI course.

The age-guessing model is [nateraw/vit-age-classifier](https://huggingface.co/nateraw/vit-age-classifier).

The mood-guessing model is a [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k)
trained on [FastJobs/Visual_Emotional_Analysis](https://huggingface.co/datasets/FastJobs/Visual_Emotional_Analysis).
"""
    # Creating the Gradio interface
    with gr.Blocks() as demo:
        gr.Markdown(title_text)

        with gr.Row(equal_height=True):
            with gr.Column():
                # Creating the input block
                image = gr.Image(label="Upload a picture of yourself", type="pil", scale=2)

                # Creating the example block
                gr.Examples(examples=[
                    "./images/andrew.jpg",
                    "./images/feifei.jpg",
                    "./images/geoff.jpg",
                    "./images/ilya.jpg",
                    "./images/karpathy.jpg",
                    "./images/lex.jpg"
                ], inputs=[image], label="Or choose an example")
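                # Note: these example paths are relative to the working directory,
                # so the demo assumes an images/ folder ships alongside this script.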
            with gr.Column():
                # Getting the top k hyperparameter
                top_k = gr.Number(label="How many guesses do I get?", value=1)

                # Creating the output block
                age_label = gr.Label(label="Hey it's me, your age!")
                comment = gr.Textbox(label="Based on your age, I think you are...",
                                     placeholder="I'm still learning, so I might be wrong!")
                emotion_label = gr.Label(label="Hey it's me, your emotion!")
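                # gr.Label takes the {label: confidence} dictionaries returned by
                # classify_image() and renders them as ranked confidence bars.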
        with gr.Row():
            # Submit button
            btn = gr.Button("Beep boop, guess my age and emotion!")
            btn.click(classify_image, inputs=[image, top_k], outputs=[age_label, comment, emotion_label])

            # Clear button
            clear = gr.Button("Poof begone!")
            clear.click(lambda: [None, None, None, None], inputs=[], outputs=[image, age_label, comment, emotion_label])
    # Launching the interface; share=True requests a temporary public gradio.live link,
    # and debug=True blocks until the app closes so errors surface in the console
    demo.launch(share=True, debug=True)