import gradio as gr
from transformers import pipeline
import numpy as np
from PIL import Image

# Pretrained Hugging Face image-classification pipelines: one for age, one for emotion.
age_classifier = pipeline("image-classification", model="nateraw/vit-age-classifier")
emotion_classifier = pipeline("image-classification", model="jhoppanne/Emotion-Image-Classification-V2")

def pred_age_emotion(input_image):
    """Return human-readable age and emotion predictions for an uploaded image."""
    if isinstance(input_image, np.ndarray):
        img = Image.fromarray(input_image)

        # Age: take the top-ranked label and its confidence score.
        age_result = age_classifier(img)
        age_score = age_result[0].get('score')
        age_label = age_result[0].get('label')
        txt1 = (f'The model predicts that the person in this image is around {age_label} years old,\n'
                f'with a confidence score of {age_score * 100:.2f}%.')

        # Emotion: likewise use the top-ranked prediction (label and score come from the same entry).
        emotion_result = emotion_classifier(img)
        emotion_score = emotion_result[0].get('score')
        emotion_label = emotion_result[0].get('label')
        txt2 = (f'The model predicts that the emotion of the person in this image is {emotion_label},\n'
                f'with a confidence score of {emotion_score * 100:.2f}%.')
    else:
        # No image (or an unsupported input type) was provided.
        txt1 = txt2 = "Sorry, unable to process the image."
    return txt1, txt2


def pred_emotion(input_image):
    # Emotion-only helper, completed from the original empty stub; it mirrors the
    # emotion branch above and is not wired into the interface below.
    if isinstance(input_image, np.ndarray):
        input_image = Image.fromarray(input_image)
    return emotion_classifier(input_image)[0].get('label')


# Gradio UI: one image input, two text outputs (age prediction and emotion prediction).
iface = gr.Interface(fn=pred_age_emotion, inputs=gr.Image(), outputs=["text", "text"])
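# share=True serves the demo through a temporary public Gradio link in addition to
# the local URL; omit it (or pass share=False) to keep the app local-only.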
iface.launch(share=True)