# !pip install transformers==4.37.2 gradio==4.25.0
import gradio as gr
from transformers import pipeline
import numpy as np
from PIL import Image
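# Load pretrained image-classification pipelines: one for apparent age, one for facial emotion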
age_classifier = pipeline("image-classification", model="nateraw/vit-age-classifier")
emotion_classifier = pipeline("image-classification", model="jhoppanne/Emotion-Image-Classification-V2")
def pred_age_emotion(input_image):
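    # Gradio's Image component hands the upload over as a NumPy array; convert it to a PIL Image for the pipelines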
    if isinstance(input_image, np.ndarray):
        img = Image.fromarray(input_image)
        #age classifier
        age_result = age_classifier(img)
        age_score = age_result[0].get('score')
        age_label = age_result[0].get('label')
        txt1 = ''
        txt1 += f'The model predicts that the person in this image is around {age_label} years old,\n'
        txt1 += f'with a confidence score of {age_score*100:.2f}%.'
        #emotion classifier
        emotion_result = emotion_classifier(img)
        emotion_score = emotion_result[0].get('score')
        emotion_label = emotion_result[0].get('label')  # label must come from the same top prediction as the score
        txt2 = ''
        txt2 += f'The model predicts that the emotion of the person in this image is {emotion_label},\n'
        txt2 += f'with a confidence score of {emotion_score*100:.2f}%.'
    else:
        txt1 = txt2 = "Sorry, unable to process the image."
    return txt1, txt2
    # return f"Data type of uploaded image: {type(img)}"
def pred_emotion(input_image):
    return
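# Build the Gradio UI: one image input mapped to two text outputs (age prediction, emotion prediction)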
iface = gr.Interface(fn=pred_age_emotion, inputs=gr.Image(), outputs=["text", "text"])
iface.launch(share=True)