import re

import gradio as gr
from fastai.vision.all import *
import skimage

# Load the exported fastai learner.
learn = load_learner('export.pkl')


def from_csv(x):
    """Look up the emotion label for an image path in the labels DataFrame `df`."""
    try:
        pattern = r'/([A-Za-z\d]+\.jpg)'
        match = re.findall(pattern, str(x))
        x = df.loc[df['image'] == match[0]]
        y = x['emotion'].item()
        return str(y)
    except Exception:
        return 0


labels = learn.dls.vocab


def predict(img):
    """Classify an image and return a {label: probability} mapping for Gradio."""
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
    return {labels[i]: float(probs[i]) for i in range(len(labels))}


title = "EMOTIONAL DAMAGE"
description = "An emotion classifier trained with fastai. Created as a demo for Gradio and HuggingFace Spaces."
article = ""
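
# NOTE: `from_csv` above relies on a DataFrame named `df` that this script never
# defines. A minimal sketch of how it might be provided, assuming the labels live
# in a file called 'labels.csv' with 'image' and 'emotion' columns (the filename
# and column names are assumptions, not part of the original code):
import pandas as pd

try:
    df = pd.read_csv('labels.csv')
except FileNotFoundError:
    # Fall back to an empty frame so the demo still launches without the CSV.
    df = pd.DataFrame(columns=['image', 'emotion'])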
" examples = ['happylee.jpg','damu.jpeg','angry.jpeg','kili.jpg'] interpretation='default' enable_queue=True gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(512, 512)),outputs=gr.outputs.Label(num_top_classes=5),title=title,description=description,article=article,examples=examples,interpretation=interpretation,enable_queue=enable_queue).launch()