import gradio as gr

import tensorflow as tf
import cv2
import numpy as np


# Load the saved model
model = tf.keras.models.load_model('model/model.h5')
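# NOTE (assumption): model/model.h5 is expected to be a FER-style CNN that takes
# 48x48 single-channel grayscale inputs scaled to [0, 1] and outputs one
# probability per label in `emotions` below.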

# model = gr.load('maxineattobrah/RealTimeEmotionDetection', src='models')

# Define the face cascade and emotions
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
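# NOTE (assumption): the label order above must match the index order of the
# model's output layer (this is the usual FER2013 ordering).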

no_face_detection_alert = "Cannot Detect Face"
low_confidence_alert = "Cannot Detect Emotion"

# Detect faces in a frame, classify each face's emotion, and annotate the frame:
# grayscale -> Haar cascade face detection -> 48x48 crop per face -> CNN prediction
# -> draw the predicted label and a bounding box on the original frame.

def predict_emotion(frame):
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
	for (x, y, w, h) in faces:
		face = gray[y:y+h, x:x+w]
		face = cv2.resize(face, (48, 48), interpolation = cv2.INTER_AREA)
		if np.sum(face) != 0:  # guard against an empty/all-black crop
			face = face.astype('float')/255.0
			face = tf.keras.utils.img_to_array(face)
			face = np.expand_dims(face, axis=0)
			prediction = model.predict(face)
			if any(prob > 0.5 for prob in prediction[0]):  # require at least one class above 50% confidence
				emotion = emotions[np.argmax(prediction)]
				cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
				cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
			else:
				cv2.putText(frame, low_confidence_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
		else:
			cv2.putText(frame, no_face_detection_alert, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

	return frame

# Standalone OpenCV webcam loop for local testing (kept commented out; the Gradio app below is used instead)
# cap = cv2.VideoCapture(0)
# while True:
#     ret, frame = cap.read()
#     if ret:
#         frame = predict_emotion(frame)
#         cv2.imshow('Live Facial Emotion Detection', frame)
#     if cv2.waitKey(1) == ord('q'):
#         break
# cap.release()
# cv2.destroyAllWindows()


input_image = gr.Image(sources=["webcam"], streaming=True, label="Your Face")
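# NOTE: with streaming=True here and live=True on the Interface below, Gradio
# streams webcam frames to predict_emotion continuously rather than waiting for
# a submit click.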
# video = gr.inputs.Video(source = "webcam" )

output_image = gr.Image(type="numpy", label="Detected Emotion")



iface = gr.Interface(
	fn=predict_emotion,
	inputs=input_image,
	outputs=output_image,
	# Note: Gradio's batch=True expects fn to accept and return lists of inputs;
	# predict_emotion above processes a single frame, so batching may need a
	# wrapper (or batch=False) to behave as intended.
	batch=True,
	max_batch_size=100000,
	# interpretation = "default",
	title="Mood Detectives",
	description="Real-Time Emotion Detection Using Facial Expressions:\nCan our model detect if you are angry, happy, sad, fearful, disgusted, surprised, or neutral?",
	live=True
)

# iface.queue(concurrency_count=1000)

iface.launch()