maxinethegreat committed
Commit e3ed1f8
1 Parent(s): 95d0890

attempt to use emotion detection model

Files changed (1)
  1. app.py +49 -3
app.py CHANGED
@@ -1,13 +1,59 @@
 import gradio as gr
 
+import tensorflow as tf
+import cv2
+import numpy as np
+
+
 def greet(name):
     return "Hello " + name + "!!"
 
 
+
+# Load the saved model
+model = tf.keras.models.load_model('model/cnn_9_layer_model.h5')
+
+# Define the face cascade and emotions
+face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
+
+# Define the predict_emotion function
+def predict_emotion(frame):
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+    for (x, y, w, h) in faces:
+        face = gray[y:y+h, x:x+w]
+        face = cv2.resize(face, (48, 48))
+        face = np.expand_dims(face, axis=-1)
+        face = np.expand_dims(face, axis=0)
+        prediction = model.predict(face)
+        emotion = emotions[np.argmax(prediction)]
+        cv2.putText(frame, emotion, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
+    return frame
+
+# Start the video capture and emotion detection
+# cap = cv2.VideoCapture(0)
+# while True:
+#     ret, frame = cap.read()
+#     if ret:
+#         frame = predict_emotion(frame)
+#         cv2.imshow('Live Facial Emotion Detection', frame)
+#         if cv2.waitKey(1) == ord('q'):
+#             break
+# cap.release()
+# cv2.destroyAllWindows()
+
+
+image = gr.inputs.Image(source = "webcam", shape=(50,50) )
+label = gr.outputs.Label(num_top_classes=7)
+
+
 iface = gr.Interface(
-    fn = greet,
-    inputs="text",
-    outputs="text",
+    fn = predict_emotion,
+    inputs=image,
+    outputs=label,
+    interpretation = "default",
     title = "Mood Detective",
     description = "real-time emotion detection"
 )
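
As committed, the wiring has two mismatches worth flagging: predict_emotion returns the annotated frame (an image), while the interface's output is a gr.outputs.Label, which expects a class name or a label-to-confidence dict; and shape=(50,50) downscales the webcam frame to barely more than the model's own 48x48 crop, leaving the Haar cascade little to detect. Below is a minimal sketch (not the committed code) of a variant whose return type matches the Label output. It reuses the commit's model path, emotion ordering, and Gradio 2.x-style gr.inputs/gr.outputs API; the [0, 1] input scaling is an assumption about how the model was trained, not something the commit confirms.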
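
import gradio as gr
import tensorflow as tf
import cv2
import numpy as np

model = tf.keras.models.load_model('model/cnn_9_layer_model.h5')
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']

def predict_emotion(frame):
    # Gradio's Image input delivers an RGB numpy array, so convert with
    # COLOR_RGB2GRAY (the committed BGR2GRAY flag assumes OpenCV ordering).
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # No face detected: return zero confidences so the Label still renders.
        return {e: 0.0 for e in emotions}
    # Use the first detected face; crop, resize to the model's 48x48 input,
    # and add batch and channel axes to get shape (1, 48, 48, 1).
    x, y, w, h = faces[0]
    face = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
    # Scaling to [0, 1] is assumed here, not confirmed by the commit.
    face = face[np.newaxis, :, :, np.newaxis].astype('float32') / 255.0
    prediction = model.predict(face)[0]
    # A label-to-probability dict is the form gr.outputs.Label expects.
    return {emotion: float(p) for emotion, p in zip(emotions, prediction)}

# Full-resolution webcam frames give the cascade enough pixels to work with.
image = gr.inputs.Image(source="webcam")
label = gr.outputs.Label(num_top_classes=7)

iface = gr.Interface(
    fn=predict_emotion,
    inputs=image,
    outputs=label,
    title="Mood Detective",
    description="real-time emotion detection"
)

if __name__ == '__main__':
    iface.launch()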
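
Alternatively, the committed predict_emotion, which draws a box and label onto the frame, could be kept almost unchanged by switching the interface to an "image" output instead of a Label; only the colour-conversion flag and the output type would need to change. The sketch also drops interpretation = "default", which repeatedly re-runs the model on occluded copies of the input and would make each webcam prediction noticeably slower.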