jaimin committed
Commit e698804
1 Parent(s): bf53f45

Upload 4 files

app.py ADDED
@@ -0,0 +1,51 @@
+ import os
+ from concurrent.futures import ThreadPoolExecutor
+ import gradio as gr
+ from bpm_app.heartBPM_modified_copy import heart
+ from stress_detection.eyebrow_detection_modified_copy import stress
+ from age_estimator.mivolo.demo_copy import main as age_estimation_main
+
+ def process_video(video_file):
+     # Validate the input file path
+     if not video_file or not os.path.isfile(video_file):
+         return {'error': 'Invalid video path'}
+
+     # Run the three analyses in parallel
+     with ThreadPoolExecutor() as executor:
+         heart_future = executor.submit(heart, video_file)
+         stress_future = executor.submit(stress, video_file, duration=30)
+
+         # Parameters for age estimation
+         output_folder = 'output'
+         detector_weights = 'age_estimator/mivolo/models/yolov8x_person_face.pt'
+         checkpoint = 'age_estimator/mivolo/models/model_imdb_cross_person_4.22_99.46.pth.tar'
+         device = 'cpu'
+         with_persons = True
+         disable_faces = False
+         draw = True
+
+         age_future = executor.submit(
+             age_estimation_main, video_file, output_folder, detector_weights,
+             checkpoint, device, with_persons, disable_faces, draw
+         )
+
+         # Retrieve results (each call blocks until its task finishes)
+         avg_bpm, frames_processed = heart_future.result()
+         stressed_count, not_stressed_count, most_frequent_label = stress_future.result()
+         absolute_age, lower_bound, upper_bound = age_future.result()
+
+     # Compile results
+     results = {
+         'Average BPM': avg_bpm,
+         'Most Frequent State': most_frequent_label,
+         'Age Range': f"{lower_bound} - {upper_bound}"
+     }
+
+     return results
+
+ # Define the Gradio interface
+ gr.Interface(
+     fn=process_video,
+     inputs=gr.Video(label="Upload a video file"),
+     outputs="json",
+     title="Parallel Video Processing for Heart Rate, Stress, and Age Estimation"
+ ).launch()
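
One caveat worth noting: gr.Interface(...).launch() runs at import time, so importing app.py from another module would start the server as a side effect. A minimal variant with an import guard (a sketch, not part of the uploaded file):

    # Build the interface once; start the server only when run as a script
    demo = gr.Interface(
        fn=process_video,
        inputs=gr.Video(label="Upload a video file"),
        outputs="json",
        title="Parallel Video Processing for Heart Rate, Stress, and Age Estimation"
    )

    if __name__ == '__main__':
        demo.launch()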
eyebrow_detection_modified_copy.py ADDED
@@ -0,0 +1,125 @@
+ import argparse
+ from scipy.spatial import distance as dist
+ from imutils import face_utils
+ import numpy as np
+ import imutils
+ import time
+ import dlib
+ import cv2
+ from keras.preprocessing.image import img_to_array
+ from keras.models import load_model
+
+ def eye_brow_distance(leye, reye):
+     # Track the eyebrow distance over time in the global points list
+     global points
+     distq = dist.euclidean(leye, reye)
+     points.append(int(distq))
+     return distq
+
+ def emotion_finder(faces, frame):
+     global emotion_classifier
+     EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]
+     x, y, w, h = face_utils.rect_to_bb(faces)
+     frame = frame[y:y + h, x:x + w]
+     roi = cv2.resize(frame, (64, 64))
+     roi = roi.astype("float") / 255.0
+     roi = img_to_array(roi)
+     roi = np.expand_dims(roi, axis=0)
+     preds = emotion_classifier.predict(roi)[0]
+     label = EMOTIONS[preds.argmax()]
+     return label
+
+ def normalize_values(points, disp):
+     # Scale the current distance into [0, 1] relative to the range seen so far
+     normalized_value = abs(disp - np.min(points)) / abs(np.max(points) - np.min(points))
+     stress_value = np.exp(-normalized_value)
+     return stress_value
+
+ def stress(video_path, duration):
+     global points, emotion_classifier
+     detector = dlib.get_frontal_face_detector()
+     predictor = dlib.shape_predictor("stress_detection/models/data")
+     emotion_classifier = load_model("stress_detection/models/_mini_XCEPTION.102-0.66.hdf5", compile=False)
+
+     cap = cv2.VideoCapture(video_path)
+     points = []
+     stress_labels = []
+     start_time = time.time()
+
+     while True:
+         if time.time() - start_time >= duration:
+             break
+
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         frame = cv2.flip(frame, 1)
+         frame = imutils.resize(frame, width=500, height=500)
+
+         (lBegin, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
+         (rBegin, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
+
+         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+         try:
+             detections = detector(gray, 0)
+             for detection in detections:
+                 emotion = emotion_finder(detection, gray)
+                 shape = predictor(gray, detection)
+                 shape = face_utils.shape_to_np(shape)
+
+                 leyebrow = shape[lBegin:lEnd]
+                 reyebrow = shape[rBegin:rEnd]
+
+                 # Distance between the inner endpoints of the two eyebrows
+                 distq = eye_brow_distance(leyebrow[-1], reyebrow[0])
+                 stress_value = normalize_values(points, distq)
+
+                 # Label the frame as stressed only for a negative emotion with a high stress value
+                 if emotion in ['scared', 'sad', 'angry'] and stress_value >= 0.75:
+                     stress_labels.append('stressed')
+                 else:
+                     stress_labels.append('not stressed')
+         except Exception as e:
+             print(f'Error: {e}')
+
+     cap.release()
+
+     # Count occurrences of 'stressed' and 'not stressed' and report the majority
+     stressed_count = stress_labels.count('stressed')
+     not_stressed_count = stress_labels.count('not stressed')
+     most_frequent_label = 'stressed' if stressed_count > not_stressed_count else 'not stressed'
+
+     return stressed_count, not_stressed_count, most_frequent_label
+
+ def main():
+     # Argument parsing
+     parser = argparse.ArgumentParser(description='Stress Detection from Video')
+     parser.add_argument('--video', type=str, required=True, help='Path to the input video file')
+     parser.add_argument('--duration', type=int, default=30, help='Duration for analysis in seconds')
+     args = parser.parse_args()
+
+     # Call the stress function and display the results
+     stressed_count, not_stressed_count, most_frequent_label = stress(args.video, args.duration)
+
+     print(f"Stressed frames: {stressed_count}")
+     print(f"Not stressed frames: {not_stressed_count}")
+     print(f"Most frequent state: {most_frequent_label}")
+
+ if __name__ == '__main__':
+     main()
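
Besides the CLI entry point (python eyebrow_detection_modified_copy.py --video <path> --duration 30), the function can be called directly — a minimal sketch, assuming the dlib landmark model and the _mini_XCEPTION weights exist at the paths hard-coded in stress(), with 'clip.mp4' as a hypothetical local file:

    from stress_detection.eyebrow_detection_modified_copy import stress

    # Per-frame counts plus the majority label over the first 30 seconds
    stressed, not_stressed, verdict = stress('clip.mp4', duration=30)
    print(f"{verdict}: {stressed} stressed vs {not_stressed} not stressed frames")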
heartBPM_modified_copy.py ADDED
@@ -0,0 +1,101 @@
+ import numpy as np
+ import cv2
+ import time
+ from cvzone.FaceDetectionModule import FaceDetector
+
+ # Initialization
+ videoWidth = 160
+ videoHeight = 120
+ videoChannels = 3
+ videoFrameRate = 15
+
+ # Helper methods
+ def buildGauss(frame, levels):
+     # Build a Gaussian pyramid by repeatedly downsampling the frame
+     pyramid = [frame]
+     for level in range(levels):
+         frame = cv2.pyrDown(frame)
+         pyramid.append(frame)
+     return pyramid
+
+ def reconstructFrame(pyramid, index, levels):
+     # Upsample a pyramid level back to the working resolution
+     filteredFrame = pyramid[index]
+     for level in range(levels):
+         filteredFrame = cv2.pyrUp(filteredFrame)
+     filteredFrame = filteredFrame[:videoHeight, :videoWidth]
+     return filteredFrame
+
+ # Main heart rate function
+ def heart(video_file_path):
+     levels = 3
+     alpha = 170
+     minFrequency = 1.0
+     maxFrequency = 2.0
+     bufferSize = 150
+     bufferIndex = 0
+
+     detector = FaceDetector()
+
+     video = cv2.VideoCapture(video_file_path)
+
+     firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
+     firstGauss = buildGauss(firstFrame, levels + 1)[levels]
+     videoGauss = np.zeros((bufferSize, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
+     fourierTransformAvg = np.zeros((bufferSize))
+
+     # Frequency bins for the FFT buffer; 1.0-2.0 Hz corresponds to 60-120 BPM
+     frequencies = (1.0 * videoFrameRate) * np.arange(bufferSize) / (1.0 * bufferSize)
+     mask = (frequencies >= minFrequency) & (frequencies <= maxFrequency)
+
+     bpmCalculationFrequency = 10
+     bpmBufferIndex = 0
+     bpmBufferSize = 10
+     bpmBuffer = np.zeros((bpmBufferSize))
+
+     bpmList = []
+     startTime = time.time()
+     frameCount = 0
+
+     while True:
+         ret, frame = video.read()
+         if not ret:
+             break
+
+         # Stop after 30 seconds of processing
+         if time.time() - startTime >= 30:
+             break
+
+         frame, bboxs = detector.findFaces(frame, draw=False)
+         frameCount += 1
+
+         if bboxs:
+             x1, y1, w1, h1 = bboxs[0]['bbox']
+
+             # Check that the bounding box is valid
+             if x1 >= 0 and y1 >= 0 and w1 > 0 and h1 > 0:
+                 detectionFrame = frame[y1:y1 + h1, x1:x1 + w1]
+
+                 # Check that detectionFrame is not empty before resizing
+                 if detectionFrame.size != 0:
+                     detectionFrame = cv2.resize(detectionFrame, (videoWidth, videoHeight))
+
+                     # Band-pass the pixel time series via FFT over the frame buffer
+                     videoGauss[bufferIndex] = buildGauss(detectionFrame, levels + 1)[levels]
+                     fourierTransform = np.fft.fft(videoGauss, axis=0)
+                     fourierTransform[~mask] = 0
+
+                     if bufferIndex % bpmCalculationFrequency == 0:
+                         for buf in range(bufferSize):
+                             fourierTransformAvg[buf] = np.real(fourierTransform[buf]).mean()
+                         hz = frequencies[np.argmax(fourierTransformAvg)]
+                         bpm = 60.0 * hz
+                         bpmBuffer[bpmBufferIndex] = bpm
+                         bpmBufferIndex = (bpmBufferIndex + 1) % bpmBufferSize
+                         bpmList.append(bpmBuffer.mean())
+
+                     bufferIndex = (bufferIndex + 1) % bufferSize
+         else:
+             # If no face is detected, skip to the next frame
+             continue
+
+     avgBPM = np.mean(bpmList) if bpmList else 0
+     video.release()
+
+     return avgBPM, frameCount
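
heart() can likewise be smoke-tested on its own — a minimal sketch, assuming cvzone can load its default face-detection model and 'clip.mp4' is a hypothetical local file:

    from bpm_app.heartBPM_modified_copy import heart

    # Average BPM over up to 30 seconds of footage, plus the number of frames read
    avg_bpm, frames = heart('clip.mp4')
    print(f"Processed {frames} frames; average BPM: {avg_bpm:.1f}")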
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ huggingface_hub
+ tensorflow
+ ultralytics==8.1.0
+ timm==0.8.13.dev0
+ yt_dlp
+ lapx>=0.5.2
+ typing-extensions
+ cvzone
+ keras
+ cmake
+ dlib
+ imutils
+ opencv-python
+
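
Installation is the usual pip step; cmake is included because dlib is compiled from source on install and needs it:

    pip install -r requirements.txt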