# Heartandstress / heartBPM_modified_copy.py
import numpy as np
import cv2
import time
from cvzone.FaceDetectionModule import FaceDetector
# Initialization: working resolution for the face crop and the assumed capture rate
videoWidth = 160
videoHeight = 120
videoChannels = 3
videoFrameRate = 15  # assumed frames per second of the input video; frequency estimates scale with this value
# Helper Methods
def buildGauss(frame, levels):
    # Build a Gaussian pyramid by repeatedly downsampling the frame
    pyramid = [frame]
    for level in range(levels):
        frame = cv2.pyrDown(frame)
        pyramid.append(frame)
    return pyramid
def reconstructFrame(pyramid, index, levels):
    # Upsample a pyramid level back to the working resolution (not called by heart() below)
    filteredFrame = pyramid[index]
    for level in range(levels):
        filteredFrame = cv2.pyrUp(filteredFrame)
    filteredFrame = filteredFrame[:videoHeight, :videoWidth]
    return filteredFrame
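# A quick sanity check of the pyramid helper (a minimal sketch, not part of the
# pipeline; `demoPyramid` is only illustrative): with the 160x120 working
# resolution above, heart() below keeps pyramid level `levels` = 3, whose frames
# are roughly 20x15 pixels.
#
#   demoPyramid = buildGauss(np.zeros((videoHeight, videoWidth, videoChannels)), 3 + 1)
#   print([p.shape for p in demoPyramid])
#   # [(120, 160, 3), (60, 80, 3), (30, 40, 3), (15, 20, 3), (8, 10, 3)]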
# Main heart rate function
def heart(video_file_path):
    levels = 3
    alpha = 170  # amplification factor (not used in this function)
    minFrequency = 1.0
    maxFrequency = 2.0
    bufferSize = 150
    bufferIndex = 0

    detector = FaceDetector()
    video = cv2.VideoCapture(video_file_path)

    firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
    firstGauss = buildGauss(firstFrame, levels + 1)[levels]
    videoGauss = np.zeros((bufferSize, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
    fourierTransformAvg = np.zeros((bufferSize))
    frequencies = (1.0 * videoFrameRate) * np.arange(bufferSize) / (1.0 * bufferSize)
    mask = (frequencies >= minFrequency) & (frequencies <= maxFrequency)
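    # Worked example of the band-pass setup above: with videoFrameRate = 15 and
    # bufferSize = 150, the FFT bins are spaced 15 / 150 = 0.1 Hz apart, and the
    # [1.0, 2.0] Hz pass band corresponds to 60-120 beats per minute (bpm = 60 * Hz).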
    bpmCalculationFrequency = 10
    bpmBufferIndex = 0
    bpmBufferSize = 10
    bpmBuffer = np.zeros((bpmBufferSize))  # rolling buffer of recent bpm estimates (starts at zero, so early averages are biased low)
    bpmList = []

    startTime = time.time()
    frameCount = 0

    while True:
        ret, frame = video.read()
        if not ret:
            break

        # Stop after 30 seconds of wall-clock processing time
        elapsedTime = time.time() - startTime
        if elapsedTime >= 30:
            break

        frame, bboxs = detector.findFaces(frame, draw=False)
        frameCount += 1

        if bboxs:
            x1, y1, w1, h1 = bboxs[0]['bbox']
            # Check if the bounding box is valid
            if x1 >= 0 and y1 >= 0 and w1 > 0 and h1 > 0:
                detectionFrame = frame[y1:y1 + h1, x1:x1 + w1]
                # Check if detectionFrame is valid and not empty before resizing
                if detectionFrame.size != 0:
                    detectionFrame = cv2.resize(detectionFrame, (videoWidth, videoHeight))

                    # Push the coarsest pyramid level into the rolling buffer and
                    # band-pass filter it along the time axis
                    videoGauss[bufferIndex] = buildGauss(detectionFrame, levels + 1)[levels]
                    fourierTransform = np.fft.fft(videoGauss, axis=0)
                    fourierTransform[~mask] = 0

                    # Every bpmCalculationFrequency frames, take the strongest
                    # remaining frequency as the current pulse estimate
                    if bufferIndex % bpmCalculationFrequency == 0:
                        for buf in range(bufferSize):
                            fourierTransformAvg[buf] = np.real(fourierTransform[buf]).mean()
                        hz = frequencies[np.argmax(fourierTransformAvg)]
                        bpm = 60.0 * hz
                        bpmBuffer[bpmBufferIndex] = bpm
                        bpmBufferIndex = (bpmBufferIndex + 1) % bpmBufferSize
                        bpmList.append(bpmBuffer.mean())

                    bufferIndex = (bufferIndex + 1) % bufferSize
        else:
            # If no face is detected, skip to the next frame
            continue
    avgBPM = np.mean(bpmList) if bpmList else 0
    video.release()
    return avgBPM, frameCount
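# Minimal usage sketch (assumes a local video file; "sample_face_video.mp4" is a
# placeholder path, not part of the original repository):
if __name__ == "__main__":
    avg_bpm, frames_processed = heart("sample_face_video.mp4")
    print(f"Average BPM: {avg_bpm:.1f} over {frames_processed} frames")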