File size: 2,407 Bytes
efabbbd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import gradio as gr
import matplotlib.pyplot as plt
import tempfile, cv2, dlib, imutils
import torch
from scipy.spatial import distance
from imutils import face_utils

def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for a 6-point eye landmark array.

    The EAR is the mean of the two vertical landmark distances divided by
    the horizontal distance; it drops sharply when the eye closes.
    """
    vertical_a = distance.euclidean(eye[1], eye[5])
    vertical_b = distance.euclidean(eye[2], eye[4])
    horizontal = distance.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)

def detect_blinks(video_file, ear_threshold=0.3, consec_frames=5):
    """Count eye blinks in a video and plot the EAR signal over time.

    A blink is registered when the eye aspect ratio (EAR) stays below
    ``ear_threshold`` for at least ``consec_frames`` consecutive frames
    and then rises back above it.

    Args:
        video_file: Path to the input video file.
        ear_threshold: EAR value below which the eyes are considered closed.
        consec_frames: Minimum number of consecutive low-EAR frames that
            qualify as one blink.

    Returns:
        A tuple ``(total_blinks, plot_path)`` where ``plot_path`` is the
        filename of a PNG plot of the EAR signal per processed frame.
    """
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("assets/models/shape_predictor_68_face_landmarks.dat")

    cap = cv2.VideoCapture(video_file)
    ear_list, counter, total = [], 0, 0

    # Ensure the capture is released even if decoding/detection raises.
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Downscale for speed; dlib detection cost grows with image size.
            gray = cv2.cvtColor(imutils.resize(frame, width=500), cv2.COLOR_BGR2GRAY)
            for rect in detector(gray, 0):
                shape = face_utils.shape_to_np(predictor(gray, rect))
                l_start, l_end = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
                r_start, r_end = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
                ear = (eye_aspect_ratio(shape[l_start:l_end])
                       + eye_aspect_ratio(shape[r_start:r_end])) / 2.0
                ear_list.append(ear)

                if ear < ear_threshold:
                    counter += 1
                else:
                    # Eyes reopened: count the blink only if the closure
                    # lasted long enough to rule out noise.
                    if counter >= consec_frames:
                        total += 1
                    counter = 0
    finally:
        cap.release()

    plt.figure(figsize=(10, 4))
    plt.plot(ear_list)
    plt.title('Eye Aspect Ratio over Time')
    plt.xlabel('Frame')
    plt.ylabel('EAR')
    plt.tight_layout()
    # Close the handle immediately: we only need a unique on-disk name;
    # the unclosed descriptor in the original leaked per call.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
    temp_file.close()
    plt.savefig(temp_file.name)
    plt.close()

    return total, temp_file.name

def create_blink_tab():
    """Build the Gradio tab for the blink-detection demo.

    Lays out a video input with Clear/Analyze buttons on the left and the
    blink count plus EAR plot on the right, then wires the callbacks.
    """
    with gr.Row():
        with gr.Column(scale=2):
            video_input = gr.Video(label="Input Video")
            with gr.Row():
                reset_button = gr.Button("Clear", scale=1)
                analyze_button = gr.Button("Analyze", scale=1, elem_classes="submit")
        with gr.Column(scale=1):
            blink_count = gr.Label(label="Blink Count")
            ear_plot = gr.Image(label="EAR Plot")

    # Analyze runs the detector; Clear resets all three components.
    analyze_button.click(fn=detect_blinks, inputs=[video_input],
                         outputs=[blink_count, ear_plot])
    reset_button.click(lambda: (None, None, None),
                       outputs=[video_input, blink_count, ear_plot])
    gr.Examples(["./assets/videos/fitness.mp4"], [video_input])