vitorcalvi committed
Commit
efabbbd
1 Parent(s): 72e8f6a

pre-launch

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. LICENSE +21 -0
  2. README.md +34 -7
  3. __pycache__/audio_OK.cpython-310.pyc +0 -0
  4. __pycache__/fixapp.cpython-310.pyc +0 -0
  5. __pycache__/mpstest.cpython-310.pyc +0 -0
  6. __pycache__/tinnitus.cpython-310.pyc +0 -0
  7. __pycache__/ui_components.cpython-310.pyc +0 -0
  8. app.py +66 -0
  9. app/__init__.py +0 -0
  10. app/__pycache__/__init__.cpython-310.pyc +0 -0
  11. app/__pycache__/__init__.cpython-312.pyc +0 -0
  12. app/__pycache__/__init__.cpython-38.pyc +0 -0
  13. app/__pycache__/app_utils.cpython-310.pyc +0 -0
  14. app/__pycache__/app_utils.cpython-312.pyc +0 -0
  15. app/__pycache__/app_utils.cpython-38.pyc +0 -0
  16. app/__pycache__/authors.cpython-310.pyc +0 -0
  17. app/__pycache__/authors.cpython-312.pyc +0 -0
  18. app/__pycache__/authors.cpython-38.pyc +0 -0
  19. app/__pycache__/config.cpython-310.pyc +0 -0
  20. app/__pycache__/config.cpython-312.pyc +0 -0
  21. app/__pycache__/config.cpython-38.pyc +0 -0
  22. app/__pycache__/description.cpython-310.pyc +0 -0
  23. app/__pycache__/description.cpython-312.pyc +0 -0
  24. app/__pycache__/description.cpython-38.pyc +0 -0
  25. app/__pycache__/face_utils.cpython-310.pyc +0 -0
  26. app/__pycache__/face_utils.cpython-312.pyc +0 -0
  27. app/__pycache__/face_utils.cpython-38.pyc +0 -0
  28. app/__pycache__/model.cpython-310.pyc +0 -0
  29. app/__pycache__/model.cpython-312.pyc +0 -0
  30. app/__pycache__/model.cpython-38.pyc +0 -0
  31. app/__pycache__/model_architectures.cpython-310.pyc +0 -0
  32. app/__pycache__/model_architectures.cpython-312.pyc +0 -0
  33. app/__pycache__/model_architectures.cpython-38.pyc +0 -0
  34. app/__pycache__/plot.cpython-310.pyc +0 -0
  35. app/__pycache__/plot.cpython-312.pyc +0 -0
  36. app/__pycache__/plot.cpython-38.pyc +0 -0
  37. app/app_utils.py +321 -0
  38. app/au_processing.py +64 -0
  39. app/authors.py +34 -0
  40. app/config.py +49 -0
  41. app/description.py +46 -0
  42. app/face_utils.py +68 -0
  43. app/image_processing.py +49 -0
  44. app/model.py +64 -0
  45. app/model_architectures.py +150 -0
  46. app/plot.py +29 -0
  47. app/sleep_quality_processing.py +94 -0
  48. app/video_processing.py +132 -0
  49. app_gpuzero.py +64 -0
  50. assets/.DS_Store +0 -0
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Elena Ryumina
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,13 +1,40 @@
  ---
- title: MMESA ZeroGPU
- emoji: 📈
- colorFrom: red
- colorTo: yellow
+ title: Multi-Modal for Emotion and Sentiment Analysis (MMESA)
+ emoji: 😀😲😐😥🥴😱😡
+ colorFrom: blue
+ colorTo: pink
  sdk: gradio
- sdk_version: 4.39.0
+ sdk_version: 4.24.0
  app_file: app.py
  pinned: false
- license: apache-2.0
+ license: mit
+ short_description: A tool to detect Stress, Anxiety and Depression
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ## Technologies
+
+ This project uses several Python scripts, one per analysis or recognition task:
+
+ - `blink_detection.py`: Detects and analyzes blinking patterns.
+ - `body_movement_analysis.py`: Analyzes body movements.
+ - `emotion_analysis.py`: Analyzes emotional states.
+ - `face_expressions.py`: Recognizes facial expressions.
+ - `FACS_analysis_sad.py`: Performs Facial Action Coding System analysis for sadness.
+ - `gaze_estimation.py`: Estimates gaze direction.
+ - `head_posture_detection.py`: Detects head posture.
+ - `heart_rate_variability.py`: Analyzes heart rate variability.
+ - `posture_analysis.py`: Analyzes posture.
+ - `roberta_chatbot.py`: Chatbot using the RoBERTa model.
+ - `sentiment_analysis.py`: Performs sentiment analysis.
+ - `skin_analysis.py`: Analyzes skin conditions.
+ - `sleep_quality.py`: Evaluates sleep quality.
+ - `speech_emotion_recognition.py`: Recognizes emotions from speech.
+ - `speech_stress_analysis.py`: Analyzes stress levels from speech.
+
+ Together these scripts provide analysis of multiple aspects of human behavior and physiology.
+
+ ## Upload Trick to HF (Hugging Face)
+
+ git lfs track "*.dat" && git lfs track "*.pt" && git add .gitattributes && git add assets/models/shape_predictor_68_face_landmarks.dat && git add assets/models/FER_dinamic_LSTM_IEMOCAP.pt && git add assets/models/FER_static_ResNet50_AffectNet.pt && git commit -m 'bigfiles' && git push origin main --force
+
+ git add . && git commit -m 'pre-launch' && git push
__pycache__/audio_OK.cpython-310.pyc ADDED
Binary file (2.29 kB).
__pycache__/fixapp.cpython-310.pyc ADDED
Binary file (2.37 kB).
__pycache__/mpstest.cpython-310.pyc ADDED
Binary file (1.04 kB).
__pycache__/tinnitus.cpython-310.pyc ADDED
Binary file (3.24 kB).
__pycache__/ui_components.cpython-310.pyc ADDED
Binary file (3.82 kB).
app.py ADDED
@@ -0,0 +1,66 @@
+ import gradio as gr
+ from tabs.heart_rate_variability import create_hrv_tab
+ from tabs.blink_detection import create_blink_tab
+ from tabs.gaze_estimation import create_gaze_estimation_tab
+ from tabs.speech_stress_analysis import create_voice_stress_tab
+ from tabs.head_posture_detection import create_head_posture_tab
+ from tabs.face_expressions import create_face_expressions_tab
+ from tabs.speech_emotion_recognition import create_emotion_recognition_tab
+ from tabs.sleep_quality import create_sleep_quality_tab
+ from tabs.sentiment_analysis import create_sentiment_tab
+ from tabs.emotion_analysis import create_emotion_tab
+ from tabs.body_movement_analysis import create_body_movement_tab
+ from tabs.posture_analysis import create_posture_analysis_tab
+ from tabs.skin_analysis import create_skin_conductance_tab
+ from tabs.FACS_analysis_sad import create_facs_analysis_sad_tab
+ from tabs.roberta_chatbot import create_roberta_chatbot_tab
+ import spaces
+
+ # Import the UI components
+ from ui_components import CUSTOM_CSS, HEADER_HTML, DISCLAIMER_HTML
+
+ TAB_STRUCTURE = [
+     ("Visual Analysis", [
+         ("Emotional Face Expressions", create_face_expressions_tab),
+         ("FACS for Stress, Anxiety, Depression", create_facs_analysis_sad_tab),
+         ("Gaze Estimation", create_gaze_estimation_tab),
+         ("Head Posture", create_head_posture_tab),
+         ("Blink Rate", create_blink_tab),
+         ("Sleep Quality", create_sleep_quality_tab),
+         ("Heart Rate Variability", create_hrv_tab),
+         ("Body Movement", create_body_movement_tab),
+         ("Posture", create_posture_analysis_tab),
+         ("Skin", create_skin_conductance_tab)
+     ]),
+     ("Speech Analysis", [
+         ("Speech Stress", create_voice_stress_tab),
+         ("Speech Emotion", create_emotion_recognition_tab)
+     ]),
+     ("Text Analysis", [
+         ("Sentiment", create_sentiment_tab),
+         ("Emotion", create_emotion_tab),
+         ("Roberta Mental Health Chatbot", create_roberta_chatbot_tab)
+     ]),
+     ("Brain Analysis (coming soon)", [
+     ])
+ ]
+
+ @spaces.GPU
+ def create_demo():
+     with gr.Blocks(css=CUSTOM_CSS) as demo:
+         gr.Markdown(HEADER_HTML)
+         with gr.Tabs(elem_classes=["main-tab"]):
+             for main_tab, sub_tabs in TAB_STRUCTURE:
+                 with gr.Tab(main_tab):
+                     with gr.Tabs():
+                         for sub_tab, create_fn in sub_tabs:
+                             with gr.Tab(sub_tab):
+                                 create_fn()
+         gr.HTML(DISCLAIMER_HTML)
+     return demo
+
+ # Create the demo instance
+ demo = create_demo()
+
+ if __name__ == "__main__":
+     demo.queue(api_open=True).launch(share=False)
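The `tabs` package and `ui_components` module imported by `app.py` are not among the 50 files shown in this truncated view. As a hedged sketch of the contract `app.py` appears to rely on, each `create_*_tab` factory would be a zero-argument function that builds its Gradio components inside the currently open `gr.Tab` context; the names below are hypothetical and not part of this commit.

```python
# Hypothetical tab factory matching the create_fn() call pattern in app.py.
# The real tabs/*.py modules are not included in this 50-file view.
import gradio as gr

def create_placeholder_tab():
    # Runs inside `with gr.Tab(...):`, so components attach to that tab.
    video = gr.Video(label="Input video")
    result = gr.JSON(label="Analysis result")
    run = gr.Button("Analyze")
    run.click(fn=lambda v: {"received_video": bool(v)}, inputs=video, outputs=result)
```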
app/__init__.py ADDED
File without changes
app/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (150 Bytes).
app/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (168 Bytes).
app/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (162 Bytes).
app/__pycache__/app_utils.cpython-310.pyc ADDED
Binary file (8.47 kB).
app/__pycache__/app_utils.cpython-312.pyc ADDED
Binary file (8.32 kB).
app/__pycache__/app_utils.cpython-38.pyc ADDED
Binary file (4.24 kB).
app/__pycache__/authors.cpython-310.pyc ADDED
Binary file (2.42 kB).
app/__pycache__/authors.cpython-312.pyc ADDED
Binary file (2.44 kB).
app/__pycache__/authors.cpython-38.pyc ADDED
Binary file (2.43 kB).
app/__pycache__/config.cpython-310.pyc ADDED
Binary file (984 Bytes).
app/__pycache__/config.cpython-312.pyc ADDED
Binary file (1.31 kB).
app/__pycache__/config.cpython-38.pyc ADDED
Binary file (985 Bytes).
app/__pycache__/description.cpython-310.pyc ADDED
Binary file (2.31 kB).
app/__pycache__/description.cpython-312.pyc ADDED
Binary file (1.73 kB).
app/__pycache__/description.cpython-38.pyc ADDED
Binary file (1.63 kB).
app/__pycache__/face_utils.cpython-310.pyc ADDED
Binary file (2.21 kB).
app/__pycache__/face_utils.cpython-312.pyc ADDED
Binary file (4.27 kB).
app/__pycache__/face_utils.cpython-38.pyc ADDED
Binary file (2.22 kB).
app/__pycache__/model.cpython-310.pyc ADDED
Binary file (2.77 kB).
app/__pycache__/model.cpython-312.pyc ADDED
Binary file (4.17 kB).
app/__pycache__/model.cpython-38.pyc ADDED
Binary file (2.7 kB).
app/__pycache__/model_architectures.cpython-310.pyc ADDED
Binary file (5.25 kB).
app/__pycache__/model_architectures.cpython-312.pyc ADDED
Binary file (9.69 kB).
app/__pycache__/model_architectures.cpython-38.pyc ADDED
Binary file (5.25 kB).
app/__pycache__/plot.cpython-310.pyc ADDED
Binary file (1.1 kB).
app/__pycache__/plot.cpython-312.pyc ADDED
Binary file (1.6 kB).
app/__pycache__/plot.cpython-38.pyc ADDED
Binary file (1.12 kB).
app/app_utils.py ADDED
@@ -0,0 +1,321 @@
+ """
+ File: app_utils.py
+ Author: Elena Ryumina and Dmitry Ryumin (modified by Assistant)
+ Description: This module contains utility functions for the facial expression recognition application, including FACS Analysis for SAD.
+ License: MIT License
+ """
+
+ import torch
+ import numpy as np
+ import mediapipe as mp
+ from PIL import Image
+ import cv2
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ import matplotlib.pyplot as plt
+
+ # Importing necessary components for the Gradio app
+ from app.model import pth_model_static, pth_model_dynamic, cam, pth_processing
+ from app.face_utils import get_box, display_info
+ from app.config import DICT_EMO, config_data
+ from app.plot import statistics_plot
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+ def preprocess_image_and_predict(inp):
+     inp = np.array(inp)
+
+     if inp is None:
+         return None, None, None
+
+     try:
+         h, w = inp.shape[:2]
+     except Exception:
+         return None, None, None
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5,
+     ) as face_mesh:
+         results = face_mesh.process(inp)
+         if results.multi_face_landmarks:
+             for fl in results.multi_face_landmarks:
+                 startX, startY, endX, endY = get_box(fl, w, h)
+                 cur_face = inp[startY:endY, startX:endX]
+                 cur_face_n = pth_processing(Image.fromarray(cur_face))
+                 with torch.no_grad():
+                     prediction = (
+                         torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
+                         .detach()
+                         .numpy()[0]
+                     )
+                 confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)}
+                 grayscale_cam = cam(input_tensor=cur_face_n)
+                 grayscale_cam = grayscale_cam[0, :]
+                 cur_face_hm = cv2.resize(cur_face, (224, 224))
+                 cur_face_hm = np.float32(cur_face_hm) / 255
+                 heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True)
+
+     return cur_face, heatmap, confidences
+
+ def preprocess_frame_and_predict_aus(frame):
+     if len(frame.shape) == 2:
+         frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
+     elif frame.shape[2] == 4:
+         frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5
+     ) as face_mesh:
+         results = face_mesh.process(frame)
+
+         if results.multi_face_landmarks:
+             h, w = frame.shape[:2]
+             for fl in results.multi_face_landmarks:
+                 startX, startY, endX, endY = get_box(fl, w, h)
+                 cur_face = frame[startY:endY, startX:endX]
+                 cur_face_n = pth_processing(Image.fromarray(cur_face))
+
+                 with torch.no_grad():
+                     features = pth_model_static(cur_face_n)
+                     au_intensities = features_to_au_intensities(features)
+
+                 grayscale_cam = cam(input_tensor=cur_face_n)
+                 grayscale_cam = grayscale_cam[0, :]
+                 cur_face_hm = cv2.resize(cur_face, (224, 224))
+                 cur_face_hm = np.float32(cur_face_hm) / 255
+                 heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True)
+
+                 return cur_face, au_intensities, heatmap
+
+     return None, None, None
+
+ def features_to_au_intensities(features):
+     features_np = features.detach().cpu().numpy()[0]
+     au_intensities = (features_np - features_np.min()) / (features_np.max() - features_np.min())
+     return au_intensities[:24]  # Assuming we want 24 AUs
+
+ def preprocess_video_and_predict(video):
+     cap = cv2.VideoCapture(video)
+     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
+
+     path_save_video_face = 'result_face.mp4'
+     vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     path_save_video_hm = 'result_hm.mp4'
+     vid_writer_hm = cv2.VideoWriter(path_save_video_hm, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     lstm_features = []
+     count_frame = 1
+     count_face = 0
+     probs = []
+     frames = []
+     au_intensities_list = []
+     last_output = None
+     last_heatmap = None
+     last_au_intensities = None
+     cur_face = None
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5) as face_mesh:
+
+         while cap.isOpened():
+             _, frame = cap.read()
+             if frame is None: break
+
+             frame_copy = frame.copy()
+             frame_copy.flags.writeable = False
+             frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+             results = face_mesh.process(frame_copy)
+             frame_copy.flags.writeable = True
+
+             if results.multi_face_landmarks:
+                 for fl in results.multi_face_landmarks:
+                     startX, startY, endX, endY = get_box(fl, w, h)
+                     cur_face = frame_copy[startY:endY, startX: endX]
+
+                     if count_face%config_data.FRAME_DOWNSAMPLING == 0:
+                         cur_face_copy = pth_processing(Image.fromarray(cur_face))
+                         with torch.no_grad():
+                             features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()
+                             au_intensities = features_to_au_intensities(pth_model_static(cur_face_copy))
+
+                         grayscale_cam = cam(input_tensor=cur_face_copy)
+                         grayscale_cam = grayscale_cam[0, :]
+                         cur_face_hm = cv2.resize(cur_face,(224,224), interpolation = cv2.INTER_AREA)
+                         cur_face_hm = np.float32(cur_face_hm) / 255
+                         heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=False)
+                         last_heatmap = heatmap
+                         last_au_intensities = au_intensities
+
+                         if len(lstm_features) == 0:
+                             lstm_features = [features]*10
+                         else:
+                             lstm_features = lstm_features[1:] + [features]
+
+                         lstm_f = torch.from_numpy(np.vstack(lstm_features))
+                         lstm_f = torch.unsqueeze(lstm_f, 0)
+                         with torch.no_grad():
+                             output = pth_model_dynamic(lstm_f).detach().numpy()
+                         last_output = output
+
+                         if count_face == 0:
+                             count_face += 1
+
+                     else:
+                         if last_output is not None:
+                             output = last_output
+                             heatmap = last_heatmap
+                             au_intensities = last_au_intensities
+
+                         elif last_output is None:
+                             output = np.empty((1, 7))
+                             output[:] = np.nan
+                             au_intensities = np.empty(24)
+                             au_intensities[:] = np.nan
+
+                     probs.append(output[0])
+                     frames.append(count_frame)
+                     au_intensities_list.append(au_intensities)
+             else:
+                 if last_output is not None:
+                     lstm_features = []
+                     empty = np.empty((7))
+                     empty[:] = np.nan
+                     probs.append(empty)
+                     frames.append(count_frame)
+                     au_intensities_list.append(np.full(24, np.nan))
+
+             if cur_face is not None:
+                 heatmap_f = display_info(heatmap, 'Frame: {}'.format(count_frame), box_scale=.3)
+
+                 cur_face = cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR)
+                 cur_face = cv2.resize(cur_face, (224,224), interpolation = cv2.INTER_AREA)
+                 cur_face = display_info(cur_face, 'Frame: {}'.format(count_frame), box_scale=.3)
+                 vid_writer_face.write(cur_face)
+                 vid_writer_hm.write(heatmap_f)
+
+             count_frame += 1
+             if count_face != 0:
+                 count_face += 1
+
+     vid_writer_face.release()
+     vid_writer_hm.release()
+
+     stat = statistics_plot(frames, probs)
+     au_stat = au_statistics_plot(frames, au_intensities_list)
+
+     if not stat or not au_stat:
+         return None, None, None, None, None
+
+     return video, path_save_video_face, path_save_video_hm, stat, au_stat
+
+ def au_statistics_plot(frames, au_intensities_list):
+     fig, ax = plt.subplots(figsize=(12, 6))
+     au_intensities_array = np.array(au_intensities_list)
+
+     for i in range(au_intensities_array.shape[1]):
+         ax.plot(frames, au_intensities_array[:, i], label=f'AU{i+1}')
+
+     ax.set_xlabel('Frame')
+     ax.set_ylabel('AU Intensity')
+     ax.set_title('Action Unit Intensities Over Time')
+     ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
+     plt.tight_layout()
+     return fig
+
+ def preprocess_video_and_predict_sleep_quality(video):
+     cap = cv2.VideoCapture(video)
+     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
+
+     path_save_video_original = 'result_original.mp4'
+     path_save_video_face = 'result_face.mp4'
+     path_save_video_sleep = 'result_sleep.mp4'
+
+     vid_writer_original = cv2.VideoWriter(path_save_video_original, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+     vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+     vid_writer_sleep = cv2.VideoWriter(path_save_video_sleep, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     frames = []
+     sleep_quality_scores = []
+     eye_bags_images = []
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5) as face_mesh:
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             results = face_mesh.process(frame_rgb)
+
+             if results.multi_face_landmarks:
+                 for fl in results.multi_face_landmarks:
+                     startX, startY, endX, endY = get_box(fl, w, h)
+                     cur_face = frame_rgb[startY:endY, startX:endX]
+
+                     sleep_quality_score, eye_bags_image = analyze_sleep_quality(cur_face)
+                     sleep_quality_scores.append(sleep_quality_score)
+                     eye_bags_images.append(cv2.resize(eye_bags_image, (224, 224)))
+
+                     sleep_quality_viz = create_sleep_quality_visualization(cur_face, sleep_quality_score)
+
+                     cur_face = cv2.resize(cur_face, (224, 224))
+
+                     vid_writer_face.write(cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR))
+                     vid_writer_sleep.write(sleep_quality_viz)
+
+             vid_writer_original.write(frame)
+             frames.append(len(frames) + 1)
+
+     cap.release()
+     vid_writer_original.release()
+     vid_writer_face.release()
+     vid_writer_sleep.release()
+
+     sleep_stat = sleep_quality_statistics_plot(frames, sleep_quality_scores)
+
+     if eye_bags_images:
+         average_eye_bags_image = np.mean(np.array(eye_bags_images), axis=0).astype(np.uint8)
+     else:
+         average_eye_bags_image = np.zeros((224, 224, 3), dtype=np.uint8)
+
+     return (path_save_video_original, path_save_video_face, path_save_video_sleep,
+             average_eye_bags_image, sleep_stat)
+
+ def analyze_sleep_quality(face_image):
+     # Placeholder function - implement your sleep quality analysis here
+     sleep_quality_score = np.random.random()
+     eye_bags_image = cv2.resize(face_image, (224, 224))
+     return sleep_quality_score, eye_bags_image
+
+ def create_sleep_quality_visualization(face_image, sleep_quality_score):
+     viz = face_image.copy()
+     cv2.putText(viz, f"Sleep Quality: {sleep_quality_score:.2f}", (10, 30),
+                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+     return cv2.cvtColor(viz, cv2.COLOR_RGB2BGR)
+
+ def sleep_quality_statistics_plot(frames, sleep_quality_scores):
+     # Placeholder function - implement your statistics plotting here
+     fig, ax = plt.subplots()
+     ax.plot(frames, sleep_quality_scores)
+     ax.set_xlabel('Frame')
+     ax.set_ylabel('Sleep Quality Score')
+     ax.set_title('Sleep Quality Over Time')
+     return fig
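The dynamic branch of `preprocess_video_and_predict` above keeps a sliding window of ten per-frame 512-d feature vectors and feeds the stacked window to `pth_model_dynamic` as a `(1, 10, 512)` tensor. A minimal, self-contained sketch of just that windowing step, with random vectors standing in for `pth_model_static.extract_features` output:

```python
# Standalone illustration of the 10-frame feature window used above.
import numpy as np
import torch

def push(window, features, size=10):
    # Seed the window with the first frame's features, then slide by one per new frame.
    return [features] * size if not window else window[1:] + [features]

window = []
for _ in range(3):  # pretend three downsampled frames were processed
    feats = np.random.rand(1, 512).astype(np.float32)  # stand-in for extracted features
    window = push(window, feats)
    lstm_input = torch.from_numpy(np.vstack(window)).unsqueeze(0)
    print(lstm_input.shape)  # torch.Size([1, 10, 512])
```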
app/au_processing.py ADDED
@@ -0,0 +1,64 @@
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import cv2
+ import torch
+ from PIL import Image
+ from app.model import pth_model_static, cam, pth_processing
+ from app.face_utils import get_box
+ from pytorch_grad_cam.utils.image import show_cam_on_image  # needed for the Grad-CAM overlay below
+ import mediapipe as mp
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+ def preprocess_frame_and_predict_aus(frame):
+     if len(frame.shape) == 2:
+         frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
+     elif frame.shape[2] == 4:
+         frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5
+     ) as face_mesh:
+         results = face_mesh.process(frame)
+
+         if results.multi_face_landmarks:
+             h, w = frame.shape[:2]
+             for fl in results.multi_face_landmarks:
+                 startX, startY, endX, endY = get_box(fl, w, h)
+                 cur_face = frame[startY:endY, startX:endX]
+                 cur_face_n = pth_processing(Image.fromarray(cur_face))
+
+                 with torch.no_grad():
+                     features = pth_model_static(cur_face_n)
+                     au_intensities = features_to_au_intensities(features)
+
+                 grayscale_cam = cam(input_tensor=cur_face_n)
+                 grayscale_cam = grayscale_cam[0, :]
+                 cur_face_hm = cv2.resize(cur_face, (224, 224))
+                 cur_face_hm = np.float32(cur_face_hm) / 255
+                 heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True)
+
+                 return cur_face, au_intensities, heatmap
+
+     return None, None, None
+
+ def features_to_au_intensities(features):
+     features_np = features.detach().cpu().numpy()[0]
+     au_intensities = (features_np - features_np.min()) / (features_np.max() - features_np.min())
+     return au_intensities[:24]  # Assuming we want 24 AUs
+
+ def au_statistics_plot(frames, au_intensities_list):
+     fig, ax = plt.subplots(figsize=(12, 6))
+     au_intensities_array = np.array(au_intensities_list)
+
+     for i in range(au_intensities_array.shape[1]):
+         ax.plot(frames, au_intensities_array[:, i], label=f'AU{i+1}')
+
+     ax.set_xlabel('Frame')
+     ax.set_ylabel('AU Intensity')
+     ax.set_title('Action Unit Intensities Over Time')
+     ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
+     plt.tight_layout()
+     return fig
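`features_to_au_intensities` above is a heuristic: it min-max rescales the 512-d feature vector into [0, 1] and keeps the first 24 values as pseudo action-unit intensities. A tiny numeric illustration of that rescaling:

```python
# Numeric illustration of the min-max normalization in features_to_au_intensities.
import numpy as np

features_np = np.array([2.0, 0.0, 1.0, 4.0])  # stand-in for a feature vector
norm = (features_np - features_np.min()) / (features_np.max() - features_np.min())
print(norm)  # [0.5  0.   0.25 1.  ]
```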
app/authors.py ADDED
@@ -0,0 +1,34 @@
+ """
+ File: authors.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: About the authors.
+ License: MIT License
+ """
+
+
+ AUTHORS = """
+ Authors: [Elena Ryumina](https://github.com/ElenaRyumina), [Dmitry Ryumin](https://github.com/DmitryRyumin), [Denis Dresvyanskiy](https://www.uni-ulm.de/en/nt/staff/research-assistants/dresvyanskiy/), [Maxim Markitantov](https://hci.nw.ru/en/employees/10) and [Alexey Karpov](https://hci.nw.ru/en/employees/1)
+
+ Authorship contribution:
+
+ App developers: ``Elena Ryumina`` and ``Dmitry Ryumin``
+
+ Methodology developers: ``Elena Ryumina``, ``Denis Dresvyanskiy`` and ``Alexey Karpov``
+
+ Model developer: ``Elena Ryumina``
+
+ TensorFlow to PyTorch model converters: ``Maxim Markitantov`` and ``Elena Ryumina``
+
+ Citation
+
+ If you are using EMO-AffectNetModel in your research, please consider citing the research [paper](https://www.sciencedirect.com/science/article/pii/S0925231222012656). Here is an example of a BibTeX entry:
+
+ <div class="highlight highlight-text-bibtex notranslate position-relative overflow-auto" dir="auto"><pre><span class="pl-k">@article</span>{<span class="pl-en">RYUMINA2022</span>,
+   <span class="pl-s">title</span> = <span class="pl-s"><span class="pl-pds">{</span>In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study<span class="pl-pds">}</span></span>,
+   <span class="pl-s">author</span> = <span class="pl-s"><span class="pl-pds">{</span>Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov<span class="pl-pds">}</span></span>,
+   <span class="pl-s">journal</span> = <span class="pl-s"><span class="pl-pds">{</span>Neurocomputing<span class="pl-pds">}</span></span>,
+   <span class="pl-s">year</span> = <span class="pl-s"><span class="pl-pds">{</span>2022<span class="pl-pds">}</span></span>,
+   <span class="pl-s">doi</span> = <span class="pl-s"><span class="pl-pds">{</span>10.1016/j.neucom.2022.10.013<span class="pl-pds">}</span></span>,
+   <span class="pl-s">url</span> = <span class="pl-s"><span class="pl-pds">{</span>https://www.sciencedirect.com/science/article/pii/S0925231222012656<span class="pl-pds">}</span></span>,
+ }</div>
+ """
app/config.py ADDED
@@ -0,0 +1,49 @@
+ """
+ File: config.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Configuration file.
+ License: MIT License
+ """
+
+ import toml
+ from typing import Dict
+ from types import SimpleNamespace
+
+
+ def flatten_dict(prefix: str, d: Dict) -> Dict:
+     result = {}
+
+     for k, v in d.items():
+         if isinstance(v, dict):
+             result.update(flatten_dict(f"{prefix}{k}_", v))
+         else:
+             result[f"{prefix}{k}"] = v
+
+     return result
+
+
+ config = toml.load("config.toml")
+
+ config_data = flatten_dict("", config)
+
+ config_data = SimpleNamespace(**config_data)
+
+ DICT_EMO = {
+     0: "Neutral",
+     1: "Happiness",
+     2: "Sadness",
+     3: "Surprise",
+     4: "Fear",
+     5: "Disgust",
+     6: "Anger",
+ }
+
+ COLORS = {
+     0: 'blue',
+     1: 'orange',
+     2: 'green',
+     3: 'red',
+     4: 'purple',
+     5: 'brown',
+     6: 'pink'
+ }
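`config.py` expects a `config.toml` next to the app (not included in this view) and flattens its nested tables into attribute access such as `config_data.APP_VERSION` or `config_data.FRAME_DOWNSAMPLING`, which other modules read. A small sketch of that flattening behaviour with made-up keys:

```python
# Illustration of the flatten_dict + SimpleNamespace pattern from app/config.py.
# The keys below are made up; the real config.toml is not part of this diff.
from types import SimpleNamespace

def flatten_dict(prefix, d):
    result = {}
    for k, v in d.items():
        if isinstance(v, dict):
            result.update(flatten_dict(f"{prefix}{k}_", v))
        else:
            result[f"{prefix}{k}"] = v
    return result

nested = {"APP_VERSION": "0.2.0", "model": {"static_url": "https://example.com/static.pt"}}
ns = SimpleNamespace(**flatten_dict("", nested))
print(ns.APP_VERSION, ns.model_static_url)
```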
app/description.py ADDED
@@ -0,0 +1,46 @@
+ """
+ File: description.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Project description for the Gradio app.
+ License: MIT License
+ """
+
+ # Importing necessary components for the Gradio app
+ from app.config import config_data
+
+ DESCRIPTION_STATIC = f"""\
+ # Static Facial Expression Recognition
+ <div class="app-flex-container">
+     <img src="https://img.shields.io/badge/version-v{config_data.APP_VERSION}-rc0" alt="Version">
+     <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition&countColor=%23263759&style=flat" /></a>
+     <a href="https://paperswithcode.com/paper/in-search-of-a-robust-facial-expressions"><img src="https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/in-search-of-a-robust-facial-expressions/facial-expression-recognition-on-affectnet" /></a>
+ </div>
+ """
+
+ DESCRIPTION_DYNAMIC = f"""\
+ # Dynamic Facial Expression Recognition
+ <div class="app-flex-container">
+     <img src="https://img.shields.io/badge/version-v{config_data.APP_VERSION}-rc0" alt="Version">
+     <a href="https://paperswithcode.com/paper/in-search-of-a-robust-facial-expressions"><img src="https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/in-search-of-a-robust-facial-expressions/facial-expression-recognition-on-affectnet" /></a>
+ </div>
+ """
+
+ DESCRIPTION_SLEEP_QUALITY = """
+ # Sleep Quality Analysis
+
+ This tab analyzes sleep quality based on facial features, focusing on skin tone and eye bags.
+
+ ## How to use:
+ 1. Upload a video of a person's face.
+ 2. Click 'Submit' to process the video.
+ 3. View the results, including:
+    - Original video
+    - Processed face video
+    - Sleep quality analysis video
+    - Eye bags detection image
+    - Sleep quality statistics over time
+
+ The analysis provides insights into potential sleep issues based on visual cues.
+
+ Note: This analysis is for informational purposes only and should not be considered a medical diagnosis. Always consult with a healthcare professional for sleep-related concerns.
+ """
app/face_utils.py ADDED
@@ -0,0 +1,68 @@
+ """
+ File: face_utils.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module contains utility functions related to facial landmarks and image processing.
+ License: MIT License
+ """
+
+ import numpy as np
+ import math
+ import cv2
+
+
+ def norm_coordinates(normalized_x, normalized_y, image_width, image_height):
+     x_px = min(math.floor(normalized_x * image_width), image_width - 1)
+     y_px = min(math.floor(normalized_y * image_height), image_height - 1)
+     return x_px, y_px
+
+
+ def get_box(fl, w, h):
+     idx_to_coors = {}
+     for idx, landmark in enumerate(fl.landmark):
+         landmark_px = norm_coordinates(landmark.x, landmark.y, w, h)
+         if landmark_px:
+             idx_to_coors[idx] = landmark_px
+
+     x_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 0])
+     y_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 1])
+     endX = np.max(np.asarray(list(idx_to_coors.values()))[:, 0])
+     endY = np.max(np.asarray(list(idx_to_coors.values()))[:, 1])
+
+     (startX, startY) = (max(0, x_min), max(0, y_min))
+     (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
+
+     return startX, startY, endX, endY
+
+ def display_info(img, text, margin=1.0, box_scale=1.0):
+     img_copy = img.copy()
+     img_h, img_w, _ = img_copy.shape
+     line_width = int(min(img_h, img_w) * 0.001)
+     thickness = max(int(line_width / 3), 1)
+
+     font_face = cv2.FONT_HERSHEY_SIMPLEX
+     font_color = (0, 0, 0)
+     font_scale = thickness / 1.5
+
+     t_w, t_h = cv2.getTextSize(text, font_face, font_scale, None)[0]
+
+     margin_n = int(t_h * margin)
+     sub_img = img_copy[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale),
+                        img_w - t_w - margin_n - int(2 * t_h * box_scale): img_w - margin_n]
+
+     white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255
+
+     img_copy[0 + margin_n: 0 + margin_n + t_h + int(2 * t_h * box_scale),
+              img_w - t_w - margin_n - int(2 * t_h * box_scale):img_w - margin_n] = cv2.addWeighted(sub_img, 0.5, white_rect, .5, 1.0)
+
+     cv2.putText(img=img_copy,
+                 text=text,
+                 org=(img_w - t_w - margin_n - int(2 * t_h * box_scale) // 2,
+                      0 + margin_n + t_h + int(2 * t_h * box_scale) // 2),
+                 fontFace=font_face,
+                 fontScale=font_scale,
+                 color=font_color,
+                 thickness=thickness,
+                 lineType=cv2.LINE_AA,
+                 bottomLeftOrigin=False)
+
+     return img_copy
app/image_processing.py ADDED
@@ -0,0 +1,49 @@
+ import numpy as np
+ import cv2
+ from PIL import Image
+ import torch
+ from app.model import pth_model_static, cam, pth_processing
+ from app.face_utils import get_box
+ from app.config import DICT_EMO
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ import mediapipe as mp
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+ def preprocess_image_and_predict(inp):
+     inp = np.array(inp)
+
+     if inp is None:
+         return None, None, None
+
+     try:
+         h, w = inp.shape[:2]
+     except Exception:
+         return None, None, None
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5,
+     ) as face_mesh:
+         results = face_mesh.process(inp)
+         if results.multi_face_landmarks:
+             for fl in results.multi_face_landmarks:
+                 startX, startY, endX, endY = get_box(fl, w, h)
+                 cur_face = inp[startY:endY, startX:endX]
+                 cur_face_n = pth_processing(Image.fromarray(cur_face))
+                 with torch.no_grad():
+                     prediction = (
+                         torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
+                         .detach()
+                         .numpy()[0]
+                     )
+                 confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)}
+                 grayscale_cam = cam(input_tensor=cur_face_n)
+                 grayscale_cam = grayscale_cam[0, :]
+                 cur_face_hm = cv2.resize(cur_face,(224,224))
+                 cur_face_hm = np.float32(cur_face_hm) / 255
+                 heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=True)
+
+     return cur_face, heatmap, confidences
app/model.py ADDED
@@ -0,0 +1,64 @@
+ """
+ File: model.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module provides functions for loading and processing a pre-trained deep learning model
+              for facial expression recognition.
+ License: MIT License
+ """
+
+ import torch
+ import requests
+ from PIL import Image
+ from torchvision import transforms
+ from pytorch_grad_cam import GradCAM
+
+ # Importing necessary components for the Gradio app
+ from app.config import config_data
+ from app.model_architectures import ResNet50, LSTMPyTorch
+
+
+ def load_model(model_url, model_path):
+     try:
+         with requests.get(model_url, stream=True) as response:
+             with open(model_path, "wb") as file:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     file.write(chunk)
+         return model_path
+     except Exception as e:
+         print(f"Error loading model: {e}")
+         return None
+
+ path_static = load_model(config_data.model_static_url, config_data.model_static_path)
+ pth_model_static = ResNet50(7, channels=3)
+ pth_model_static.load_state_dict(torch.load(path_static))
+ pth_model_static.eval()
+
+ path_dynamic = load_model(config_data.model_dynamic_url, config_data.model_dynamic_path)
+ pth_model_dynamic = LSTMPyTorch()
+ pth_model_dynamic.load_state_dict(torch.load(path_dynamic))
+ pth_model_dynamic.eval()
+
+ target_layers = [pth_model_static.layer4]
+ cam = GradCAM(model=pth_model_static, target_layers=target_layers)
+
+ def pth_processing(fp):
+     class PreprocessInput(torch.nn.Module):
+         def __init__(self):
+             super(PreprocessInput, self).__init__()
+
+         def forward(self, x):
+             x = x.to(torch.float32)
+             x = torch.flip(x, dims=(0,))
+             x[0, :, :] -= 91.4953
+             x[1, :, :] -= 103.8827
+             x[2, :, :] -= 131.0912
+             return x
+
+     def get_img_torch(img, target_size=(224, 224)):
+         transform = transforms.Compose([transforms.PILToTensor(), PreprocessInput()])
+         img = img.resize(target_size, Image.Resampling.NEAREST)
+         img = transform(img)
+         img = torch.unsqueeze(img, 0)
+         return img
+
+     return get_img_torch(fp)
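Once the weights are downloaded, the static model acts as a plain seven-class classifier over `DICT_EMO`. A hedged usage sketch, assuming `config.toml` and the model URLs resolve and that a pre-cropped face image exists at the hypothetical path below:

```python
# Hypothetical usage of the static model defined in app/model.py.
import torch
from PIL import Image
from app.model import pth_model_static, pth_processing
from app.config import DICT_EMO

img = Image.open("face_crop.jpg").convert("RGB")  # hypothetical pre-cropped face
batch = pth_processing(img)                       # (1, 3, 224, 224) tensor
with torch.no_grad():
    probs = torch.nn.functional.softmax(pth_model_static(batch), dim=1)[0]
print({DICT_EMO[i]: float(probs[i]) for i in range(7)})
```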
app/model_architectures.py ADDED
@@ -0,0 +1,150 @@
+ """
+ File: model_architectures.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module provides model architectures.
+ License: MIT License
+ """
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+
+ class Bottleneck(nn.Module):
+     expansion = 4
+     def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
+         super(Bottleneck, self).__init__()
+
+         self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False)
+         self.batch_norm1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.99)
+
+         self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding='same', bias=False)
+         self.batch_norm2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.99)
+
+         self.conv3 = nn.Conv2d(out_channels, out_channels*self.expansion, kernel_size=1, stride=1, padding=0, bias=False)
+         self.batch_norm3 = nn.BatchNorm2d(out_channels*self.expansion, eps=0.001, momentum=0.99)
+
+         self.i_downsample = i_downsample
+         self.stride = stride
+         self.relu = nn.ReLU()
+
+     def forward(self, x):
+         identity = x.clone()
+         x = self.relu(self.batch_norm1(self.conv1(x)))
+
+         x = self.relu(self.batch_norm2(self.conv2(x)))
+
+         x = self.conv3(x)
+         x = self.batch_norm3(x)
+
+         # downsample if needed
+         if self.i_downsample is not None:
+             identity = self.i_downsample(identity)
+         # add identity
+         x += identity
+         x = self.relu(x)
+
+         return x
+
+ class Conv2dSame(torch.nn.Conv2d):
+
+     def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:
+         return max((math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i, 0)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         ih, iw = x.size()[-2:]
+
+         pad_h = self.calc_same_pad(i=ih, k=self.kernel_size[0], s=self.stride[0], d=self.dilation[0])
+         pad_w = self.calc_same_pad(i=iw, k=self.kernel_size[1], s=self.stride[1], d=self.dilation[1])
+
+         if pad_h > 0 or pad_w > 0:
+             x = F.pad(
+                 x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
+             )
+         return F.conv2d(
+             x,
+             self.weight,
+             self.bias,
+             self.stride,
+             self.padding,
+             self.dilation,
+             self.groups,
+         )
+
+ class ResNet(nn.Module):
+     def __init__(self, ResBlock, layer_list, num_classes, num_channels=3):
+         super(ResNet, self).__init__()
+         self.in_channels = 64
+
+         self.conv_layer_s2_same = Conv2dSame(num_channels, 64, 7, stride=2, groups=1, bias=False)
+         self.batch_norm1 = nn.BatchNorm2d(64, eps=0.001, momentum=0.99)
+         self.relu = nn.ReLU()
+         self.max_pool = nn.MaxPool2d(kernel_size = 3, stride=2)
+
+         self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64, stride=1)
+         self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
+         self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
+         self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
+
+         self.avgpool = nn.AdaptiveAvgPool2d((1,1))
+         self.fc1 = nn.Linear(512*ResBlock.expansion, 512)
+         self.relu1 = nn.ReLU()
+         self.fc2 = nn.Linear(512, num_classes)
+
+     def extract_features(self, x):
+         x = self.relu(self.batch_norm1(self.conv_layer_s2_same(x)))
+         x = self.max_pool(x)
+         # print(x.shape)
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+
+         x = self.avgpool(x)
+         x = x.reshape(x.shape[0], -1)
+         x = self.fc1(x)
+         return x
+
+     def forward(self, x):
+         x = self.extract_features(x)
+         x = self.relu1(x)
+         x = self.fc2(x)
+         return x
+
+     def _make_layer(self, ResBlock, blocks, planes, stride=1):
+         ii_downsample = None
+         layers = []
+
+         if stride != 1 or self.in_channels != planes*ResBlock.expansion:
+             ii_downsample = nn.Sequential(
+                 nn.Conv2d(self.in_channels, planes*ResBlock.expansion, kernel_size=1, stride=stride, bias=False, padding=0),
+                 nn.BatchNorm2d(planes*ResBlock.expansion, eps=0.001, momentum=0.99)
+             )
+
+         layers.append(ResBlock(self.in_channels, planes, i_downsample=ii_downsample, stride=stride))
+         self.in_channels = planes*ResBlock.expansion
+
+         for i in range(blocks-1):
+             layers.append(ResBlock(self.in_channels, planes))
+
+         return nn.Sequential(*layers)
+
+ def ResNet50(num_classes, channels=3):
+     return ResNet(Bottleneck, [3,4,6,3], num_classes, channels)
+
+
+ class LSTMPyTorch(nn.Module):
+     def __init__(self):
+         super(LSTMPyTorch, self).__init__()
+
+         self.lstm1 = nn.LSTM(input_size=512, hidden_size=512, batch_first=True, bidirectional=False)
+         self.lstm2 = nn.LSTM(input_size=512, hidden_size=256, batch_first=True, bidirectional=False)
+         self.fc = nn.Linear(256, 7)
+         self.softmax = nn.Softmax(dim=1)
+
+     def forward(self, x):
+         x, _ = self.lstm1(x)
+         x, _ = self.lstm2(x)
+         x = self.fc(x[:, -1, :])
+         x = self.softmax(x)
+         return x
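A quick shape check clarifies how the two architectures fit together: `ResNet50.extract_features` yields a 512-d vector per frame, and `LSTMPyTorch` consumes a window of those vectors and emits seven softmax probabilities. Illustrative only (random weights, no downloads):

```python
# Shape sanity check for the architectures defined above.
import torch
from app.model_architectures import ResNet50, LSTMPyTorch

backbone = ResNet50(7, channels=3)
feats = backbone.extract_features(torch.randn(1, 3, 224, 224))
print(feats.shape)  # torch.Size([1, 512])

temporal = LSTMPyTorch()
probs = temporal(torch.randn(1, 10, 512))
print(probs.shape)  # torch.Size([1, 7]); each row sums to 1 via the softmax layer
```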
app/plot.py ADDED
@@ -0,0 +1,29 @@
+ """
+ File: plot.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Plotting statistical information.
+ License: MIT License
+ """
+ import matplotlib.pyplot as plt
+ import numpy as np
+
+ # Importing necessary components for the Gradio app
+ from app.config import DICT_EMO, COLORS
+
+
+ def statistics_plot(frames, probs):
+     fig, ax = plt.subplots(figsize=(10, 4))
+     fig.subplots_adjust(left=0.07, bottom=0.14, right=0.98, top=0.8, wspace=0, hspace=0)
+     # Adjust left, bottom, right and top margins to leave room for the legend and axis labels
+     probs = np.array(probs)
+     for i in range(7):
+         try:
+             ax.plot(frames, probs[:, i], label=DICT_EMO[i], c=COLORS[i])
+         except Exception:
+             return None
+
+     ax.legend(loc='upper center', bbox_to_anchor=(0.47, 1.2), ncol=7, fontsize=12)
+     ax.set_xlabel('Frames', fontsize=12)  # X-axis label
+     ax.set_ylabel('Probability', fontsize=12)  # Y-axis label
+     ax.grid(True)
+     return plt
app/sleep_quality_processing.py ADDED
@@ -0,0 +1,94 @@
+ import cv2
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import mediapipe as mp
+ from app.face_utils import get_box
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+ def preprocess_video_and_predict_sleep_quality(video):
+     cap = cv2.VideoCapture(video)
+     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
+
+     path_save_video_original = 'result_original.mp4'
+     path_save_video_face = 'result_face.mp4'
+     path_save_video_sleep = 'result_sleep.mp4'
+
+     vid_writer_original = cv2.VideoWriter(path_save_video_original, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+     vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+     vid_writer_sleep = cv2.VideoWriter(path_save_video_sleep, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     frames = []
+     sleep_quality_scores = []
+     eye_bags_images = []
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5) as face_mesh:
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             results = face_mesh.process(frame_rgb)
+
+             if results.multi_face_landmarks:
+                 for fl in results.multi_face_landmarks:
+                     startX, startY, endX, endY = get_box(fl, w, h)
+                     cur_face = frame_rgb[startY:endY, startX:endX]
+
+                     sleep_quality_score, eye_bags_image = analyze_sleep_quality(cur_face)
+                     sleep_quality_scores.append(sleep_quality_score)
+                     eye_bags_images.append(cv2.resize(eye_bags_image, (224, 224)))
+
+                     sleep_quality_viz = create_sleep_quality_visualization(cur_face, sleep_quality_score)
+
+                     cur_face = cv2.resize(cur_face, (224, 224))
+
+                     vid_writer_face.write(cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR))
+                     vid_writer_sleep.write(sleep_quality_viz)
+
+             vid_writer_original.write(frame)
+             frames.append(len(frames) + 1)
+
+     cap.release()
+     vid_writer_original.release()
+     vid_writer_face.release()
+     vid_writer_sleep.release()
+
+     sleep_stat = sleep_quality_statistics_plot(frames, sleep_quality_scores)
+
+     if eye_bags_images:
+         average_eye_bags_image = np.mean(np.array(eye_bags_images), axis=0).astype(np.uint8)
+     else:
+         average_eye_bags_image = np.zeros((224, 224, 3), dtype=np.uint8)
+
+     return (path_save_video_original, path_save_video_face, path_save_video_sleep,
+             average_eye_bags_image, sleep_stat)
+
+ def analyze_sleep_quality(face_image):
+     # Placeholder function - implement your sleep quality analysis here
+     sleep_quality_score = np.random.random()
+     eye_bags_image = cv2.resize(face_image, (224, 224))
+     return sleep_quality_score, eye_bags_image
+
+ def create_sleep_quality_visualization(face_image, sleep_quality_score):
+     viz = face_image.copy()
+     cv2.putText(viz, f"Sleep Quality: {sleep_quality_score:.2f}", (10, 30),
+                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+     return cv2.cvtColor(viz, cv2.COLOR_RGB2BGR)
+
+ def sleep_quality_statistics_plot(frames, sleep_quality_scores):
+     fig, ax = plt.subplots()
+     ax.plot(frames, sleep_quality_scores)
+     ax.set_xlabel('Frame')
+     ax.set_ylabel('Sleep Quality Score')
+     ax.set_title('Sleep Quality Over Time')
+     plt.tight_layout()
+     return fig
app/video_processing.py ADDED
@@ -0,0 +1,132 @@
+ import cv2
+ import numpy as np
+ import torch
+ from PIL import Image
+ import mediapipe as mp
+ from pytorch_grad_cam.utils.image import show_cam_on_image  # needed for the heatmap overlay below
+ from app.model import pth_model_static, pth_model_dynamic, cam, pth_processing
+ from app.face_utils import get_box, display_info
+ from app.config import config_data
+ from app.plot import statistics_plot
+ from .au_processing import features_to_au_intensities, au_statistics_plot
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+ def preprocess_video_and_predict(video):
+     cap = cv2.VideoCapture(video)
+     w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     fps = np.round(cap.get(cv2.CAP_PROP_FPS))
+
+     path_save_video_face = 'result_face.mp4'
+     vid_writer_face = cv2.VideoWriter(path_save_video_face, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     path_save_video_hm = 'result_hm.mp4'
+     vid_writer_hm = cv2.VideoWriter(path_save_video_hm, cv2.VideoWriter_fourcc(*'mp4v'), fps, (224, 224))
+
+     lstm_features = []
+     count_frame = 1
+     count_face = 0
+     probs = []
+     frames = []
+     au_intensities_list = []
+     last_output = None
+     last_heatmap = None
+     last_au_intensities = None
+     cur_face = None
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5) as face_mesh:
+
+         while cap.isOpened():
+             _, frame = cap.read()
+             if frame is None: break
+
+             frame_copy = frame.copy()
+             frame_copy.flags.writeable = False
+             frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+             results = face_mesh.process(frame_copy)
+             frame_copy.flags.writeable = True
+
+             if results.multi_face_landmarks:
+                 for fl in results.multi_face_landmarks:
+                     startX, startY, endX, endY = get_box(fl, w, h)
+                     cur_face = frame_copy[startY:endY, startX: endX]
+
+                     if count_face%config_data.FRAME_DOWNSAMPLING == 0:
+                         cur_face_copy = pth_processing(Image.fromarray(cur_face))
+                         with torch.no_grad():
+                             features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()
+                             au_intensities = features_to_au_intensities(pth_model_static(cur_face_copy))
+
+                         grayscale_cam = cam(input_tensor=cur_face_copy)
+                         grayscale_cam = grayscale_cam[0, :]
+                         cur_face_hm = cv2.resize(cur_face,(224,224), interpolation = cv2.INTER_AREA)
+                         cur_face_hm = np.float32(cur_face_hm) / 255
+                         heatmap = show_cam_on_image(cur_face_hm, grayscale_cam, use_rgb=False)
+                         last_heatmap = heatmap
+                         last_au_intensities = au_intensities
+
+                         if len(lstm_features) == 0:
+                             lstm_features = [features]*10
+                         else:
+                             lstm_features = lstm_features[1:] + [features]
+
+                         lstm_f = torch.from_numpy(np.vstack(lstm_features))
+                         lstm_f = torch.unsqueeze(lstm_f, 0)
+                         with torch.no_grad():
+                             output = pth_model_dynamic(lstm_f).detach().numpy()
+                         last_output = output
+
+                         if count_face == 0:
+                             count_face += 1
+
+                     else:
+                         if last_output is not None:
+                             output = last_output
+                             heatmap = last_heatmap
+                             au_intensities = last_au_intensities
+
+                         elif last_output is None:
+                             output = np.empty((1, 7))
+                             output[:] = np.nan
+                             au_intensities = np.empty(24)
+                             au_intensities[:] = np.nan
+
+                     probs.append(output[0])
+                     frames.append(count_frame)
+                     au_intensities_list.append(au_intensities)
+             else:
+                 if last_output is not None:
+                     lstm_features = []
+                     empty = np.empty((7))
+                     empty[:] = np.nan
+                     probs.append(empty)
+                     frames.append(count_frame)
+                     au_intensities_list.append(np.full(24, np.nan))
+
+             if cur_face is not None:
+                 heatmap_f = display_info(heatmap, 'Frame: {}'.format(count_frame), box_scale=.3)
+
+                 cur_face = cv2.cvtColor(cur_face, cv2.COLOR_RGB2BGR)
+                 cur_face = cv2.resize(cur_face, (224,224), interpolation = cv2.INTER_AREA)
+                 cur_face = display_info(cur_face, 'Frame: {}'.format(count_frame), box_scale=.3)
+                 vid_writer_face.write(cur_face)
+                 vid_writer_hm.write(heatmap_f)
+
+             count_frame += 1
+             if count_face != 0:
+                 count_face += 1
+
+     vid_writer_face.release()
+     vid_writer_hm.release()
+
+     stat = statistics_plot(frames, probs)
+     au_stat = au_statistics_plot(frames, au_intensities_list)
+
+     if not stat or not au_stat:
+         return None, None, None, None, None
+
+     return video, path_save_video_face, path_save_video_hm, stat, au_stat
app_gpuzero.py ADDED
@@ -0,0 +1,64 @@
+ import gradio as gr
+ from tabs.heart_rate_variability import create_hrv_tab
+ from tabs.blink_detection import create_blink_tab
+ from tabs.gaze_estimation import create_gaze_estimation_tab
+ from tabs.speech_stress_analysis import create_voice_stress_tab
+ from tabs.head_posture_detection import create_head_posture_tab
+ from tabs.face_expressions import create_face_expressions_tab
+ from tabs.speech_emotion_recognition import create_emotion_recognition_tab
+ from tabs.sleep_quality import create_sleep_quality_tab
+ from tabs.sentiment_analysis import create_sentiment_tab
+ from tabs.emotion_analysis import create_emotion_tab
+ from tabs.body_movement_analysis import create_body_movement_tab
+ from tabs.posture_analysis import create_posture_analysis_tab
+ from tabs.skin_analysis import create_skin_conductance_tab
+ from tabs.FACS_analysis_sad import create_facs_analysis_sad_tab
+ from tabs.roberta_chatbot import create_roberta_chatbot_tab
+
+ # Import the UI components
+ from ui_components import CUSTOM_CSS, HEADER_HTML, DISCLAIMER_HTML
+
+ TAB_STRUCTURE = [
+     ("Visual Analysis", [
+         ("Emotional Face Expressions", create_face_expressions_tab),
+         ("FACS for Stress, Anxiety, Depression", create_facs_analysis_sad_tab),
+         ("Gaze Estimation", create_gaze_estimation_tab),
+         ("Head Posture", create_head_posture_tab),
+         ("Blink Rate", create_blink_tab),
+         ("Sleep Quality", create_sleep_quality_tab),
+         ("Heart Rate Variability", create_hrv_tab),
+         ("Body Movement", create_body_movement_tab),
+         ("Posture", create_posture_analysis_tab),
+         ("Skin", create_skin_conductance_tab)
+     ]),
+     ("Speech Analysis", [
+         ("Speech Stress", create_voice_stress_tab),
+         ("Speech Emotion", create_emotion_recognition_tab)
+     ]),
+     ("Text Analysis", [
+         ("Sentiment", create_sentiment_tab),
+         ("Emotion", create_emotion_tab),
+         ("Roberta Mental Health Chatbot", create_roberta_chatbot_tab)
+     ]),
+     ("Brain Analysis (coming soon)", [
+     ])
+ ]
+
+ def create_demo():
+     with gr.Blocks(css=CUSTOM_CSS) as demo:
+         gr.Markdown(HEADER_HTML)
+         with gr.Tabs(elem_classes=["main-tab"]):
+             for main_tab, sub_tabs in TAB_STRUCTURE:
+                 with gr.Tab(main_tab):
+                     with gr.Tabs():
+                         for sub_tab, create_fn in sub_tabs:
+                             with gr.Tab(sub_tab):
+                                 create_fn()
+         gr.HTML(DISCLAIMER_HTML)
+     return demo
+
+ # Create the demo instance
+ demo = create_demo()
+
+ if __name__ == "__main__":
+     demo.queue(api_open=True).launch(share=False)
assets/.DS_Store ADDED
Binary file (6.15 kB).