DmitryRyumin committed
Commit 257d039 · 1 Parent(s): 0b1e325

Summary
Files changed:
- README.md +3 -3
- app/description.py +2 -0
- app/event_handlers/clear.py +2 -2
- app/event_handlers/event_handlers.py +12 -1
- app/event_handlers/submit.py +10 -3
- app/event_handlers/video.py +19 -1
- app/tabs.py +4 -4
- app/utils.py +1 -1
- config.toml +5 -3
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
 title: MASAI
-emoji:
-colorFrom:
-colorTo:
+emoji: 😀😲😐😥🥴😱😡
+colorFrom: blue
+colorTo: pink
 sdk: gradio
 python_version: 3.12.7
 sdk_version: 5.4.0
app/description.py
CHANGED
@@ -14,4 +14,6 @@ DESCRIPTION = f"""\
 <div class="app-flex-container">
     <img src="https://img.shields.io/badge/version-v{config_data.AppSettings_APP_VERSION}-stable" alt="Version">
 </div>
+
+> MASAI is an advanced tool that assesses and interprets human affective states by integrating data from multiple modalities such as audio, video, and text. MASAI accepts a multimedia file, extracts audio, video, and text (using ASR), analyzes and combines the three modalities, and predicts six basic emotions (Happiness, Sadness, Anger, Surprise, Disgust, Fear), a neutral state (Neutral), and sentiment (Negative, Positive, Neutral).
 """
app/event_handlers/clear.py
CHANGED
@@ -39,6 +39,6 @@ def event_handler_clear() -> tuple[
         gr.Plot(value=None, visible=False),
         gr.Plot(value=None, visible=False),
         gr.Row(visible=False),
-        gr.Textbox(value=None, visible=False),
-        gr.Textbox(value=None, visible=False),
+        gr.Textbox(value=None, info=None, container=False, visible=False),
+        gr.Textbox(value=None, info=None, container=False, visible=False),
     )
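The two extra keyword arguments are not cosmetic: app/event_handlers/submit.py (below) now shows these textboxes with an info label and container=True, and a component instance returned from a Gradio handler only updates the properties it explicitly passes, so the clear handler must reset info and container as well. A minimal sketch of that asymmetry, with hypothetical handler names that are not the app's code:

import gradio as gr

def show_result():
    # Show the textbox with an info label inside a container.
    return gr.Textbox(value="12.35 seconds", info="Inference time", container=True, visible=True)

def clear_result():
    # Passing only value/visible would leave the stale info label and
    # container styling in place, so revert them explicitly too.
    return gr.Textbox(value=None, info=None, container=False, visible=False)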
app/event_handlers/event_handlers.py
CHANGED
@@ -30,7 +30,18 @@ def setup_app_event_handlers(
         triggers=[video.change, video.upload, video.stop_recording, video.clear],
         fn=event_handler_video,
         inputs=[video],
-        outputs=[clear, submit, text],
+        outputs=[
+            clear,
+            submit,
+            text,
+            waveform,
+            faces,
+            emotion_stats,
+            sent_stats,
+            time_row,
+            video_duration,
+            calculate_time,
+        ],
         queue=True,
     )
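The order of this outputs list must match, position for position, the tuple returned by event_handler_video (updated below in app/event_handlers/video.py). A runnable sketch of the same gr.on wiring pattern, with hypothetical components standing in for the app's:

import gradio as gr

def handler(video):
    valid = bool(video)
    # The returned tuple is applied positionally to `outputs`.
    return gr.Button(interactive=valid), gr.Textbox(visible=valid)

with gr.Blocks() as demo:
    video = gr.Video()
    submit = gr.Button("Calculate")
    text = gr.Textbox()
    gr.on(
        triggers=[video.change, video.clear],
        fn=handler,
        inputs=[video],
        outputs=[submit, text],  # same order as handler's return tuple
        queue=True,
    )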
app/event_handlers/submit.py
CHANGED
@@ -55,7 +55,7 @@ def event_handler_submit(
     gr.Textbox,
     gr.Textbox,
 ]:
-    with Timer() as t:
+    with Timer() as timer:
         if video:
             if video.split(".")[-1] == "webm":
                 video = convert_webm_to_mp4(video)
@@ -197,8 +197,15 @@ def event_handler_submit(
         gr.Plot(value=plt_sent, visible=True),
         gr.Row(visible=True),
         gr.Textbox(
-            value=config_data.
+            value=config_data.OtherMessages_SEC.format(vfe.dur),
+            info=config_data.InformationMessages_VIDEO_DURATION,
+            container=True,
+            visible=True,
+        ),
+        gr.Textbox(
+            value=timer.execution_time,
+            info=config_data.InformationMessages_INFERENCE_TIME,
+            container=True,
             visible=True,
         ),
-        gr.Textbox(value=t, visible=True),
     )
app/event_handlers/video.py
CHANGED
@@ -11,7 +11,18 @@ import gradio as gr
 from app.config import config_data
 
 
-def event_handler_video(video: str) -> tuple[gr.Button, gr.Button, gr.Textbox]:
+def event_handler_video(video: str) -> tuple[
+    gr.Button,
+    gr.Button,
+    gr.Textbox,
+    gr.Plot,
+    gr.Plot,
+    gr.Plot,
+    gr.Plot,
+    gr.Row,
+    gr.Textbox,
+    gr.Textbox,
+]:
     is_video_valid = bool(video)
 
     return (
@@ -23,4 +34,11 @@ def event_handler_video(video: str) -> tuple[gr.Button, gr.Button, gr.Textbox]:
             container=False,
             elem_classes="noti-results-" + str(is_video_valid).lower(),
         ),
+        gr.Plot(value=None, visible=False),
+        gr.Plot(value=None, visible=False),
+        gr.Plot(value=None, visible=False),
+        gr.Plot(value=None, visible=False),
+        gr.Row(visible=False),
+        gr.Textbox(value=None, info=None, container=False, visible=False),
+        gr.Textbox(value=None, info=None, container=False, visible=False),
     )
app/tabs.py
CHANGED
@@ -183,12 +183,12 @@ def app_tab():
     )
 
 
-def settings_app_tab():
-    pass
+# def settings_app_tab():
+#     pass
 
 
-def about_app_tab():
-    pass
+# def about_app_tab():
+#     pass
 
 
 def about_authors_tab():
app/utils.py
CHANGED
@@ -32,7 +32,7 @@ class Timer(ContextDecorator):
 
     def __exit__(self, *args):
         self.end = time.time()
-        self.execution_time = f"
+        self.execution_time = f"{self.end - self.start:.2f} seconds"
 
     def __str__(self):
         return self.execution_time
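Only __exit__ and __str__ appear in this hunk; for context, a minimal self-contained sketch of such a Timer (the __enter__ shown here is an assumption, inferred from the start attribute above and the `with Timer() as timer:` usage in submit.py):

import time
from contextlib import ContextDecorator

class Timer(ContextDecorator):
    def __enter__(self):
        self.start = time.time()  # assumed; the diff shows only __exit__/__str__
        return self  # lets `with Timer() as timer:` bind the instance

    def __exit__(self, *args):
        self.end = time.time()
        self.execution_time = f"{self.end - self.start:.2f} seconds"

    def __str__(self):
        return self.execution_time

with Timer() as timer:
    sum(range(1_000_000))  # placeholder workload
print(timer.execution_time)  # e.g. "0.02 seconds"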
config.toml
CHANGED
@@ -30,11 +30,13 @@ NOTI_RESULTS = [
     "Video uploaded, you can perform calculations",
 ]
 REC_TEXT = "Recognized text"
-VIDEO_DURATION = "Video duration
+VIDEO_DURATION = "Video duration"
+INFERENCE_TIME = "Inference time"
 
 [OtherMessages]
 CLEAR = "Clear"
 SUBMIT = "Calculate"
+SEC = "{:.2f} seconds"
 
 [Labels]
 VIDEO = "Video"
@@ -45,8 +47,8 @@ SENT_STATS = "Statistics of sentiments"
 
 [TabCreators]
 "β App" = "app_tab"
-"⚙️ Settings" = "settings_app_tab"
-"💡 About App" = "about_app_tab"
+# "⚙️ Settings" = "settings_app_tab"
+# "💡 About App" = "about_app_tab"
 "π Authors" = "about_authors_tab"
 "π Requirements" = "requirements_app_tab"
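The new SEC template is consumed in app/event_handlers/submit.py via str.format; a quick round-trip illustration (the duration value here is made up):

SEC = "{:.2f} seconds"      # [OtherMessages] in config.toml
print(SEC.format(12.3456))  # -> "12.35 seconds"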