DmitryRyumin committed
Commit • 05d81c3
1 Parent(s): 0e30f40

Summary
- app.css +4 -0
- app/app.py +105 -0
- app/authors.py +54 -1
- app/components.py +2 -2
- app/description_steps.py +17 -0
- app/event_handlers/calculate_practical_tasks.py +182 -54
- app/event_handlers/calculate_pt_scores_blocks.py +96 -1
- app/event_handlers/clear_blocks.py +11 -0
- app/event_handlers/dropdown_candidates.py +109 -0
- app/event_handlers/event_handlers.py +60 -0
- app/event_handlers/examples_blocks.py +3 -0
- app/event_handlers/practical_subtasks.py +211 -8
- app/event_handlers/practical_task_sorted.py +0 -7
- app/tabs.py +175 -8
- app/utils.py +66 -0
- config.toml +27 -9
- requirements.txt +1 -1
app.css CHANGED
@@ -15,6 +15,10 @@ div.files-container {
     max-height: 350px;
 }
 
+div.files-container:hover label[data-testid="block-label"] {
+    display: none;
+}
+
 .dataframe div.table-wrap {
     height: auto !important;
 }
app/app.py ADDED
@@ -0,0 +1,105 @@
+"""
+File: app.py
+Author: Elena Ryumina and Dmitry Ryumin
+Description: About the app.
+License: MIT License
+"""
+
+APP = """
+<div>
+<div style="max-width: 90%; margin: auto; padding: 20px;">
+<p style="text-align: center;">
+<img src="https://raw.githubusercontent.com/aimclub/OCEANAI/main/docs/source/_static/logo.svg" alt="Logo" style="width: 20%; height: auto; display: block; margin: auto;">
+</p>
+
+<blockquote>
+<a href="https://oceanai.readthedocs.io/en/latest/">OCEAN-AI</a> is an open-source library consisting of a set of algorithms for intellectual analysis of human behavior based on multimodal data for automatic personality traits (PT) assessment. The library evaluates five PT: <strong>O</strong>penness to experience, <strong>C</strong>onscientiousness, <strong>E</strong>xtraversion, <strong>A</strong>greeableness, Non-<strong>N</strong>euroticism.
+</blockquote>
+
+<p style="text-align: center;">
+<img src="https://raw.githubusercontent.com/aimclub/OCEANAI/main/docs/source/_static/Pipeline_OCEANAI.en.svg" alt="Pipeline" style="max-width: 60%; height: auto; display: block; margin: auto;">
+</p>
+
+<hr>
+
+<h2>OCEAN-AI includes three main algorithms:</h2>
+<ol>
+<li>Audio Information Analysis Algorithm (AIA).</li>
+<li>Video Information Analysis Algorithm (VIA).</li>
+<li>Text Information Analysis Algorithm (TIA).</li>
+<li>Multimodal Information Fusion Algorithm (MIF).</li>
+</ol>
+
+<p>The AIA, VIA and TIA algorithms implement the functions of strong artificial intelligence (AI) in terms of complexing acoustic, visual and linguistic features built on different principles (hand-crafted and deep features), i.e. these algorithms implement the approaches of composite (hybrid) AI. The necessary pre-processing of audio, video and text information, the calculation of visual, acoustic and linguistic features and the output of predictions of personality traits based on them are carried out in the algorithms.</p>
+
+<p>The MIF algorithm is a combination of three information analysis algorithms (AIA, VIA and TIA). This algorithm performs feature-level fusion obtained by the AIA, VIA and TIA algorithms.</p>
+
+<p>In addition to the main task - unimodal and multimodal personality traits assessment, the features implemented in <a href="https://oceanai.readthedocs.io/en/latest/">OCEAN-AI</a> will allow researchers to solve other problems of analyzing human behavior, for example, affective state recognition.</p>
+
+<p>The library solves practical tasks:</p>
+<ol>
+<li><a href="https://oceanai.readthedocs.io/en/latest/user_guide/notebooks/Pipeline_practical_task_1.html">Ranking of potential candidates by professional responsibilities</a>.</li>
+<li><a href="https://oceanai.readthedocs.io/en/latest/user_guide/notebooks/Pipeline_practical_task_2.html">Predicting consumer preferences for industrial goods</a>.</li>
+<li><a href="https://oceanai.readthedocs.io/ru/latest/user_guide/notebooks/Pipeline_practical_task_3.html">Forming effective work teams</a>.</li>
+</ol>
+
+<p><a href="https://oceanai.readthedocs.io/en/latest/">OCEAN-AI</a> uses the latest open-source libraries for audio, video and text processing: <a href="https://librosa.org/">librosa</a>, <a href="https://audeering.github.io/opensmile-python/">openSMILE</a>, <a href="https://pypi.org/project/opencv-python/">openCV</a>, <a href="https://google.github.io/mediapipe/getting_started/python">mediapipe</a>, <a href="https://pypi.org/project/transformers">transformers</a>.</p>
+
+<p><a href="https://oceanai.readthedocs.io/en/latest/">OCEAN-AI</a> is written in the <a href="https://www.python.org/">python programming language</a>. Neural network models are implemented and trained using an open-source library code <a href="https://www.tensorflow.org/">TensorFlow</a>.</p>
+
+<hr>
+
+<h2>Research data</h2>
+
+<p>The <a href="https://oceanai.readthedocs.io/en/latest/">OCEAN-AI</a> library was tested on two corpora:</p>
+
+<ol>
+<li>The publicly available and large-scale <a href="https://chalearnlap.cvc.uab.cat/dataset/24/description/">First Impressions V2 corpus</a>.</li>
+<li>On the first publicly available Russian-language <a href="https://hci.nw.ru/en/pages/mupta-corpus">Multimodal Personality Traits Assessment (MuPTA) corpus</a>.</li>
+</ol>
+
+<hr>
+
+<h2>Publications</h2>
+
+<h3>Journals</h3>
+<pre>
+<code>
+@article{ryumina22_neurocomputing,
+    author = {Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov},
+    title = {In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study},
+    journal = {Neurocomputing},
+    volume = {514},
+    pages = {435-450},
+    year = {2022},
+    doi = {<a href="https://doi.org/10.1016/j.neucom.2022.10.013">https://doi.org/10.1016/j.neucom.2022.10.013</a>},
+}
+
+@article{ryumina24_eswa,
+    author = {Elena Ryumina and Maxim Markitantov and Dmitry Ryumin and Alexey Karpov},
+    title = {OCEAN-AI Framework with EmoFormer Cross-Hemiface Attention Approach for Personality Traits Assessment},
+    journal = {Expert Systems with Applications},
+    volume = {239},
+    pages = {122441},
+    year = {2024},
+    doi = {<a href="https://doi.org/10.1016/j.eswa.2023.122441">https://doi.org/10.1016/j.eswa.2023.122441</a>},
+}
+</code>
+</pre>
+
+<h3>Conferences</h3>
+<pre>
+<code>
+@inproceedings{ryumina23_interspeech,
+    author = {Elena Ryumina and Dmitry Ryumin and Maxim Markitantov and Heysem Kaya and Alexey Karpov},
+    title = {Multimodal Personality Traits Assessment (MuPTA) Corpus: The Impact of Spontaneous and Read Speech},
+    year = {2023},
+    booktitle = {INTERSPEECH},
+    pages = {4049--4053},
+    doi = {<a href="https://doi.org/10.21437/Interspeech.2023-1686">https://doi.org/10.21437/Interspeech.2023-1686</a>},
+}
+</code>
+</pre>
+</div>
+</div>
+"""
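Note: the practical tasks listed in the APP text above map onto the handler branches added later in this commit. A condensed, non-authoritative sketch of that dispatch, using only the b5 calls that appear in the diffs below (the underscore-prefixed methods are OCEAN-AI internals, so this is an illustration rather than the library's public API):

# Hypothetical dispatch keyed on the subtask names used in this commit.
def run_practical_task(subtask: str):
    s = subtask.lower()
    if s == "professional groups":
        return b5._candidate_ranking           # weighted ranking of candidates
    elif s == "professional skills":
        return b5._priority_skill_calculation  # skill-based priority scores
    elif "colleague" in s:
        return b5._colleague_ranking           # junior/senior colleague matching
    else:
        return b5._priority_calculation        # consumer-preference priorities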
app/authors.py CHANGED
@@ -6,5 +6,58 @@ License: MIT License
 """
 
 AUTHORS = """
-
+<div style="display: flex; justify-content: center;">
+<div style="flex-basis: 40%;">
+<a href="https://github.com/ElenaRyumina">
+<img src="https://readme-typing-svg.demolab.com?font=Roboto&duration=1500&pause=100&color=3081F7&vCenter=true&multiline=true&width=435&height=70&lines=Elena+Ryumina;Artificial+Intelligence+Researcher" alt="ElenaRyumina" />
+</a>
+<div style="display: flex; margin-bottom: 6px;">
+<a href="https://www.scopus.com/authid/detail.uri?authorId=57220572427">
+<img src="https://img.shields.io/badge/Scopus-%23E9711C.svg?&style=flat-square&logo=scopus&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="https://scholar.google.com/citations?user=DOBkQssAAAAJ">
+<img src="https://img.shields.io/badge/Google%20Scholar-%234285F4.svg?&style=flat-square&logo=google-scholar&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="https://orcid.org/0000-0002-4135-6949">
+<img src="https://img.shields.io/badge/ORCID-0000--0002--4135--6949-green.svg?&style=flat-square&logo=orcid&logoColor=white" alt="" />
+</a>
+</div>
+<a href="https://github.com/ElenaRyumina" style="display: inline-block;">
+<img src="https://github-stats-alpha.vercel.app/api?username=ElenaRyumina&cc=3081F7&tc=FFFFFF&ic=FFFFFF&bc=FFFFFF" alt="" />
+</a>
+<div style="display: flex;">
+<img src="https://komarev.com/ghpvc/?username=ElenaRyumina&style=flat-square" alt="" />
+</div>
+</div>
+
+<div style="flex-basis: 40%;">
+<a href="https://github.com/DmitryRyumin">
+<img src="https://readme-typing-svg.demolab.com?font=Roboto&duration=1500&pause=100&color=3081F7&vCenter=true&multiline=true&width=435&height=70&lines=Dr.+Dmitry+Ryumin;Artificial+Intelligence+Researcher" alt="DmitryRyumin" />
+</a>
+<div style="display: flex; margin-bottom: 6px;">
+<a href="https://dmitryryumin.github.io">
+<img src="https://img.shields.io/badge/Website-blue??&style=flat-square&logo=opsgenie&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="https://www.scopus.com/authid/detail.uri?authorId=57191960214">
+<img src="https://img.shields.io/badge/Scopus-%23E9711C.svg?&style=flat-square&logo=scopus&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="https://scholar.google.com/citations?user=LrTIp5IAAAAJ">
+<img src="https://img.shields.io/badge/Google%20Scholar-%234285F4.svg?&style=flat-square&logo=google-scholar&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="https://orcid.org/0000-0002-7935-0569">
+<img src="https://img.shields.io/badge/ORCID-0000--0002--7935--0569-green.svg?&style=flat-square&logo=orcid&logoColor=white" alt="" style="margin-right: 6px;" />
+</a>
+<a href="mailto:[email protected]">
+<img src="https://img.shields.io/badge/-Email-red?style=flat-square&logo=gmail&logoColor=white" alt="" />
+</a>
+</div>
+<a href="https://github.com/DmitryRyumin" style="display: inline-block;">
+<img src="https://github-stats-alpha.vercel.app/api?username=DmitryRyumin&cc=3081F7&tc=FFFFFF&ic=FFFFFF&bc=FFFFFF" alt="" />
+</a>
+<div style="display: flex;">
+<img src="https://custom-icon-badges.demolab.com/badge/dynamic/json?style=flat-square&logo=fire&logoColor=fff&color=orange&label=GitHub%20streak&query=%24.currentStreak.length&suffix=%20days&url=https%3A%2F%2Fstreak-stats.demolab.com%2F%3Fuser%3Ddmitryryumin%26type%3Djson" alt="" style="margin-right: 6px;" />
+<img src="https://komarev.com/ghpvc/?username=DmitryRyumin&style=flat-square" alt="" />
+</div>
+</div>
+</div>
 """
app/components.py CHANGED
@@ -154,8 +154,8 @@ def number_create_ui(
 def dropdown_create_ui(
     label: Optional[str] = None,
     info: Optional[str] = None,
-    choices: List =
-    value: List =
+    choices: Optional[List[str]] = None,
+    value: Optional[List[str]] = None,
     multiselect: bool = False,
     show_label: bool = True,
     interactive: bool = True,
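Note: moving `choices` and `value` to `Optional[List[str]] = None` is the usual way to avoid a shared mutable default argument (the previous default values are truncated in this view). A minimal sketch of the pattern, not the actual component code:

from typing import List, Optional

def make_dropdown(choices: Optional[List[str]] = None, value: Optional[List[str]] = None):
    # A fresh list is created per call, so no two calls ever share one default object.
    choices = [] if choices is None else choices
    value = [] if value is None else value
    return choices, value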
app/description_steps.py ADDED
@@ -0,0 +1,17 @@
+"""
+File: description.py
+Author: Dmitry Ryumin
+Description: Project description for the Gradio app.
+License: MIT License
+"""
+
+# Importing necessary components for the Gradio app
+from app.config import config_data
+
+STEP_1 = f"""\
+<h2 align="center">{config_data.InformationMessages_STEP_1}</h2>
+"""
+
+STEP_2 = f"""\
+<h2 align="center">{config_data.InformationMessages_STEP_2}</h2>
+"""
app/event_handlers/calculate_practical_tasks.py CHANGED
@@ -7,46 +7,28 @@ License: MIT License
 
 from app.oceanai_init import b5
 import gradio as gr
-import pandas as pd
 from pathlib import Path
 
 # Importing necessary components for the Gradio app
 from app.config import config_data
+from app.utils import (
+    read_csv_file,
+    apply_rounding_and_rename_columns,
+    preprocess_scores_df,
+)
 from app.components import html_message, dataframe, files_create_ui, video_create_ui
 
 
-def
-
-
-    if drop_id:
-        df = pd.DataFrame(df.drop(["ID"], axis=1))
-
-    df.index.name = "ID"
-    df.index += 1
-    df.index = df.index.map(str)
-
-    return df
+def colleague_type(subtask):
+    return "minor" if "junior" in subtask.lower() else "major"
 
 
-def
-
-
-
-
-        "Extraversion": "EXT",
-        "Agreeableness": "AGR",
-        "Non-Neuroticism": "NNEU",
-    }
-)
-    columns_to_round = df_rounded.columns[1:]
-    df_rounded[columns_to_round] = df_rounded[columns_to_round].apply(
-        lambda x: [round(i, 3) for i in x]
+def consumer_preferences(subtask):
+    return (
+        config_data.Filenames_CAR_CHARACTERISTICS
+        if "mobile device" in subtask.lower()
+        else config_data.Filenames_MDA_CATEGORIES
     )
-    return df_rounded
-
-
-def colleague_type(subtask):
-    return "minor" if "junior" in subtask.lower() else "major"
 
 
 def event_handler_calculate_practical_task_blocks(
@@ -61,18 +43,107 @@ def event_handler_calculate_practical_task_blocks(
     target_score_agr,
     target_score_nneu,
     equal_coefficient,
+    number_priority,
+    number_importance_traits,
+    threshold_consumer_preferences,
+    number_openness,
+    number_conscientiousness,
+    number_extraversion,
+    number_agreeableness,
+    number_non_neuroticism,
 ):
-    if practical_subtasks.lower() == "professional
+    if practical_subtasks.lower() == "professional groups":
+        sum_weights = sum(
+            [
+                number_openness,
+                number_conscientiousness,
+                number_extraversion,
+                number_agreeableness,
+                number_non_neuroticism,
+            ]
+        )
+
+        if sum_weights != 100:
+            gr.Warning(config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights))
+
+            return (
+                gr.Row(visible=False),
+                gr.Column(visible=False),
+                dataframe(visible=False),
+                files_create_ui(
+                    None,
+                    "single",
+                    [".csv"],
+                    config_data.OtherMessages_EXPORT_PS,
+                    True,
+                    False,
+                    False,
+                    "csv-container",
+                ),
+                video_create_ui(visible=False),
+                html_message(
+                    config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights),
+                    False,
+                    True,
+                ),
+            )
+        else:
+            b5._candidate_ranking(
+                df_files=pt_scores.iloc[:, 1:],
+                weigths_openness=number_openness,
+                weigths_conscientiousness=number_conscientiousness,
+                weigths_extraversion=number_extraversion,
+                weigths_agreeableness=number_agreeableness,
+                weigths_non_neuroticism=number_non_neuroticism,
+                out=False,
+            )
+
+            df = apply_rounding_and_rename_columns(b5.df_files_ranking_)
+
+            df_hidden = df.drop(columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS)
+
+            df_hidden.to_csv(config_data.Filenames_POTENTIAL_CANDIDATES)
+
+            df_hidden.reset_index(inplace=True)
+
+            person_id = int(df_hidden.iloc[0]["Person ID"]) - 1
+
+            return (
+                gr.Row(visible=True),
+                gr.Column(visible=True),
+                dataframe(
+                    headers=df_hidden.columns.tolist(),
+                    values=df_hidden.values.tolist(),
+                    visible=True,
+                ),
+                files_create_ui(
+                    config_data.Filenames_POTENTIAL_CANDIDATES,
+                    "single",
+                    [".csv"],
+                    config_data.OtherMessages_EXPORT_PG,
+                    True,
+                    False,
+                    True,
+                    "csv-container",
+                ),
+                video_create_ui(
+                    value=files[person_id],
+                    file_name=Path(files[person_id]).name,
+                    label="Best Person ID - " + str(person_id + 1),
+                    visible=True,
+                ),
+                html_message(config_data.InformationMessages_NOTI_IN_DEV, False, False),
+            )
+    elif practical_subtasks.lower() == "professional skills":
         df_professional_skills = read_csv_file(config_data.Links_PROFESSIONAL_SKILLS)
 
         b5._priority_skill_calculation(
             df_files=pt_scores.iloc[:, 1:],
             correlation_coefficients=df_professional_skills,
             threshold=threshold_professional_skills,
-            out=
+            out=False,
         )
 
-        # Optional
         df = apply_rounding_and_rename_columns(b5.df_files_priority_skill_)
 
         professional_skills_list = (
@@ -81,15 +152,10 @@ def event_handler_calculate_practical_task_blocks(
 
         professional_skills_list.remove(dropdown_professional_skills)
 
-
-
-
-
-            "AGR",
-            "NNEU",
-        ] + professional_skills_list
-
-        df_hidden = df.drop(columns=professional_skills_list)
+        df_hidden = df.drop(
+            columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS
+            + professional_skills_list
+        )
 
         df_hidden.to_csv(config_data.Filenames_PT_SKILLS_SCORES)
 
@@ -131,7 +197,9 @@ def event_handler_calculate_practical_task_blocks(
         practical_subtasks.lower() == "finding a suitable junior colleague"
         or practical_subtasks.lower() == "finding a suitable senior colleague"
     ):
-        df_correlation_coefficients = read_csv_file(
+        df_correlation_coefficients = read_csv_file(
+            config_data.Links_FINDING_COLLEAGUE, ["ID"]
+        )
 
         b5._colleague_ranking(
             df_files=pt_scores.iloc[:, 1:],
@@ -148,18 +216,9 @@ def event_handler_calculate_practical_task_blocks(
             out=False,
         )
 
-
-        df = df = apply_rounding_and_rename_columns(b5.df_files_colleague_)
+        df = apply_rounding_and_rename_columns(b5.df_files_colleague_)
 
-
-            "OPE",
-            "CON",
-            "EXT",
-            "AGR",
-            "NNEU",
-        ]
-
-        df_hidden = df.drop(columns=professional_skills_list)
+        df_hidden = df.drop(columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS)
 
         df_hidden.to_csv(
             colleague_type(practical_subtasks) + config_data.Filenames_COLLEAGUE_RANKING
@@ -196,6 +255,75 @@ def event_handler_calculate_practical_task_blocks(
         ),
         html_message(config_data.InformationMessages_NOTI_IN_DEV, False, False),
     )
+    elif (
+        practical_subtasks.lower() == "car characteristics"
+        or practical_subtasks.lower() == "mobile device application categories"
+    ):
+        if practical_subtasks.lower() == "car characteristics":
+            df_correlation_coefficients = read_csv_file(
+                config_data.Links_CAR_CHARACTERISTICS,
+                ["Style and performance", "Safety and practicality"],
+            )
+        if practical_subtasks.lower() == "mobile device application categories":
+            df_correlation_coefficients = read_csv_file(
+                config_data.Links_MDA_CATEGORIES
+            )
+
+        pt_scores_copy = pt_scores.iloc[:, 1:].copy()
+
+        preprocess_scores_df(pt_scores_copy, "Person ID")
+
+        b5._priority_calculation(
+            df_files=pt_scores_copy,
+            correlation_coefficients=df_correlation_coefficients,
+            col_name_ocean="Trait",
+            threshold=threshold_consumer_preferences,
+            number_priority=number_priority,
+            number_importance_traits=number_importance_traits,
+            out=False,
+        )
+
+        df_files_priority = b5.df_files_priority_.copy()
+        df_files_priority.reset_index(inplace=True)
+
+        df = apply_rounding_and_rename_columns(df_files_priority.iloc[:, 1:])
+
+        preprocess_scores_df(df, "Person ID")
+
+        df_hidden = df.drop(columns=config_data.Settings_SHORT_PROFESSIONAL_SKILLS)
+
+        df_hidden.to_csv(consumer_preferences(practical_subtasks))
+
+        df_hidden.reset_index(inplace=True)
+
+        person_id = int(df_hidden.iloc[0]["Person ID"]) - 1
+
+        return (
+            gr.Row(visible=True),
+            gr.Column(visible=True),
+            dataframe(
+                headers=df_hidden.columns.tolist(),
+                values=df_hidden.values.tolist(),
+                visible=True,
+            ),
+            files_create_ui(
+                consumer_preferences(practical_subtasks),
+                "single",
+                [".csv"],
+                config_data.OtherMessages_EXPORT_CP,
+                True,
+                False,
+                True,
+                "csv-container",
+            ),
+            video_create_ui(
+                value=files[person_id],
+                file_name=Path(files[person_id]).name,
+                label="Best Person ID - " + str(person_id + 1),
+                visible=True,
+            ),
+            html_message(config_data.InformationMessages_NOTI_IN_DEV, False, False),
+        )
     else:
         gr.Info(config_data.InformationMessages_NOTI_IN_DEV)
 
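Note: the new "professional groups" branch above first validates that the five trait weights sum to 100 and only then ranks candidates. A stripped-down sketch of that control flow (UI component construction omitted; it reuses the same config_data keys and b5 calls shown in this hunk):

def rank_professional_group(pt_scores, ope, con, ext, agr, nneu):
    sum_weights = sum([ope, con, ext, agr, nneu])
    if sum_weights != 100:
        # Invalid input: warn in the UI and keep the result blocks hidden.
        gr.Warning(config_data.InformationMessages_SUM_WEIGHTS.format(sum_weights))
        return None
    b5._candidate_ranking(
        df_files=pt_scores.iloc[:, 1:],
        weigths_openness=ope,
        weigths_conscientiousness=con,
        weigths_extraversion=ext,
        weigths_agreeableness=agr,
        weigths_non_neuroticism=nneu,
        out=False,
    )
    # Ranked DataFrame; the first row's "Person ID" selects the best candidate video.
    return b5.df_files_ranking_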
app/event_handlers/calculate_pt_scores_blocks.py CHANGED
@@ -10,6 +10,8 @@ import gradio as gr
 # Importing necessary components for the Gradio app
 from app.oceanai_init import b5
 from app.config import config_data
+from app.description_steps import STEP_2
+from app.utils import read_csv_file, extract_profession_weights
 from app.practical_tasks import supported_practical_tasks
 from app.components import (
     html_message,
@@ -38,6 +40,12 @@ def event_handler_calculate_pt_scores_blocks(files, evt_data: gr.EventData):
     df_files = b5.df_files_.copy()
     df_files.reset_index(inplace=True)
 
+    df_traits_priority_for_professions = read_csv_file(config_data.Links_PROFESSIONS)
+    weights_professions, interactive_professions = extract_profession_weights(
+        df_traits_priority_for_professions,
+        config_data.Settings_DROPDOWN_CANDIDATES[0],
+    )
+
     return (
         html_message(config_data.InformationMessages_NOTI_VIDEOS, False, False),
         dataframe(
@@ -55,6 +63,7 @@ def event_handler_calculate_pt_scores_blocks(files, evt_data: gr.EventData):
             True,
             "csv-container",
         ),
+        gr.HTML(value=STEP_2, visible=True),
         gr.Column(visible=True),
         radio_create_ui(
             first_practical_task,
@@ -80,7 +89,7 @@ def event_handler_calculate_pt_scores_blocks(files, evt_data: gr.EventData):
             visible=False,
             render=True,
         ),
-        gr.Column(visible=
+        gr.Column(visible=True),
         number_create_ui(visible=False),
         dropdown_create_ui(visible=False),
         number_create_ui(visible=False),
@@ -89,6 +98,92 @@ def event_handler_calculate_pt_scores_blocks(files, evt_data: gr.EventData):
         number_create_ui(visible=False),
         number_create_ui(visible=False),
         number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        dropdown_create_ui(
+            label=f"Potential candidates by professional responsibilities ({len(config_data.Settings_DROPDOWN_CANDIDATES)})",
+            info=config_data.InformationMessages_DROPDOWN_CANDIDATES_INFO,
+            choices=config_data.Settings_DROPDOWN_CANDIDATES,
+            value=config_data.Settings_DROPDOWN_CANDIDATES[0],
+            visible=True,
+            elem_classes="dropdown-container",
+        ),
+        number_create_ui(
+            value=weights_professions[0],
+            minimum=config_data.Values_0_100[0],
+            maximum=config_data.Values_0_100[1],
+            step=1,
+            label=config_data.Labels_NUMBER_IMPORTANCE_OPE_LABEL,
+            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                config_data.Values_0_100[0], config_data.Values_0_100[1]
+            ),
+            show_label=True,
+            interactive=interactive_professions,
+            visible=True,
+            render=True,
+            elem_classes="number-container",
+        ),
+        number_create_ui(
+            value=weights_professions[1],
+            minimum=config_data.Values_0_100[0],
+            maximum=config_data.Values_0_100[1],
+            step=1,
+            label=config_data.Labels_NUMBER_IMPORTANCE_CON_LABEL,
+            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                config_data.Values_0_100[0], config_data.Values_0_100[1]
+            ),
+            show_label=True,
+            interactive=interactive_professions,
+            visible=True,
+            render=True,
+            elem_classes="number-container",
+        ),
+        number_create_ui(
+            value=weights_professions[2],
+            minimum=config_data.Values_0_100[0],
+            maximum=config_data.Values_0_100[1],
+            step=1,
+            label=config_data.Labels_NUMBER_IMPORTANCE_EXT_LABEL,
+            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                config_data.Values_0_100[0], config_data.Values_0_100[1]
+            ),
+            show_label=True,
+            interactive=interactive_professions,
+            visible=True,
+            render=True,
+            elem_classes="number-container",
+        ),
+        number_create_ui(
+            value=weights_professions[3],
+            minimum=config_data.Values_0_100[0],
+            maximum=config_data.Values_0_100[1],
+            step=1,
+            label=config_data.Labels_NUMBER_IMPORTANCE_AGR_LABEL,
+            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                config_data.Values_0_100[0], config_data.Values_0_100[1]
+            ),
+            show_label=True,
+            interactive=interactive_professions,
+            visible=True,
+            render=True,
+            elem_classes="number-container",
+        ),
+        number_create_ui(
+            value=weights_professions[4],
+            minimum=config_data.Values_0_100[0],
+            maximum=config_data.Values_0_100[1],
+            step=1,
+            label=config_data.Labels_NUMBER_IMPORTANCE_NNEU_LABEL,
+            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                config_data.Values_0_100[0], config_data.Values_0_100[1]
+            ),
+            show_label=True,
+            interactive=interactive_professions,
+            visible=True,
+            render=True,
+            elem_classes="number-container",
+        ),
         button(
             config_data.OtherMessages_CALCULATE_PRACTICAL_TASK,
             True,
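Note: `read_csv_file` and `extract_profession_weights` come from the new app/utils.py, whose diff is not shown on this page. A purely hypothetical sketch of what `extract_profession_weights` might look like, inferred only from how it is called here (it returns five trait weights plus an interactivity flag for the chosen profession):

def extract_profession_weights(df_traits_priority, profession):
    # Hypothetical: assumes one row per profession and one column per OCEAN trait.
    row = df_traits_priority[df_traits_priority.iloc[:, 0] == profession]
    if row.empty:
        # Unknown profession: neutral weights, leave the inputs editable.
        return [0, 0, 0, 0, 0], True
    return row.iloc[0, 1:6].astype(int).tolist(), False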
app/event_handlers/clear_blocks.py CHANGED
@@ -9,6 +9,7 @@ import gradio as gr
 
 # Importing necessary components for the Gradio app
 from app.config import config_data
+from app.description_steps import STEP_2
 from app.practical_tasks import supported_practical_tasks
 from app.components import (
     html_message,
@@ -48,6 +49,7 @@ def event_handler_clear_blocks():
             False,
             "csv-container",
         ),
+        gr.HTML(value=STEP_2, visible=False),
         gr.Column(visible=False),
         radio_create_ui(
             first_practical_task,
@@ -82,6 +84,15 @@ def event_handler_clear_blocks():
         number_create_ui(visible=False),
         number_create_ui(visible=False),
         number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        dropdown_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
+        number_create_ui(visible=False),
         gr.Row(visible=False),
         gr.Column(visible=False),
         dataframe(visible=False),
app/event_handlers/dropdown_candidates.py ADDED
@@ -0,0 +1,109 @@
+"""
+File: dropdown_candidates.py
+Author: Elena Ryumina and Dmitry Ryumin
+Description: Event handler for Gradio app to filter dropdown candidates based on selected dropdown candidates.
+License: MIT License
+"""
+
+# Importing necessary components for the Gradio app
+from app.config import config_data
+from app.utils import read_csv_file, extract_profession_weights
+from app.components import number_create_ui, dropdown_create_ui
+
+
+def event_handler_dropdown_candidates(practical_subtasks, dropdown_candidates):
+    if practical_subtasks.lower() == "professional groups":
+        df_traits_priority_for_professions = read_csv_file(
+            config_data.Links_PROFESSIONS
+        )
+
+        weights, interactive = extract_profession_weights(
+            df_traits_priority_for_professions,
+            dropdown_candidates,
+        )
+
+        return (
+            number_create_ui(
+                value=weights[0],
+                minimum=config_data.Values_0_100[0],
+                maximum=config_data.Values_0_100[1],
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_OPE_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=weights[1],
+                minimum=config_data.Values_0_100[0],
+                maximum=config_data.Values_0_100[1],
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_CON_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=weights[2],
+                minimum=config_data.Values_0_100[0],
+                maximum=config_data.Values_0_100[1],
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_EXT_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=weights[3],
+                minimum=config_data.Values_0_100[0],
+                maximum=config_data.Values_0_100[1],
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_AGR_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=weights[4],
+                minimum=config_data.Values_0_100[0],
+                maximum=config_data.Values_0_100[1],
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_NNEU_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+        )
+    else:
+        return (
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+        )
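Note: the five trait-weight `number_create_ui(...)` blocks above are repeated almost verbatim in this file, in calculate_pt_scores_blocks.py and in practical_subtasks.py. A possible refactor (not part of this commit) that would generate them in a loop from the same config_data labels:

def trait_weight_inputs(weights, interactive):
    labels = [
        config_data.Labels_NUMBER_IMPORTANCE_OPE_LABEL,
        config_data.Labels_NUMBER_IMPORTANCE_CON_LABEL,
        config_data.Labels_NUMBER_IMPORTANCE_EXT_LABEL,
        config_data.Labels_NUMBER_IMPORTANCE_AGR_LABEL,
        config_data.Labels_NUMBER_IMPORTANCE_NNEU_LABEL,
    ]
    # One number input per OCEAN trait, mirroring the hand-written blocks above.
    return tuple(
        number_create_ui(
            value=weight,
            minimum=config_data.Values_0_100[0],
            maximum=config_data.Values_0_100[1],
            step=1,
            label=label,
            info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
                config_data.Values_0_100[0], config_data.Values_0_100[1]
            ),
            show_label=True,
            interactive=interactive,
            visible=True,
            render=True,
            elem_classes="number-container",
        )
        for weight, label in zip(weights, labels)
    )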
app/event_handlers/event_handlers.py CHANGED
@@ -19,6 +19,7 @@ from app.event_handlers.calculate_pt_scores_blocks import (
 )
 from app.event_handlers.practical_tasks import event_handler_practical_tasks
 from app.event_handlers.practical_subtasks import event_handler_practical_subtasks
+from app.event_handlers.dropdown_candidates import event_handler_dropdown_candidates
 from app.event_handlers.calculate_practical_tasks import (
     event_handler_calculate_practical_task_blocks,
 )
@@ -34,6 +35,7 @@ def setup_app_event_handlers(
     clear_app,
     pt_scores,
     csv_pt_scores,
+    step_2,
     practical_tasks,
     practical_subtasks,
     settings_practical_tasks,
@@ -45,6 +47,15 @@ def setup_app_event_handlers(
     target_score_agr,
     target_score_nneu,
     equal_coefficient,
+    number_priority,
+    number_importance_traits,
+    threshold_consumer_preferences,
+    dropdown_candidates,
+    number_openness,
+    number_conscientiousness,
+    number_extraversion,
+    number_agreeableness,
+    number_non_neuroticism,
     calculate_practical_task,
     practical_subtasks_selected,
     practical_tasks_column,
@@ -78,6 +89,7 @@ def setup_app_event_handlers(
             notifications,
             pt_scores,
             csv_pt_scores,
+            step_2,
             practical_tasks_column,
             practical_tasks,
             practical_subtasks,
@@ -91,6 +103,15 @@ def setup_app_event_handlers(
             target_score_agr,
             target_score_nneu,
             equal_coefficient,
+            number_priority,
+            number_importance_traits,
+            threshold_consumer_preferences,
+            dropdown_candidates,
+            number_openness,
+            number_conscientiousness,
+            number_extraversion,
+            number_agreeableness,
+            number_non_neuroticism,
             calculate_practical_task,
             sorted_videos,
             sorted_videos_column,
@@ -120,6 +141,7 @@ def setup_app_event_handlers(
             clear_app,
             pt_scores,
             csv_pt_scores,
+            step_2,
             practical_tasks_column,
             practical_tasks,
             practical_subtasks,
@@ -133,6 +155,15 @@ def setup_app_event_handlers(
             target_score_agr,
             target_score_nneu,
             equal_coefficient,
+            number_priority,
+            number_importance_traits,
+            threshold_consumer_preferences,
+            dropdown_candidates,
+            number_openness,
+            number_conscientiousness,
+            number_extraversion,
+            number_agreeableness,
+            number_non_neuroticism,
             sorted_videos,
             sorted_videos_column,
             practical_task_sorted,
@@ -162,6 +193,27 @@ def setup_app_event_handlers(
             target_score_agr,
             target_score_nneu,
             equal_coefficient,
+            number_priority,
+            number_importance_traits,
+            threshold_consumer_preferences,
+            dropdown_candidates,
+            number_openness,
+            number_conscientiousness,
+            number_extraversion,
+            number_agreeableness,
+            number_non_neuroticism,
+        ],
+        queue=True,
+    )
+    dropdown_candidates.change(
+        fn=event_handler_dropdown_candidates,
+        inputs=[practical_subtasks, dropdown_candidates],
+        outputs=[
+            number_openness,
+            number_conscientiousness,
+            number_extraversion,
+            number_agreeableness,
+            number_non_neuroticism,
         ],
         queue=True,
     )
@@ -179,6 +231,14 @@ def setup_app_event_handlers(
             target_score_agr,
             target_score_nneu,
             equal_coefficient,
+            number_priority,
+            number_importance_traits,
+            threshold_consumer_preferences,
+            number_openness,
+            number_conscientiousness,
+            number_extraversion,
+            number_agreeableness,
+            number_non_neuroticism,
         ],
         outputs=[
             sorted_videos,
app/event_handlers/examples_blocks.py CHANGED
@@ -13,4 +13,7 @@ def event_handler_examples_blocks():
         "videos/video1.mp4",
         "videos/video2.mp4",
         "videos/video3.mp4",
+        "videos/video4.mp4",
+        "videos/video5.mp4",
+        "videos/video6.mp4",
     ]
app/event_handlers/practical_subtasks.py
CHANGED
@@ -9,6 +9,7 @@ import gradio as gr
|
|
9 |
|
10 |
# Importing necessary components for the Gradio app
|
11 |
from app.config import config_data
|
|
|
12 |
from app.components import number_create_ui, dropdown_create_ui
|
13 |
|
14 |
|
@@ -17,7 +18,114 @@ def event_handler_practical_subtasks(
|
|
17 |
):
|
18 |
practical_subtasks_selected[practical_tasks] = practical_subtasks
|
19 |
|
20 |
-
if practical_subtasks.lower() == "professional
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
return (
|
22 |
practical_subtasks_selected,
|
23 |
gr.Column(visible=True),
|
@@ -27,7 +135,7 @@ def event_handler_practical_subtasks(
|
|
27 |
maximum=1.0,
|
28 |
step=0.01,
|
29 |
label=config_data.Labels_THRESHOLD_PROFESSIONAL_SKILLS_LABEL,
|
30 |
-
info=config_data.
|
31 |
show_label=True,
|
32 |
interactive=True,
|
33 |
visible=True,
|
@@ -48,6 +156,15 @@ def event_handler_practical_subtasks(
|
|
48 |
number_create_ui(visible=False),
|
49 |
number_create_ui(visible=False),
|
50 |
number_create_ui(visible=False),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
)
|
52 |
elif (
|
53 |
practical_subtasks.lower() == "finding a suitable junior colleague"
|
@@ -64,7 +181,7 @@ def event_handler_practical_subtasks(
|
|
64 |
maximum=1.0,
|
65 |
step=0.000001,
|
66 |
label=config_data.Labels_TARGET_SCORE_OPE_LABEL,
|
67 |
-
info=config_data.
|
68 |
show_label=True,
|
69 |
interactive=True,
|
70 |
visible=True,
|
@@ -77,7 +194,7 @@ def event_handler_practical_subtasks(
|
|
77 |
maximum=1.0,
|
78 |
step=0.000001,
|
79 |
label=config_data.Labels_TARGET_SCORE_CON_LABEL,
|
80 |
-
info=config_data.
|
81 |
show_label=True,
|
82 |
interactive=True,
|
83 |
visible=True,
|
@@ -90,7 +207,7 @@ def event_handler_practical_subtasks(
|
|
90 |
maximum=1.0,
|
91 |
step=0.000001,
|
92 |
label=config_data.Labels_TARGET_SCORE_EXT_LABEL,
|
93 |
-
info=config_data.
|
94 |
show_label=True,
|
95 |
interactive=True,
|
96 |
visible=True,
|
@@ -103,7 +220,7 @@ def event_handler_practical_subtasks(
|
|
103 |
maximum=1.0,
|
104 |
step=0.000001,
|
105 |
label=config_data.Labels_TARGET_SCORE_AGR_LABEL,
|
106 |
-
info=config_data.
|
107 |
show_label=True,
|
108 |
interactive=True,
|
109 |
visible=True,
|
@@ -116,7 +233,7 @@ def event_handler_practical_subtasks(
|
|
116 |
maximum=1.0,
|
117 |
step=0.000001,
|
118 |
label=config_data.Labels_TARGET_SCORE_NNEU_LABEL,
|
119 |
-
info=config_data.
|
120 |
show_label=True,
|
121 |
interactive=True,
|
122 |
visible=True,
|
@@ -129,13 +246,90 @@ def event_handler_practical_subtasks(
|
|
129 |
maximum=1.0,
|
130 |
step=0.01,
|
131 |
label=config_data.Labels_EQUAL_COEFFICIENT_LABEL,
|
132 |
-
info=config_data.
|
133 |
show_label=True,
|
134 |
interactive=True,
|
135 |
visible=True,
|
136 |
render=True,
|
137 |
elem_classes="number-container",
|
138 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
139 |
)
|
140 |
else:
|
141 |
return (
|
@@ -149,4 +343,13 @@ def event_handler_practical_subtasks(
|
|
149 |
number_create_ui(visible=False),
|
150 |
number_create_ui(visible=False),
|
151 |
number_create_ui(visible=False),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
152 |
)
|
|
|
9 |
|
10 |
# Importing necessary components for the Gradio app
|
11 |
from app.config import config_data
|
12 |
+
from app.utils import read_csv_file, extract_profession_weights
|
13 |
from app.components import number_create_ui, dropdown_create_ui
|
14 |
|
15 |
|
|
|
18 |
):
|
19 |
practical_subtasks_selected[practical_tasks] = practical_subtasks
|
20 |
|
21 |
+
if practical_subtasks.lower() == "professional groups":
|
22 |
+
df_traits_priority_for_professions = read_csv_file(
|
23 |
+
config_data.Links_PROFESSIONS
|
24 |
+
)
|
25 |
+
weights_professions, interactive_professions = extract_profession_weights(
|
26 |
+
df_traits_priority_for_professions,
|
27 |
+
config_data.Settings_DROPDOWN_CANDIDATES[0],
|
28 |
+
)
|
29 |
+
|
30 |
+
return (
|
31 |
+
practical_subtasks_selected,
|
32 |
+
gr.Column(visible=True),
|
33 |
+
number_create_ui(visible=False),
|
34 |
+
dropdown_create_ui(visible=False),
|
35 |
+
number_create_ui(visible=False),
|
36 |
+
number_create_ui(visible=False),
|
37 |
+
number_create_ui(visible=False),
|
38 |
+
number_create_ui(visible=False),
|
39 |
+
number_create_ui(visible=False),
|
40 |
+
number_create_ui(visible=False),
|
41 |
+
number_create_ui(visible=False),
|
42 |
+
number_create_ui(visible=False),
|
43 |
+
number_create_ui(visible=False),
|
44 |
+
dropdown_create_ui(
|
45 |
+
label=f"Potential candidates by professional responsibilities ({len(config_data.Settings_DROPDOWN_CANDIDATES)})",
|
46 |
+
info=config_data.InformationMessages_DROPDOWN_CANDIDATES_INFO,
|
47 |
+
choices=config_data.Settings_DROPDOWN_CANDIDATES,
|
48 |
+
value=config_data.Settings_DROPDOWN_CANDIDATES[0],
|
49 |
+
visible=True,
|
50 |
+
elem_classes="dropdown-container",
|
51 |
+
),
|
52 |
+
number_create_ui(
|
53 |
+
value=weights_professions[0],
|
54 |
+
minimum=config_data.Values_0_100[0],
|
55 |
+
maximum=config_data.Values_0_100[1],
|
56 |
+
step=1,
|
57 |
+
label=config_data.Labels_NUMBER_IMPORTANCE_OPE_LABEL,
|
58 |
+
info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
|
59 |
+
config_data.Values_0_100[0], config_data.Values_0_100[1]
|
60 |
+
),
|
61 |
+
show_label=True,
|
62 |
+
interactive=interactive_professions,
|
63 |
+
visible=True,
|
64 |
+
render=True,
|
65 |
+
elem_classes="number-container",
|
66 |
+
),
|
67 |
+
number_create_ui(
|
68 |
+
value=weights_professions[1],
|
69 |
+
minimum=config_data.Values_0_100[0],
|
70 |
+
maximum=config_data.Values_0_100[1],
|
71 |
+
step=1,
|
72 |
+
label=config_data.Labels_NUMBER_IMPORTANCE_CON_LABEL,
|
73 |
+
info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
|
74 |
+
config_data.Values_0_100[0], config_data.Values_0_100[1]
|
75 |
+
),
|
76 |
+
show_label=True,
|
77 |
+
interactive=interactive_professions,
|
78 |
+
visible=True,
|
79 |
+
render=True,
|
80 |
+
elem_classes="number-container",
|
81 |
+
),
|
82 |
+
number_create_ui(
|
83 |
+
value=weights_professions[2],
|
84 |
+
minimum=config_data.Values_0_100[0],
|
85 |
+
maximum=config_data.Values_0_100[1],
|
86 |
+
step=1,
|
87 |
+
label=config_data.Labels_NUMBER_IMPORTANCE_EXT_LABEL,
|
88 |
+
info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
|
89 |
+
config_data.Values_0_100[0], config_data.Values_0_100[1]
|
90 |
+
),
|
91 |
+
show_label=True,
|
92 |
+
interactive=interactive_professions,
|
93 |
+
visible=True,
|
94 |
+
render=True,
|
95 |
+
elem_classes="number-container",
|
96 |
+
),
|
97 |
+
number_create_ui(
|
98 |
+
value=weights_professions[3],
|
99 |
+
minimum=config_data.Values_0_100[0],
|
100 |
+
maximum=config_data.Values_0_100[1],
|
101 |
+
step=1,
|
102 |
+
label=config_data.Labels_NUMBER_IMPORTANCE_AGR_LABEL,
|
103 |
+
info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
|
104 |
+
config_data.Values_0_100[0], config_data.Values_0_100[1]
|
105 |
+
),
|
106 |
+
show_label=True,
|
107 |
+
interactive=interactive_professions,
|
108 |
+
visible=True,
|
109 |
+
render=True,
|
110 |
+
elem_classes="number-container",
|
111 |
+
),
|
112 |
+
number_create_ui(
|
113 |
+
value=weights_professions[4],
|
114 |
+
minimum=config_data.Values_0_100[0],
|
115 |
+
maximum=config_data.Values_0_100[1],
|
116 |
+
step=1,
|
117 |
+
label=config_data.Labels_NUMBER_IMPORTANCE_NNEU_LABEL,
|
118 |
+
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    config_data.Values_0_100[0], config_data.Values_0_100[1]
+                ),
+                show_label=True,
+                interactive=interactive_professions,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+        )
+    elif practical_subtasks.lower() == "professional skills":
         return (
             practical_subtasks_selected,
             gr.Column(visible=True),
 ...
                 maximum=1.0,
                 step=0.01,
                 label=config_data.Labels_THRESHOLD_PROFESSIONAL_SKILLS_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
             number_create_ui(visible=False),
             number_create_ui(visible=False),
             number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            dropdown_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
         )
     elif (
         practical_subtasks.lower() == "finding a suitable junior colleague"
 ...
                 maximum=1.0,
                 step=0.000001,
                 label=config_data.Labels_TARGET_SCORE_OPE_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
                 maximum=1.0,
                 step=0.000001,
                 label=config_data.Labels_TARGET_SCORE_CON_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
                 maximum=1.0,
                 step=0.000001,
                 label=config_data.Labels_TARGET_SCORE_EXT_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
                 maximum=1.0,
                 step=0.000001,
                 label=config_data.Labels_TARGET_SCORE_AGR_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
                 maximum=1.0,
                 step=0.000001,
                 label=config_data.Labels_TARGET_SCORE_NNEU_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
 ...
                 maximum=1.0,
                 step=0.01,
                 label=config_data.Labels_EQUAL_COEFFICIENT_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
                 show_label=True,
                 interactive=True,
                 visible=True,
                 render=True,
                 elem_classes="number-container",
             ),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            dropdown_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+        )
+    elif (
+        practical_subtasks.lower() == "car characteristics"
+        or practical_subtasks.lower() == "mobile device application categories"
+    ):
+        df_correlation_coefficients = read_csv_file(
+            config_data.Links_CAR_CHARACTERISTICS,
+            ["Trait", "Style and performance", "Safety and practicality"],
+        )
+
+        return (
+            practical_subtasks_selected,
+            gr.Column(visible=True),
+            number_create_ui(visible=False),
+            dropdown_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(
+                value=3,
+                minimum=1,
+                maximum=df_correlation_coefficients.columns.size,
+                step=1,
+                label=config_data.Labels_NUMBER_PRIORITY_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+                    1, df_correlation_coefficients.columns.size
+                ),
+                show_label=True,
+                interactive=True,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=3,
+                minimum=1,
+                maximum=5,
+                step=1,
+                label=config_data.Labels_NUMBER_IMPORTANCE_TRAITS_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(1, 5),
+                show_label=True,
+                interactive=True,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            number_create_ui(
+                value=0.55,
+                minimum=0.0,
+                maximum=1.0,
+                step=0.01,
+                label=config_data.Labels_THRESHOLD_CONSUMER_PREFERENCES_LABEL,
+                info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
+                show_label=True,
+                interactive=True,
+                visible=True,
+                render=True,
+                elem_classes="number-container",
+            ),
+            dropdown_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
         )
     else:
         return (
 ...
             number_create_ui(visible=False),
             number_create_ui(visible=False),
             number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            dropdown_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
+            number_create_ui(visible=False),
         )
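For orientation, the handler above follows one rule throughout: it always returns exactly one value per output component, toggling visibility so that only the settings relevant to the selected subtask appear. Below is a minimal, illustrative sketch of that pattern (not from this commit; the component names `threshold` and `category` and the handler name are hypothetical, whereas the real code builds its updates through `number_create_ui`/`dropdown_create_ui` helpers):

import gradio as gr

def on_subtask_change(subtask: str):
    # One return value per output component, in the same order as `outputs=[...]`.
    show_threshold = subtask.lower() == "professional skills"
    return (
        gr.Number(visible=show_threshold),        # threshold setting
        gr.Dropdown(visible=not show_threshold),  # alternative setting
    )

with gr.Blocks() as demo:
    subtask = gr.Dropdown(choices=["Professional skills", "Car characteristics"])
    threshold = gr.Number(visible=False)
    category = gr.Dropdown(visible=False)
    subtask.change(on_subtask_change, inputs=subtask, outputs=[threshold, category])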
app/event_handlers/practical_task_sorted.py
CHANGED
@@ -15,13 +15,6 @@ from app.components import video_create_ui
 def event_handler_practical_task_sorted(
     files, practical_task_sorted, evt_data: gr.SelectData
 ):
-    # print(
-    #     f"{evt_data.value}, {evt_data.index}, {evt_data.target}, {evt_data.selected}, {evt_data._data}"
-    # )
-    # print(practical_task_sorted)
-
-    # print(evt_data, evt_data.index[0], practical_task_sorted.iloc[evt_data.index[0]])
-
     person_id = int(practical_task_sorted.iloc[evt_data.index[0]]["Person ID"]) - 1

     if evt_data.index[0] == 0:
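The cleaned-up handler keeps only the logic that matters: for a selection in a gr.Dataframe, `gr.SelectData.index` is a `[row, column]` pair, so `evt_data.index[0]` is the clicked row, and the 1-based "Person ID" column is converted back to a 0-based index into the uploaded files. A small self-contained sketch of the same pattern (illustrative; it assumes a dataframe with a "Person ID" column and a plain list of file paths):

import gradio as gr

def on_row_select(files, df, evt: gr.SelectData):
    # evt.index is [row, column]; map the 1-based "Person ID" to a 0-based file index.
    person_id = int(df.iloc[evt.index[0]]["Person ID"]) - 1
    return files[person_id]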
app/tabs.py
CHANGED
@@ -9,9 +9,12 @@ import gradio as gr

 # Importing necessary components for the Gradio app
 from app.description import DESCRIPTION
+from app.description_steps import STEP_1, STEP_2
+from app.app import APP
 from app.authors import AUTHORS
 from app.config import config_data
 from app.practical_tasks import supported_practical_tasks
+from app.utils import read_csv_file, extract_profession_weights
 from app.components import (
     html_message,
     files_create_ui,
@@ -27,6 +30,8 @@ from app.components import (
 def app_tab():
     gr.Markdown(value=DESCRIPTION)

+    gr.HTML(value=STEP_1)
+
     with gr.Row():
         files = files_create_ui()

@@ -62,6 +67,8 @@ def app_tab():
         "csv-container",
     )

+    step_2 = gr.HTML(value=STEP_2, visible=False)
+
     first_practical_task = next(iter(supported_practical_tasks))

     with gr.Column(scale=1, visible=False, render=True) as practical_tasks_column:
@@ -95,7 +102,7 @@
         maximum=1.0,
         step=0.01,
         label=config_data.Labels_THRESHOLD_PROFESSIONAL_SKILLS_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -118,7 +125,7 @@
         maximum=1.0,
         step=0.000001,
         label=config_data.Labels_TARGET_SCORE_OPE_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -132,7 +139,7 @@
         maximum=1.0,
         step=0.000001,
         label=config_data.Labels_TARGET_SCORE_CON_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -146,7 +153,7 @@
         maximum=1.0,
         step=0.000001,
         label=config_data.Labels_TARGET_SCORE_EXT_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -160,7 +167,7 @@
         maximum=1.0,
         step=0.000001,
         label=config_data.Labels_TARGET_SCORE_AGR_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -174,7 +181,7 @@
         maximum=1.0,
         step=0.000001,
         label=config_data.Labels_TARGET_SCORE_NNEU_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
         show_label=True,
         interactive=True,
         visible=False,
@@ -188,7 +195,42 @@
         maximum=1.0,
         step=0.01,
         label=config_data.Labels_EQUAL_COEFFICIENT_LABEL,
-        info=config_data.
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
+        show_label=True,
+        interactive=True,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    df_correlation_coefficients = read_csv_file(
+        config_data.Links_CAR_CHARACTERISTICS,
+        ["Trait", "Style and performance", "Safety and practicality"],
+    )
+
+    number_priority = number_create_ui(
+        value=3,
+        minimum=1,
+        maximum=df_correlation_coefficients.columns.size,
+        step=1,
+        label=config_data.Labels_NUMBER_PRIORITY_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            1, df_correlation_coefficients.columns.size
+        ),
+        show_label=True,
+        interactive=True,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    number_importance_traits = number_create_ui(
+        value=3,
+        minimum=1,
+        maximum=5,
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_TRAITS_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(1, 5),
         show_label=True,
         interactive=True,
         visible=False,
@@ -196,6 +238,117 @@
         elem_classes="number-container",
     )

+    threshold_consumer_preferences = number_create_ui(
+        value=0.55,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.01,
+        label=config_data.Labels_THRESHOLD_CONSUMER_PREFERENCES_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0),
+        show_label=True,
+        interactive=True,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    dropdown_candidates = dropdown_create_ui(
+        label=f"Potential candidates by professional responsibilities ({len(config_data.Settings_DROPDOWN_CANDIDATES)})",
+        info=config_data.InformationMessages_DROPDOWN_CANDIDATES_INFO,
+        choices=config_data.Settings_DROPDOWN_CANDIDATES,
+        value=config_data.Settings_DROPDOWN_CANDIDATES[0],
+        visible=False,
+        elem_classes="dropdown-container",
+    )
+
+    df_traits_priority_for_professions = read_csv_file(
+        config_data.Links_PROFESSIONS
+    )
+    weights_professions, interactive_professions = extract_profession_weights(
+        df_traits_priority_for_professions,
+        config_data.Settings_DROPDOWN_CANDIDATES[0],
+    )
+
+    number_openness = number_create_ui(
+        value=weights_professions[0],
+        minimum=config_data.Values_0_100[0],
+        maximum=config_data.Values_0_100[1],
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_OPE_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            config_data.Values_0_100[0], config_data.Values_0_100[1]
+        ),
+        show_label=True,
+        interactive=interactive_professions,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    number_conscientiousness = number_create_ui(
+        value=weights_professions[1],
+        minimum=config_data.Values_0_100[0],
+        maximum=config_data.Values_0_100[1],
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_CON_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            config_data.Values_0_100[0], config_data.Values_0_100[1]
+        ),
+        show_label=True,
+        interactive=interactive_professions,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    number_extraversion = number_create_ui(
+        value=weights_professions[2],
+        minimum=config_data.Values_0_100[0],
+        maximum=config_data.Values_0_100[1],
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_EXT_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            config_data.Values_0_100[0], config_data.Values_0_100[1]
+        ),
+        show_label=True,
+        interactive=interactive_professions,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    number_agreeableness = number_create_ui(
+        value=weights_professions[3],
+        minimum=config_data.Values_0_100[0],
+        maximum=config_data.Values_0_100[1],
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_AGR_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            config_data.Values_0_100[0], config_data.Values_0_100[1]
+        ),
+        show_label=True,
+        interactive=interactive_professions,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
+    number_non_neuroticism = number_create_ui(
+        value=weights_professions[4],
+        minimum=config_data.Values_0_100[0],
+        maximum=config_data.Values_0_100[1],
+        step=1,
+        label=config_data.Labels_NUMBER_IMPORTANCE_NNEU_LABEL,
+        info=config_data.InformationMessages_VALUE_FROM_TO_INFO.format(
+            config_data.Values_0_100[0], config_data.Values_0_100[1]
+        ),
+        show_label=True,
+        interactive=interactive_professions,
+        visible=False,
+        render=True,
+        elem_classes="number-container",
+    )
+
     calculate_practical_task = button(
         config_data.OtherMessages_CALCULATE_PRACTICAL_TASK,
         True,
@@ -247,6 +400,7 @@ def app_tab():
         clear_app,
         pt_scores,
         csv_pt_scores,
+        step_2,
         practical_tasks,
         practical_subtasks,
         settings_practical_tasks,
@@ -258,6 +412,15 @@ def app_tab():
         target_score_agr,
         target_score_nneu,
         equal_coefficient,
+        number_priority,
+        number_importance_traits,
+        threshold_consumer_preferences,
+        dropdown_candidates,
+        number_openness,
+        number_conscientiousness,
+        number_extraversion,
+        number_agreeableness,
+        number_non_neuroticism,
         calculate_practical_task,
         practical_subtasks_selected,
         practical_tasks_column,
@@ -270,5 +433,9 @@ def app_tab():
     )


+def about_app_tab():
+    return gr.HTML(value=APP)
+
+
 def about_authors_tab():
-    return gr.
+    return gr.HTML(value=AUTHORS)
app/utils.py
ADDED
@@ -0,0 +1,66 @@
+"""
+File: utils.py
+Author: Elena Ryumina and Dmitry Ryumin
+Description: Utility functions.
+License: MIT License
+"""
+
+import pandas as pd
+
+
+def preprocess_scores_df(df, name):
+    df.index.name = name
+    df.index += 1
+    df.index = df.index.map(str)
+
+    return df
+
+
+def read_csv_file(file_path, drop_columns=[]):
+    df = pd.read_csv(file_path)
+
+    if len(drop_columns) != 0:
+        df = pd.DataFrame(df.drop(drop_columns, axis=1))
+
+    return preprocess_scores_df(df, "ID")
+
+
+def round_numeric_values(x):
+    if isinstance(x, (int, float)):
+        return round(x, 3)
+
+    return x
+
+
+def apply_rounding_and_rename_columns(df):
+    df_rounded = df.rename(
+        columns={
+            "Openness": "OPE",
+            "Conscientiousness": "CON",
+            "Extraversion": "EXT",
+            "Agreeableness": "AGR",
+            "Non-Neuroticism": "NNEU",
+        }
+    )
+
+    columns_to_round = df_rounded.columns[1:]
+    df_rounded[columns_to_round] = df_rounded[columns_to_round].applymap(
+        round_numeric_values
+    )
+
+    return df_rounded
+
+
+def extract_profession_weights(df, dropdown_candidates):
+    try:
+        weights_professions = df.loc[df["Profession"] == dropdown_candidates, :].values[
+            0
+        ][1:]
+        interactive_professions = False
+    except Exception:
+        weights_professions = [0] * 5
+        interactive_professions = True
+    else:
+        weights_professions = list(map(int, weights_professions))
+
+    return weights_professions, interactive_professions
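For reference, a short usage sketch of the new helpers; the URL and the dropped column names are taken from config.toml below, everything else is illustrative:

from app.config import config_data
from app.utils import read_csv_file, apply_rounding_and_rename_columns

# Load the auto-characteristics table without its descriptive columns;
# read_csv_file also gives the frame a 1-based string index named "ID".
df = read_csv_file(
    config_data.Links_CAR_CHARACTERISTICS,
    ["Trait", "Style and performance", "Safety and practicality"],
)

# Shorten the trait columns to OPE/CON/EXT/AGR/NNEU and round numeric cells to 3 decimals.
df_short = apply_rounding_and_rename_columns(df)

One caveat: pandas deprecated DataFrame.applymap in favour of DataFrame.map from version 2.1, so with the pinned pandas 2.2.1 the rounding step still works but emits a FutureWarning. The mutable default `drop_columns=[]` is harmless here because the list is never mutated.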
config.toml
CHANGED
@@ -1,5 +1,5 @@
 [AppSettings]
-APP_VERSION = "0.
+APP_VERSION = "0.7.0"
 CSS_PATH = "app.css"

 [InformationMessages]
@@ -7,15 +7,13 @@ NOTI_VIDEOS = "Select the video(s)"
 PRACTICAL_TASKS_INFO = "Choose a practical task"
 PRACTICAL_SUBTASKS_INFO = "Choose a practical subtask"
 NOTI_IN_DEV = "In development"
-THRESHOLD_PROFESSIONAL_SKILLS_INFO = "Set value from 0 to 1.0"
 DROPDOWN_PROFESSIONAL_SKILLS_INFO = "What professional skill are you interested in?"
 DROPDOWN_DROPDOWN_COLLEAGUES_INFO = "What colleague are you interested in?"
-
-
-
-
-
-EQUAL_COEFFICIENT_INFO = "Set value from 0 to 1.0"
+DROPDOWN_CANDIDATES_INFO = "What profession are you interested in?"
+VALUE_FROM_TO_INFO = "Set value from {} to {}"
+SUM_WEIGHTS = "The sum of the weights of the personality traits should be 100, not {}"
+STEP_1 = "Step 1: Calculation of personality traits scores"
+STEP_2 = "Step 2: Solving a practical task"

 [OtherMessages]
 CALCULATE_PT_SCORES = "Calculation of Big Five personality traits scores"
@@ -23,8 +21,10 @@ CALCULATE_PRACTICAL_TASK = "Solving a practical task"
 CLEAR_APP = "Clear"
 EXAMPLES_APP = "Examples"
 EXPORT_PT_SCORES = "Export Big Five personality traits to a CSV file"
+EXPORT_PG = "Export ranking professional groups results to a CSV file"
 EXPORT_PS = "Export ranking professional skill results to a CSV file"
 EXPORT_WT = "Export ranking effective work teams results to a CSV file"
+EXPORT_CP = "Export consumer preferences for industrial goods results to a CSV file"
 NOTI_CALCULATE = "You can calculate Big Five personality traits scores"

 [Labels]
@@ -37,23 +37,41 @@ TARGET_SCORE_EXT_LABEL = "Extraversion target score"
 TARGET_SCORE_AGR_LABEL = "Agreeableness target score"
 TARGET_SCORE_NNEU_LABEL = "Non-Neuroticism target score"
 EQUAL_COEFFICIENT_LABEL = "Equal coefficient"
+NUMBER_PRIORITY_LABEL = "Priority number"
+NUMBER_IMPORTANCE_TRAITS_LABEL = "Importance traits number"
+NUMBER_IMPORTANCE_OPE_LABEL = "Openness weight"
+NUMBER_IMPORTANCE_CON_LABEL = "Conscientiousness weight"
+NUMBER_IMPORTANCE_EXT_LABEL = "Extraversion weight"
+NUMBER_IMPORTANCE_AGR_LABEL = "Agreeableness weight"
+NUMBER_IMPORTANCE_NNEU_LABEL = "Non-Neuroticism weight"
+THRESHOLD_CONSUMER_PREFERENCES_LABEL = "Polarity traits threshold"

 [TabCreators]
 "App" = "app_tab"
+"About the App" = "about_app_tab"
 "About the Authors" = "about_authors_tab"

 [Filenames]
 PT_SCORES = "personality_traits_scores.csv"
 PT_SKILLS_SCORES = "personality_skills_scores.csv"
 COLLEAGUE_RANKING = "_colleague_ranking.csv"
+CAR_CHARACTERISTICS = "auto_characteristics_priorities.csv"
+MDA_CATEGORIES = "divice_characteristics_priorities.csv"
+POTENTIAL_CANDIDATES = "potential_candidates.csv"

 [Settings]
+SHORT_PROFESSIONAL_SKILLS = ["OPE", "CON", "EXT", "AGR", "NNEU"]
 DROPDOWN_PROFESSIONAL_SKILLS = ["Analytical", "Interactive", "Routine", "Non-Routine"]
 DROPDOWN_COLLEAGUES = ["major", "minor"]
+DROPDOWN_CANDIDATES = ["Managers/executives", "Entrepreneurship", "Social/Non profit making professions", "Public sector professions", "Scientists/researchers, and engineers", "Custom"]

 [Values]
 TARGET_SCORES = [0.527886, 0.522337, 0.458468, 0.51761, 0.444649]
+0_100 = [0, 100]

 [Links]
 PROFESSIONAL_SKILLS = "https://download.sberdisk.ru/download/file/478678231?token=0qiZwliLtHWWYMv&filename=professional_skills.csv"
-FINDING_COLLEAGUE = "https://download.sberdisk.ru/download/file/478675819?token=LuB7L1QsEY0UuSs&filename=colleague_ranking.csv"
+FINDING_COLLEAGUE = "https://download.sberdisk.ru/download/file/478675819?token=LuB7L1QsEY0UuSs&filename=colleague_ranking.csv"
+CAR_CHARACTERISTICS = "https://download.sberdisk.ru/download/file/478675818?token=EjfLMqOeK8cfnOu&filename=auto_characteristics.csv"
+MDA_CATEGORIES = "https://download.sberdisk.ru/download/file/478676690?token=7KcAxPqMpWiYQnx&filename=divice_characteristics.csv"
+PROFESSIONS = "https://download.sberdisk.ru/download/file/478675798?token=fF5fNZVpthQlEV0&filename=traits_priority_for_professions.csv"
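The attribute names used throughout the code (for example `config_data.InformationMessages_VALUE_FROM_TO_INFO` and `config_data.Values_0_100`) suggest that the TOML tables are flattened into `<Section>_<key>` attributes; a key such as `0_100` is only reachable this way because the section name is prefixed to it. A minimal sketch of such a loader, offered purely as an assumption about app/config.py (which is not part of this commit view):

from types import SimpleNamespace
import toml

raw = toml.load("config.toml")

# Flatten "[Section] key = value" pairs into Section_key attributes.
config_data = SimpleNamespace(
    **{f"{section}_{key}": value for section, table in raw.items() for key, value in table.items()}
)

print(config_data.Values_0_100)                                            # [0, 100]
print(config_data.InformationMessages_VALUE_FROM_TO_INFO.format(0, 1.0))   # Set value from 0 to 1.0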
requirements.txt
CHANGED
@@ -2,6 +2,6 @@ gradio==4.23.0
 requests==2.31.0
 PyYAML==6.0.1
 toml==0.10.2
-oceanai==1.0.
+oceanai==1.0.0a27
 tf-keras==2.16.0
 pandas==2.2.1