ElenaRyumina committed on
Commit 87a92fb
1 Parent(s): 52d84c5
app/app.py CHANGED
@@ -65,16 +65,15 @@ APP = """
  <h3>Journals</h3>
  <pre>
  <code>
- @article{ryumina22_neurocomputing,
- author = {Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov},
- title = {In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study},
- journal = {Neurocomputing},
- volume = {514},
- pages = {435-450},
- year = {2022},
- doi = {<a href="https://doi.org/10.1016/j.neucom.2022.10.013">https://doi.org/10.1016/j.neucom.2022.10.013</a>},
+ @article{ryumina24_prl,
+ author = {Ryumina, Elena and Markitantov, Maxim and Ryumin, Dmitry and Karpov, Alexey},
+ title = {Gated Siamese Fusion Network based on Multimodal Deep and Hand-Crafted Features for Personality Traits Assessment},
+ journal = {Pattern Recognition Letters},
+ volume = {185},
+ pages = {45--51},
+ year = {2024},
+ doi = {<a href="https://doi.org/10.1016/j.patrec.2024.07.004">https://doi.org/10.1016/j.patrec.2024.07.004</a>},
  }
-
  @article{ryumina24_eswa,
  author = {Elena Ryumina and Maxim Markitantov and Dmitry Ryumin and Alexey Karpov},
  title = {OCEAN-AI Framework with EmoFormer Cross-Hemiface Attention Approach for Personality Traits Assessment},
@@ -83,6 +82,15 @@ APP = """
  pages = {122441},
  year = {2024},
  doi = {<a href="https://doi.org/10.1016/j.eswa.2023.122441">https://doi.org/10.1016/j.eswa.2023.122441</a>},
+ }
+ @article{ryumina22_neurocomputing,
+ author = {Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov},
+ title = {In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study},
+ journal = {Neurocomputing},
+ volume = {514},
+ pages = {435-450},
+ year = {2022},
+ doi = {<a href="https://doi.org/10.1016/j.neucom.2022.10.013">https://doi.org/10.1016/j.neucom.2022.10.013</a>},
  }
  </code>
  </pre>
@@ -90,6 +98,13 @@ APP = """
  <h3>Conferences</h3>
  <pre>
  <code>
+ @inproceedings{ryumina24_interspeech,
+ author = {Elena Ryumina and Dmitry Ryumin and and Alexey Karpov},
+ title = {OCEAN-AI: Open Multimodal Framework for Personality Traits Assessment and HR-Processes Automatization},
+ year = {2024},
+ booktitle = {INTERSPEECH},
+ pages = {in press},
+ }
  @inproceedings{ryumina23_interspeech,
  author = {Elena Ryumina and Dmitry Ryumin and Maxim Markitantov and Heysem Kaya and Alexey Karpov},
  title = {Multimodal Personality Traits Assessment (MuPTA) Corpus: The Impact of Spontaneous and Read Speech},
app/description.py CHANGED
@@ -15,6 +15,10 @@ TEMPLATE = """\
  <img src="https://img.shields.io/badge/version-v{version}-rc0" alt="{version_label}">
  <a href='https://github.com/DmitryRyumin/OCEANAI' target='_blank'><img src='https://img.shields.io/github/stars/DmitryRyumin/OCEANAI?style=flat' alt='GitHub' /></a>
  </div>
+
+ The models used in OCEAN-AI were trained on 15-second clips from the ChaLearn First Impression v2 dataset.
+ For more reliable predictions, 15-second videos are recommended, but OCEAN-AI can analyze videos of any length.
+ Due to limited computational resources on HuggingFace, we provide six 3-second videos as examples.
  """

  DESCRIPTIONS = [
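The note added to description.py recommends 15-second inputs because the models were trained on 15-second ChaLearn clips. As an illustration only (not part of this commit), a longer video could be trimmed to its first 15 seconds before analysis; the sketch below assumes a local ffmpeg installation and uses placeholder file names.

    # Hypothetical pre-processing step, not part of the app: keep only the
    # first 15 seconds of a clip so the input matches the training-clip length.
    import subprocess

    def trim_to_15_seconds(src: str, dst: str) -> None:
        # -t 15 limits the output duration; -c copy avoids re-encoding.
        subprocess.run(
            ["ffmpeg", "-y", "-i", src, "-t", "15", "-c", "copy", dst],
            check=True,
        )

    trim_to_15_seconds("interview_full.mp4", "interview_15s.mp4")  # placeholder paths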
app/event_handlers/practical_subtasks.py CHANGED
@@ -317,10 +317,21 @@ def event_handler_practical_subtasks(
          or practical_subtasks.lower() == "mobile device application categories"
          or practical_subtasks.lower() == "clothing style correlation"
      ):
-         df_correlation_coefficients = read_csv_file(
-             config_data.Links_CAR_CHARACTERISTICS,
-             ["Trait", "Style and performance", "Safety and practicality"],
-         )
+         if practical_subtasks.lower() == "car characteristics":
+
+             df_correlation_coefficients = read_csv_file(
+                 config_data.Links_CAR_CHARACTERISTICS,
+                 ["Trait", "Style and performance", "Safety and practicality"],
+             )
+
+         elif practical_subtasks.lower() == "mobile device application categories":
+
+             df_correlation_coefficients = read_csv_file(
+                 config_data.Links_MDA_CATEGORIES
+             )
+
+         elif practical_subtasks.lower() == "clothing style correlation":
+             df_correlation_coefficients = read_csv_file(config_data.Links_CLOTHING_SC)

      return (
          practical_subtasks_selected,
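This hunk replaces a single read_csv_file call with an if/elif chain, so each practical subtask loads its own correlation table (car characteristics, mobile device application categories, clothing style correlation). The same dispatch could also be written as a lookup table; the sketch below is illustrative only, and reuses the read_csv_file helper and config_data link attributes shown in the diff rather than introducing new app code.

    # Illustrative alternative to the if/elif chain above (not the app's code):
    # a lookup table from subtask name to (CSV link, optional column list).
    def load_correlation_table(practical_subtasks, config_data, read_csv_file):
        subtask_sources = {
            "car characteristics": (
                config_data.Links_CAR_CHARACTERISTICS,
                ["Trait", "Style and performance", "Safety and practicality"],
            ),
            "mobile device application categories": (config_data.Links_MDA_CATEGORIES, None),
            "clothing style correlation": (config_data.Links_CLOTHING_SC, None),
        }
        link, columns = subtask_sources[practical_subtasks.lower()]
        return read_csv_file(link, columns) if columns is not None else read_csv_file(link)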
app/oceanai_init.py CHANGED
@@ -26,10 +26,10 @@ def oceanai_initialization():

      # Loading the audio model weights
      url = _b5.weights_for_big5_["audio"][corpus]["hc"]["sberdisk"]
-     _ = _b5.load_audio_model_weights_hc(url=url, out=out)
+     _ = _b5.load_audio_model_weights_hc(url=url, out=out, force_reload=False)

      url = _b5.weights_for_big5_["audio"][corpus]["nn"]["sberdisk"]
-     _ = _b5.load_audio_model_weights_nn(url=url, out=out)
+     _ = _b5.load_audio_model_weights_nn(url=url, out=out, force_reload=False)

      # Building the video models
      _ = _b5.load_video_model_hc(lang="en", out=out)
@@ -38,16 +38,16 @@ def oceanai_initialization():

      # Loading the video model weights
      url = _b5.weights_for_big5_["video"][corpus]["hc"]["sberdisk"]
-     _ = _b5.load_video_model_weights_hc(url=url, out=out)
+     _ = _b5.load_video_model_weights_hc(url=url, out=out, force_reload=False)

      url = _b5.weights_for_big5_["video"][corpus]["fe"]["sberdisk"]
-     _ = _b5.load_video_model_weights_deep_fe(url=url, out=out)
+     _ = _b5.load_video_model_weights_deep_fe(url=url, out=out, force_reload=False)

      url = _b5.weights_for_big5_["video"][corpus]["nn"]["sberdisk"]
-     _ = _b5.load_video_model_weights_nn(url=url, out=out)
+     _ = _b5.load_video_model_weights_nn(url=url, out=out, force_reload=False)

      # Loading the dictionary of hand-crafted features (text modality)
-     _ = _b5.load_text_features(out=out)
+     _ = _b5.load_text_features(out=out, force_reload=False)

      # Building the text models
      _ = _b5.setup_translation_model()  # Russian language only
@@ -57,17 +57,17 @@ def oceanai_initialization():

      # Loading the text model weights
      url = _b5.weights_for_big5_["text"][corpus]["hc"]["sberdisk"]
-     _ = _b5.load_text_model_weights_hc(url=url, out=out)
+     _ = _b5.load_text_model_weights_hc(url=url, out=out, force_reload=False)

      url = _b5.weights_for_big5_["text"][corpus]["nn"]["sberdisk"]
-     _ = _b5.load_text_model_weights_nn(url=url, out=out)
+     _ = _b5.load_text_model_weights_nn(url=url, out=out, force_reload=False)

      # Building the model for multimodal information fusion
      _ = _b5.load_avt_model_b5(out=out)

      # Loading the weights of the multimodal fusion model
      url = _b5.weights_for_big5_["avt"][corpus]["b5"]["sberdisk"]
-     _ = _b5.load_avt_model_weights_b5(url=url, out=out)
+     _ = _b5.load_avt_model_weights_b5(url=url, out=out, force_reload=False)

      return _b5

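The only functional change in this file is passing force_reload=False to the weight-loading calls, so previously downloaded weights are reused instead of being fetched again on every initialization. If one also wanted to avoid re-running the whole initialization within a single process, a simple cache around oceanai_initialization() would be one option; the sketch below is an assumption about usage, not code from this repository, and the import path in particular is hypothetical.

    # Hypothetical usage sketch: build the OCEAN-AI pipeline once per process
    # and reuse it, complementing force_reload=False on the weight downloads.
    from functools import lru_cache

    from app.oceanai_init import oceanai_initialization  # import path assumed

    @lru_cache(maxsize=1)
    def get_pipeline():
        return oceanai_initialization()

    _b5 = get_pipeline()  # later calls return the same initialized object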
video_metadata.yaml CHANGED
@@ -1,14 +1,14 @@
  video_metadata:
-   1_9093a4ca3c0c834.mp4:
-     - Emily
-     - Taylor
-
-     - "+1 (555) 456-7890"
-   2_a6a198e51d073b0.mp4:
+   1_a6a198e51d073b0.mp4:
      - Michael
      - Brown

      - "+1 (555) 234-5678"
+   2_9093a4ca3c0c834.mp4:
+     - Emily
+     - Taylor
+
+     - "+1 (555) 456-7890"
    3_9987232dd677712.mp4:
      - Jack
      - Smith
videos/{2_a6a198e51d073b0.mp4 → 1_a6a198e51d073b0.mp4} RENAMED
File without changes
videos/{1_9093a4ca3c0c834.mp4 → 2_9093a4ca3c0c834.mp4} RENAMED
File without changes
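The renames keep the files in videos/ aligned with the reordered keys in video_metadata.yaml, where each example clip maps to a list of metadata fields (first name, last name, and a phone number, among others). As an illustration only, such a file could be read with PyYAML; the snippet below assumes the yaml package is installed and is not part of this commit.

    # Illustrative loader for video_metadata.yaml (assumes PyYAML is installed).
    from pathlib import Path
    import yaml

    with open("video_metadata.yaml", encoding="utf-8") as f:
        metadata = yaml.safe_load(f)["video_metadata"]

    for filename, fields in metadata.items():
        clip_path = Path("videos") / filename  # e.g. videos/1_a6a198e51d073b0.mp4
        print(clip_path, fields)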