JeeweonJung committed on
Commit
d934460
1 Parent(s): 899a131

Update model

Files changed (23)
  1. README.md +17 -17
  2. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/14epoch.pth +3 -0
  3. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/RESULTS.md +17 -0
  4. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/config.yaml +213 -0
  5. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/backward_time.png +0 -0
  6. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/clip.png +0 -0
  7. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/eer.png +0 -0
  8. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/forward_time.png +0 -0
  9. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/gpu_max_cached_mem_GB.png +0 -0
  10. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/grad_norm.png +0 -0
  11. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/iter_time.png +0 -0
  12. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/loss.png +0 -0
  13. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/loss_scale.png +0 -0
  14. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/mindcf.png +0 -0
  15. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/n_trials.png +0 -0
  16. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/nontrg_mean.png +0 -0
  17. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/nontrg_std.png +0 -0
  18. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/optim0_lr0.png +0 -0
  19. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/optim_step_time.png +0 -0
  20. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/train_time.png +0 -0
  21. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/trg_mean.png +0 -0
  22. data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/trg_std.png +0 -0
  23. meta.yaml +6 -6
README.md CHANGED
@@ -13,7 +13,7 @@ license: cc-by-4.0

  ### `espnet/voxcelebs12_mfaconformer_mel`

- This model was trained by Jungjee using voxceleb recipe in [espnet](https://github.com/espnet/espnet/).
+ This model was trained by JeeweonJung using voxceleb recipe in [espnet](https://github.com/espnet/espnet/).

  ### Demo: How to use in ESPnet2

@@ -22,7 +22,7 @@ if you haven't done that already.

  ```bash
  cd espnet
- git checkout 77cb785e7b1d74345a520b30328069426990068d
+ git checkout 49b39d0256bca3e40a5eec6b3779c7c3fb2b544d
  pip install -e .
  cd egs2/voxceleb/spk1
  ./run.sh --skip_data_prep false --skip_train true --download_model espnet/voxcelebs12_mfaconformer_mel
@@ -31,34 +31,34 @@ cd egs2/voxceleb/spk1
  <!-- Generated by scripts/utils/show_spk_result.py -->
  # RESULTS
  ## Environments
- date: 2024-01-02 18:45:26.125135
+ date: 2024-06-29 01:06:46.764928

- - python version: 3.9.16 (main, Mar 8 2023, 14:00:05) [GCC 11.2.0]
- - espnet version: 202310
- - pytorch version: 2.0.1
+ - python version: 3.10.14 (main, May 6 2024, 19:42:50) [GCC 11.2.0]
+ - espnet version: 202402
+ - pytorch version: 1.13.1

  | | Mean | Std |
  |---|---|---|
- | Target | 7.8749 | 3.7367 |
- | Non-target | 2.3675 | 2.3675 |
+ | Target | 7.7962 | 4.0017 |
+ | Non-target | 2.1924 | 2.1924 |

  | Model name | EER(%) | minDCF |
  |---|---|---|
- | conf/tuning/train_mfa_conformer_adamw | 0.862 | 0.06275 |
+ | conf/tuning/train_mfa_conformer_adamw_fixEncoderBug2 | 0.782 | 0.06567 |

  ## SPK config

  <details><summary>expand</summary>

  ```
- config: conf/tuning/train_mfa_conformer_adamw.yaml
+ config: conf/tuning/train_mfa_conformer_adamw_fixEncoderBug2.yaml
  print_config: false
  log_level: INFO
  drop_last_iter: true
  dry_run: false
  iterator_type: category
  valid_iterator_type: sequence
- output_dir: exp/spk_train_mfa_conformer_adamw_raw_sp
+ output_dir: exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp
  ngpu: 1
  seed: 0
  num_workers: 6
@@ -69,7 +69,7 @@ dist_world_size: 4
  dist_rank: 0
  local_rank: 0
  dist_master_addr: localhost
- dist_master_port: 46597
+ dist_master_port: 38945
  dist_launcher: null
  multiprocessing_distributed: true
  unused_parameters: false
@@ -121,8 +121,8 @@ init_param: []
  ignore_init_mismatch: false
  freeze_param: []
  num_iters_per_epoch: null
- batch_size: 512
- valid_batch_size: 40
+ batch_size: 200
+ valid_batch_size: 10
  batch_bins: 1000000
  valid_batch_bins: null
  train_shape_file:
@@ -173,11 +173,11 @@ optim_conf:
  amsgrad: false
  scheduler: cosineannealingwarmuprestarts
  scheduler_conf:
- first_cycle_steps: 250000
+ first_cycle_steps: 300000
  cycle_mult: 1.0
  max_lr: 0.001
  min_lr: 1.0e-08
- warmup_steps: 10000
+ warmup_steps: 30000
  gamma: 0.7
  init: null
  use_preprocessor: true
@@ -262,7 +262,7 @@ loss_conf:
  k_top: 5
  required:
  - output_dir
- version: '202310'
+ version: '202402'
  distributed: true
  ```

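For context on the usage step in the README above: once the checkpoint is downloaded, the model can also be used for speaker-embedding extraction directly from Python. The snippet below is a minimal sketch only; it assumes ESPnet exposes a speaker-inference wrapper at `espnet2.bin.spk_inference.Speech2Embedding` with a `from_pretrained` helper (verify against the installed ESPnet version), and `example_16k.wav` is a placeholder file.

```python
# Minimal sketch (not verified against this exact ESPnet commit): extract a
# speaker embedding with the uploaded model. Module/class names are assumptions.
import numpy as np
import soundfile as sf
from espnet2.bin.spk_inference import Speech2Embedding  # assumed entry point

# Build the inference wrapper from the Hub tag used in the README above.
speech2embed = Speech2Embedding.from_pretrained(
    model_tag="espnet/voxcelebs12_mfaconformer_mel"
)

# 16 kHz mono audio, matching `sample_rate: 16000` in config.yaml.
speech, rate = sf.read("example_16k.wav")  # placeholder path
assert rate == 16000

embedding = speech2embed(np.asarray(speech, dtype=np.float32))
print(embedding.shape)  # expected 192-dim, per `projector_conf: output_size: 192`
```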
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/14epoch.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cadbc010c5f8680d1b77ff9a8d82d4525bbef7a781ceb7bffbda7f079fc1c9c6
+ size 260989417
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/RESULTS.md ADDED
@@ -0,0 +1,17 @@
+ <!-- Generated by scripts/utils/show_spk_result.py -->
+ # RESULTS
+ ## Environments
+ date: 2024-06-29 01:06:46.764928
+
+ - python version: 3.10.14 (main, May 6 2024, 19:42:50) [GCC 11.2.0]
+ - espnet version: 202402
+ - pytorch version: 1.13.1
+
+ | | Mean | Std |
+ |---|---|---|
+ | Target | 7.7962 | 4.0017 |
+ | Non-target | 2.1924 | 2.1924 |
+
+ | Model name | EER(%) | minDCF |
+ |---|---|---|
+ | conf/tuning/train_mfa_conformer_adamw_fixEncoderBug2 | 0.782 | 0.06567 |
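The EER and minDCF values in the table above are computed from per-trial scores and 0/1 labels on the VoxCeleb1-O trials. The sketch below is a generic NumPy reimplementation of those two metrics, not the actual `scripts/utils/show_spk_result.py`; the operating point `p_target=0.05` and unit costs are assumptions, and the toy scores merely mimic the target/non-target means and standard deviations reported above.

```python
# Hedged sketch: EER and minDCF from trial scores and labels (1 = target).
import numpy as np

def eer_and_mindcf(scores, labels, p_target=0.05, c_miss=1.0, c_fa=1.0):
    scores = np.asarray(scores, dtype=float)
    labels = np.asarray(labels, dtype=int)
    thresholds = np.sort(np.unique(scores))
    tgt = scores[labels == 1]
    non = scores[labels == 0]
    # False rejection / false acceptance rates at every candidate threshold.
    frr = np.array([(tgt < t).mean() for t in thresholds])
    far = np.array([(non >= t).mean() for t in thresholds])
    eer_idx = np.argmin(np.abs(frr - far))
    eer = (frr[eer_idx] + far[eer_idx]) / 2.0
    # Detection cost function, normalized by the best trivial system.
    dcf = c_miss * p_target * frr + c_fa * (1.0 - p_target) * far
    min_dcf = dcf.min() / min(c_miss * p_target, c_fa * (1.0 - p_target))
    return eer * 100.0, min_dcf

# Toy usage: scores drawn to resemble the reported target/non-target statistics.
rng = np.random.default_rng(0)
scores = np.concatenate([rng.normal(7.80, 4.00, 1000), rng.normal(2.19, 2.19, 1000)])
labels = np.concatenate([np.ones(1000, int), np.zeros(1000, int)])
print(eer_and_mindcf(scores, labels))
```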
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/config.yaml ADDED
@@ -0,0 +1,213 @@
+ config: conf/tuning/train_mfa_conformer_adamw_fixEncoderBug2.yaml
+ print_config: false
+ log_level: INFO
+ drop_last_iter: true
+ dry_run: false
+ iterator_type: category
+ valid_iterator_type: sequence
+ output_dir: exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp
+ ngpu: 1
+ seed: 0
+ num_workers: 6
+ num_att_plot: 0
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: 4
+ dist_rank: 0
+ local_rank: 0
+ dist_master_addr: localhost
+ dist_master_port: 38945
+ dist_launcher: null
+ multiprocessing_distributed: true
+ unused_parameters: false
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: true
+ cudnn_deterministic: false
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 40
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - valid
+ - eer
+ - min
+ keep_nbest_models: 3
+ nbest_averaging_interval: 0
+ grad_clip: 9999
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 1
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: 100
+ use_matplotlib: true
+ use_tensorboard: true
+ create_graph_in_tensorboard: false
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ use_lora: false
+ save_lora_only: true
+ lora_conf: {}
+ pretrain_path: null
+ init_param: []
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: null
+ batch_size: 200
+ valid_batch_size: 10
+ batch_bins: 1000000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/spk_stats_16k_sp/train/speech_shape
+ valid_shape_file:
+ - exp/spk_stats_16k_sp/valid/speech_shape
+ batch_type: folded
+ valid_batch_type: null
+ fold_length:
+ - 120000
+ sort_in_batch: descending
+ shuffle_within_batch: false
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ chunk_excluded_key_prefixes: []
+ chunk_default_fs: null
+ train_data_path_and_name_and_type:
+ - - dump/raw/voxceleb12_devs_sp/wav.scp
+ - speech
+ - sound
+ - - dump/raw/voxceleb12_devs_sp/utt2spk
+ - spk_labels
+ - text
+ valid_data_path_and_name_and_type:
+ - - dump/raw/voxceleb1_test/trial.scp
+ - speech
+ - sound
+ - - dump/raw/voxceleb1_test/trial2.scp
+ - speech2
+ - sound
+ - - dump/raw/voxceleb1_test/trial_label
+ - spk_labels
+ - text
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ allow_multi_rates: false
+ valid_max_cache_size: null
+ exclude_weight_decay: false
+ exclude_weight_decay_conf: {}
+ optim: adamw
+ optim_conf:
+ lr: 0.001
+ weight_decay: 1.0e-07
+ amsgrad: false
+ scheduler: cosineannealingwarmuprestarts
+ scheduler_conf:
+ first_cycle_steps: 300000
+ cycle_mult: 1.0
+ max_lr: 0.001
+ min_lr: 1.0e-08
+ warmup_steps: 30000
+ gamma: 0.7
+ init: null
+ use_preprocessor: true
+ input_size: null
+ target_duration: 3.0
+ spk2utt: dump/raw/voxceleb12_devs_sp/spk2utt
+ spk_num: 21615
+ sample_rate: 16000
+ num_eval: 10
+ rir_scp: ''
+ model_conf:
+ extract_feats_in_collect_stats: false
+ frontend: melspec_torch
+ frontend_conf:
+ preemp: true
+ n_fft: 512
+ log: true
+ win_length: 400
+ hop_length: 160
+ n_mels: 80
+ normalize: mn
+ specaug: null
+ specaug_conf: {}
+ normalize: null
+ normalize_conf: {}
+ encoder: mfaconformer
+ encoder_conf:
+ output_size: 512
+ attention_heads: 8
+ linear_units: 2048
+ num_blocks: 6
+ dropout_rate: 0.1
+ positional_dropout_rate: 0.1
+ attention_dropout_rate: 0.1
+ input_layer: conv2d2
+ normalize_before: true
+ macaron_style: true
+ rel_pos_type: latest
+ pos_enc_layer_type: rel_pos
+ selfattention_layer_type: rel_selfattn
+ activation_type: swish
+ use_cnn_module: true
+ cnn_module_kernel: 15
+ pooling: chn_attn_stat
+ pooling_conf: {}
+ projector: rawnet3
+ projector_conf:
+ output_size: 192
+ preprocessor: spk
+ preprocessor_conf:
+ target_duration: 3.0
+ sample_rate: 16000
+ num_eval: 5
+ noise_apply_prob: 0.5
+ noise_info:
+ - - 1.0
+ - dump/raw/musan_speech.scp
+ - - 4
+ - 7
+ - - 13
+ - 20
+ - - 1.0
+ - dump/raw/musan_noise.scp
+ - - 1
+ - 1
+ - - 0
+ - 15
+ - - 1.0
+ - dump/raw/musan_music.scp
+ - - 1
+ - 1
+ - - 5
+ - 15
+ rir_apply_prob: 0.5
+ rir_scp: dump/raw/rirs.scp
+ loss: aamsoftmax_sc_topk
+ loss_conf:
+ margin: 0.3
+ scale: 30
+ K: 3
+ mp: 0.06
+ k_top: 5
+ required:
+ - output_dir
+ version: '202402'
+ distributed: true
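The `frontend: melspec_torch` block in the config above describes an 80-dimensional log-mel front end (512-point FFT, 400-sample/25 ms window, 160-sample/10 ms hop, pre-emphasis, mean normalization via `normalize: mn`). The sketch below approximates those settings with torchaudio; it is not ESPnet's own frontend implementation and may differ in details such as windowing, the log floor, and the exact normalization.

```python
# Hedged sketch: approximate the melspec_torch frontend settings with torchaudio.
import torch
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=16000,
    n_fft=512,        # n_fft: 512
    win_length=400,   # win_length: 400 (25 ms at 16 kHz)
    hop_length=160,   # hop_length: 160 (10 ms hop)
    n_mels=80,        # n_mels: 80
)

wav = torch.randn(1, 16000 * 3)  # 3 s of audio, matching target_duration: 3.0
# Simple pre-emphasis filter, mirroring `preemp: true` (coefficient assumed 0.97).
wav = torch.cat([wav[:, :1], wav[:, 1:] - 0.97 * wav[:, :-1]], dim=1)

feats = torch.log(mel(wav) + 1e-6)                 # log: true (small floor assumed)
feats = feats - feats.mean(dim=-1, keepdim=True)   # normalize: mn (mean normalization)
print(feats.shape)  # (1, 80, n_frames)
```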
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/backward_time.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/clip.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/eer.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/forward_time.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/gpu_max_cached_mem_GB.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/grad_norm.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/iter_time.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/loss.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/loss_scale.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/mindcf.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/n_trials.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/nontrg_mean.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/nontrg_std.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/optim0_lr0.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/optim_step_time.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/train_time.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/trg_mean.png ADDED
data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/images/trg_std.png ADDED
meta.yaml CHANGED
@@ -1,8 +1,8 @@
- espnet: '202310'
+ espnet: '202402'
  files:
- model_file: save_exp/spk_train_mfa_conformer_adamw_raw_sp/29epoch.pth
- python: "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]"
- timestamp: 1704255534.987843
- torch: 2.0.1
+ model_file: /data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/14epoch.pth
+ python: 3.10.14 (main, May 6 2024, 19:42:50) [GCC 11.2.0]
+ timestamp: 1719638105.821687
+ torch: 1.13.1
  yaml_files:
- train_config: save_exp/spk_train_mfa_conformer_adamw_raw_sp/config.yaml
+ train_config: /data/user_data/jeeweonj/espnet_jee/egs2/voxceleb/spk1/exp/spk_train_mfa_conformer_adamw_fixEncoderBug2_raw_sp/config.yaml