jvision committed on
Commit
59cfd8c
1 Parent(s): c1c8aa1

first commit

Browse files
.gitattributes CHANGED
@@ -32,3 +32,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ checkpoint_85000.pth filter=lfs diff=lfs merge=lfs -text
36
+ supplemental/model_se.pth.tar filter=lfs diff=lfs merge=lfs -text
37
+ supplemental/speakers-dataset.pth filter=lfs diff=lfs merge=lfs -text
38
+ supplemental/speakers-combined.pth filter=lfs diff=lfs merge=lfs -text
checkpoint_85000.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5562a87e9e03e29e4695c6eb218dc0ca7783e441daa9cc2875e4ef96d42f1d50
3
+ size 1043196301
config.json ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "logger_uri": null,
3
+ "project_name": "VCTK_Indian_and_SouthAfrican_Males_Females",
4
+ "run_description": "",
5
+ "print_step": 50,
6
+ "plot_step": 100,
7
+ "model_param_stats": false,
8
+ "wandb_entity": null,
9
+ "dashboard_logger": "tensorboard",
10
+ "log_model_step": 1000,
11
+ "save_step": 5000,
12
+ "save_n_checkpoints": 2,
13
+ "save_checkpoints": true,
14
+ "save_all_best": false,
15
+ "save_best_after": 10000,
16
+ "target_loss": "loss_1",
17
+ "print_eval": false,
18
+ "test_delay_epochs": 0,
19
+ "run_eval": true,
20
+ "run_eval_steps": null,
21
+ "distributed_backend": "nccl",
22
+ "distributed_url": "tcp://localhost:54321",
23
+ "mixed_precision": true,
24
+ "epochs": 1000,
25
+ "batch_size": 16,
26
+ "eval_batch_size": 16,
27
+ "grad_clip": [
28
+ 1000.0,
29
+ 1000.0
30
+ ],
31
+ "scheduler_after_epoch": true,
32
+ "lr": 0.001,
33
+ "optimizer": "AdamW",
34
+ "optimizer_params": {
35
+ "betas": [
36
+ 0.8,
37
+ 0.99
38
+ ],
39
+ "eps": 1e-09,
40
+ "weight_decay": 0.01
41
+ },
42
+ "lr_scheduler": null,
43
+ "lr_scheduler_params": {},
44
+ "use_grad_scaler": false,
45
+ "cudnn_enable": true,
46
+ "cudnn_deterministic": false,
47
+ "cudnn_benchmark": false,
48
+ "training_seed": 54321,
49
+ "model": "vits",
50
+ "num_loader_workers": 8,
51
+ "num_eval_loader_workers": 0,
52
+ "use_noise_augment": false,
53
+ "audio": {
54
+ "fft_size": 1024,
55
+ "sample_rate": 16000,
56
+ "win_length": 1024,
57
+ "hop_length": 256,
58
+ "num_mels": 80,
59
+ "mel_fmin": 0,
60
+ "mel_fmax": null
61
+ },
62
+ "use_phonemes": false,
63
+ "phonemizer": "espeak",
64
+ "phoneme_language": "en-us",
65
+ "compute_input_seq_cache": true,
66
+ "text_cleaner": "multilingual_cleaners",
67
+ "enable_eos_bos_chars": false,
68
+ "test_sentences_file": "",
69
+ "phoneme_cache_path": "projects/art/phoneme_cache",
70
+ "characters": {
71
+ "characters_class": "TTS.tts.models.vits.VitsCharacters",
72
+ "vocab_dict": null,
73
+ "pad": "_",
74
+ "eos": "&",
75
+ "bos": "*",
76
+ "blank": null,
77
+ "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
78
+ "punctuations": "!'(),-.:;? ",
79
+ "phonemes": "iy\u0268\u0289\u026fu\u026a\u028f\u028ae\u00f8\u0258\u0259\u0275\u0264o\u025b\u0153\u025c\u025e\u028c\u0254\u00e6\u0250a\u0276\u0251\u0252\u1d7b\u0298\u0253\u01c0\u0257\u01c3\u0284\u01c2\u0260\u01c1\u029bpbtd\u0288\u0256c\u025fk\u0261q\u0262\u0294\u0274\u014b\u0272\u0273n\u0271m\u0299r\u0280\u2c71\u027e\u027d\u0278\u03b2fv\u03b8\u00f0sz\u0283\u0292\u0282\u0290\u00e7\u029dx\u0263\u03c7\u0281\u0127\u0295h\u0266\u026c\u026e\u028b\u0279\u027bj\u0270l\u026d\u028e\u029f\u02c8\u02cc\u02d0\u02d1\u028dw\u0265\u029c\u02a2\u02a1\u0255\u0291\u027a\u0267\u025a\u02de\u026b'\u0303' ",
80
+ "is_unique": true,
81
+ "is_sorted": true
82
+ },
83
+ "add_blank": true,
84
+ "batch_group_size": 48,
85
+ "loss_masking": null,
86
+ "min_audio_len": 1,
87
+ "max_audio_len": 160000,
88
+ "min_text_len": 1,
89
+ "max_text_len": Infinity,
90
+ "compute_f0": false,
91
+ "compute_energy": false,
92
+ "compute_linear_spec": true,
93
+ "precompute_num_workers": 12,
94
+ "start_by_longest": true,
95
+ "shuffle": false,
96
+ "drop_last": false,
97
+ "datasets": [
98
+ {
99
+ "formatter": "vctk",
100
+ "dataset_name": "",
101
+ "path": "/home/iguana/projects/java/tts-voicetrain/dataset-library/VCTK_Indian_and_SouthAfrican_Males_Females",
102
+ "meta_file_train": "",
103
+ "ignored_speakers": [],
104
+ "language": "en",
105
+ "phonemizer": "",
106
+ "meta_file_val": "",
107
+ "meta_file_attn_mask": ""
108
+ }
109
+ ],
110
+ "test_sentences": [
111
+ [
112
+ "This is an example of a sentence to be synthesized.",
113
+ "VCTK_p248",
114
+ null,
115
+ "en"
116
+ ],
117
+ [
118
+ "This is an example of a sentence to be synthesized.",
119
+ "VCTK_p251",
120
+ null,
121
+ "en"
122
+ ]
123
+ ],
124
+ "eval_split_max_size": 256,
125
+ "eval_split_size": 0.01,
126
+ "use_speaker_weighted_sampler": false,
127
+ "speaker_weighted_sampler_alpha": 1.0,
128
+ "use_language_weighted_sampler": false,
129
+ "language_weighted_sampler_alpha": 1.0,
130
+ "use_length_weighted_sampler": false,
131
+ "length_weighted_sampler_alpha": 1.0,
132
+ "model_args": {
133
+ "num_chars": 284,
134
+ "out_channels": 513,
135
+ "spec_segment_size": 32,
136
+ "hidden_channels": 192,
137
+ "hidden_channels_ffn_text_encoder": 768,
138
+ "num_heads_text_encoder": 2,
139
+ "num_layers_text_encoder": 10,
140
+ "kernel_size_text_encoder": 3,
141
+ "dropout_p_text_encoder": 0.1,
142
+ "dropout_p_duration_predictor": 0.5,
143
+ "kernel_size_posterior_encoder": 5,
144
+ "dilation_rate_posterior_encoder": 1,
145
+ "num_layers_posterior_encoder": 16,
146
+ "kernel_size_flow": 5,
147
+ "dilation_rate_flow": 1,
148
+ "num_layers_flow": 4,
149
+ "resblock_type_decoder": "2",
150
+ "resblock_kernel_sizes_decoder": [
151
+ 3,
152
+ 7,
153
+ 11
154
+ ],
155
+ "resblock_dilation_sizes_decoder": [
156
+ [
157
+ 1,
158
+ 3,
159
+ 5
160
+ ],
161
+ [
162
+ 1,
163
+ 3,
164
+ 5
165
+ ],
166
+ [
167
+ 1,
168
+ 3,
169
+ 5
170
+ ]
171
+ ],
172
+ "upsample_rates_decoder": [
173
+ 8,
174
+ 8,
175
+ 2,
176
+ 2
177
+ ],
178
+ "upsample_initial_channel_decoder": 512,
179
+ "upsample_kernel_sizes_decoder": [
180
+ 16,
181
+ 16,
182
+ 4,
183
+ 4
184
+ ],
185
+ "periods_multi_period_discriminator": [
186
+ 2,
187
+ 3,
188
+ 5,
189
+ 7,
190
+ 11
191
+ ],
192
+ "use_sdp": true,
193
+ "noise_scale": 1.0,
194
+ "inference_noise_scale": 0.667,
195
+ "length_scale": 1.0,
196
+ "noise_scale_dp": 1.0,
197
+ "inference_noise_scale_dp": 1.0,
198
+ "max_inference_len": null,
199
+ "init_discriminator": true,
200
+ "use_spectral_norm_disriminator": false,
201
+ "use_speaker_embedding": false,
202
+ "num_speakers": 13,
203
+ "speakers_file": "supplemental/speakers.pth",
204
+ "d_vector_file": [
205
+ "supplemental/speakers-base.json",
206
+ "supplemental/speakers-dataset.pth"
207
+ ],
208
+ "speaker_embedding_channels": 256,
209
+ "use_d_vector_file": true,
210
+ "d_vector_dim": 512,
211
+ "detach_dp_input": true,
212
+ "use_language_embedding": true,
213
+ "embedded_language_dim": 4,
214
+ "num_languages": 3,
215
+ "language_ids_file": "supplemental/language_ids.json",
216
+ "use_speaker_encoder_as_loss": true,
217
+ "speaker_encoder_config_path": "supplemental/config_se.json",
218
+ "speaker_encoder_model_path": "supplemental/model_se.pth.tar",
219
+ "condition_dp_on_speaker": true,
220
+ "freeze_encoder": false,
221
+ "freeze_DP": false,
222
+ "freeze_PE": false,
223
+ "freeze_flow_decoder": false,
224
+ "freeze_waveform_decoder": false,
225
+ "encoder_sample_rate": null,
226
+ "interpolate_z": true,
227
+ "reinit_DP": false,
228
+ "reinit_text_encoder": false
229
+ },
230
+ "lr_gen": 0.0002,
231
+ "lr_disc": 0.0002,
232
+ "lr_scheduler_gen": "ExponentialLR",
233
+ "lr_scheduler_gen_params": {
234
+ "gamma": 0.999875,
235
+ "last_epoch": -1
236
+ },
237
+ "lr_scheduler_disc": "ExponentialLR",
238
+ "lr_scheduler_disc_params": {
239
+ "gamma": 0.999875,
240
+ "last_epoch": -1
241
+ },
242
+ "kl_loss_alpha": 1.0,
243
+ "disc_loss_alpha": 1.0,
244
+ "gen_loss_alpha": 1.0,
245
+ "feat_loss_alpha": 1.0,
246
+ "mel_loss_alpha": 45.0,
247
+ "dur_loss_alpha": 1.0,
248
+ "speaker_encoder_loss_alpha": 9.0,
249
+ "return_wav": true,
250
+ "use_weighted_sampler": false,
251
+ "weighted_sampler_attrs": {},
252
+ "weighted_sampler_multipliers": {},
253
+ "r": 1,
254
+ "num_speakers": 13,
255
+ "use_speaker_embedding": false,
256
+ "speakers_file": "supplemental/speakers.pth",
257
+ "speaker_embedding_channels": 256,
258
+ "language_ids_file": "supplemental/language_ids.json",
259
+ "use_language_embedding": true,
260
+ "use_d_vector_file": true,
261
+ "d_vector_file": [
262
+ "supplemental/speakers-base.json",
263
+ "supplemental/speakers-dataset.pth"
264
+ ],
265
+ "d_vector_dim": 512
266
+ }
prepare_model.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import ast
import json
import os
import subprocess

# Speaker metadata for the fine-tuned voices. Keys keep the "VCTK_" prefix so
# they match the speaker index names reported by the model checkpoint.
data = {
    "VCTK_p251": {"age": 26, "gender": "M", "accents": "Indian", "region": "India", "comments": ""},
    "VCTK_p376": {"age": 22, "gender": "M", "accents": "Indian", "region": "India", "comments": ""},
    "VCTK_p248": {"age": 23, "gender": "F", "accents": "Indian", "region": "India", "comments": ""},
    "VCTK_p314": {"age": 26, "gender": "F", "accents": "SouthAfrican", "region": "Cape Town", "comments": ""},
    "VCTK_p323": {"age": 19, "gender": "F", "accents": "SouthAfrican", "region": "Pretoria", "comments": ""},
    "VCTK_p336": {"age": 18, "gender": "F", "accents": "SouthAfrican", "region": "Johannesburg", "comments": ""},
    "VCTK_p347": {"age": 26, "gender": "M", "accents": "SouthAfrican", "region": "Johannesburg", "comments": ""}
}

# Persist the metadata so it can be inspected and reloaded below.
with open('speakers-log.json', 'w') as file:
    json.dump(data, file, indent=2)

# Ask the TTS CLI which speaker indices the checkpoint knows about.
# The grep strips the CLI's banner/progress lines ('|', '>' and blank lines),
# leaving only the printed dict. (\\s is escaped explicitly so the shell still
# receives \s without Python's invalid-escape DeprecationWarning.)
command = "tts --model_path checkpoint_85000.pth --config_path config.json --list_speaker_idxs | grep -vE '^(\\s*\\||\\s*>|\\s*$)'"
output = subprocess.check_output(command, shell=True, text=True)

# The CLI prints a Python dict repr (single-quoted keys), so json.loads would
# reject it. ast.literal_eval parses the same literal safely, without the
# arbitrary-code-execution risk of eval() on external process output.
speaker_indices = ast.literal_eval(output)

# Reload the speaker metadata written above.
with open('speakers-log.json', 'r') as file:
    speaker_ids = json.load(file)

# Create the output directory once, up front (no-op if it already exists).
os.makedirs("samples", exist_ok=True)

for speaker_idx in speaker_indices:
    # Speaker ids are used as-is; the metadata keys already carry the
    # 'VCTK_' prefix, so no stripping is needed.
    speaker_id = speaker_idx

    # Skip any model speaker we have no metadata for.
    if speaker_id not in speaker_ids:
        continue
    speaker_id_json = speaker_ids[speaker_id]

    # Sentence each speaker reads in their sample clip.
    text = f"Hello, I am from {speaker_id_json['region']}. I hope that you will select my voice for your project. Thank you."

    out_path = f"samples/{speaker_id}.wav"
    tts_command = f"tts --text \"{text}\" --model_path checkpoint_85000.pth --language_idx en --config_path config.json --speaker_idx \"{speaker_id}\" --out_path {out_path}"

    # Synthesize the sample. NOTE(review): shell-interpolated command; safe
    # here because speaker ids come from our own metadata, but do not reuse
    # this pattern with untrusted input.
    os.system(tts_command)
samples/VCTK_p248.wav ADDED
Binary file (241 kB). View file
 
samples/VCTK_p251.wav ADDED
Binary file (222 kB). View file
 
samples/VCTK_p314.wav ADDED
Binary file (233 kB). View file
 
samples/VCTK_p323.wav ADDED
Binary file (255 kB). View file
 
samples/VCTK_p336.wav ADDED
Binary file (213 kB). View file
 
samples/VCTK_p347.wav ADDED
Binary file (234 kB). View file
 
samples/VCTK_p376.wav ADDED
Binary file (201 kB). View file
 
speakers-log.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "VCTK_p251": {
3
+ "age": 26,
4
+ "gender": "M",
5
+ "accents": "Indian",
6
+ "region": "India",
7
+ "comments": ""
8
+ },
9
+ "VCTK_p376": {
10
+ "age": 22,
11
+ "gender": "M",
12
+ "accents": "Indian",
13
+ "region": "India",
14
+ "comments": ""
15
+ },
16
+ "VCTK_p248": {
17
+ "age": 23,
18
+ "gender": "F",
19
+ "accents": "Indian",
20
+ "region": "India",
21
+ "comments": ""
22
+ },
23
+ "VCTK_p314": {
24
+ "age": 26,
25
+ "gender": "F",
26
+ "accents": "SouthAfrican",
27
+ "region": "Cape Town",
28
+ "comments": ""
29
+ },
30
+ "VCTK_p323": {
31
+ "age": 19,
32
+ "gender": "F",
33
+ "accents": "SouthAfrican",
34
+ "region": "Pretoria",
35
+ "comments": ""
36
+ },
37
+ "VCTK_p336": {
38
+ "age": 18,
39
+ "gender": "F",
40
+ "accents": "SouthAfrican",
41
+ "region": "Johannesburg",
42
+ "comments": ""
43
+ },
44
+ "VCTK_p347": {
45
+ "age": 26,
46
+ "gender": "M",
47
+ "accents": "SouthAfrican",
48
+ "region": "Johannesburg",
49
+ "comments": ""
50
+ }
51
+ }
supplemental/config_se.json ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": "speaker_encoder",
3
+ "run_name": "speaker_encoder",
4
+ "run_description": "resnet speaker encoder trained with commonvoice all languages dev and train, Voxceleb 1 dev and Voxceleb 2 dev",
5
+ "epochs": 100000,
6
+ "batch_size": null,
7
+ "eval_batch_size": null,
8
+ "mixed_precision": false,
9
+ "run_eval": true,
10
+ "test_delay_epochs": 0,
11
+ "print_eval": false,
12
+ "print_step": 50,
13
+ "tb_plot_step": 100,
14
+ "tb_model_param_stats": false,
15
+ "save_step": 1000,
16
+ "checkpoint": true,
17
+ "keep_all_best": false,
18
+ "keep_after": 10000,
19
+ "num_loader_workers": 8,
20
+ "num_val_loader_workers": 0,
21
+ "use_noise_augment": false,
22
+ "output_path": "../checkpoints/speaker_encoder/language_balanced/normalized/angleproto-4-samples-by-speakers/",
23
+ "distributed_backend": "nccl",
24
+ "distributed_url": "tcp://localhost:54321",
25
+ "audio": {
26
+ "fft_size": 512,
27
+ "win_length": 400,
28
+ "hop_length": 160,
29
+ "frame_shift_ms": null,
30
+ "frame_length_ms": null,
31
+ "stft_pad_mode": "reflect",
32
+ "sample_rate": 16000,
33
+ "resample": false,
34
+ "preemphasis": 0.97,
35
+ "ref_level_db": 20,
36
+ "do_sound_norm": false,
37
+ "do_trim_silence": false,
38
+ "trim_db": 60,
39
+ "power": 1.5,
40
+ "griffin_lim_iters": 60,
41
+ "num_mels": 64,
42
+ "mel_fmin": 0.0,
43
+ "mel_fmax": 8000.0,
44
+ "spec_gain": 20,
45
+ "signal_norm": false,
46
+ "min_level_db": -100,
47
+ "symmetric_norm": false,
48
+ "max_norm": 4.0,
49
+ "clip_norm": false,
50
+ "stats_path": null,
51
+ "do_rms_norm": true,
52
+ "db_level": -27.0
53
+ },
54
+ "datasets": [
55
+ {
56
+ "name": "voxceleb2",
57
+ "path": "/workspace/scratch/ecasanova/datasets/VoxCeleb/vox2_dev_aac/",
58
+ "meta_file_train": null,
59
+ "ununsed_speakers": null,
60
+ "meta_file_val": null,
61
+ "meta_file_attn_mask": "",
62
+ "language": "voxceleb"
63
+ }
64
+ ],
65
+ "model_params": {
66
+ "model_name": "resnet",
67
+ "input_dim": 64,
68
+ "use_torch_spec": true,
69
+ "log_input": true,
70
+ "proj_dim": 512
71
+ },
72
+ "audio_augmentation": {
73
+ "p": 0.5,
74
+ "rir": {
75
+ "rir_path": "/workspace/store/ecasanova/ComParE/RIRS_NOISES/simulated_rirs/",
76
+ "conv_mode": "full"
77
+ },
78
+ "additive": {
79
+ "sounds_path": "/workspace/store/ecasanova/ComParE/musan/",
80
+ "speech": {
81
+ "min_snr_in_db": 13,
82
+ "max_snr_in_db": 20,
83
+ "min_num_noises": 1,
84
+ "max_num_noises": 1
85
+ },
86
+ "noise": {
87
+ "min_snr_in_db": 0,
88
+ "max_snr_in_db": 15,
89
+ "min_num_noises": 1,
90
+ "max_num_noises": 1
91
+ },
92
+ "music": {
93
+ "min_snr_in_db": 5,
94
+ "max_snr_in_db": 15,
95
+ "min_num_noises": 1,
96
+ "max_num_noises": 1
97
+ }
98
+ },
99
+ "gaussian": {
100
+ "p": 0.0,
101
+ "min_amplitude": 0.0,
102
+ "max_amplitude": 1e-05
103
+ }
104
+ },
105
+ "storage": {
106
+ "sample_from_storage_p": 0.5,
107
+ "storage_size": 40
108
+ },
109
+ "max_train_step": 1000000,
110
+ "loss": "angleproto",
111
+ "grad_clip": 3.0,
112
+ "lr": 0.0001,
113
+ "lr_decay": false,
114
+ "warmup_steps": 4000,
115
+ "wd": 1e-06,
116
+ "steps_plot_stats": 100,
117
+ "num_speakers_in_batch": 100,
118
+ "num_utters_per_speaker": 4,
119
+ "skip_speakers": true,
120
+ "voice_len": 2.0
121
+ }
supplemental/language_ids.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "en": 0,
3
+ "fr-fr": 1,
4
+ "pt-br": 2
5
+ }
supplemental/model_se.pth.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f96efb20cbeeefd81fd8336d7f0155bf8902f82f9474e58ccb19d9e12345172
3
+ size 44610930
supplemental/speaker_ids.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "female-en-5": 0,
3
+ "female-en-5\n": 1,
4
+ "female-pt-4\n": 2,
5
+ "male-en-2": 3,
6
+ "male-en-2\n": 4,
7
+ "male-pt-3\n": 5,
8
+ "VCTK_p248": 6,
9
+ "VCTK_p251": 7,
10
+ "VCTK_p314": 8,
11
+ "VCTK_p323": 9,
12
+ "VCTK_p336": 10,
13
+ "VCTK_p347": 11,
14
+ "VCTK_p376": 12
15
+ }
supplemental/speakers-base.json ADDED
The diff for this file is too large to render. See raw diff
 
supplemental/speakers-combined.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:602a4fdf06e7747a455be7db565aa4f4d7908a4342b312089fb6f4e680a53cda
3
+ size 13565103
supplemental/speakers-dataset.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:440c0a3ef09bf372dc97afd0dc79fbdeb1ba897b5041fbc2b5d53d12f4803355
3
+ size 13384303
supplemental/speakers.json ADDED
Binary file (110 MB). View file