Tags: Text-to-Speech · TensorFlowTTS · audio · text-to-mel
dathudeptrai committed on
Commit
1ed2cd9
1 Parent(s): 1db491d

🖤 Update config, processor and checkpoint for Tacotron2 Thorsten German.

Files changed (3):
  1. config.yml +86 -0
  2. model.h5 +3 -0
  3. processor.json +1 -0
config.yml ADDED
@@ -0,0 +1,86 @@
+ # This is the hyperparameter configuration file for Tacotron2 v1.
+ # This is adjusted for the Thorsten dataset. If you want to
+ # apply it to another dataset, you might need to carefully change some parameters.
+ # This configuration runs 200k iters, but 65k iters is enough to get a good model.
+
+ ###########################################################
+ #               FEATURE EXTRACTION SETTING                #
+ ###########################################################
+ hop_size: 256 # Hop size.
+ format: "npy"
+
+
+ ###########################################################
+ #              NETWORK ARCHITECTURE SETTING               #
+ ###########################################################
+ model_type: "tacotron2"
+
+ tacotron2_params:
+     dataset: thorsten
+     embedding_hidden_size: 512
+     initializer_range: 0.02
+     embedding_dropout_prob: 0.1
+     n_speakers: 1
+     n_conv_encoder: 5
+     encoder_conv_filters: 512
+     encoder_conv_kernel_sizes: 5
+     encoder_conv_activation: 'relu'
+     encoder_conv_dropout_rate: 0.5
+     encoder_lstm_units: 256
+     n_prenet_layers: 2
+     prenet_units: 256
+     prenet_activation: 'relu'
+     prenet_dropout_rate: 0.5
+     n_lstm_decoder: 1
+     reduction_factor: 1
+     decoder_lstm_units: 1024
+     attention_dim: 128
+     attention_filters: 32
+     attention_kernel: 31
+     n_mels: 80
+     n_conv_postnet: 5
+     postnet_conv_filters: 512
+     postnet_conv_kernel_sizes: 5
+     postnet_dropout_rate: 0.1
+     attention_type: "lsa"
+
+ ###########################################################
+ #                  DATA LOADER SETTING                    #
+ ###########################################################
+ batch_size: 32 # Batch size for each GPU, assuming gradient_accumulation_steps == 1.
+ remove_short_samples: true # Whether to remove samples whose length is less than batch_max_steps.
+ allow_cache: true # Whether to cache the dataset. If true, it requires CPU memory.
+ mel_length_threshold: 32 # Remove all targets with mel_length <= 32.
+ is_shuffle: true # Shuffle the dataset after each epoch.
+ use_fixed_shapes: true # Use fixed shapes for training (2x speed-up);
+                        # see https://github.com/dathudeptrai/TensorflowTTS/issues/34#issuecomment-642309118
+
+ ###########################################################
+ #             OPTIMIZER & SCHEDULER SETTING               #
+ ###########################################################
+ optimizer_params:
+     initial_learning_rate: 0.001
+     end_learning_rate: 0.00001
+     decay_steps: 150000 # A value < train_max_steps is recommended.
+     warmup_proportion: 0.02
+     weight_decay: 0.001
+
+ gradient_accumulation_steps: 1
+ var_train_expr: null # Trainable-variable expression (e.g. 'embeddings|decoder_cell'),
+                      # with names separated by |. If var_train_expr is null,
+                      # all variables are trained.
+ ###########################################################
+ #                    INTERVAL SETTING                     #
+ ###########################################################
+ train_max_steps: 200000 # Number of training steps.
+ save_interval_steps: 2000 # Interval steps to save a checkpoint.
+ eval_interval_steps: 500 # Interval steps to evaluate the network.
+ log_interval_steps: 200 # Interval steps to record the training log.
+ start_schedule_teacher_forcing: 200001 # Set past train_max_steps, so scheduled teacher forcing is never applied.
+ start_ratio_value: 0.5 # Start ratio of scheduled teacher forcing.
+ schedule_decay_steps: 50000 # Decay steps of scheduled teacher forcing.
+ end_ratio_value: 0.0 # End ratio of scheduled teacher forcing.
+ ###########################################################
+ #                     OTHER SETTING                       #
+ ###########################################################
+ num_save_intermediate_results: 1 # Number of results to be saved as intermediate results.
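
With all three files in place, this checkpoint can be driven through TensorFlowTTS's auto classes. Below is a minimal text-to-mel inference sketch; the repo id tensorspeech/tts-tacotron2-thorsten-ger is an assumption, so point from_pretrained at wherever this commit actually lives:

```python
import tensorflow as tf
from tensorflow_tts.inference import AutoProcessor, TFAutoModel

# Assumed repo id for this commit; adjust if the files live elsewhere.
REPO = "tensorspeech/tts-tacotron2-thorsten-ger"
processor = AutoProcessor.from_pretrained(REPO)  # loads processor.json
tacotron2 = TFAutoModel.from_pretrained(REPO)    # loads config.yml + model.h5

# The symbol table in processor.json is plain ASCII, so stick to umlaut-free text.
text = "Hallo, wie geht es dir?"
input_ids = processor.text_to_sequence(text)

# Tacotron2 is text-to-mel: it outputs 80-bin mel spectrograms (n_mels: 80 above),
# which a separate vocoder then turns into a waveform.
decoder_output, mel_outputs, stop_token_prediction, alignment_history = tacotron2.inference(
    input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
    input_lengths=tf.convert_to_tensor([len(input_ids)], tf.int32),
    speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),  # n_speakers: 1, thorsten -> 0
)
```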
model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afcbb68abd2d3b74b32ffc5ed645362c6aec68b4b761a822b6379b69c0caf4b2
+ size 127989712
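
model.h5 is committed as a Git LFS pointer, so a plain clone without LFS fetches only the three-line stub above, not the ~128 MB checkpoint. A small sketch for resolving the pointer with huggingface_hub (the repo id is again an assumption):

```python
from huggingface_hub import hf_hub_download

# hf_hub_download follows the LFS pointer and returns a local path to the
# real 127,989,712-byte checkpoint (the size field of the pointer above).
ckpt_path = hf_hub_download(
    repo_id="tensorspeech/tts-tacotron2-thorsten-ger",  # assumed repo id
    filename="model.h5",
)
print(ckpt_path)
```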
processor.json ADDED
@@ -0,0 +1 @@
+ {"symbol_to_id": {"pad": 0, "-": 1, "!": 2, "'": 3, "(": 4, ")": 5, ",": 6, ".": 7, "?": 8, " ": 9, "A": 10, "B": 11, "C": 12, "D": 13, "E": 14, "F": 15, "G": 16, "H": 17, "I": 18, "J": 19, "K": 20, "L": 21, "M": 22, "N": 23, "O": 24, "P": 25, "Q": 26, "R": 27, "S": 28, "T": 29, "U": 30, "V": 31, "W": 32, "X": 33, "Y": 34, "Z": 35, "a": 36, "b": 37, "c": 38, "d": 39, "e": 40, "f": 41, "g": 42, "h": 43, "i": 44, "j": 45, "k": 46, "l": 47, "m": 48, "n": 49, "o": 50, "p": 51, "q": 52, "r": 53, "s": 54, "t": 55, "u": 56, "v": 57, "w": 58, "x": 59, "y": 60, "z": 61, "eos": 62}, "id_to_symbol": {"0": "pad", "1": "-", "2": "!", "3": "'", "4": "(", "5": ")", "6": ",", "7": ".", "8": "?", "9": " ", "10": "A", "11": "B", "12": "C", "13": "D", "14": "E", "15": "F", "16": "G", "17": "H", "18": "I", "19": "J", "20": "K", "21": "L", "22": "M", "23": "N", "24": "O", "25": "P", "26": "Q", "27": "R", "28": "S", "29": "T", "30": "U", "31": "V", "32": "W", "33": "X", "34": "Y", "35": "Z", "36": "a", "37": "b", "38": "c", "39": "d", "40": "e", "41": "f", "42": "g", "43": "h", "44": "i", "45": "j", "46": "k", "47": "l", "48": "m", "49": "n", "50": "o", "51": "p", "52": "q", "53": "r", "54": "s", "55": "t", "56": "u", "57": "v", "58": "w", "59": "x", "60": "y", "61": "z", "62": "eos"}, "speakers_map": {"thorsten": 0}, "processor_name": "ThorstenProcessor"}
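
The processor is a plain character-level tokenizer: symbol_to_id maps each symbol to an integer id (pad = 0, eos = 62), id_to_symbol is the inverse, and speakers_map pins the single speaker thorsten to id 0. A standalone sketch of the lookup, assuming ThorstenProcessor applies no normalization beyond this table:

```python
import json

with open("processor.json") as f:
    proc = json.load(f)

symbol_to_id = proc["symbol_to_id"]

# Character-by-character lookup, terminated with the eos symbol (id 62).
text = "Hallo, wie geht es dir?"
ids = [symbol_to_id[ch] for ch in text] + [symbol_to_id["eos"]]
print(ids[:7], "...", ids[-1])  # [17, 36, 47, 47, 50, 6, 9] ... 62
```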