LydiaSanyu committed on
Commit
c7614e5
1 Parent(s): 2b1f89b

Update hyperparams.yaml

Browse files
Files changed (1) hide show
  1. hyperparams.yaml +64 -9
hyperparams.yaml CHANGED
@@ -1,9 +1,64 @@
1
- # yamllint disable
2
- attn_loss: 0.0
3
- attn_weight: 0.0
4
- end-of-epoch: true
5
- epoch: 594
6
- gate_loss: 0.0002034485078183934
7
- loss: 0.30019739270210266
8
- mel_loss: 0.29999393224716187
9
- unixtime: 1661531278.4188352
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mask_padding: True
2
+ n_mel_channels: 80
3
+ n_symbols: 148
4
+ symbols_embedding_dim: 512
5
+ encoder_kernel_size: 5
6
+ encoder_n_convolutions: 3
7
+ encoder_embedding_dim: 512
8
+ attention_rnn_dim: 1024
9
+ attention_dim: 128
10
+ attention_location_n_filters: 32
11
+ attention_location_kernel_size: 31
12
+ n_frames_per_step: 1
13
+ decoder_rnn_dim: 1024
14
+ prenet_dim: 256
15
+ max_decoder_steps: 1000
16
+ gate_threshold: 0.5
17
+ p_attention_dropout: 0.1
18
+ p_decoder_dropout: 0.1
19
+ postnet_embedding_dim: 512
20
+ postnet_kernel_size: 5
21
+ postnet_n_convolutions: 5
22
+ decoder_no_early_stopping: False
23
+ sample_rate: 22050
24
+
25
+ # Model
26
+ model: !new:speechbrain.lobes.models.Tacotron2.Tacotron2
27
+ mask_padding: !ref <mask_padding>
28
+ n_mel_channels: !ref <n_mel_channels>
29
+ # symbols
30
+ n_symbols: !ref <n_symbols>
31
+ symbols_embedding_dim: !ref <symbols_embedding_dim>
32
+ # encoder
33
+ encoder_kernel_size: !ref <encoder_kernel_size>
34
+ encoder_n_convolutions: !ref <encoder_n_convolutions>
35
+ encoder_embedding_dim: !ref <encoder_embedding_dim>
36
+ # attention
37
+ attention_rnn_dim: !ref <attention_rnn_dim>
38
+ attention_dim: !ref <attention_dim>
39
+ # attention location
40
+ attention_location_n_filters: !ref <attention_location_n_filters>
41
+ attention_location_kernel_size: !ref <attention_location_kernel_size>
42
+ # decoder
43
+ n_frames_per_step: !ref <n_frames_per_step>
44
+ decoder_rnn_dim: !ref <decoder_rnn_dim>
45
+ prenet_dim: !ref <prenet_dim>
46
+ max_decoder_steps: !ref <max_decoder_steps>
47
+ gate_threshold: !ref <gate_threshold>
48
+ p_attention_dropout: !ref <p_attention_dropout>
49
+ p_decoder_dropout: !ref <p_decoder_dropout>
50
+ # postnet
51
+ postnet_embedding_dim: !ref <postnet_embedding_dim>
52
+ postnet_kernel_size: !ref <postnet_kernel_size>
53
+ postnet_n_convolutions: !ref <postnet_n_convolutions>
54
+ decoder_no_early_stopping: !ref <decoder_no_early_stopping>
55
+
56
+ # Function that converts the text into a sequence of valid characters.
57
+ text_to_sequence: !name:speechbrain.utils.text_to_sequence.text_to_sequence
58
+
59
+ modules:
60
+ model: !ref <model>
61
+
62
+ pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
63
+ loadables:
64
+ model: !ref <model>