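# Resolved training configuration for a 24 kHz HiFi-GAN (v1) vocoder, as dumped
# by the ParallelWaveGAN toolkit during training (keys are alphabetically
# sorted; `version: 0.5.5` presumably refers to the parallel_wavegan package).
# An assumed, typical way to resume this run with the parallel-wavegan-train
# CLI (flag names may differ across versions):
#   parallel-wavegan-train \
#     --config conf/hifigan.v1.yaml \
#     --train-dumpdir dump/train_no_dev/norm \
#     --dev-dumpdir dump/dev/norm \
#     --outdir exp/train_no_dev_hifigan.v1 \
#     --resume exp/train_no_dev_hifigan.v1/checkpoint-390000steps.pkl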
allow_cache: false
batch_max_steps: 8400
batch_size: 64
config: conf/hifigan.v1.yaml
dev_dumpdir: dump/dev/norm
dev_feats_scp: null
dev_segments: null
dev_wav_scp: null
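# Discriminator-side adversarial loss, optimizer (Adam, lr 2e-4, betas 0.5/0.9),
# and gradient clipping (-1 presumably disables clipping).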
discriminator_adv_loss_params:
  average_by_discriminators: false
discriminator_grad_norm: -1
discriminator_optimizer_params:
  betas:
  - 0.5
  - 0.9
  lr: 0.0002
  weight_decay: 0.0
discriminator_optimizer_type: Adam
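# HiFi-GAN discriminator: multi-period sub-discriminators (periods 2/3/5/7/11)
# combined with a 3-scale multi-scale discriminator; follow_official_norm is
# assumed to mirror the official HiFi-GAN normalization (spectral norm on the
# first scale discriminator, weight norm elsewhere).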
discriminator_params:
  follow_official_norm: true
  period_discriminator_params:
    bias: true
    channels: 32
    downsample_scales:
    - 3
    - 3
    - 3
    - 3
    - 1
    in_channels: 1
    kernel_sizes:
    - 5
    - 3
    max_downsample_channels: 1024
    nonlinear_activation: LeakyReLU
    nonlinear_activation_params:
      negative_slope: 0.1
    out_channels: 1
    use_spectral_norm: false
    use_weight_norm: true
  periods:
  - 2
  - 3
  - 5
  - 7
  - 11
  scale_discriminator_params:
    bias: true
    channels: 128
    downsample_scales:
    - 4
    - 4
    - 4
    - 4
    - 1
    in_channels: 1
    kernel_sizes:
    - 15
    - 41
    - 5
    - 3
    max_downsample_channels: 1024
    max_groups: 16
    nonlinear_activation: LeakyReLU
    nonlinear_activation_params:
      negative_slope: 0.1
    out_channels: 1
  scale_downsample_pooling: AvgPool1d
  scale_downsample_pooling_params:
    kernel_size: 4
    padding: 2
    stride: 2
  scales: 3
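# MultiStepLR halves (gamma 0.5) the discriminator learning rate at 200k/400k/600k/800k steps.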
discriminator_scheduler_params:
  gamma: 0.5
  milestones:
  - 200000
  - 400000
  - 600000
  - 800000
discriminator_scheduler_type: MultiStepLR
discriminator_train_start_steps: 0
discriminator_type: HiFiGANMultiScaleMultiPeriodDiscriminator
distributed: false
eval_interval_steps: 1000
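# Feature-matching loss: distance between discriminator feature maps of real and
# generated audio, here averaged over neither discriminators nor layers.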
feat_match_loss_params:
  average_by_discriminators: false
  average_by_layers: false
  include_final_outputs: false
fft_size: 2048
fmax: 7600
fmin: 80
format: hdf5
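# Generator-side adversarial loss and optimizer (same Adam settings as the discriminator).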
generator_adv_loss_params:
  average_by_discriminators: false
generator_grad_norm: -1
generator_optimizer_params:
  betas:
  - 0.5
  - 0.9
  lr: 0.0002
  weight_decay: 0.0
generator_optimizer_type: Adam
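# HiFi-GAN V1 generator: 80-dim mel input, 512 base channels, residual blocks
# with kernel sizes 3/7/11 and dilations 1/3/5, and four upsampling stages
# (scales 5*5*4*3 = 300 = hop_size, so each mel frame yields 300 waveform samples).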
generator_params:
  bias: true
  channels: 512
  in_channels: 80
  kernel_size: 7
  nonlinear_activation: LeakyReLU
  nonlinear_activation_params:
    negative_slope: 0.1
  out_channels: 1
  resblock_dilations:
  - - 1
    - 3
    - 5
  - - 1
    - 3
    - 5
  - - 1
    - 3
    - 5
  resblock_kernel_sizes:
  - 3
  - 7
  - 11
  upsample_kernel_sizes:
  - 10
  - 10
  - 8
  - 6
  upsample_scales:
  - 5
  - 5
  - 4
  - 3
  use_additional_convs: true
  use_weight_norm: true
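# Generator LR schedule mirrors the discriminator's: halve at 200k/400k/600k/800k steps.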
generator_scheduler_params:
  gamma: 0.5
  milestones:
  - 200000
  - 400000
  - 600000
  - 800000
generator_scheduler_type: MultiStepLR
generator_train_start_steps: 1
generator_type: HiFiGANGenerator
global_gain_scale: 1.0
hop_size: 300
lambda_adv: 1.0
lambda_aux: 45.0
lambda_feat_match: 2.0
log_interval_steps: 100
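# Mel-spectrogram reconstruction loss (used instead of the multi-resolution STFT
# loss, per use_mel_loss/use_stft_loss below), weighted by lambda_aux = 45.0 as
# in the HiFi-GAN paper, and computed on full-band (0-12 kHz) 80-bin mels.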
mel_loss_params:
  fft_size: 2048
  fmax: 12000
  fmin: 0
  fs: 24000
  hop_size: 300
  log_base: null
  num_mels: 80
  win_length: 1200
  window: hann
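# Data, feature-extraction, and run settings: 24 kHz audio with 80-band mel
# features (fft 2048, hop 300, win 1200, hann, 80-7600 Hz) read from HDF5 dumps;
# batch_max_steps: 8400 is assumed to be the per-item waveform crop length
# (0.35 s); training resumes from the 390k-step checkpoint and runs to 1M steps.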
num_mels: 80
num_save_intermediate_results: 4
num_workers: 6
outdir: exp/train_no_dev_hifigan.v1
pin_memory: true
pretrain: ''
rank: 0
remove_short_samples: false
resume: exp/train_no_dev_hifigan.v1/checkpoint-390000steps.pkl
sampling_rate: 24000
save_interval_steps: 10000
train_dumpdir: dump/train_no_dev/norm
train_feats_scp: null
train_max_steps: 1000000
train_segments: null
train_wav_scp: null
trim_frame_size: 1024
trim_hop_size: 256
trim_silence: false
trim_threshold_in_db: 20
use_feat_match_loss: true
use_mel_loss: true
use_stft_loss: false
verbose: 1
version: 0.5.5
win_length: 1200
window: hann