# ############################################################################
# Hyperparameters: Conv-TasNet speech separation on the WHAMR! dataset
# (SpeechBrain HyperPyYAML format — uses !new:/!name:/!apply:/!ref tags,
# so this file must be loaded with hyperpyyaml, not a plain YAML loader).
#
# NOTE(review): the auto-generated anchor names (&id001 ...) and the fully
# expanded result paths (results/convtasnet-whamr/3/...) suggest this file
# was dumped after HyperPyYAML reference resolution rather than written by
# hand; all those paths derive from results/<experiment_name>/<seed>.
# ############################################################################

# Reproducibility: fixed RNG seed, applied to torch when the file is loaded.
seed: 3
__set_seed: !apply:torch.manual_seed [3]

# Dataset locations (cluster-specific absolute paths).
data_folder: /network/tmp1/subakany/whamr
# Clean WSJ0 training-speaker utterances — source pool for dynamic mixing.
base_folder_dm: /network/tmp1/subakany/wsj0-processed/si_tr_s/

# Experiment bookkeeping.
experiment_name: convtasnet-whamr
output_folder: results/convtasnet-whamr/3
train_log: results/convtasnet-whamr/3/train_log.txt
save_folder: results/convtasnet-whamr/3/save

# Manifest CSVs produced by the data-preparation step; set skip_prep to true
# once they exist to avoid re-running preparation.
train_data: results/convtasnet-whamr/3/save/whamr_tr.csv
valid_data: results/convtasnet-whamr/3/save/whamr_cv.csv
test_data: results/convtasnet-whamr/3/save/whamr_tt.csv
skip_prep: false

# Run-mode flags.
auto_mix_prec: false
test_only: false
num_spks: 2
progressbar: true
save_audio: false
sample_rate: 8000

# Training schedule and optimization.
N_epochs: 200
batch_size: 1
lr: 0.00015
clip_grad_norm: 5
# NOTE(review): presumably filters/caps abnormally large per-batch losses —
# confirm exact semantics in the training script.
loss_upper_lim: 999999

# Optional cap on training-signal length (value presumably in samples —
# TODO confirm against the training script).
limit_training_signal_len: false
training_signal_len: 32000000

# On-the-fly mixture creation from base_folder_dm sources.
dynamic_mixing: true
# Pre-generated room impulse responses used for reverberant (WHAMR!) mixing.
rir_path: /miniscratch/subakany/whamr_rirs_wav

# Loss thresholding.
# NOTE(review): with threshold_byloss true, the -30 value appears to act as a
# dB-level cutoff selecting which segments contribute to the loss — confirm.
threshold_byloss: true
threshold: -30

# Encoder/decoder geometry (mirrored by the module definitions below).
N_encoder_out: 256
out_channels: 256
kernel_size: 16
kernel_stride: 8

# DataLoader settings.
dataloader_opts:
    batch_size: 1
    num_workers: 3

# ----------------------------- Model modules ------------------------------ #

Encoder: &id001 !new:speechbrain.lobes.models.dual_path.Encoder
    kernel_size: 16
    out_channels: 256

# Conv-TasNet separation network; the single-letter hyperparameters follow
# the naming of speechbrain.lobes.models.conv_tasnet.MaskNet (and the
# Conv-TasNet paper) — see that class's docstring for their meanings.
MaskNet: &id003 !new:speechbrain.lobes.models.conv_tasnet.MaskNet
    N: 256
    B: 256
    H: 512
    P: 3
    X: 6
    R: 4
    C: 2
    norm_type: gLN
    causal: false
    mask_nonlinear: relu

Decoder: &id002 !new:speechbrain.lobes.models.dual_path.Decoder
    in_channels: 256
    out_channels: 1
    kernel_size: 16
    stride: 8
    bias: false

# Optimizer (constructed lazily via !name:), SI-SNR loss with permutation-
# invariant training, and LR halving on validation plateau.
optimizer: !name:torch.optim.Adam
    lr: 0.00015
    weight_decay: 0

loss: !name:speechbrain.nnet.losses.get_si_snr_with_pitwrapper

lr_scheduler: &id005 !new:speechbrain.nnet.schedulers.ReduceLROnPlateau
    factor: 0.5
    patience: 2
    dont_halve_until_epoch: 85

epoch_counter: &id004 !new:speechbrain.utils.epoch_loop.EpochCounter
    limit: 200

# Modules managed by the Brain class (aliases to the definitions above).
modules:
    encoder: *id001
    decoder: *id002
    masknet: *id003

save_all_checkpoints: true

# Checkpointing: everything needed to resume training exactly.
checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
    checkpoints_dir: results/convtasnet-whamr/3/save
    recoverables:
        encoder: *id001
        decoder: *id002
        masknet: *id003
        counter: *id004
        lr_scheduler: *id005

train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
    save_file: results/convtasnet-whamr/3/train_log.txt

# Optional pre-trained weight loading (targets resolved by HyperPyYAML).
pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        encoder: !ref <Encoder>
        masknet: !ref <MaskNet>
        decoder: !ref <Decoder>