# Seeds and output locations
seed: 1994
__set_seed: !!python/object/apply:torch.manual_seed [1234]
output_folder: results/non_semi_final_stac
wer_file: results/non_semi_final_stac/wer.txt
save_folder: results/non_semi_final_stac/save
train_log: results/non_semi_final_stac/train_log.txt
|
# Data files
data_folder: junk
train_tsv_file: junk/train.tsv
dev_tsv_file: junk/dev.tsv
test_tsv_file: junk/test.tsv
accented_letters: true

csv_folder: /gpfsscratch/rech/nou/uzn19yk/switched_data/extended_clean/
train_csv: /gpfsscratch/rech/nou/uzn19yk/switched_data/extended_clean//train.csv
valid_csv: /gpfsscratch/rech/nou/uzn19yk/switched_data/extended_clean//dev.csv
test_csv:
- all_tests/cs_test.csv
- all_tests/stac_test.csv

# Utterance-length filtering (seconds)
avoid_if_longer_than: 13.0
avoid_if_shorter_than: 0.5
|
# Training parameters
number_of_epochs: 20
lr: 0.0002
lr_weights: 0.01
sorting: ascending
auto_mix_prec: false
sample_rate: 16000
language_modelling: true
ngram_lm_path: arpas/pluslanguages_everything.arpa
|
# Batch sizes and dataloader options
batch_size: 3
test_batch_size: 4

dataloader_options:
  batch_size: 3
  num_workers: 6
test_dataloader_options:
  batch_size: 4
  num_workers: 6
|
# Model parameters
activation: !name:torch.nn.Sigmoid
dnn_layers: 1
dnn_neurons: 768
freeze_encoder: true

output_neurons: 76
|
epoch_counter: &id006 !new:speechbrain.utils.epoch_loop.EpochCounter
  limit: 20

encoder_dim: 3217
enc: &id001 !new:speechbrain.nnet.RNN.LSTM
  input_shape: [null, null, 3217]
  num_layers: 2
  bidirectional: true
  dropout: 0.2
  hidden_size: 1024

ctc_lin: &id002 !new:speechbrain.nnet.linear.Linear
  input_size: 2048
  n_neurons: 76

log_softmax: !new:speechbrain.nnet.activations.Softmax
  apply_log: true

ctc_cost: !name:speechbrain.nnet.losses.ctc_loss
  blank_index: 0

modules:
  enc: *id001
  ctc_lin: *id002

model: &id003 !new:torch.nn.ModuleList
- [*id001, *id002]
|
model_opt_class: !name:torch.optim.Adam |
|
lr: 0.0002 |
|
|
|
weights_opt_class: !name:torch.optim.Adam |
|
lr: 0.01 |
|
|
|
lr_annealing_model: &id004 !new:speechbrain.nnet.schedulers.NewBobScheduler |
|
initial_value: 0.0002 |
|
improvement_threshold: 0.0025 |
|
annealing_factor: 0.8 |
|
patient: 0 |
|
|
|
lr_annealing_weights: &id005 !new:speechbrain.nnet.schedulers.NewBobScheduler |
|
initial_value: 0.01 |
|
improvement_threshold: 0.0025 |
|
annealing_factor: 0.9 |
|
patient: 0 |
|
|
|
label_encoder: &id007 !new:speechbrain.dataio.encoder.CTCTextEncoder |
|
checkpointer: !new:speechbrain.utils.checkpoints.Checkpointer
  checkpoints_dir: results/non_semi_final_stac/save
  recoverables:
    model: *id003
    scheduler_model: *id004
    scheduler_encoder: *id005
    counter: *id006
    tokenizer: *id007

blank_index: 0
unk_index: 1
|
# Logging and metrics
train_logger: !new:speechbrain.utils.train_logger.FileTrainLogger
  save_file: results/non_semi_final_stac/train_log.txt

error_rate_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats

cer_computer: !name:speechbrain.utils.metric_stats.ErrorRateStats
  split_tokens: true