---
# speech-ulm-lstm / config.yaml
# Source: soumi-maiti — "Update model" (commit 908f278)
config: conf/train_lm_rnn_unit1024_nlayers3_dropout0.2.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/lm_train_lm_rnn_unit1024_nlayers3_dropout0.2_word
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: 2
dist_rank: 0
local_rank: 0
dist_master_addr: localhost
dist_master_port: 35485
dist_launcher: null
multiprocessing_distributed: true
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 40
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - valid
- loss
- min
keep_nbest_models: 1
nbest_averaging_interval: 0
grad_clip: 5.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: true
log_interval: null
use_matplotlib: true
use_tensorboard: true
create_graph_in_tensorboard: false
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: null
batch_size: 20
valid_batch_size: null
batch_bins: 10000000
valid_batch_bins: null
train_shape_file:
- exp/lm_stats_word/train/text_shape.word
valid_shape_file:
- exp/lm_stats_word/valid/text_shape.word
batch_type: numel
valid_batch_type: null
fold_length:
- 150
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
- - dump/raw/lm_train.txt
- text
- text
valid_data_path_and_name_and_type:
- - nunit_50_rep/lm_valid_50_rep.txt
- text
- text
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
lr: 0.002
scheduler: reducelronplateau
scheduler_conf:
mode: min
factor: 0.5
patience: 2
token_list:
- <blank>
- <unk>
- '7'
- '35'
- '44'
- '49'
- '20'
- '47'
- '0'
- '3'
- '46'
- '45'
- '31'
- '11'
- '28'
- '4'
- '37'
- '43'
- '26'
- '36'
- '18'
- '32'
- '5'
- '14'
- '33'
- '16'
- '9'
- '8'
- '17'
- '30'
- '24'
- '48'
- '21'
- '34'
- '6'
- '29'
- '38'
- '23'
- '39'
- '10'
- '27'
- '19'
- '40'
- '42'
- '25'
- '41'
- '12'
- '15'
- '1'
- '2'
- '13'
- '22'
- <sos/eos>
init: null
model_conf:
ignore_id: 0
use_preprocessor: true
token_type: word
bpemodel: null
non_linguistic_symbols: null
cleaner: null
g2p: null
lm: seq_rnn
lm_conf:
unit: 1024
nlayers: 3
dropout_rate: 0.2
required:
- output_dir
- token_list
version: '202207'
distributed: true