# src/f5_tts/configs/E2TTS_Base_train.yaml
hydra:
  run:
    dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}
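  # With the values below, this interpolation resolves to something like (illustrative timestamp):
  #   ckpts/E2TTS_Base_vocos_pinyin_Emilia_ZH_EN/2025-01-01/12-00-00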

datasets:
  name: Emilia_ZH_EN  # dataset name
  batch_size_per_gpu: 38400  # 8 GPUs, 8 * 38400 = 307200
  batch_size_type: frame  # "frame" or "sample"
  max_samples: 64  # max sequences per batch when using frame-wise batch_size; 32 for small models, 64 for base models
  num_workers: 16
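  # Rough sizing sketch (simple arithmetic from the mel_spec values below, not from the repo docs):
  # at 24 kHz with hop_length 256, one mel frame covers 256 / 24000 ≈ 10.7 ms,
  # so 38400 frames ≈ 409.6 s of audio per GPU per batch, ~3276.8 s across 8 GPUs.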

optim:
  epochs: 15
  learning_rate: 7.5e-5
  num_warmup_updates: 20000  # warmup steps
  grad_accumulation_steps: 1  # note: updates = steps / grad_accumulation_steps
  max_grad_norm: 1.0  # gradient clipping
  bnb_optimizer: False  # whether to use the bitsandbytes 8-bit AdamW optimizer
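  # Worked example of the note above (just arithmetic, not a recommended setting):
  # with grad_accumulation_steps: 2, the 20000 warmup *updates* would span
  # 20000 * 2 = 40000 forward/backward steps; with 1 the two counts are identical.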

model:
  name: E2TTS_Base
  tokenizer: pinyin
  tokenizer_path: None  # if tokenizer is 'custom', path to the tokenizer vocab you want to use (should be a vocab.txt)
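  # Example for a custom tokenizer (hypothetical path, shown only to illustrate the two fields):
  #   tokenizer: custom
  #   tokenizer_path: data/my_dataset/vocab.txt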
  arch:
    dim: 1024
    depth: 24
    heads: 16
    ff_mult: 4
  mel_spec:
    target_sample_rate: 24000
    n_mel_channels: 100
    hop_length: 256
    win_length: 1024
    n_fft: 1024
    mel_spec_type: vocos  # 'vocos' or 'bigvgan'
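    # For reference (arithmetic from the values above): the mel frame rate is
    # 24000 / 256 = 93.75 frames/s, and the 1024-sample analysis window spans ~42.7 ms.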
  vocoder:
    is_local: False  # whether to use a local offline checkpoint
    local_path: None  # local vocoder path
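    # Example for an offline vocoder (hypothetical local path, adjust to wherever the
    # vocos/bigvgan checkpoint was actually downloaded):
    #   is_local: True
    #   local_path: ../checkpoints/vocos-mel-24khz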

ckpts:
  logger: wandb  # wandb | tensorboard | None
  save_per_updates: 50000  # save a checkpoint every this many updates
  last_per_steps: 5000  # save the rolling last checkpoint every this many steps
  save_dir: ckpts/${model.name}_${model.mel_spec.mel_spec_type}_${model.tokenizer}_${datasets.name}
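# Launch sketch (entry-point path assumed from the repo layout above, verify against the
# project README; the Hydra --config-name flag and key=value override syntax are standard):
#   accelerate launch src/f5_tts/train/train.py --config-name E2TTS_Base_train.yaml
#   accelerate launch src/f5_tts/train/train.py --config-name E2TTS_Base_train.yaml optim.epochs=10 datasets.num_workers=8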