```yaml
adam_beta1: 0.9
adam_beta2: 0.99
freeze_text_encoder: true
global_batch_size: 16
gradient_accumulation_steps: 1
learning_rate: 0.0001
lr_scheduler_type: constant_with_warmup
max_duration_in_seconds: 20.0
mixed_precision: fp16
model_name_or_path: parler-tts/parler-tts-mini-v1
num_train_epochs: 500.0
per_device_train_batch_size: 16
temperature: 1.0
warmup_steps: 50
weight_decay: 0.01
```
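Note that with `per_device_train_batch_size: 16` and `gradient_accumulation_steps: 1`, the `global_batch_size` of 16 implies training on a single device. As a minimal sketch of how these values map onto a standard `transformers`-style optimizer and scheduler setup (the actual Parler-TTS training script may wire this up differently; `model` below is a hypothetical stand-in):

```python
import torch
from transformers import get_scheduler

# Hypothetical placeholder for the model being fine-tuned.
model = torch.nn.Linear(8, 8)

# AdamW configured from the hyperparameters above.
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=1e-4,            # learning_rate
    betas=(0.9, 0.99),  # adam_beta1, adam_beta2
    weight_decay=0.01,  # weight_decay
)

# constant_with_warmup: the learning rate ramps up linearly over
# `warmup_steps`, then stays constant for the rest of training.
scheduler = get_scheduler(
    "constant_with_warmup",
    optimizer=optimizer,
    num_warmup_steps=50,  # warmup_steps
)
```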