adam_beta1: 0.9
adam_beta2: 0.99
freeze_text_encoder: true
global_batch_size: 2
gradient_accumulation_steps: 0
learning_rate: 0.0001
lr_scheduler_type: constant_with_warmup
max_duration_in_seconds: 20.0
mixed_precision: fp16
model_name_or_path: parler-tts/parler-tts-mini-v1
num_train_epochs: 50.0
per_device_train_batch_size: 2
temperature: 1.0
warmup_steps: 50
weight_decay: 0.01
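
A minimal sketch of how such a config could be consumed, assuming it is saved as "config.yaml" (hypothetical filename) and that lr_scheduler_type is written as its plain string value (as above) rather than the serialized Python enum tag, so it is loadable with yaml.safe_load. This is not the training script itself, just an illustration of reading the values back and sanity-checking the batch geometry:

import yaml
from transformers.trainer_utils import SchedulerType

with open("config.yaml") as f:  # hypothetical path to the config above
    cfg = yaml.safe_load(f)

# The scheduler string maps back onto the transformers enum.
scheduler = SchedulerType(cfg["lr_scheduler_type"])
assert scheduler is SchedulerType.CONSTANT_WITH_WARMUP

# Sanity-check the batch geometry: gradient_accumulation_steps of 0 is
# read here as "no accumulation", i.e. an effective factor of 1, and the
# sizes imply a single-device run (an assumption, not stated in the config).
accum = max(cfg["gradient_accumulation_steps"], 1)
num_devices = 1  # assumption: single GPU
assert cfg["global_batch_size"] == (
    cfg["per_device_train_batch_size"] * num_devices * accum
)

print(f"lr={cfg['learning_rate']}, scheduler={scheduler}, "
      f"warmup_steps={cfg['warmup_steps']}, epochs={cfg['num_train_epochs']}")

With these values the check reads 2 == 2 * 1 * 1, consistent with a single-GPU run with no gradient accumulation.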