Kleber committed
Commit 89c855c · verified · 1 Parent(s): 67f12fb

Upload folder using huggingface_hub
best_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaf0629ee6895e6f9ddc834bb6d9d3e768bfa29fbffc8abf7747ba8414cf366c
+ size 1039966218
best_model_85756.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaf0629ee6895e6f9ddc834bb6d9d3e768bfa29fbffc8abf7747ba8414cf366c
+ size 1039966218
checkpoint_110000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55492c876d90b6498e9d18f2076c61d49f0394398a58cb8a01d22ace55d9bf5d
+ size 1039966218
checkpoint_115000.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76bac8f2bfca048e86ea9d779374222d2d7e7e672e36994437f589b69fea9e26
+ size 1039966218
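
The four .pth entries above are Git LFS pointer files; the ~1 GB checkpoints themselves live in LFS storage. A minimal sketch for fetching a checkpoint and its config with huggingface_hub (the repo_id is a placeholder, not stated anywhere in this commit):

from huggingface_hub import hf_hub_download

# repo_id is hypothetical -- point it at the repository this commit belongs to
ckpt_path = hf_hub_download(repo_id="<user>/<repo>", filename="best_model.pth")
cfg_path = hf_hub_download(repo_id="<user>/<repo>", filename="config.json")
print(ckpt_path, cfg_path)
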
config.json ADDED
@@ -0,0 +1,289 @@
+ {
+     "output_path": "/home/kk/storage",
+     "logger_uri": null,
+     "run_name": "YourTTS-finetuned-from-yourtts-base-male-finetuned-female_only",
+     "project_name": "YourTTS",
+     "run_description": "\n - Original YourTTS trained using VCTK dataset\n ",
+     "print_step": 50,
+     "plot_step": 100,
+     "model_param_stats": false,
+     "wandb_entity": "kleber",
+     "dashboard_logger": "wandb",
+     "save_on_interrupt": true,
+     "log_model_step": 1000,
+     "save_step": 5000,
+     "save_n_checkpoints": 2,
+     "save_checkpoints": true,
+     "save_all_best": false,
+     "save_best_after": 0,
+     "target_loss": "loss_1",
+     "print_eval": false,
+     "test_delay_epochs": 0,
+     "run_eval": true,
+     "run_eval_steps": null,
+     "distributed_backend": "nccl",
+     "distributed_url": "tcp://localhost:54321",
+     "mixed_precision": false,
+     "precision": "fp16",
+     "epochs": 1000,
+     "batch_size": 12,
+     "eval_batch_size": 12,
+     "grad_clip": [
+         1000,
+         1000
+     ],
+     "scheduler_after_epoch": true,
+     "lr": 0.001,
+     "optimizer": "AdamW",
+     "optimizer_params": {
+         "betas": [
+             0.8,
+             0.99
+         ],
+         "eps": 1e-09,
+         "weight_decay": 0.01
+     },
+     "lr_scheduler": null,
+     "lr_scheduler_params": {},
+     "use_grad_scaler": false,
+     "allow_tf32": false,
+     "cudnn_enable": true,
+     "cudnn_deterministic": false,
+     "cudnn_benchmark": false,
+     "training_seed": 54321,
+     "model": "vits",
+     "num_loader_workers": 8,
+     "num_eval_loader_workers": 0,
+     "use_noise_augment": false,
+     "audio": {
+         "fft_size": 1024,
+         "sample_rate": 16000,
+         "win_length": 1024,
+         "hop_length": 256,
+         "num_mels": 80,
+         "mel_fmin": 0.0,
+         "mel_fmax": null
+     },
+     "use_phonemes": false,
+     "phonemizer": "espeak",
+     "phoneme_language": "en",
+     "compute_input_seq_cache": true,
+     "text_cleaner": "multilingual_cleaners",
+     "enable_eos_bos_chars": false,
+     "test_sentences_file": "",
+     "phoneme_cache_path": null,
+     "characters": {
+         "characters_class": "TTS.tts.models.vits.VitsCharacters",
+         "vocab_dict": null,
+         "pad": "_",
+         "eos": "&",
+         "bos": "*",
+         "blank": null,
+         "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
+         "punctuations": "!'(),-.:;? ",
+         "phonemes": "",
+         "is_unique": true,
+         "is_sorted": true
+     },
+     "add_blank": true,
+     "batch_group_size": 48,
+     "loss_masking": null,
+     "min_audio_len": 1,
+     "max_audio_len": 640000,
+     "min_text_len": 1,
+     "max_text_len": Infinity,
+     "compute_f0": false,
+     "compute_energy": false,
+     "compute_linear_spec": true,
+     "precompute_num_workers": 12,
+     "start_by_longest": true,
+     "shuffle": false,
+     "drop_last": false,
+     "datasets": [
+         {
+             "formatter": "du",
+             "dataset_name": "du",
+             "path": "/home/kk/kinya_dataset",
+             "meta_file_train": "actress_train_manifest.tsv",
+             "ignored_speakers": null,
+             "language": "rw",
+             "phonemizer": "",
+             "meta_file_val": "actress_val_manifest.tsv",
+             "meta_file_attn_mask": ""
+         }
+     ],
+     "test_sentences": [
+         [
+             "Umunyamwuga w'ubuzima ashobora gufasha muribi:",
+             "Actress",
+             null,
+             "rw"
+         ],
+         [
+             "Ambasaderi yavuze ko AGUKA izagirwamo uruhare n'ibigo bikora imirimo itandukanye.",
+             "Actress",
+             null,
+             "rw"
+         ],
+         [
+             "Kuri iyi nshuro biratandukanye uraza ukayisaba ugahita uyitahana.",
+             "Actress",
+             null,
+             "rw"
+         ],
+         [
+             "Avuga ko muri ubu bukwe nta nzoga zigeze zihabwa abari babufitemo inshingano cyane cyane abambariye umugeni.",
+             "Actress",
+             null,
+             "rw"
+         ],
+         [
+             "Twe rero ikintu turi gukora cyane ni ukubyirinda.",
+             "Actress",
+             null,
+             "rw"
+         ]
+     ],
+     "eval_split_max_size": 256,
+     "eval_split_size": 0.01,
+     "use_speaker_weighted_sampler": false,
+     "speaker_weighted_sampler_alpha": 1.0,
+     "use_language_weighted_sampler": false,
+     "language_weighted_sampler_alpha": 1.0,
+     "use_length_weighted_sampler": false,
+     "length_weighted_sampler_alpha": 1.0,
+     "model_args": {
+         "num_chars": 165,
+         "out_channels": 513,
+         "spec_segment_size": 32,
+         "hidden_channels": 192,
+         "hidden_channels_ffn_text_encoder": 768,
+         "num_heads_text_encoder": 2,
+         "num_layers_text_encoder": 10,
+         "kernel_size_text_encoder": 3,
+         "dropout_p_text_encoder": 0.1,
+         "dropout_p_duration_predictor": 0.5,
+         "kernel_size_posterior_encoder": 5,
+         "dilation_rate_posterior_encoder": 1,
+         "num_layers_posterior_encoder": 16,
+         "kernel_size_flow": 5,
+         "dilation_rate_flow": 1,
+         "num_layers_flow": 4,
+         "resblock_type_decoder": "2",
+         "resblock_kernel_sizes_decoder": [
+             3,
+             7,
+             11
+         ],
+         "resblock_dilation_sizes_decoder": [
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ],
+             [
+                 1,
+                 3,
+                 5
+             ]
+         ],
+         "upsample_rates_decoder": [
+             8,
+             8,
+             2,
+             2
+         ],
+         "upsample_initial_channel_decoder": 512,
+         "upsample_kernel_sizes_decoder": [
+             16,
+             16,
+             4,
+             4
+         ],
+         "periods_multi_period_discriminator": [
+             2,
+             3,
+             5,
+             7,
+             11
+         ],
+         "use_sdp": true,
+         "noise_scale": 1.0,
+         "inference_noise_scale": 0.667,
+         "length_scale": 1,
+         "noise_scale_dp": 1.0,
+         "inference_noise_scale_dp": 1.0,
+         "max_inference_len": null,
+         "init_discriminator": true,
+         "use_spectral_norm_disriminator": false,
+         "use_speaker_embedding": false,
+         "num_speakers": 0,
+         "speakers_file": "/home/kk/storage/YourTTS-finetuned-from-yourtts-base-male-finetuned-female_only-July-13-2024_05+59PM-dbf1a08a/speakers.pth",
+         "d_vector_file": [
+             "/home/kk/kinya_dataset/speakers.pth"
+         ],
+         "speaker_embedding_channels": 256,
+         "use_d_vector_file": true,
+         "d_vector_dim": 512,
+         "detach_dp_input": true,
+         "use_language_embedding": false,
+         "embedded_language_dim": 4,
+         "num_languages": 0,
+         "language_ids_file": null,
+         "use_speaker_encoder_as_loss": false,
+         "speaker_encoder_config_path": "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json",
+         "speaker_encoder_model_path": "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar",
+         "condition_dp_on_speaker": true,
+         "freeze_encoder": false,
+         "freeze_DP": false,
+         "freeze_PE": false,
+         "freeze_flow_decoder": false,
+         "freeze_waveform_decoder": false,
+         "encoder_sample_rate": null,
+         "interpolate_z": true,
+         "reinit_DP": false,
+         "reinit_text_encoder": false
+     },
+     "lr_gen": 0.0002,
+     "lr_disc": 0.0002,
+     "lr_scheduler_gen": "ExponentialLR",
+     "lr_scheduler_gen_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "lr_scheduler_disc": "ExponentialLR",
+     "lr_scheduler_disc_params": {
+         "gamma": 0.999875,
+         "last_epoch": -1
+     },
+     "kl_loss_alpha": 1.0,
+     "disc_loss_alpha": 1.0,
+     "gen_loss_alpha": 1.0,
+     "feat_loss_alpha": 1.0,
+     "mel_loss_alpha": 45.0,
+     "dur_loss_alpha": 1.0,
+     "speaker_encoder_loss_alpha": 9.0,
+     "return_wav": true,
+     "use_weighted_sampler": true,
+     "weighted_sampler_attrs": {
+         "speaker_name": 1.0
+     },
+     "weighted_sampler_multipliers": {},
+     "r": 1,
+     "num_speakers": 0,
+     "use_speaker_embedding": false,
+     "speakers_file": "/home/kk/storage/YourTTS-finetuned-from-yourtts-base-male-finetuned-female_only-July-13-2024_05+59PM-dbf1a08a/speakers.pth",
+     "speaker_embedding_channels": 256,
+     "language_ids_file": null,
+     "use_language_embedding": false,
+     "use_d_vector_file": true,
+     "d_vector_file": [
+         "/home/kk/kinya_dataset/speakers.pth"
+     ],
+     "d_vector_dim": 512
+ }
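
The config above is everything needed to reload the fine-tuned model: a 16 kHz VITS/YourTTS setup conditioned on external 512-dimensional d-vectors ("use_d_vector_file": true) with a single "Actress" speaker and Kinyarwanda ("rw") data. A hedged sketch of local inference with the Coqui TTS Python API, assuming best_model.pth, config.json and speakers.pth sit in the working directory and that the d_vector_file / speakers_file paths inside config.json have been edited to point at the local speakers.pth:

from TTS.api import TTS

# Load the fine-tuned checkpoint with its config; paths are illustrative.
tts = TTS(model_path="best_model.pth", config_path="config.json")
# Speaker and language follow the test_sentences entries above; depending on the
# speaker/language managers they may be optional or need different values.
tts.tts_to_file(
    text="Twe rero ikintu turi gukora cyane ni ukubyirinda.",
    speaker="Actress",
    language="rw",
    file_path="sample.wav",
)
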
speakers.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6e56cd70f88a7b49de9c123fd938c95f6c4e9fea28d8c1d10a348528740143d
+ size 928
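
speakers.pth here is only 928 bytes, so its exact contents are not obvious from the commit alone (it could be a speaker-name map saved with the run or a d-vector dictionary like the one referenced by d_vector_file). A small sketch to inspect it, assuming it unpickles to a dict:

import torch

speakers = torch.load("speakers.pth", map_location="cpu")
print(type(speakers))
if isinstance(speakers, dict):
    # Show a few entries to see whether the values are plain IDs or embedding records
    for key, value in list(speakers.items())[:3]:
        print(key, type(value))
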
train.py ADDED
@@ -0,0 +1,254 @@
+ import os
+
+ import torch
+ from trainer import Trainer, TrainerArgs
+
+ from TTS.bin.compute_embeddings import compute_embeddings
+ from TTS.bin.resample import resample_files
+ from TTS.config.shared_configs import BaseDatasetConfig
+ from TTS.tts.configs.vits_config import VitsConfig
+ from TTS.tts.datasets import load_tts_samples
+ from TTS.tts.models.vits import CharactersConfig, Vits, VitsArgs, VitsAudioConfig
+ from TTS.utils.downloaders import download_vctk
+
+ torch.set_num_threads(24)
+
+ # pylint: disable=W0105
+ """
+ This recipe replicates the first experiment proposed in the YourTTS paper (https://arxiv.org/abs/2112.02418).
+ The YourTTS model is based on the VITS model; however, it uses external speaker embeddings extracted from a pre-trained speaker encoder and has small architecture changes.
+ In addition, YourTTS can be trained on multilingual data; however, this recipe replicates the single-language training using the VCTK dataset.
+ If you are interested in multilingual training, we have commented the parameters on the VitsArgs class instance that should be enabled for multilingual training.
+ In addition, you will need to add the extra datasets following the VCTK one as an example.
+ """
+ CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
+
+ # Name of the run for the Trainer
+ RUN_NAME = "YourTTS-finetuned-from-yourtts-base-male-finetuned-female_only"
+
+ # Path where you want to save the model's outputs (configs, checkpoints and tensorboard logs)
+ OUT_PATH = "/home/kk/storage"  # os.path.dirname(os.path.abspath(__file__))  # "/raid/coqui/Checkpoints/original-YourTTS/"
+
+ # If you want to do transfer learning and speed up your training, you can set here the path to the original YourTTS model
+ RESTORE_PATH = "/home/kk/storage/YourTTS-finetuned-from-yourtts-base-male-finetuned-female_only-July-13-2024_02+47AM-dbf1a08a/checkpoint_85000.pth"  # None  # "/root/.local/share/tts/tts_models--multilingual--multi-dataset--your_tts/model_file.pth"
+
+ # This parameter is useful for debugging: it skips the training epochs and just runs the evaluation and produces the test sentences
+ SKIP_TRAIN_EPOCH = False
+
+ # Set here the batch size to be used in training and evaluation
+ BATCH_SIZE = 12
+
+ # Training sampling rate and the target sampling rate for resampling the downloaded dataset (Note: if you change this you might need to redownload the dataset !!)
+ # Note: if you add new datasets, please make sure that the dataset sampling rate and this parameter match, otherwise resample your audios
+ SAMPLE_RATE = 16000
+
+ # Max audio length in seconds to be used in training (every audio longer than this will be ignored)
+ MAX_AUDIO_LEN_IN_SECONDS = 40
+
+ ### Download VCTK dataset
+ # VCTK_DOWNLOAD_PATH = os.path.join(CURRENT_PATH, "VCTK")
+ # Define the number of threads used during the audio resampling
+ NUM_RESAMPLE_THREADS = 10
+ # Check if the VCTK dataset is not already downloaded; if not, download it
+ # if not os.path.exists(VCTK_DOWNLOAD_PATH):
+ #     print(">>> Downloading VCTK dataset:")
+ #     download_vctk(VCTK_DOWNLOAD_PATH)
+ #     resample_files(VCTK_DOWNLOAD_PATH, SAMPLE_RATE, file_ext="flac", n_jobs=NUM_RESAMPLE_THREADS)
+
+ # init configs
+ vctk_config = BaseDatasetConfig(
+     formatter="du",
+     dataset_name="du",
+     meta_file_train="actress_train_manifest.tsv",
+     meta_file_val="actress_val_manifest.tsv",
+     path="/home/kk/kinya_dataset",
+     language="rw",
+     # ignored_speakers=[
+     #     "p261",
+     #     "p225",
+     #     "p294",
+     #     "p347",
+     #     "p238",
+     #     "p234",
+     #     "p248",
+     #     "p335",
+     #     "p245",
+     #     "p326",
+     #     "p302",
+     # ],  # Ignore the test speakers to fully replicate the paper experiment
+ )
+
+ # Add here all dataset configs; in this run we train only with the Kinyarwanda dataset defined above. Note: if you want to add new datasets, just add them here and the speaker embeddings (d-vectors) will be computed automatically for them :)
+ DATASETS_CONFIG_LIST = [vctk_config]
+
+ ### Extract speaker embeddings
+ SPEAKER_ENCODER_CHECKPOINT_PATH = (
+     "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/model_se.pth.tar"
+ )
+ SPEAKER_ENCODER_CONFIG_PATH = "https://github.com/coqui-ai/TTS/releases/download/speaker_encoder_model/config_se.json"
+
+ D_VECTOR_FILES = []  # List of speaker embeddings/d-vectors to be used during the training
+
+ # Iterate over all the dataset configs, checking if the speaker embeddings are already computed; if not, compute them
+ for dataset_conf in DATASETS_CONFIG_LIST:
+     # Check if the embeddings weren't already computed; if not, compute them
+     embeddings_file = os.path.join(dataset_conf.path, "speakers.pth")
+     if not os.path.isfile(embeddings_file):
+         print(f">>> Computing the speaker embeddings for the {dataset_conf.dataset_name} dataset")
+         compute_embeddings(
+             SPEAKER_ENCODER_CHECKPOINT_PATH,
+             SPEAKER_ENCODER_CONFIG_PATH,
+             embeddings_file,
+             old_speakers_file=None,
+             config_dataset_path=None,
+             formatter_name=dataset_conf.formatter,
+             dataset_name=dataset_conf.dataset_name,
+             dataset_path=dataset_conf.path,
+             meta_file_train=dataset_conf.meta_file_train,
+             meta_file_val=dataset_conf.meta_file_val,
+             disable_cuda=False,
+             no_eval=False,
+         )
+     D_VECTOR_FILES.append(embeddings_file)
+
+
+ # Audio config used in training.
+ audio_config = VitsAudioConfig(
+     sample_rate=SAMPLE_RATE,
+     hop_length=256,
+     win_length=1024,
+     fft_size=1024,
+     mel_fmin=0.0,
+     mel_fmax=None,
+     num_mels=80,
+ )
+
+ # Init VitsArgs, setting the arguments that are needed for the YourTTS model
+ model_args = VitsArgs(
+     d_vector_file=D_VECTOR_FILES,
+     use_d_vector_file=True,
+     d_vector_dim=512,
+     num_layers_text_encoder=10,
+     speaker_encoder_model_path=SPEAKER_ENCODER_CHECKPOINT_PATH,
+     speaker_encoder_config_path=SPEAKER_ENCODER_CONFIG_PATH,
+     resblock_type_decoder="2",  # In the paper, we accidentally trained YourTTS using ResNet blocks type 2; if you like, you can use ResNet blocks type 1 like the VITS model
+     # Useful parameters to enable the Speaker Consistency Loss (SCL) described in the paper
+     # use_speaker_encoder_as_loss=True,
+     # Useful parameters to enable multilingual training
+     # use_language_embedding=True,
+     # embedded_language_dim=4,
+ )
+
+ # General training config; here you can change the batch size and other useful parameters
+ config = VitsConfig(
+     output_path=OUT_PATH,
+     model_args=model_args,
+     run_name=RUN_NAME,
+     project_name="YourTTS",
+     run_description="""
+         - Original YourTTS trained using VCTK dataset
+     """,
+     dashboard_logger="wandb",
+     wandb_entity="kleber",
+     logger_uri=None,
+     audio=audio_config,
+     batch_size=BATCH_SIZE,
+     batch_group_size=48,
+     eval_batch_size=BATCH_SIZE,
+     num_loader_workers=8,
+     eval_split_max_size=256,
+     print_step=50,
+     plot_step=100,
+     log_model_step=1000,
+     save_step=5000,
+     save_n_checkpoints=2,
+     save_checkpoints=True,
+     target_loss="loss_1",
+     print_eval=False,
+     use_phonemes=False,
+     phonemizer="espeak",
+     phoneme_language="en",
+     compute_input_seq_cache=True,
+     add_blank=True,
+     text_cleaner="multilingual_cleaners",
+     characters=CharactersConfig(
+         characters_class="TTS.tts.models.vits.VitsCharacters",
+         pad="_",
+         eos="&",
+         bos="*",
+         blank=None,
+         characters="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\u00af\u00b7\u00df\u00e0\u00e1\u00e2\u00e3\u00e4\u00e6\u00e7\u00e8\u00e9\u00ea\u00eb\u00ec\u00ed\u00ee\u00ef\u00f1\u00f2\u00f3\u00f4\u00f5\u00f6\u00f9\u00fa\u00fb\u00fc\u00ff\u0101\u0105\u0107\u0113\u0119\u011b\u012b\u0131\u0142\u0144\u014d\u0151\u0153\u015b\u016b\u0171\u017a\u017c\u01ce\u01d0\u01d2\u01d4\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0451\u0454\u0456\u0457\u0491\u2013!'(),-.:;? ",
+         punctuations="!'(),-.:;? ",
+         phonemes="",
+         is_unique=True,
+         is_sorted=True,
+     ),
+     phoneme_cache_path=None,
+     precompute_num_workers=12,
+     start_by_longest=True,
+     datasets=DATASETS_CONFIG_LIST,
+     cudnn_benchmark=False,
+     max_audio_len=SAMPLE_RATE * MAX_AUDIO_LEN_IN_SECONDS,
+     mixed_precision=False,
+     test_sentences=[
+         [
+             "Umunyamwuga w'ubuzima ashobora gufasha muribi:",
+             "Actress",
+             None,
+             "rw",
+         ],
+         [
+             "Ambasaderi yavuze ko AGUKA izagirwamo uruhare n'ibigo bikora imirimo itandukanye.",
+             "Actress",
+             None,
+             "rw",
+         ],
+         [
+             "Kuri iyi nshuro biratandukanye uraza ukayisaba ugahita uyitahana.",
+             "Actress",
+             None,
+             "rw",
+         ],
+         [
+             "Avuga ko muri ubu bukwe nta nzoga zigeze zihabwa abari babufitemo inshingano cyane cyane abambariye umugeni.",
+             "Actress",
+             None,
+             "rw",
+         ],
+         [
+             "Twe rero ikintu turi gukora cyane ni ukubyirinda.",
+             "Actress",
+             None,
+             "rw",
+         ],
+     ],
+     # Enable the weighted sampler
+     use_weighted_sampler=True,
+     # Ensures that all speakers are seen in the training batch equally, no matter how many samples each speaker has
+     weighted_sampler_attrs={"speaker_name": 1.0},
+     weighted_sampler_multipliers={},
+     # Set the Speaker Consistency Loss (SCL) α to 9, as in the paper
+     speaker_encoder_loss_alpha=9.0,
+ )
+
+ # Load all the dataset samples and split training and evaluation sets
+ train_samples, eval_samples = load_tts_samples(
+     config.datasets,
+     eval_split=True,
+     eval_split_max_size=config.eval_split_max_size,
+     eval_split_size=config.eval_split_size,
+ )
+
+ # Init the model
+ model = Vits.init_from_config(config)
+
+ # Init the trainer and 🚀
+ trainer = Trainer(
+     TrainerArgs(restore_path=RESTORE_PATH, skip_train_epoch=SKIP_TRAIN_EPOCH),
+     config,
+     output_path=OUT_PATH,
+     model=model,
+     train_samples=train_samples,
+     eval_samples=eval_samples,
+ )
+ trainer.fit()
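
train.py hard-codes RESTORE_PATH to checkpoint_85000.pth from an earlier run. To continue from the newer checkpoints uploaded in this commit, or to only regenerate the evaluation output and test sentences, the two knobs at the top of the script would be edited roughly as below (the path is illustrative, not one recorded in this commit):

RESTORE_PATH = "/home/kk/storage/checkpoint_115000.pth"  # hypothetical local path to the downloaded checkpoint
SKIP_TRAIN_EPOCH = True  # skip training epochs and only run evaluation + test-sentence synthesis
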
trainer_0_log.txt ADDED
The diff for this file is too large to render.