Emmanuel Schmidbauer committed on
Commit d33d54e · 1 Parent(s): 8c8abff

add pretrained models

configs/tts_infer.yaml ADDED
@@ -0,0 +1,9 @@
+ default_v2:
+   bert_base_path: pretrained_models/chinese-roberta-wwm-ext-large
+   cnhuhbert_base_path: pretrained_models/chinese-hubert-base
+   device: cpu
+   is_half: false
+   t2s_weights_path: pretrained_models/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt
+   version: v2
+   vits_config_path: pretrained_models/s2_config.json
+   vits_weights_path: pretrained_models/s2G2333k.pth
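
For reference, a minimal sketch of reading this inference profile with PyYAML and resolving the paths it points at. The `load_tts_profile` helper name is hypothetical; only the `default_v2` keys visible in the diff above are assumed:

    import yaml  # pip install pyyaml

    def load_tts_profile(config_path="configs/tts_infer.yaml", profile="default_v2"):
        """Read the inference YAML and return one named profile as a dict."""
        with open(config_path, "r", encoding="utf-8") as f:
            cfg = yaml.safe_load(f)
        return cfg[profile]

    profile = load_tts_profile()
    print(profile["t2s_weights_path"])            # pretrained_models/s1bert25hz-...ckpt
    print(profile["device"], profile["is_half"])  # cpu False
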
pretrained_models/chinese-hubert-base/config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "_name_or_path": "/data/docker/liujing04/gpt-vits/chinese-hubert-base",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": true,
+   "architectures": [
+     "HubertModel"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": true,
+   "final_dropout": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "torch_dtype": "float16",
+   "transformers_version": "4.30.2",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
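
This is a standard Hugging Face `transformers` config, so the content encoder can be loaded straight from the local directory added in this commit. A minimal sketch; note the weights are stored as float16 ("torch_dtype" above), so casting to float32 is a safe default for CPU inference:

    from transformers import HubertModel

    model = HubertModel.from_pretrained("pretrained_models/chinese-hubert-base")
    model = model.float().eval()
    print(model.config.hidden_size, model.config.num_hidden_layers)  # 768 12
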
pretrained_models/chinese-hubert-base/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
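
The preprocessor declares a `Wav2Vec2FeatureExtractor` at 16 kHz, so reference audio must be resampled to 16 kHz before feature extraction. A sketch of the end-to-end path, using a silent placeholder waveform in place of real speech:

    import torch
    from transformers import Wav2Vec2FeatureExtractor, HubertModel

    path = "pretrained_models/chinese-hubert-base"
    extractor = Wav2Vec2FeatureExtractor.from_pretrained(path)
    model = HubertModel.from_pretrained(path).float().eval()

    wav = torch.zeros(16000)  # placeholder: one second of 16 kHz audio
    inputs = extractor(wav.numpy(), sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        hidden = model(inputs.input_values).last_hidden_state
    print(hidden.shape)  # (1, ~49, 768): one 768-dim frame per ~20 ms of audio
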
pretrained_models/chinese-hubert-base/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24164f129c66499d1346e2aa55f183250c223161ec2770c0da3d3b08cf432d3c
+ size 188811417
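
The three lines above are a Git LFS pointer, not the weights themselves: the actual ~188 MB blob is fetched on `git lfs pull`, and the recorded `oid` lets you verify the download. A quick integrity check, assuming the file has already been pulled (the same check works for every LFS pointer in this commit):

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        """Stream the file so large checkpoints don't need to fit in memory."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    expected = "24164f129c66499d1346e2aa55f183250c223161ec2770c0da3d3b08cf432d3c"
    assert sha256_of("pretrained_models/chinese-hubert-base/pytorch_model.bin") == expected
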
pretrained_models/chinese-roberta-wwm-ext-large/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large",
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float16",
+   "transformers_version": "4.30.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
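
A minimal sketch of loading this checkpoint and pulling per-token hidden states (the usual role of a BERT text encoder in this pipeline); `AutoModelForMaskedLM` resolves to the `BertForMaskedLM` architecture declared above, and the tokenizer loads from the `tokenizer.json` added below:

    import torch
    from transformers import AutoTokenizer, AutoModelForMaskedLM

    path = "pretrained_models/chinese-roberta-wwm-ext-large"
    tokenizer = AutoTokenizer.from_pretrained(path)
    bert = AutoModelForMaskedLM.from_pretrained(path).float().eval()

    inputs = tokenizer("你好", return_tensors="pt")
    with torch.no_grad():
        out = bert(**inputs, output_hidden_states=True)
    print(out.hidden_states[-1].shape)  # (1, seq_len, 1024), per "hidden_size" above
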
pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e53a693acc59ace251d143d068096ae0d7b79e4b1b503fa84c9dcf576448c1d8
+ size 651225145
pretrained_models/chinese-roberta-wwm-ext-large/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
pretrained_models/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:732f94e63b148066e24c7f9d2637f3374083e637635f07fbdb695dee20ddbe1f
+ size 155315150
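
This is the stage-1 text-to-semantic (t2s) checkpoint referenced by `t2s_weights_path` in `configs/tts_infer.yaml`, again stored as an LFS pointer. Once pulled, a hedged way to peek inside, assuming an ordinary PyTorch checkpoint dict (the top-level key names are not guaranteed):

    import torch

    ckpt = torch.load(
        "pretrained_models/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
        map_location="cpu",
    )
    # Typically a dict with entries like a state dict and a config; inspect before use.
    print(type(ckpt), list(ckpt.keys())[:5])
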
pretrained_models/s2D2333k.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ae7fe8dd8c8f2e718de359e00edac88b0c71ab2fd10b07ad4cc45070eb8a836
+ size 93534164
pretrained_models/s2G2333k.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:924fdccaa3c574bf139c25c9759aa1ed3b3f99e19a7c529ee996c2bc17663695
+ size 106035259
pretrained_models/s2_config.json ADDED
@@ -0,0 +1,101 @@
+ {
+   "train": {
+     "log_interval": 100,
+     "eval_interval": 500,
+     "seed": 1234,
+     "epochs": 8,
+     "learning_rate": 0.0001,
+     "betas": [
+       0.8,
+       0.99
+     ],
+     "eps": 1e-09,
+     "batch_size": 11,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 20480,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "text_low_lr_rate": 0.4,
+     "pretrained_s2G": "pretrained_models/s2G2333k.pth",
+     "pretrained_s2D": "pretrained_models/s2D2333k.pth",
+     "if_save_latest": true,
+     "if_save_every_weights": true,
+     "save_every_epoch": 4,
+     "gpu_numbers": "0"
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 32000,
+     "filter_length": 2048,
+     "hop_length": 640,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null,
+     "add_blank": true,
+     "n_speakers": 300,
+     "cleaned_text": true,
+     "exp_dir": "logs/s2"
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [
+       3,
+       7,
+       11
+     ],
+     "resblock_dilation_sizes": [
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ]
+     ],
+     "upsample_rates": [
+       10,
+       8,
+       2,
+       2,
+       2
+     ],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [
+       16,
+       16,
+       8,
+       2,
+       2
+     ],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 512,
+     "semantic_frame_rate": "25hz",
+     "freeze_quantizer": true,
+     "version": "v2"
+   },
+   "s2_ckpt_dir": "logs/s2/big2k1",
+   "content_module": "cnhubert",
+   "save_weight_dir": "SoVITS_weights_v2",
+   "name": "big2k1",
+   "version": "v2"
+ }
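
One internal consistency worth noting in this config: the vocoder's `upsample_rates` must multiply out to `hop_length`, so each spectral frame at 32000 / 640 = 50 Hz maps to exactly 640 output samples, and the 25 Hz semantic tokens ("semantic_frame_rate") cover two frames each. A quick check in Python:

    import json
    from math import prod

    with open("pretrained_models/s2_config.json") as f:
        cfg = json.load(f)

    data, model = cfg["data"], cfg["model"]
    assert prod(model["upsample_rates"]) == data["hop_length"]  # 10*8*2*2*2 == 640
    frame_rate = data["sampling_rate"] / data["hop_length"]     # 32000 / 640 == 50.0
    print(frame_rate, frame_rate / 25.0)                        # 50.0 2.0
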