alialek committed
Commit 5313a4a · verified · 1 Parent(s): 46fe5a2

Upload folder using huggingface_hub
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<unk>": 37
+}
checkpoint/checkpoint-322000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86e6428230e9577e90b2d7b41579068b3b81a4b3f8c4fe881fc90e22275d4ec0
+size 145282144
checkpoint/checkpoint-322000/model_1.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f353c441de9bed5f56ec02c32d4e4dca74d47b1cd615ca32d92ee97d25354728
+size 187000136
checkpoint/checkpoint-322000/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a67cb92598aebdabebc6f1de30c39c1bd8bab8d7d773acd9a65654bb65ff6972
+size 291070844
checkpoint/checkpoint-322000/optimizer_1.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3961ec760ccf47766b4a432bd90101b7ab57752a3a14246e045617ed3c416d24
+size 374071772
checkpoint/checkpoint-322000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23d33dbc5408c38c2b3d0136e8f891e5158a3d68e1c13064d4ac4e023d17436a
+size 14472
checkpoint/checkpoint-322000/scaler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32147f54711815745fdc071a8a124294af40c0a2a1f8da6ba05c10c4b8dc4c77
+size 988
checkpoint/checkpoint-322000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1d2335ba34af16ab6d827b441ec3b888e7e1f8c320285cafa630de21f796933
+size 1000
checkpoint/checkpoint-322000/scheduler_1.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54737f9ca6d8fa46a696806ab6e21b11abb7685b5a137d0021440e43b0df094c
+size 1008
config.json ADDED
@@ -0,0 +1,108 @@
+{
+  "_name_or_path": "pytorch_dump",
+  "activation_dropout": 0.1,
+  "architectures": [
+    "VitsModelForPreTraining"
+  ],
+  "attention_dropout": 0.1,
+  "depth_separable_channels": 2,
+  "depth_separable_num_layers": 3,
+  "discriminator_kernel_size": 5,
+  "discriminator_period_channels": [
+    1,
+    32,
+    128,
+    512,
+    1024
+  ],
+  "discriminator_periods": [
+    2,
+    3,
+    5,
+    7,
+    11
+  ],
+  "discriminator_scale_channels": [
+    1,
+    16,
+    64,
+    256,
+    1024
+  ],
+  "discriminator_stride": 3,
+  "duration_predictor_dropout": 0.5,
+  "duration_predictor_filter_channels": 256,
+  "duration_predictor_flow_bins": 10,
+  "duration_predictor_kernel_size": 3,
+  "duration_predictor_num_flows": 4,
+  "duration_predictor_tail_bound": 5.0,
+  "ffn_dim": 768,
+  "ffn_kernel_size": 3,
+  "flow_size": 192,
+  "hidden_act": "relu",
+  "hidden_dropout": 0.1,
+  "hidden_size": 192,
+  "hop_length": 256,
+  "initializer_range": 0.02,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "leaky_relu_slope": 0.1,
+  "model_type": "vits",
+  "noise_scale": 0.667,
+  "noise_scale_duration": 0.8,
+  "num_attention_heads": 2,
+  "num_hidden_layers": 6,
+  "num_speakers": 1,
+  "posterior_encoder_num_wavenet_layers": 16,
+  "prior_encoder_num_flows": 4,
+  "prior_encoder_num_wavenet_layers": 4,
+  "resblock_dilation_sizes": [
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ],
+    [
+      1,
+      3,
+      5
+    ]
+  ],
+  "resblock_kernel_sizes": [
+    3,
+    7,
+    11
+  ],
+  "sampling_rate": 16000,
+  "segment_size": 8192,
+  "speaker_embedding_size": 0,
+  "speaking_rate": 1.0,
+  "spectrogram_bins": 513,
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.1",
+  "upsample_initial_channel": 512,
+  "upsample_kernel_sizes": [
+    16,
+    16,
+    4,
+    4
+  ],
+  "upsample_rates": [
+    8,
+    8,
+    2,
+    2
+  ],
+  "use_bias": true,
+  "use_stochastic_duration_prediction": true,
+  "vocab_size": 37,
+  "wavenet_dilation_rate": 1,
+  "wavenet_dropout": 0.0,
+  "wavenet_kernel_size": 5,
+  "window_size": 4
+}
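The config above describes a single-speaker VITS text-to-speech model (16 kHz output, 37-symbol vocabulary) saved through the VitsModelForPreTraining training wrapper. Below is a minimal inference sketch; the repo id is a placeholder (the actual repository name is not shown in this commit view), and loading the checkpoint with the plain VitsModel class from transformers is an assumption: any training-only weights (e.g. the discriminator) would simply be skipped with a warning.

# Minimal inference sketch, assuming a placeholder repo id and that the weights
# load into the plain VitsModel inference class.
import torch
from transformers import VitsModel, VitsTokenizer

repo_id = "alialek/<repo-name>"  # placeholder, not the actual repo id

tokenizer = VitsTokenizer.from_pretrained(repo_id)
model = VitsModel.from_pretrained(repo_id)

# Input is lower-cased Avar Cyrillic; the tokenizer is character-level (see vocab.json).
inputs = tokenizer("салам", return_tensors="pt")

with torch.no_grad():
    output = model(**inputs)

waveform = output.waveform[0]  # mono audio at config "sampling_rate" = 16000 Hz
print(waveform.shape, model.config.sampling_rate)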
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90744be4374cf121d6f0d274324f4dc9972389191087fe54cbd0315088d9be56
+size 332159944
preprocessor_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "feature_extractor_type": "VitsFeatureExtractor",
+  "feature_size": 80,
+  "hop_length": 256,
+  "max_wav_value": 32768.0,
+  "n_fft": 1024,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000
+}
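These preprocessor parameters define the 80-bin mel-spectrogram front end applied to the training audio. As an illustration only (an approximation, not the exact VitsFeatureExtractor implementation, whose padding and mel filter-bank details may differ), an equivalent transform could be built with torchaudio:

# Approximate mel-spectrogram transform built from the preprocessor parameters above.
import torch
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=16000,  # "sampling_rate"
    n_fft=1024,         # "n_fft"
    hop_length=256,     # "hop_length"
    n_mels=80,          # "feature_size"
)

waveform = torch.randn(1, 16000)  # one second of dummy audio
spec = mel(waveform)              # shape: (1, 80, ~63 frames)
print(spec.shape)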
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+{
+  "pad_token": {
+    "content": "о",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "add_blank": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "о",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "37": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "is_uroman": false,
+  "language": "ava",
+  "model_max_length": 1000000000000000019884624838656,
+  "normalize": true,
+  "pad_token": "о",
+  "phonemize": false,
+  "tokenizer_class": "VitsTokenizer",
+  "unk_token": "<unk>",
+  "verbose": false
+}
vocab.json ADDED
@@ -0,0 +1,39 @@
+{
+  " ": 33,
+  "-": 4,
+  "_": 20,
+  "а": 34,
+  "б": 15,
+  "в": 24,
+  "г": 16,
+  "д": 13,
+  "е": 21,
+  "ж": 8,
+  "з": 5,
+  "и": 30,
+  "й": 23,
+  "к": 31,
+  "л": 12,
+  "м": 11,
+  "н": 1,
+  "о": 0,
+  "п": 2,
+  "р": 35,
+  "с": 17,
+  "т": 29,
+  "у": 18,
+  "ф": 26,
+  "х": 3,
+  "ц": 36,
+  "ч": 25,
+  "ш": 32,
+  "щ": 27,
+  "ъ": 6,
+  "ь": 19,
+  "э": 7,
+  "ю": 9,
+  "я": 14,
+  "ё": 28,
+  "ӏ": 10,
+  "–": 22
+}
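vocab.json holds the 37-symbol character-level vocabulary for Avar ("language": "ava" in tokenizer_config.json); the pad token reuses the letter "о" at id 0, and <unk> is appended as id 37 via added_tokens.json. A small encoding sketch, using the same placeholder repo id as above: because "add_blank" is true, VitsTokenizer intersperses id 0 between the character ids.

# Encoding sketch for the character-level VitsTokenizer; repo id is a placeholder.
from transformers import VitsTokenizer

tokenizer = VitsTokenizer.from_pretrained("alialek/<repo-name>")  # placeholder repo id

# "салам" -> с=17, а=34, л=12, а=34, м=11 per vocab.json, with blanks (0) interleaved.
ids = tokenizer("салам")["input_ids"]
print(ids)  # expected pattern: [0, 17, 0, 34, 0, 12, 0, 34, 0, 11, 0]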