End of training
Signed-off-by: jjyaoao <jjyaoao@126.com>
- .gitignore +1 -0
- README.md +60 -0
- added_tokens.json +4 -0
- app.py +114 -0
- config.json +92 -0
- preprocessor_config.json +19 -0
- pytorch_model.bin +3 -0
- requirements.txt +3 -0
- runs/Jul14_07-06-38_64fa198a07c3/events.out.tfevents.1689318514.64fa198a07c3.314.0 +3 -0
- runs/Jul16_12-21-55_48e7dd533140/events.out.tfevents.1689510259.48e7dd533140.399.0 +3 -0
- special_tokens_map.json +13 -0
- spm_char.model +3 -0
- tokenizer_config.json +11 -0
- training_args.bin +3 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+checkpoint-*/
README.md
CHANGED
@@ -1,3 +1,63 @@
 ---
 license: mit
+tags:
+- generated_from_trainer
+datasets:
+- common_voice_13_0
+model-index:
+- name: speecht5_finetuned_voxpopuli_nl
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# speecht5_finetuned_voxpopuli_nl
+
+This model is a fine-tuned version of [arham061/speecht5_finetuned_voxpopuli_nl](https://huggingface.co/arham061/speecht5_finetuned_voxpopuli_nl) on the common_voice_13_0 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.5508
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 1e-05
+- train_batch_size: 4
+- eval_batch_size: 2
+- seed: 42
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 500
+- training_steps: 3000
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.5058        | 7.74  | 1000 | 0.5431          |
+| 0.4938        | 15.49 | 2000 | 0.5487          |
+| 0.4909        | 23.23 | 3000 | 0.5508          |
+
+
+### Framework versions
+
+- Transformers 4.32.0.dev0
+- Pytorch 2.0.0+cu117
+- Datasets 2.13.1
+- Tokenizers 0.13.3
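The hyperparameters above map directly onto the Transformers `Seq2SeqTrainingArguments` API. A minimal sketch of that mapping, assuming a single GPU (so 4 x 8 = 32 effective batch size); the output directory and the checkpoint/eval cadence are assumptions, not recorded in this commit:

```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="speecht5_finetuned_voxpopuli_nl",  # hypothetical
    learning_rate=1e-5,
    per_device_train_batch_size=4,   # train_batch_size: 4
    per_device_eval_batch_size=2,    # eval_batch_size: 2
    gradient_accumulation_steps=8,   # 4 * 8 = total_train_batch_size 32
    warmup_steps=500,                # linear scheduler with warmup
    max_steps=3000,                  # training_steps: 3000
    seed=42,
    evaluation_strategy="steps",
    eval_steps=1000,                 # assumed from the 1000-step rows in the results table
)
```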
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "<ctc_blank>": 80,
+  "<mask>": 79
+}
app.py
ADDED
@@ -0,0 +1,114 @@
+import torch
+from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
+import soundfile as sf
+
+# Load the fine-tuned SpeechT5 TTS model from the Hugging Face Hub
+# (config.json declares SpeechT5ForTextToSpeech, not a Wav2Vec2 CTC model).
+model_name = "arham061/speecht5_finetuned_voxpopuli_nl"
+processor = SpeechT5Processor.from_pretrained(model_name)
+model = SpeechT5ForTextToSpeech.from_pretrained(model_name)
+vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+# Urdu Unicode to Roman (Buckwalter-style) mapping
+buck2uni = {
+    u"\u0627": "A",
+    u"\u0675": "A",
+    u"\u0673": "A",
+    u"\u0622": "AA",
+    u"\u0628": "B",
+    u"\u067E": "P",
+    u"\u062A": "T",
+    u"\u0637": "T",
+    u"\u0679": "T",
+    u"\u062C": "J",
+    u"\u0633": "S",
+    u"\u062B": "S",
+    u"\u0635": "S",
+    u"\u0686": "CH",
+    u"\u062D": "H",
+    u"\u0647": "H",
+    u"\u0629": "H",
+    u"\u06DF": "H",
+    u"\u062E": "KH",
+    u"\u062F": "D",
+    u"\u0688": "D",
+    u"\u0630": "Z",
+    u"\u0632": "Z",
+    u"\u0636": "Z",
+    u"\u0638": "Z",
+    u"\u068E": "Z",
+    u"\u0631": "R",
+    u"\u0691": "R",
+    u"\u0634": "SH",
+    u"\u063A": "GH",
+    u"\u0641": "F",
+    u"\u06A9": "K",
+    u"\u0642": "K",
+    u"\u06AF": "G",
+    u"\u0644": "L",
+    u"\u0645": "M",
+    u"\u0646": "N",
+    u"\u06BA": "N",
+    u"\u0648": "O",
+    u"\u0649": "Y",
+    u"\u0626": "Y",
+    u"\u06CC": "Y",
+    u"\u06D2": "E",
+    u"\u06C1": "H",
+    u"\u064A": "E",
+    u"\u06C2": "AH",
+    u"\u06BE": "H",
+    u"\u0639": "A",
+    u"\u0643": "K",
+    u"\u0621": "A",
+    u"\u0624": "O",
+    u"\u060C": "",  # separator (ulta comma)
+}
+
+def transString(string, reverse=0):
+    """Transliterate an Urdu Unicode string into its Roman (Buckwalter-style)
+    form. Set reverse=1 to map the Roman form back to Unicode."""
+    for k, v in buck2uni.items():
+        if not reverse:
+            string = string.replace(k, v)
+        else:
+            string = string.replace(v, k)
+    return string
+
+
+def generate_audio(text):
+    # Convert the Urdu input to its Roman transliteration.
+    roman_urdu = transString(text)
+
+    # Tokenize the transliterated text.
+    inputs = processor(text=roman_urdu, return_tensors="pt")
+
+    # SpeechT5 needs a 512-dim speaker embedding (config.json); a zero vector
+    # is a neutral placeholder, a real x-vector sounds more natural.
+    speaker_embeddings = torch.zeros(1, 512)
+
+    # Generate the waveform, decoding the mel spectrogram with HiFi-GAN.
+    with torch.no_grad():
+        speech = model.generate_speech(
+            inputs["input_ids"], speaker_embeddings, vocoder=vocoder
+        )
+
+    return speech.numpy()
+
+
+# Example usage
+def main():
+    # Get input text in Urdu
+    input_text_urdu = input("Enter text in Urdu: ")
+
+    # Generate audio
+    audio_output = generate_audio(input_text_urdu)
+
+    # Save audio as a .wav file; preprocessor_config.json specifies 16 kHz.
+    sf.write("output.wav", audio_output, samplerate=16000)
+
+    print("Audio generated and saved as 'output.wav'")
+
+
+if __name__ == "__main__":
+    main()
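As a quick, hypothetical smoke test for the transliteration step: the word سلام is U+0633 U+0644 U+0627 U+0645, and each code point maps through `buck2uni`:

```python
# Expected "SLAM": \u0633 -> S, \u0644 -> L, \u0627 -> A, \u0645 -> M.
assert transString(u"\u0633\u0644\u0627\u0645") == "SLAM"
```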
config.json
ADDED
@@ -0,0 +1,92 @@
+{
+  "_name_or_path": "arham061/speecht5_finetuned_voxpopuli_nl",
+  "activation_dropout": 0.1,
+  "apply_spec_augment": true,
+  "architectures": [
+    "SpeechT5ForTextToSpeech"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "decoder_attention_heads": 12,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.1,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 2,
+  "encoder_attention_heads": 12,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.1,
+  "encoder_layers": 12,
+  "encoder_max_relative_position": 160,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.0,
+  "guided_attention_loss_num_heads": 2,
+  "guided_attention_loss_scale": 10.0,
+  "guided_attention_loss_sigma": 0.4,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "is_encoder_decoder": true,
+  "layer_norm_eps": 1e-05,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "max_length": 1876,
+  "max_speech_positions": 1876,
+  "max_text_positions": 600,
+  "model_type": "speecht5",
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_mel_bins": 80,
+  "pad_token_id": 1,
+  "positional_dropout": 0.1,
+  "reduction_factor": 2,
+  "scale_embedding": false,
+  "speaker_embedding_dim": 512,
+  "speech_decoder_postnet_dropout": 0.5,
+  "speech_decoder_postnet_kernel": 5,
+  "speech_decoder_postnet_layers": 5,
+  "speech_decoder_postnet_units": 256,
+  "speech_decoder_prenet_dropout": 0.5,
+  "speech_decoder_prenet_layers": 2,
+  "speech_decoder_prenet_units": 256,
+  "torch_dtype": "float32",
+  "transformers_version": "4.30.2",
+  "use_cache": false,
+  "use_guided_attention_loss": true,
+  "vocab_size": 81
+}
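One consistency check worth noting: `vocab_size` is 81 because the character SentencePiece vocabulary is extended by the two entries in added_tokens.json (`<mask>` at 79, `<ctc_blank>` at 80). A sketch that verifies this against the Hub (assumes network access to the repo above):

```python
from transformers import SpeechT5Config, SpeechT5Tokenizer

repo = "arham061/speecht5_finetuned_voxpopuli_nl"
config = SpeechT5Config.from_pretrained(repo)
tokenizer = SpeechT5Tokenizer.from_pretrained(repo)

# len(tokenizer) counts the SentencePiece vocab plus added_tokens.json entries.
assert config.vocab_size == 81
assert len(tokenizer) == config.vocab_size
```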
preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "do_normalize": false,
+  "feature_extractor_type": "SpeechT5FeatureExtractor",
+  "feature_size": 1,
+  "fmax": 7600,
+  "fmin": 80,
+  "frame_signal_scale": 1.0,
+  "hop_length": 16,
+  "mel_floor": 1e-10,
+  "num_mel_bins": 80,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "processor_class": "SpeechT5Processor",
+  "reduction_factor": 2,
+  "return_attention_mask": true,
+  "sampling_rate": 16000,
+  "win_function": "hann_window",
+  "win_length": 64
+}
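These settings describe 80-bin mel spectrogram targets computed from 16 kHz audio with a 16 ms hop and a 64 ms Hann window. A sketch of feeding the processor one second of audio at that rate, following the documented SpeechT5 fine-tuning usage; the sine waveform and the text are placeholders:

```python
import numpy as np
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("arham061/speecht5_finetuned_voxpopuli_nl")

# One second of 16 kHz audio (synthetic placeholder signal).
waveform = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)

# audio_target produces mel-spectrogram labels with num_mel_bins = 80.
features = processor(
    text="SLAM", audio_target=waveform, sampling_rate=16000, return_tensors="pt"
)
print(features["labels"].shape)  # (1, n_frames, 80)
```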
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:554b3a098956923d4719fd3445dcda4896dc343fd5ebaea12277e0971d530795
+size 585485317
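This is a Git LFS pointer file: the weights themselves live in LFS storage, and the pointer records only their SHA-256 and byte size. A sketch for verifying a downloaded copy against the pointer (the local path is hypothetical):

```python
import hashlib
import os

path = "pytorch_model.bin"  # hypothetical local download

# Hash in 1 MiB chunks so the 585 MB file is never fully in memory.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == 585485317
assert digest.hexdigest() == "554b3a098956923d4719fd3445dcda4896dc343fd5ebaea12277e0971d530795"
```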
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch==2.0.0
+transformers==4.30.2
+soundfile==0.11.0
runs/Jul14_07-06-38_64fa198a07c3/events.out.tfevents.1689318514.64fa198a07c3.314.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb112cc6bc1f0058fd498ebe98dde9e48cb080b27e20659871723b3e15bfc882
+size 32233
runs/Jul16_12-21-55_48e7dd533140/events.out.tfevents.1689510259.48e7dd533140.399.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9de4c790d9ba5a969003b6e7a3ecef192a10c0c00dfcce13d3b352674246ce0c
+size 25700
special_tokens_map.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
spm_char.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fcc48f3e225f627b1641db410ceb0c8649bd2b0c982e150b03f8be3728ab560
+size 238473
tokenizer_config.json
ADDED
@@ -0,0 +1,11 @@
+{
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "model_max_length": 600,
+  "pad_token": "<pad>",
+  "processor_class": "SpeechT5Processor",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "SpeechT5Tokenizer",
+  "unk_token": "<unk>"
+}
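`model_max_length` is 600, matching `max_text_positions` in config.json, and the tokenizer is character-level (backed by spm_char.model). A short sketch of what that means in practice; the input string is illustrative:

```python
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("arham061/speecht5_finetuned_voxpopuli_nl")

# Character-level tokenization: roughly one id per character, plus </s>.
ids = tokenizer("SLAM").input_ids
print(len(ids))
print(tokenizer.model_max_length)  # 600
```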
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efa9e2e07460ea4899c70de1b01cd01ed09a7e9b687abead7dfcf18a2149fc47
+size 4155