Moustapha91 committed
Commit e9defbc
1 Parent(s): 2ba1e9b

End of training

README.md CHANGED
@@ -1,4 +1,5 @@
  ---
+ library_name: transformers
  license: mit
  base_model: microsoft/speecht5_tts
  tags:
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.3725
+ - Loss: 0.3868
 
  ## Model description
 
@@ -42,24 +43,22 @@ The following hyperparameters were used during training:
  - total_train_batch_size: 32
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
- - lr_scheduler_warmup_steps: 500
- - training_steps: 500
+ - lr_scheduler_warmup_steps: 5000
+ - num_epochs: 3
  - mixed_precision_training: Native AMP
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 0.6237 | 0.2540 | 100 | 0.5085 |
- | 0.5744 | 0.5079 | 200 | 0.4722 |
- | 0.4577 | 0.7619 | 300 | 0.3953 |
- | 0.4272 | 1.0159 | 400 | 0.3801 |
- | 0.4155 | 1.2698 | 500 | 0.3725 |
+ | 0.6522 | 0.9989 | 354 | 0.5330 |
+ | 0.5573 | 1.9979 | 708 | 0.4585 |
+ | 0.4417 | 2.9968 | 1062 | 0.3868 |
 
 
  ### Framework versions
 
- - Transformers 4.42.3
- - Pytorch 2.1.2
- - Datasets 2.20.0
+ - Transformers 4.44.2
+ - Pytorch 2.4.0
+ - Datasets 3.0.0
  - Tokenizers 0.19.1
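
For context, below is a minimal inference sketch for a SpeechT5 text-to-speech checkpoint fine-tuned like this one. The repo id is a placeholder (this commit does not name the published repository), and the random speaker embedding is only a stand-in for a real 512-dimensional x-vector.

```python
# Minimal inference sketch; `repo_id` is a placeholder, not the actual repository name.
import torch
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

repo_id = "Moustapha91/<finetuned-speecht5-repo>"  # placeholder

processor = SpeechT5Processor.from_pretrained(repo_id)
model = SpeechT5ForTextToSpeech.from_pretrained(repo_id)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello from the fine-tuned SpeechT5 model.", return_tensors="pt")

# SpeechT5 conditions generation on a 512-dim speaker embedding (x-vector).
# A random vector works as a smoke test; real usage should supply an embedding
# computed from a reference utterance of the target speaker.
speaker_embeddings = torch.randn(1, 512)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
# `speech` is a 1-D float tensor of 16 kHz audio samples.
```

Only the acoustic model is stored in this repository; the `microsoft/speecht5_hifigan` vocoder is the usual pairing for turning the predicted spectrogram into a waveform.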
config.json CHANGED
@@ -85,7 +85,7 @@
  "speech_decoder_prenet_layers": 2,
  "speech_decoder_prenet_units": 256,
  "torch_dtype": "float32",
- "transformers_version": "4.42.3",
+ "transformers_version": "4.44.2",
  "use_cache": false,
  "use_guided_attention_loss": true,
  "vocab_size": 81
generation_config.json CHANGED
@@ -5,5 +5,5 @@
  "eos_token_id": 2,
  "max_length": 1876,
  "pad_token_id": 1,
- "transformers_version": "4.42.3"
+ "transformers_version": "4.44.2"
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2f44f1dab7a1837fdd2b3889ccac8010ba40cba30856ff4f5dab962885bff8f7
+ oid sha256:d79601d2d95edc044d365df1b5ebdd6985cec0432295a052489d0570900d6c95
  size 577789320
runs/Oct15_17-08-29_d1eba1fcf911/events.out.tfevents.1729012112.d1eba1fcf911.30.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01aa172702387518c58a414d1c22e51f87a1c379f62d6667a8bc92a4efab20e6
+ size 16555
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7646afceb4953a76cfc29c81ca8283d930725cd1c7f75f8c566051d3318b5701
- size 5304
+ oid sha256:a87420c29df410df1e1cf8fcb12fbeb9006a38d0494b77319350525f494661a1
+ size 5368
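
`training_args.bin` is the pickled training-arguments object saved by the Trainer. As a hedged reconstruction of the configuration implied by the updated model card (warmup of 5000 steps, 3 epochs, total train batch size 32, linear schedule, native AMP), the sketch below fills in the values this commit does not show; the per-device batch size, gradient-accumulation split, learning rate, output directory, and logging/eval cadence are assumptions, not values taken from the diff.

```python
# Hedged reconstruction of the training configuration; fields marked "assumption"
# are not present in this commit and are illustrative only.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="speecht5_finetuned",   # assumption: output directory name
    per_device_train_batch_size=8,     # assumption: 8 x 4 accumulation = total 32
    gradient_accumulation_steps=4,     # assumption: only the total of 32 is reported
    learning_rate=1e-5,                # assumption: learning rate not shown in this diff
    warmup_steps=5000,                 # from the card: lr_scheduler_warmup_steps
    num_train_epochs=3,                # from the card: num_epochs
    lr_scheduler_type="linear",        # from the card
    fp16=True,                         # from the card: "Native AMP" mixed precision
    eval_strategy="epoch",             # assumption: matches one eval row per epoch in the table
    save_strategy="epoch",             # assumption
    logging_steps=25,                  # assumption
    report_to=["tensorboard"],         # consistent with the added runs/ tfevents file
)
```

Passing arguments like these to a `Seq2SeqTrainer` with the processor's data collator would set up a comparable run, assuming the same (unnamed) dataset used for this fine-tune.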