samitizerxu committed
Commit 1b22227
1 Parent(s): 8f63fa2

Adding model files

README.md CHANGED
@@ -1,3 +1,75 @@
- ---
- license: mit
- ---
+ ---
+ language:
+ - fr
+ license: apache-2.0
+ tags:
+ - automatic-speech-recognition
+ - common_voice
+ - generated_from_trainer
+ datasets:
+ - common_voice
+ model-index:
+ - name: wav2vec2-cls-r-300m-fr
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wav2vec2-cls-r-300m-fr
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the Common Voice French (FR) dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6521
+ - Wer: 0.4330
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 2.6773 | 0.8 | 500 | 1.3907 | 0.9864 |
+ | 0.9526 | 1.6 | 1000 | 0.7760 | 0.6448 |
+ | 0.6418 | 2.4 | 1500 | 0.7605 | 0.6194 |
+ | 0.5028 | 3.2 | 2000 | 0.6516 | 0.5322 |
+ | 0.4133 | 4.0 | 2500 | 0.6303 | 0.5097 |
+ | 0.3285 | 4.8 | 3000 | 0.6422 | 0.5062 |
+ | 0.2764 | 5.6 | 3500 | 0.5936 | 0.4748 |
+ | 0.2361 | 6.4 | 4000 | 0.6486 | 0.4683 |
+ | 0.2049 | 7.2 | 4500 | 0.6321 | 0.4532 |
+ | 0.176 | 8.0 | 5000 | 0.6230 | 0.4482 |
+ | 0.1393 | 8.8 | 5500 | 0.6595 | 0.4403 |
+ | 0.1141 | 9.6 | 6000 | 0.6552 | 0.4348 |
+
+
+ ### Framework versions
+
+ - Transformers 4.17.0.dev0
+ - Pytorch 1.10.2+cu102
+ - Datasets 1.18.2.dev0
+ - Tokenizers 0.11.0
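The card above ends with the training recipe, so here is a purely illustrative usage sketch (not part of this commit): it assumes the repository files are available locally under `./wav2vec2-cls-r-300m-fr` (the path recorded in tokenizer_config.json below) and that `speech` is a mono waveform already resampled to the 16 kHz expected by preprocessor_config.json.

```python
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Load the fine-tuned CTC model and its processor (feature extractor + tokenizer)
# from the files added in this commit. The local path is an assumption for illustration.
MODEL_DIR = "./wav2vec2-cls-r-300m-fr"
model = Wav2Vec2ForCTC.from_pretrained(MODEL_DIR)
processor = Wav2Vec2Processor.from_pretrained(MODEL_DIR)
model.eval()

def transcribe(speech):
    # `speech` is assumed to be a 1-D float array at 16 kHz (see preprocessor_config.json).
    inputs = processor(speech, sampling_rate=16000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    # Greedy CTC decoding; no external language model is shipped with this repo.
    return processor.batch_decode(predicted_ids)[0]
```

With greedy decoding and no language model, transcription quality should be roughly in line with the reported evaluation WER of about 0.43.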
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 98, "</s>": 99}
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 10.0,
+ "eval_loss": 0.6521482467651367,
+ "eval_runtime": 218.1002,
+ "eval_samples": 5000,
+ "eval_samples_per_second": 22.925,
+ "eval_steps_per_second": 2.866,
+ "eval_wer": 0.43299040254091004,
+ "train_loss": 0.5376352182006836,
+ "train_runtime": 6902.5833,
+ "train_samples": 10000,
+ "train_samples_per_second": 14.487,
+ "train_steps_per_second": 0.905
+ }
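For reference, these aggregate numbers are mutually consistent: 10 epochs over the 10,000 training samples is 100,000 sample passes in 6,902.58 s, i.e. about 14.49 samples/s and, with 6,250 optimizer steps in total, about 0.905 steps/s; on the evaluation side, 5,000 samples in 218.10 s gives about 22.9 samples/s, or roughly 2.87 batches/s at an eval batch size of 8.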
checkpoint-6150/config.json ADDED
@@ -0,0 +1,107 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 97,
+ "proj_codevector_dim": 768,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.17.0.dev0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 100,
+ "xvector_output_dim": 512
+ }
checkpoint-6150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e391e5dad53b8b60bd8c4a460a7c1eabc540d5582ab892d1c343b9c3f6f4c02
+ size 2490878993
checkpoint-6150/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
checkpoint-6150/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59c9e0f83e7501c8783bd45d93ecd4209d484071c98d1f18a478a107dda79627
+ size 1262333681
checkpoint-6150/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39cba0a35f70c679ee24cbb51c03174b7983f412e9e4e73b301590c71c9f3731
+ size 14567
checkpoint-6150/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ff3aba594397e8587e51b929faa9fb4267682c4a88bb15276335b362cc33cf1
+ size 559
checkpoint-6150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99462ccf6199e5f06244f5b4a9f506a469244bf1524a5410fc4bd4e3df533ea6
+ size 623
checkpoint-6150/trainer_state.json ADDED
@@ -0,0 +1,196 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 9.84,
+ "global_step": 6150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.000276096,
+ "loss": 2.6773,
+ "step": 500
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.390709400177002,
+ "eval_runtime": 221.8176,
+ "eval_samples_per_second": 22.541,
+ "eval_steps_per_second": 2.818,
+ "eval_wer": 0.9864438767290387,
+ "step": 500
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00025209599999999994,
+ "loss": 0.9526,
+ "step": 1000
+ },
+ {
+ "epoch": 1.6,
+ "eval_loss": 0.7760354280471802,
+ "eval_runtime": 219.0168,
+ "eval_samples_per_second": 22.829,
+ "eval_steps_per_second": 2.854,
+ "eval_wer": 0.644847982692352,
+ "step": 1000
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 0.00022809599999999998,
+ "loss": 0.6418,
+ "step": 1500
+ },
+ {
+ "epoch": 2.4,
+ "eval_loss": 0.760543942451477,
+ "eval_runtime": 217.3821,
+ "eval_samples_per_second": 23.001,
+ "eval_steps_per_second": 2.875,
+ "eval_wer": 0.6193928513889848,
+ "step": 1500
+ },
+ {
+ "epoch": 3.2,
+ "learning_rate": 0.000204096,
+ "loss": 0.5028,
+ "step": 2000
+ },
+ {
+ "epoch": 3.2,
+ "eval_loss": 0.6515631675720215,
+ "eval_runtime": 217.5138,
+ "eval_samples_per_second": 22.987,
+ "eval_steps_per_second": 2.873,
+ "eval_wer": 0.532187161959999,
+ "step": 2000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 0.00018009599999999998,
+ "loss": 0.4133,
+ "step": 2500
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 0.630308210849762,
+ "eval_runtime": 220.9761,
+ "eval_samples_per_second": 22.627,
+ "eval_steps_per_second": 2.828,
+ "eval_wer": 0.5097010287923772,
+ "step": 2500
+ },
+ {
+ "epoch": 4.8,
+ "learning_rate": 0.000156096,
+ "loss": 0.3285,
+ "step": 3000
+ },
+ {
+ "epoch": 4.8,
+ "eval_loss": 0.6422050595283508,
+ "eval_runtime": 216.2337,
+ "eval_samples_per_second": 23.123,
+ "eval_steps_per_second": 2.89,
+ "eval_wer": 0.5062487053787199,
+ "step": 3000
+ },
+ {
+ "epoch": 5.6,
+ "learning_rate": 0.00013209599999999998,
+ "loss": 0.2764,
+ "step": 3500
+ },
+ {
+ "epoch": 5.6,
+ "eval_loss": 0.5936103463172913,
+ "eval_runtime": 221.9715,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.816,
+ "eval_wer": 0.47476351584616444,
+ "step": 3500
+ },
+ {
+ "epoch": 6.4,
+ "learning_rate": 0.00010809599999999998,
+ "loss": 0.2361,
+ "step": 4000
+ },
+ {
+ "epoch": 6.4,
+ "eval_loss": 0.6486021280288696,
+ "eval_runtime": 217.2436,
+ "eval_samples_per_second": 23.016,
+ "eval_steps_per_second": 2.877,
+ "eval_wer": 0.4683191788073373,
+ "step": 4000
+ },
+ {
+ "epoch": 7.2,
+ "learning_rate": 8.4144e-05,
+ "loss": 0.2049,
+ "step": 4500
+ },
+ {
+ "epoch": 7.2,
+ "eval_loss": 0.6320939064025879,
+ "eval_runtime": 216.2222,
+ "eval_samples_per_second": 23.124,
+ "eval_steps_per_second": 2.891,
+ "eval_wer": 0.453198002255518,
+ "step": 4500
+ },
+ {
+ "epoch": 8.0,
+ "learning_rate": 6.014399999999999e-05,
+ "loss": 0.176,
+ "step": 5000
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.6229676604270935,
+ "eval_runtime": 217.7281,
+ "eval_samples_per_second": 22.964,
+ "eval_steps_per_second": 2.871,
+ "eval_wer": 0.44822665653985133,
+ "step": 5000
+ },
+ {
+ "epoch": 8.8,
+ "learning_rate": 3.6191999999999995e-05,
+ "loss": 0.1393,
+ "step": 5500
+ },
+ {
+ "epoch": 8.8,
+ "eval_loss": 0.6595410108566284,
+ "eval_runtime": 217.4753,
+ "eval_samples_per_second": 22.991,
+ "eval_steps_per_second": 2.874,
+ "eval_wer": 0.4402863126884393,
+ "step": 5500
+ },
+ {
+ "epoch": 9.6,
+ "learning_rate": 1.2192e-05,
+ "loss": 0.1141,
+ "step": 6000
+ },
+ {
+ "epoch": 9.6,
+ "eval_loss": 0.6552333831787109,
+ "eval_runtime": 216.5487,
+ "eval_samples_per_second": 23.089,
+ "eval_steps_per_second": 2.886,
+ "eval_wer": 0.43480862620543625,
+ "step": 6000
+ }
+ ],
+ "max_steps": 6250,
+ "num_train_epochs": 10,
+ "total_flos": 1.0577160442719683e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-6150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ca2ad775929458a40c80853a160c83c7e110384fc53cfba997ea42b133bea3d
+ size 3055
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 97,
+ "proj_codevector_dim": 768,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.17.0.dev0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 100,
+ "xvector_output_dim": 512
+ }
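Two values here tie the acoustic model to the tokenizer files in this commit: vocab_size is 100 (the 98 characters in vocab.json plus the two tokens in added_tokens.json), and pad_token_id 97, i.e. [PAD], doubles as the CTC blank index in Wav2Vec2ForCTC. A small hedged check of the resulting head shape, assuming this config.json is in the working directory:

```python
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

# Instantiate the architecture from the config alone (randomly initialized,
# nothing downloaded) to confirm the CTC projection: 1024 -> 100 logits per frame.
config = Wav2Vec2Config.from_json_file("config.json")
model = Wav2Vec2ForCTC(config)
print(model.lm_head)  # Linear(in_features=1024, out_features=100, bias=True)
```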
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 10.0,
+ "eval_loss": 0.6521482467651367,
+ "eval_runtime": 218.1002,
+ "eval_samples": 5000,
+ "eval_samples_per_second": 22.925,
+ "eval_steps_per_second": 2.866,
+ "eval_wer": 0.43299040254091004
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
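The feature extractor therefore expects mono float audio at 16 kHz and returns an attention mask alongside the normalized input values. A minimal sketch for getting an arbitrary clip into that shape, assuming torchaudio is available and `path` points at some local audio file:

```python
import torchaudio

def load_16khz_mono(path):
    # Decode the file, down-mix to mono, and resample to the 16 kHz rate
    # declared in preprocessor_config.json.
    waveform, sample_rate = torchaudio.load(path)   # shape: (channels, samples)
    waveform = waveform.mean(dim=0)                 # mono
    if sample_rate != 16000:
        waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)
    return waveform.numpy()
```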
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f7098ad58c48ecfa8b6566c9e3a08616a070645236244ae70a2c46d15b4a8c0
+ size 1262333681
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./wav2vec2-cls-r-300m-fr", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 10.0,
+ "train_loss": 0.5376352182006836,
+ "train_runtime": 6902.5833,
+ "train_samples": 10000,
+ "train_samples_per_second": 14.487,
+ "train_steps_per_second": 0.905
+ }
trainer_state.json ADDED
@@ -0,0 +1,205 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 10.0,
+ "global_step": 6250,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.8,
+ "learning_rate": 0.000276096,
+ "loss": 2.6773,
+ "step": 500
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 1.390709400177002,
+ "eval_runtime": 221.8176,
+ "eval_samples_per_second": 22.541,
+ "eval_steps_per_second": 2.818,
+ "eval_wer": 0.9864438767290387,
+ "step": 500
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 0.00025209599999999994,
+ "loss": 0.9526,
+ "step": 1000
+ },
+ {
+ "epoch": 1.6,
+ "eval_loss": 0.7760354280471802,
+ "eval_runtime": 219.0168,
+ "eval_samples_per_second": 22.829,
+ "eval_steps_per_second": 2.854,
+ "eval_wer": 0.644847982692352,
+ "step": 1000
+ },
+ {
+ "epoch": 2.4,
+ "learning_rate": 0.00022809599999999998,
+ "loss": 0.6418,
+ "step": 1500
+ },
+ {
+ "epoch": 2.4,
+ "eval_loss": 0.760543942451477,
+ "eval_runtime": 217.3821,
+ "eval_samples_per_second": 23.001,
+ "eval_steps_per_second": 2.875,
+ "eval_wer": 0.6193928513889848,
+ "step": 1500
+ },
+ {
+ "epoch": 3.2,
+ "learning_rate": 0.000204096,
+ "loss": 0.5028,
+ "step": 2000
+ },
+ {
+ "epoch": 3.2,
+ "eval_loss": 0.6515631675720215,
+ "eval_runtime": 217.5138,
+ "eval_samples_per_second": 22.987,
+ "eval_steps_per_second": 2.873,
+ "eval_wer": 0.532187161959999,
+ "step": 2000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 0.00018009599999999998,
+ "loss": 0.4133,
+ "step": 2500
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 0.630308210849762,
+ "eval_runtime": 220.9761,
+ "eval_samples_per_second": 22.627,
+ "eval_steps_per_second": 2.828,
+ "eval_wer": 0.5097010287923772,
+ "step": 2500
+ },
+ {
+ "epoch": 4.8,
+ "learning_rate": 0.000156096,
+ "loss": 0.3285,
+ "step": 3000
+ },
+ {
+ "epoch": 4.8,
+ "eval_loss": 0.6422050595283508,
+ "eval_runtime": 216.2337,
+ "eval_samples_per_second": 23.123,
+ "eval_steps_per_second": 2.89,
+ "eval_wer": 0.5062487053787199,
+ "step": 3000
+ },
+ {
+ "epoch": 5.6,
+ "learning_rate": 0.00013209599999999998,
+ "loss": 0.2764,
+ "step": 3500
+ },
+ {
+ "epoch": 5.6,
+ "eval_loss": 0.5936103463172913,
+ "eval_runtime": 221.9715,
+ "eval_samples_per_second": 22.525,
+ "eval_steps_per_second": 2.816,
+ "eval_wer": 0.47476351584616444,
+ "step": 3500
+ },
+ {
+ "epoch": 6.4,
+ "learning_rate": 0.00010809599999999998,
+ "loss": 0.2361,
+ "step": 4000
+ },
+ {
+ "epoch": 6.4,
+ "eval_loss": 0.6486021280288696,
+ "eval_runtime": 217.2436,
+ "eval_samples_per_second": 23.016,
+ "eval_steps_per_second": 2.877,
+ "eval_wer": 0.4683191788073373,
+ "step": 4000
+ },
+ {
+ "epoch": 7.2,
+ "learning_rate": 8.4144e-05,
+ "loss": 0.2049,
+ "step": 4500
+ },
+ {
+ "epoch": 7.2,
+ "eval_loss": 0.6320939064025879,
+ "eval_runtime": 216.2222,
+ "eval_samples_per_second": 23.124,
+ "eval_steps_per_second": 2.891,
+ "eval_wer": 0.453198002255518,
+ "step": 4500
+ },
+ {
+ "epoch": 8.0,
+ "learning_rate": 6.014399999999999e-05,
+ "loss": 0.176,
+ "step": 5000
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.6229676604270935,
+ "eval_runtime": 217.7281,
+ "eval_samples_per_second": 22.964,
+ "eval_steps_per_second": 2.871,
+ "eval_wer": 0.44822665653985133,
+ "step": 5000
+ },
+ {
+ "epoch": 8.8,
+ "learning_rate": 3.6191999999999995e-05,
+ "loss": 0.1393,
+ "step": 5500
+ },
+ {
+ "epoch": 8.8,
+ "eval_loss": 0.6595410108566284,
+ "eval_runtime": 217.4753,
+ "eval_samples_per_second": 22.991,
+ "eval_steps_per_second": 2.874,
+ "eval_wer": 0.4402863126884393,
+ "step": 5500
+ },
+ {
+ "epoch": 9.6,
+ "learning_rate": 1.2192e-05,
+ "loss": 0.1141,
+ "step": 6000
+ },
+ {
+ "epoch": 9.6,
+ "eval_loss": 0.6552333831787109,
+ "eval_runtime": 216.5487,
+ "eval_samples_per_second": 23.089,
+ "eval_steps_per_second": 2.886,
+ "eval_wer": 0.43480862620543625,
+ "step": 6000
+ },
+ {
+ "epoch": 10.0,
+ "step": 6250,
+ "total_flos": 1.0733519285960786e+19,
+ "train_loss": 0.5376352182006836,
+ "train_runtime": 6902.5833,
+ "train_samples_per_second": 14.487,
+ "train_steps_per_second": 0.905
+ }
+ ],
+ "max_steps": 6250,
+ "num_train_epochs": 10,
+ "total_flos": 1.0733519285960786e+19,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ca2ad775929458a40c80853a160c83c7e110384fc53cfba997ea42b133bea3d
+ size 3055
vocab.json ADDED
@@ -0,0 +1 @@
+ {"!": 1, "\"": 2, "&": 3, "'": 4, "(": 5, ")": 6, ",": 7, "-": 8, ".": 9, "/": 10, ":": 11, ";": 12, "=": 13, "?": 14, "^": 15, "_": 16, "a": 17, "b": 18, "c": 19, "d": 20, "e": 21, "f": 22, "g": 23, "h": 24, "i": 25, "j": 26, "k": 27, "l": 28, "m": 29, "n": 30, "o": 31, "p": 32, "q": 33, "r": 34, "s": 35, "t": 36, "u": 37, "v": 38, "w": 39, "x": 40, "y": 41, "z": 42, "{": 43, "}": 44, "«": 45, "°": 46, "º": 47, "»": 48, "½": 49, "×": 50, "à": 51, "á": 52, "â": 53, "ä": 54, "å": 55, "ç": 56, "è": 57, "é": 58, "ê": 59, "ë": 60, "í": 61, "î": 62, "ï": 63, "ñ": 64, "ó": 65, "ô": 66, "ö": 67, "ù": 68, "û": 69, "ü": 70, "ÿ": 71, "ă": 72, "ć": 73, "č": 74, "ę": 75, "ı": 76, "ł": 77, "ń": 78, "ō": 79, "ŏ": 80, "œ": 81, "ř": 82, "ś": 83, "š": 84, "ź": 85, "ž": 86, "̀": 87, "́": 88, "̂": 89, "–": 90, "—": 91, "’": 92, "…": 93, "ℤ": 94, "ℵ": 95, "|": 0, "[UNK]": 96, "[PAD]": 97}