yosthin06 committed on
Commit 7e5e3e2
1 Parent(s): 4e45445

End of training

Files changed (2)
  1. README.md +16 -16
  2. generation_config.json +3 -3
README.md CHANGED
@@ -16,13 +16,10 @@ model-index:
     dataset:
       name: PolyAI/minds14
       type: PolyAI/minds14
-      config: en-US
-      split: train
-      args: en-US
     metrics:
     - name: Wer
       type: wer
-      value: 36.12750885478158
+      value: 0.33530106257378983
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,9 +29,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the PolyAI/minds14 dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6316
-- Wer Ortho: 35.7187
-- Wer: 36.1275
+- Loss: 0.5824
+- Wer Ortho: 0.3424
+- Wer: 0.3353
 
 ## Model description
 
@@ -60,19 +57,22 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant_with_warmup
 - lr_scheduler_warmup_steps: 50
-- training_steps: 500
-- mixed_precision_training: Native AMP
+- training_steps: 250
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Wer Ortho | Wer     |
-|:-------------:|:-----:|:----:|:---------------:|:---------:|:-------:|
-| 0.0012        | 17.86 | 500  | 0.6316          | 35.7187   | 36.1275 |
+| Training Loss | Epoch | Step | Validation Loss | Wer Ortho | Wer    |
+|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|
+| 0.3677        | 1.72  | 50   | 0.5198          | 0.3849    | 0.3648 |
+| 0.1925        | 3.45  | 100  | 0.5038          | 0.3671    | 0.3518 |
+| 0.0836        | 5.17  | 150  | 0.5206          | 0.3547    | 0.3406 |
+| 0.0265        | 6.9   | 200  | 0.5520          | 0.3627    | 0.3518 |
+| 0.008         | 8.62  | 250  | 0.5824          | 0.3424    | 0.3353 |
 
 
 ### Framework versions
 
-- Transformers 4.35.2
-- Pytorch 2.1.0+cu118
-- Datasets 2.15.0
-- Tokenizers 0.15.0
+- Transformers 4.32.0
+- Pytorch 2.1.0+cu121
+- Datasets 2.16.1
+- Tokenizers 0.13.3
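
For context on the metric change above: the new card reports WER as a fraction (0.3353, roughly 33.5%), while the previous card used a percent scale (36.1275). Below is a minimal sketch of how such numbers are typically computed with the `evaluate` library; the "Wer Ortho" vs "Wer" split usually means scoring raw text versus text passed through Whisper's basic normalizer. The example transcripts and the normalizer choice are assumptions for illustration, not outputs or code from this repository.

```python
import evaluate
from transformers.models.whisper.english_normalizer import BasicTextNormalizer

wer_metric = evaluate.load("wer")
normalizer = BasicTextNormalizer()

# Hypothetical decoded outputs and references, for illustration only.
predictions = ["I'd like to check my account balance, please."]
references = ["i would like to check my account balance please"]

# Orthographic WER: raw text, so casing and punctuation count as errors.
wer_ortho = wer_metric.compute(predictions=predictions, references=references)

# Normalized WER: both sides go through the basic text normalizer first.
wer = wer_metric.compute(
    predictions=[normalizer(p) for p in predictions],
    references=[normalizer(r) for r in references],
)

# evaluate's WER is a fraction; the card's 0.3353 is on this scale,
# and the previous card's 36.1275 was the same quantity times 100.
print(f"WER ortho: {wer_ortho:.4f}  WER: {wer:.4f}")
```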
generation_config.json CHANGED
@@ -35,7 +35,7 @@
   "forced_decoder_ids": [
     [
       1,
-      50322
+      50259
     ],
     [
       2,
@@ -148,7 +148,7 @@
     "<|yo|>": 50325,
     "<|zh|>": 50260
   },
-  "language": "sinhalese",
+  "language": "english",
   "max_initial_timestamp_index": 1,
   "max_length": 448,
   "no_timestamps_token_id": 50363,
@@ -249,5 +249,5 @@
     "transcribe": 50359,
     "translate": 50358
   },
-  "transformers_version": "4.35.2"
+  "transformers_version": "4.32.0"
 }
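
One note on the generation_config.json change: position 1 of forced_decoder_ids moves from 50322 to 50259, and "language" moves from "sinhalese" to "english". In Whisper's language-token map (the same map this file contains, e.g. "<|zh|>": 50260), 50322 is <|si|> and 50259 is <|en|>, so generation now forces English transcription, consistent with the PolyAI/minds14 en-US data referenced in the model card. A minimal sketch of how that pair can be derived with WhisperProcessor; the base openai/whisper-tiny checkpoint is used here as a stand-in for this repository.

```python
from transformers import WhisperProcessor

# Base checkpoint as a stand-in; the fine-tuned repo's processor exposes the
# same multilingual token map.
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")

# (position, token_id) pairs that generation forces at the start of decoding,
# e.g. [(1, 50259), (2, 50359), (3, 50363)] for <|en|>, <|transcribe|>,
# <|notimestamps|> -- position 1 is the value that changed in this commit.
print(processor.get_decoder_prompt_ids(language="english", task="transcribe"))

# The <|en|> token id itself.
print(processor.tokenizer.convert_tokens_to_ids("<|en|>"))  # 50259
```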