gingercake01 committed
Commit f8dc9f4
1 Parent(s): 9f7c5a9

End of training

Files changed (2)
  1. README.md +6 -6
  2. generation_config.json +14 -18
README.md CHANGED
@@ -2,23 +2,23 @@
 language:
 - ko
 license: apache-2.0
-base_model: openai/whisper-large
+base_model: openai/whisper-base
 tags:
 - hf-asr-leaderboard
 - generated_from_trainer
 datasets:
-- gingercake01/0528_1000freetalksamplewithnoise
+- gingercake01/0529_1000audio_base
 model-index:
-- name: largeWhisper_finetuned_with_noise
+- name: baseWhisper_finetune
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# largeWhisper_finetuned_with_noise
+# baseWhisper_finetune
 
-This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the 1000freetalksample dataset.
+This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the 1000freetalksample dataset.
 
 ## Model description
 
@@ -38,7 +38,7 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 1e-05
-- train_batch_size: 16
+- train_batch_size: 8
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
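For readers reproducing this run, the hyperparameters listed in the card map fairly directly onto `transformers.Seq2SeqTrainingArguments`. A minimal sketch follows; the `output_dir` is assumed from the model-index name, and anything not listed in the card (epochs, scheduler, precision, and so on) is left at library defaults rather than taken from the actual training script.

```python
# Minimal sketch of the card's hyperparameters expressed as Seq2SeqTrainingArguments.
# Only the values shown in the diff are set; everything else is an assumption/default.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="baseWhisper_finetune",   # assumed from the model-index name
    learning_rate=1e-5,                  # learning_rate: 1e-05
    per_device_train_batch_size=8,       # train_batch_size: 8 (was 16 in the whisper-large run)
    per_device_eval_batch_size=8,        # eval_batch_size: 8
    seed=42,                             # seed: 42
    adam_beta1=0.9,                      # optimizer: Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                   # and epsilon=1e-08
)
```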
generation_config.json CHANGED
@@ -1,40 +1,36 @@
 {
   "alignment_heads": [
     [
-      9,
-      19
+      3,
+      1
     ],
     [
-      11,
+      4,
       2
     ],
     [
-      11,
-      4
-    ],
-    [
-      11,
-      17
+      4,
+      3
     ],
     [
-      22,
+      4,
       7
     ],
     [
-      22,
-      11
+      5,
+      1
     ],
     [
-      22,
-      17
+      5,
+      2
     ],
     [
-      23,
-      2
+      5,
+      4
     ],
     [
-      23,
-      15
+      5,
+      6
     ]
   ],
   "begin_suppress_tokens": [