tz579 committed on
Commit
1b13b31
1 Parent(s): 96e90c3

End of training

Browse files
README.md CHANGED
@@ -2,6 +2,8 @@
2
  license: apache-2.0
3
  base_model: facebook/wav2vec2-large-lv60
4
  tags:
 
 
5
  - generated_from_trainer
6
  datasets:
7
  - ami
@@ -14,15 +16,15 @@ model-index:
14
  name: Automatic Speech Recognition
15
  type: automatic-speech-recognition
16
  dataset:
17
- name: ami
18
  type: ami
19
  config: ihm
20
  split: None
21
- args: ihm
22
  metrics:
23
  - name: Wer
24
  type: wer
25
- value: 0.31977176615593855
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +32,10 @@ should probably proofread and complete it, then remove this comment. -->
30
 
31
  # wav2vec2-base-ami-fine-tuned
32
 
33
- This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the ami dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.5417
36
- - Wer: 0.3198
37
 
38
  ## Model description
39
 
 
2
  license: apache-2.0
3
  base_model: facebook/wav2vec2-large-lv60
4
  tags:
5
+ - automatic-speech-recognition
6
+ - edinburghcstr/ami
7
  - generated_from_trainer
8
  datasets:
9
  - ami
 
16
  name: Automatic Speech Recognition
17
  type: automatic-speech-recognition
18
  dataset:
19
+ name: EDINBURGHCSTR/AMI - IHM
20
  type: ami
21
  config: ihm
22
  split: None
23
+ args: 'Config: ihm, Training split: train, Eval split: validation'
24
  metrics:
25
  - name: Wer
26
  type: wer
27
+ value: 0.33567800752279153
28
  ---
29
 
30
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
32
 
33
  # wav2vec2-base-ami-fine-tuned
34
 
35
+ This model is a fine-tuned version of [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) on the EDINBURGHCSTR/AMI - IHM dataset.
36
  It achieves the following results on the evaluation set:
37
+ - Loss: 0.5988
38
+ - Wer: 0.3357
39
 
40
  ## Model description
41
 
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_loss": 0.5988181233406067,
4
+ "eval_runtime": 206.1503,
5
+ "eval_samples": 12383,
6
+ "eval_samples_per_second": 60.068,
7
+ "eval_steps_per_second": 3.755,
8
+ "eval_wer": 0.33567800752279153,
9
+ "total_flos": 1.799905283771071e+19,
10
+ "train_loss": 0.9291712401461605,
11
+ "train_runtime": 7691.3711,
12
+ "train_samples": 102201,
13
+ "train_samples_per_second": 26.575,
14
+ "train_steps_per_second": 1.661
15
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_loss": 0.5988181233406067,
4
+ "eval_runtime": 206.1503,
5
+ "eval_samples": 12383,
6
+ "eval_samples_per_second": 60.068,
7
+ "eval_steps_per_second": 3.755,
8
+ "eval_wer": 0.33567800752279153
9
+ }
runs/May26_14-50-05_tz579-raptorlake/events.out.tfevents.1716761958.tz579-raptorlake.18848.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1310939e448ee98714b4657d05a20de382f54d3731ea98f13acadc571b043444
3
+ size 406
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "total_flos": 1.799905283771071e+19,
4
+ "train_loss": 0.9291712401461605,
5
+ "train_runtime": 7691.3711,
6
+ "train_samples": 102201,
7
+ "train_samples_per_second": 26.575,
8
+ "train_steps_per_second": 1.661
9
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff