Chahat7874 committed on
Commit 080109f
1 Parent(s): f09164e

End of training
README.md CHANGED
@@ -1,13 +1,33 @@
 ---
+language:
+- hi
 license: apache-2.0
 base_model: facebook/wav2vec2-large-xlsr-53
 tags:
+- automatic-speech-recognition
+- mozilla-foundation/common_voice_15_0
+- mms
 - generated_from_trainer
 datasets:
 - common_voice_15_0
+metrics:
+- wer
 model-index:
 - name: wav2vec2-common_voice-en-demo
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: MOZILLA-FOUNDATION/COMMON_VOICE_15_0 - HI
+      type: common_voice_15_0
+      config: hi
+      split: validation
+      args: 'Config: hi, Training split: train, Eval split: validation'
+    metrics:
+    - name: Wer
+      type: wer
+      value: 1.0023633677991137
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,7 +35,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # wav2vec2-common_voice-en-demo
 
-This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice_15_0 dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the MOZILLA-FOUNDATION/COMMON_VOICE_15_0 - HI dataset.
+It achieves the following results on the evaluation set:
+- Loss: 20.2294
+- Wer: 1.0024
 
 ## Model description
 
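For readers of the updated card: a minimal inference sketch in Python, assuming the checkpoint is published with the standard `transformers` CTC head and processor files. The repo id and audio path below are hypothetical placeholders, not taken from this commit.

```python
# Minimal ASR inference sketch. The repo id and audio file are placeholders;
# the checkpoint is assumed to expose a standard Wav2Vec2 CTC head + processor.
import torch
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

MODEL_ID = "Chahat7874/wav2vec2-common_voice-en-demo"  # hypothetical repo id

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
model.eval()

# Common Voice audio is resampled to the 16 kHz rate the model expects.
speech, _ = librosa.load("sample.wav", sr=16_000)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy CTC decoding of the most likely token ids.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```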
adapter.eng.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f16202c0de9c8feeaa1cf336a6630fc754a6ed13dca6fb50a2468e0c7c3f94d2
+oid sha256:a7fc12bdeecd5b36ad6a1fa85f00caaa02034ef6c105b1b27ac58abcaf9b6232
 size 3918936
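The adapter weights themselves live in Git LFS, so the diff only swaps the pointer's sha256 `oid`; the byte `size` is unchanged. A small sketch, assuming the real `adapter.eng.safetensors` has been downloaded locally, for checking it against the new pointer:

```python
# Check a downloaded LFS-tracked file against the pointer values in the diff.
import hashlib
from pathlib import Path

EXPECTED_OID = "a7fc12bdeecd5b36ad6a1fa85f00caaa02034ef6c105b1b27ac58abcaf9b6232"
EXPECTED_SIZE = 3918936

path = Path("adapter.eng.safetensors")  # assumed local download location
digest = hashlib.sha256(path.read_bytes()).hexdigest()

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert digest == EXPECTED_OID, "sha256 mismatch"
print("adapter.eng.safetensors matches the LFS pointer")
```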
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 1.0,
-    "eval_loss": 20.233524322509766,
-    "eval_runtime": 167.9773,
+    "eval_loss": 20.229398727416992,
+    "eval_runtime": 167.7396,
     "eval_samples": 2416,
-    "eval_samples_per_second": 14.383,
-    "eval_steps_per_second": 1.798,
-    "eval_wer": 1.0018709995076318,
+    "eval_samples_per_second": 14.403,
+    "eval_steps_per_second": 1.8,
+    "eval_wer": 1.0023633677991137,
     "total_flos": 6.105598680744346e+17,
-    "train_loss": 18.518729795258622,
-    "train_runtime": 625.4328,
+    "train_loss": 18.50860385237069,
+    "train_runtime": 615.4894,
     "train_samples": 4630,
-    "train_samples_per_second": 7.403,
-    "train_steps_per_second": 0.232
+    "train_samples_per_second": 7.522,
+    "train_steps_per_second": 0.236
 }
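The throughput fields here are derived: `train_samples_per_second` is roughly `train_samples / train_runtime` (4630 / 615.49 ≈ 7.52), and likewise for eval. A quick sanity-check sketch, assuming `all_results.json` sits in the working directory:

```python
# Recompute the derived throughput numbers recorded in all_results.json.
import json

with open("all_results.json") as f:
    results = json.load(f)

train_tput = results["train_samples"] / results["train_runtime"]  # ~4630 / 615.49
eval_tput = results["eval_samples"] / results["eval_runtime"]     # ~2416 / 167.74

print(f"train samples/s: reported {results['train_samples_per_second']}, recomputed {train_tput:.3f}")
print(f"eval samples/s:  reported {results['eval_samples_per_second']}, recomputed {eval_tput:.3f}")
```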
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
-    "eval_loss": 20.233524322509766,
-    "eval_runtime": 167.9773,
+    "eval_loss": 20.229398727416992,
+    "eval_runtime": 167.7396,
     "eval_samples": 2416,
-    "eval_samples_per_second": 14.383,
-    "eval_steps_per_second": 1.798,
-    "eval_wer": 1.0018709995076318
+    "eval_samples_per_second": 14.403,
+    "eval_steps_per_second": 1.8,
+    "eval_wer": 1.0023633677991137
 }
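The updated `eval_wer` of about 1.0024 means the edit operations slightly outnumber the reference words, which can happen because insertions count toward WER. A minimal sketch of computing such a score with the `evaluate` library; the Hindi transcript pairs below are made-up placeholders, not data from this run:

```python
# Word error rate, as reported in eval_results.json, computed with `evaluate`.
# The reference/prediction strings are illustrative placeholders only.
import evaluate

wer_metric = evaluate.load("wer")

references = ["यह एक उदाहरण वाक्य है", "मौसम आज अच्छा है"]
predictions = ["यह उदाहरण वाक्य है है", "मौसम आज अच्छा नहीं है था"]

wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")  # values above 1.0 are possible when insertions dominate
```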
runs/May25_12-14-50_d77941dfb6d4/events.out.tfevents.1716640249.d77941dfb6d4.7672.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14e3b90d7609ffd19c1601f164c7d0567cec9fb0960beeea80ea82f191a515c4
+size 406
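The added file is a TensorBoard event log stored through LFS; at 406 bytes it likely holds little beyond run metadata. A sketch, assuming TensorBoard is installed and the run directory has been downloaded, for listing whatever scalars it contains:

```python
# List scalar tags and values stored in a TensorBoard event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

LOG_DIR = "runs/May25_12-14-50_d77941dfb6d4"  # directory holding the tfevents file

acc = EventAccumulator(LOG_DIR)
acc.Reload()

for tag in acc.Tags().get("scalars", []):
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```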
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
     "total_flos": 6.105598680744346e+17,
-    "train_loss": 18.518729795258622,
-    "train_runtime": 625.4328,
+    "train_loss": 18.50860385237069,
+    "train_runtime": 615.4894,
     "train_samples": 4630,
-    "train_samples_per_second": 7.403,
-    "train_steps_per_second": 0.232
+    "train_samples_per_second": 7.522,
+    "train_steps_per_second": 0.236
 }
trainer_state.json CHANGED
@@ -10,21 +10,21 @@
   "log_history": [
     {
       "epoch": 0.6896551724137931,
-      "eval_loss": 21.947891235351562,
-      "eval_runtime": 168.4847,
-      "eval_samples_per_second": 14.34,
-      "eval_steps_per_second": 1.792,
-      "eval_wer": 1.0003446578040374,
+      "eval_loss": 21.9151554107666,
+      "eval_runtime": 164.601,
+      "eval_samples_per_second": 14.678,
+      "eval_steps_per_second": 1.835,
+      "eval_wer": 1.0004431314623339,
       "step": 100
     },
     {
       "epoch": 1.0,
       "step": 145,
       "total_flos": 6.105598680744346e+17,
-      "train_loss": 18.518729795258622,
-      "train_runtime": 625.4328,
-      "train_samples_per_second": 7.403,
-      "train_steps_per_second": 0.232
+      "train_loss": 18.50860385237069,
+      "train_runtime": 615.4894,
+      "train_samples_per_second": 7.522,
+      "train_steps_per_second": 0.236
     }
   ],
   "logging_steps": 500,