DewiBrynJones committed on
Commit
8c9b1a5
1 Parent(s): 54c100e

move model to techiaith

README.md CHANGED
@@ -20,118 +20,9 @@ model-index:
   name: Common Voice cy
   type: common_voice
   args: cy
- metrics:
- - name: Test WER
- - type: wer
- - value: 25.31
  ---
 
  # Wav2Vec2-Large-XLSR-Welsh
 
- Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the [Welsh Common Voice dataset](https://huggingface.co/datasets/common_voice).
-
- When using this model, make sure that your speech input is sampled at 16kHz.
-
-
- ## Usage
-
- The model can be used directly (without a language model) as follows:
-
- ```python
- import torch
- import torchaudio
- from datasets import load_dataset
- from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-
- test_dataset = load_dataset("common_voice", "cy", split="test[:2%]")
-
- processor = Wav2Vec2Processor.from_pretrained("DewiBrynJones/wav2vec2-large-xlsr-welsh")
- model = Wav2Vec2ForCTC.from_pretrained("DewiBrynJones/wav2vec2-large-xlsr-welsh")
-
- resampler = torchaudio.transforms.Resample(48_000, 16_000)
-
- # Preprocessing the dataset: read the audio files as arrays
- # and resample them to 16kHz.
- def speech_file_to_array_fn(batch):
-     speech_array, sampling_rate = torchaudio.load(batch["path"])
-     batch["speech"] = resampler(speech_array).squeeze().numpy()
-     return batch
-
- test_dataset = test_dataset.map(speech_file_to_array_fn)
- inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
-
- with torch.no_grad():
-     logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
-
- predicted_ids = torch.argmax(logits, dim=-1)
-
- print("Prediction:", processor.batch_decode(predicted_ids))
- print("Reference:", test_dataset["sentence"][:2])
- ```
71
-
72
-
73
- ## Evaluation
74
-
75
- The model can be evaluated as follows on the Welsh test data of Common Voice.
76
-
77
-
78
- ```python
79
- import torch
80
- import torchaudio
81
- from datasets import load_dataset, load_metric
82
- from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
83
- import re
84
-
85
- test_dataset = load_dataset("common_voice", "cy", split="test")
86
-
87
- wer = load_metric("wer")
88
-
89
- processor = Wav2Vec2Processor.from_pretrained("DewiBrynJones/wav2vec2-large-xlsr-welsh")
90
- model = Wav2Vec2ForCTC.from_pretrained("DewiBrynJones/wav2vec2-large-xlsr-welsh")
91
-
92
- model.to("cuda")
93
-
94
- chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\u2013\\\\u2014\\\\;\\\\:\\\\"\\\\\\\\%\\\\\\\\\\\\]'
95
-
96
- resampler = torchaudio.transforms.Resample(48_000, 16_000)
97
-
98
- # Preprocessing the datasets.
99
- # We need to read the aduio files as arrays
100
- def speech_file_to_array_fn(batch):
101
- batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
102
- speech_array, sampling_rate = torchaudio.load(batch["path"])
103
- batch["speech"] = resampler(speech_array).squeeze().numpy()
104
- return batch
105
-
106
- test_dataset = test_dataset.map(speech_file_to_array_fn)
107
-
108
- # Preprocessing the datasets.
109
- # We need to read the aduio files as arrays
110
- def evaluate(batch):
111
- inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
112
-
113
- with torch.no_grad():
114
- logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
115
-
116
- pred_ids = torch.argmax(logits, dim=-1)
117
- batch["pred_strings"] = processor.batch_decode(pred_ids)
118
- return batch
119
-
120
- result = test_dataset.map(evaluate, batched=True, batch_size=8)
121
-
122
- print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
123
- ```
-
- **Test Result**: 25.31%
-
-
- # Training
-
- A Docker-based setup for training and evaluating this model can be found on GitHub: https://github.com/techiaith/xlsr-fine-tuning-week
-
- # Example Predictions
-
- | Prediction | Reference |
- |---|---|
- | rhedais i ffwrdd heb ddweud dim wrthi ym beth digwyddodd | Rhedais i ffwrdd heb ddweud dim wrthi am beth ddigwyddodd. |
- | ac yr oedd y ferch yn ofnus d | Ac yr oedd y ferch yn ofnus. |
+ This model has moved to https://huggingface.co/techiaith/wav2vec2-xlsr-ft-cy
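
For scripts that still point at the old repo id, switching to the new location is a one-line change. A minimal sketch follows; the repo id is taken from the notice above, and the model is assumed to keep the same Wav2Vec2ForCTC/Wav2Vec2Processor layout as before:

```python
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Load from the new location named in the notice above; the class layout is
# assumed unchanged from the original DewiBrynJones/wav2vec2-large-xlsr-welsh.
processor = Wav2Vec2Processor.from_pretrained("techiaith/wav2vec2-xlsr-ft-cy")
model = Wav2Vec2ForCTC.from_pretrained("techiaith/wav2vec2-xlsr-ft-cy")
```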
config.json DELETED
@@ -1,76 +0,0 @@
- {
-   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
-   "activation_dropout": 0.055,
-   "apply_spec_augment": true,
-   "architectures": [
-     "Wav2Vec2ForCTC"
-   ],
-   "attention_dropout": 0.094,
-   "bos_token_id": 1,
-   "conv_bias": true,
-   "conv_dim": [
-     512,
-     512,
-     512,
-     512,
-     512,
-     512,
-     512
-   ],
-   "conv_kernel": [
-     10,
-     3,
-     3,
-     3,
-     3,
-     2,
-     2
-   ],
-   "conv_stride": [
-     5,
-     2,
-     2,
-     2,
-     2,
-     2,
-     2
-   ],
-   "ctc_loss_reduction": "mean",
-   "ctc_zero_infinity": false,
-   "do_stable_layer_norm": true,
-   "eos_token_id": 2,
-   "feat_extract_activation": "gelu",
-   "feat_extract_dropout": 0.0,
-   "feat_extract_norm": "layer",
-   "feat_proj_dropout": 0.04,
-   "final_dropout": 0.0,
-   "gradient_checkpointing": true,
-   "hidden_act": "gelu",
-   "hidden_dropout": 0.047,
-   "hidden_size": 1024,
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "layer_norm_eps": 1e-05,
-   "layerdrop": 0.041,
-   "mask_channel_length": 10,
-   "mask_channel_min_space": 1,
-   "mask_channel_other": 0.0,
-   "mask_channel_prob": 0.0,
-   "mask_channel_selection": "static",
-   "mask_feature_length": 10,
-   "mask_feature_prob": 0.0,
-   "mask_time_length": 10,
-   "mask_time_min_space": 1,
-   "mask_time_other": 0.0,
-   "mask_time_prob": 0.082,
-   "mask_time_selection": "static",
-   "model_type": "wav2vec2",
-   "num_attention_heads": 16,
-   "num_conv_pos_embedding_groups": 16,
-   "num_conv_pos_embeddings": 128,
-   "num_feat_extract_layers": 7,
-   "num_hidden_layers": 24,
-   "pad_token_id": 48,
-   "transformers_version": "4.4.2",
-   "vocab_size": 49
- }
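
The deleted file is a standard transformers Wav2Vec2Config. For reference, an equivalent object could be rebuilt from the values shown above; this is a sketch, and any field not passed falls back to the library defaults for Wav2Vec2Config:

```python
from transformers import Wav2Vec2Config

# Reconstruct the deleted config from the hyperparameters listed above.
# The conv_dim/conv_kernel/conv_stride values above match the Wav2Vec2
# defaults and are omitted here.
config = Wav2Vec2Config(
    hidden_size=1024,
    num_hidden_layers=24,
    num_attention_heads=16,
    intermediate_size=4096,
    feat_extract_norm="layer",
    do_stable_layer_norm=True,
    attention_dropout=0.094,
    hidden_dropout=0.047,
    activation_dropout=0.055,
    feat_proj_dropout=0.04,
    layerdrop=0.041,
    mask_time_prob=0.082,
    ctc_loss_reduction="mean",
    pad_token_id=48,
    vocab_size=49,
)
```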
preprocessor_config.json DELETED
@@ -1,8 +0,0 @@
- {
-   "do_normalize": true,
-   "feature_size": 1,
-   "padding_side": "right",
-   "padding_value": 0.0,
-   "return_attention_mask": true,
-   "sampling_rate": 16000
- }
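
This file is the standard Wav2Vec2FeatureExtractor configuration. The equivalent object can be constructed directly, as in the sketch below, which mirrors the fields above (padding_side is left at its default, which is also "right"):

```python
from transformers import Wav2Vec2FeatureExtractor

# Recreate the deleted preprocessor config field for field.
feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)
```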
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0b3d1d4c5339a82fac76d5a361323ba7c8e8faa278c8f37e1e4db9929ecd4705
- size 1262129841
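
The deleted file is only a Git LFS pointer; the ~1.26 GB checkpoint itself lives in LFS storage. A locally downloaded copy can be checked against the oid recorded above, as in this standard-library sketch (the filename "pytorch_model.bin" is assumed to be the downloaded file):

```python
import hashlib

# Hash the downloaded checkpoint in 1 MiB chunks and compare the digest to
# the sha256 oid from the LFS pointer above.
digest = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == "0b3d1d4c5339a82fac76d5a361323ba7c8e8faa278c8f37e1e4db9929ecd4705"
```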
special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
vocab.json DELETED
@@ -1 +0,0 @@
- {"g": 1, "‘": 2, "e": 3, "r": 4, "b": 5, "ê": 6, "â": 7, "ŷ": 8, "'": 9, "v": 10, "ö": 11, "ï": 12, "h": 13, "z": 14, "m": 15, "á": 16, "ä": 17, "ñ": 18, "o": 19, "n": 20, "j": 21, "ò": 22, "ë": 23, "î": 24, "k": 25, "q": 26, "x": 27, "¬": 28, "a": 29, "s": 30, "i": 31, "ÿ": 32, "u": 33, "ŵ": 34, "c": 35, "é": 36, "w": 37, "p": 38, "y": 39, "d": 40, "l": 41, "à": 42, "û": 43, "t": 44, "f": 45, "ô": 46, "|": 0, "[UNK]": 47, "[PAD]": 48}