Davidsamuel101 committed on
Commit e1d9525 · 1 Parent(s): 8e70273

Added w2v2 with LM
README.md CHANGED
@@ -1,3 +1,73 @@
- ---
- license: apache-2.0
- ---
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: facebook/wav2vec2-xls-r-300m
+ tags:
+ - automatic-speech-recognition
+ - bookbot/common_voice_16_1_sw
+ - bookbot/ALFFA_swahili
+ - bookbot/fleurs_sw
+ - generated_from_trainer
+ metrics:
+ - wer
+ model-index:
+ - name: wav2vec2-xls-r-300m-swahili-cv-fleurs-alffa-word
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wav2vec2-xls-r-300m-swahili-cv-fleurs-alffa-word
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the bookbot/common_voice_16_1_sw, bookbot/ALFFA_swahili, and bookbot/fleurs_sw datasets.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2057
+ - Wer: 0.2194
+ - Cer: 0.1098
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer    | Cer    |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|
+ | 0.3702        | 1.0   | 1961 | 0.2878          | 0.3335 | 0.1367 |
+ | 0.2333        | 2.0   | 3922 | 0.2324          | 0.2653 | 0.1219 |
+ | 0.172         | 3.0   | 5883 | 0.2136          | 0.2464 | 0.1162 |
+ | 0.1331        | 4.0   | 7844 | 0.2043          | 0.2287 | 0.1127 |
+ | 0.1018        | 5.0   | 9805 | 0.2057          | 0.2194 | 0.1098 |
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.3.1
+ - Datasets 2.19.2
+ - Tokenizers 0.20.1
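
The generated card stops short of a usage example. Below is a minimal inference sketch; the repo id is an assumption pieced together from the committer name and the model-index name, so substitute the actual Hub path. Because the repo ships a `language_model/` directory and sets `processor_class` to `Wav2Vec2ProcessorWithLM`, the ASR pipeline should pick up LM-boosted decoding automatically (it needs `pyctcdecode` and `kenlm` installed).

```python
from transformers import pipeline

# Hypothetical repo id inferred from the committer and model-index name.
asr = pipeline(
    "automatic-speech-recognition",
    model="Davidsamuel101/wav2vec2-xls-r-300m-swahili-cv-fleurs-alffa-word",
)

# The shipped 5-gram KenLM is used for beam-search decoding because the
# processor resolves to Wav2Vec2ProcessorWithLM.
print(asr("sample_sw.wav")["text"])  # placeholder audio file
```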
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "</s>": 31,
+   "<s>": 30
+ }
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 5.0,
+   "eval_cer": 0.10983672160892569,
+   "eval_loss": 0.2057330459356308,
+   "eval_runtime": 242.3069,
+   "eval_samples": 22360,
+   "eval_samples_per_second": 92.28,
+   "eval_steps_per_second": 11.535,
+   "eval_wer": 0.21943229191706215,
+   "total_flos": 5.6911698946882765e+19,
+   "train_loss": 0.39030011520891517,
+   "train_runtime": 4855.0155,
+   "train_samples": 62739,
+   "train_samples_per_second": 64.613,
+   "train_steps_per_second": 2.02
+ }
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "'", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
config.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.0,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.05,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 29,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "xvector_output_dim": 512
+ }
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "epoch": 5.0,
+   "eval_cer": 0.10983672160892569,
+   "eval_loss": 0.2057330459356308,
+   "eval_runtime": 242.3069,
+   "eval_samples": 22360,
+   "eval_samples_per_second": 92.28,
+   "eval_steps_per_second": 11.535,
+   "eval_wer": 0.21943229191706215
+ }
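
The `eval_wer` and `eval_cer` values above are corpus-level word and character error rates. A small sketch of recomputing them with the `evaluate` library (the transcripts shown are placeholders, not data from this repo):

```python
import evaluate

wer_metric = evaluate.load("wer")
cer_metric = evaluate.load("cer")

predictions = ["habari ya asubuhi"]  # hypothetical model output
references = ["habari za asubuhi"]   # hypothetical ground truth

# Both metrics aggregate edit distance over the whole corpus, matching
# the single-number eval_wer / eval_cer reported above.
print("WER:", wer_metric.compute(predictions=predictions, references=references))
print("CER:", cer_metric.compute(predictions=predictions, references=references))
```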
language_model/5gram.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5dad64919b8449ffb73db87605df3869e42d8b9ab07e68f73bbcc4cfd7eb2f4
+ size 37825846
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7952ec24b484c6ca7edf84c99ee298ffd531ea56e4f6bd89e379ff255a05622
+ size 1261938680
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "processor_class": "Wav2Vec2ProcessorWithLM",
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
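
The feature extractor expects mono float audio at 16 kHz, with normalization handled by the processor. A minimal preprocessing sketch, again assuming the hypothetical repo id and a placeholder audio file (loading a Wav2Vec2ProcessorWithLM repo requires `pyctcdecode`):

```python
import librosa
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "Davidsamuel101/wav2vec2-xls-r-300m-swahili-cv-fleurs-alffa-word"  # assumed id
)

# librosa resamples to the 16 kHz mono float32 the config calls for.
speech, _ = librosa.load("sample_sw.wav", sr=16_000, mono=True)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
# inputs.input_values (and inputs.attention_mask, since
# return_attention_mask is true) feed Wav2Vec2ForCTC directly.
```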
runs/Oct24_14-46-01_s44504-focus-slate/events.out.tfevents.1729742666.s44504-focus-slate.2113579.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:050f6bed9982481627ed120e82dbd027a25b2dc48966b16b084ea1809539991f
+ size 12710
runs/Oct24_14-46-01_s44504-focus-slate/events.out.tfevents.1729747772.s44504-focus-slate.2113579.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f4101de0fa00b8543099fd3caab4c2acac9b7a4df22d5e07f70d481a98b1e2c
+ size 453
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   }
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "added_tokens_decoder": {
+     "28": {
+       "content": "[UNK]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "29": {
+       "content": "[PAD]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "30": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "31": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "processor_class": "Wav2Vec2ProcessorWithLM",
+   "replace_word_delimiter_char": " ",
+   "target_lang": null,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "unk_token": "[UNK]",
+   "word_delimiter_token": "|"
+ }
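
At decode time the CTC tokenizer collapses repeats, drops `[PAD]` (which doubles as the CTC blank; per vocab.json and config.json its id is 29), and maps the `|` word delimiter back to spaces. A sketch of plain greedy decoding without the language model, reusing `processor` and `inputs` from the preprocessing sketch above:

```python
import torch
from transformers import Wav2Vec2ForCTC

repo_id = "Davidsamuel101/wav2vec2-xls-r-300m-swahili-cv-fleurs-alffa-word"  # assumed
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

with torch.no_grad():
    logits = model(inputs.input_values).logits  # (batch, time, vocab_size=32)

# Greedy CTC: pick the best token per frame; the tokenizer then handles
# blank removal, repeat collapsing, and "|" -> " " replacement.
ids = torch.argmax(logits, dim=-1)
print(processor.tokenizer.batch_decode(ids)[0])
```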
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "total_flos": 5.6911698946882765e+19,
+   "train_loss": 0.39030011520891517,
+   "train_runtime": 4855.0155,
+   "train_samples": 62739,
+   "train_samples_per_second": 64.613,
+   "train_steps_per_second": 2.02
+ }
trainer_state.json ADDED
@@ -0,0 +1,225 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 9805,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.25497195308516063,
+       "grad_norm": 1.5315192937850952,
+       "learning_rate": 0.00010193679918450561,
+       "loss": 3.8945,
+       "step": 500
+     },
+     {
+       "epoch": 0.5099439061703213,
+       "grad_norm": 1.6545052528381348,
+       "learning_rate": 0.00019956935630099728,
+       "loss": 0.5228,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7649158592554819,
+       "grad_norm": 1.1093895435333252,
+       "learning_rate": 0.0001882366273798731,
+       "loss": 0.3702,
+       "step": 1500
+     },
+     {
+       "epoch": 1.0,
+       "eval_cer": 0.13674156347728234,
+       "eval_loss": 0.287818968296051,
+       "eval_runtime": 242.9432,
+       "eval_samples_per_second": 92.038,
+       "eval_steps_per_second": 11.505,
+       "eval_wer": 0.3335439517042918,
+       "step": 1961
+     },
+     {
+       "epoch": 1.0198878123406425,
+       "grad_norm": 0.6819602847099304,
+       "learning_rate": 0.0001769038984587489,
+       "loss": 0.3114,
+       "step": 2000
+     },
+     {
+       "epoch": 1.2748597654258031,
+       "grad_norm": 0.7735158801078796,
+       "learning_rate": 0.00016557116953762465,
+       "loss": 0.2571,
+       "step": 2500
+     },
+     {
+       "epoch": 1.5298317185109638,
+       "grad_norm": 0.7301546931266785,
+       "learning_rate": 0.00015423844061650046,
+       "loss": 0.2411,
+       "step": 3000
+     },
+     {
+       "epoch": 1.7848036715961244,
+       "grad_norm": 0.5918829441070557,
+       "learning_rate": 0.00014290571169537624,
+       "loss": 0.2333,
+       "step": 3500
+     },
+     {
+       "epoch": 2.0,
+       "eval_cer": 0.12185579510710033,
+       "eval_loss": 0.23235103487968445,
+       "eval_runtime": 242.1687,
+       "eval_samples_per_second": 92.332,
+       "eval_steps_per_second": 11.542,
+       "eval_wer": 0.26528299554267426,
+       "step": 3922
+     },
+     {
+       "epoch": 2.039775624681285,
+       "grad_norm": 0.7791718244552612,
+       "learning_rate": 0.00013157298277425205,
+       "loss": 0.2157,
+       "step": 4000
+     },
+     {
+       "epoch": 2.2947475777664454,
+       "grad_norm": 0.5816757082939148,
+       "learning_rate": 0.00012024025385312785,
+       "loss": 0.1817,
+       "step": 4500
+     },
+     {
+       "epoch": 2.5497195308516063,
+       "grad_norm": 0.42094656825065613,
+       "learning_rate": 0.00010890752493200362,
+       "loss": 0.1803,
+       "step": 5000
+     },
+     {
+       "epoch": 2.804691483936767,
+       "grad_norm": 0.515943706035614,
+       "learning_rate": 9.757479601087942e-05,
+       "loss": 0.172,
+       "step": 5500
+     },
+     {
+       "epoch": 3.0,
+       "eval_cer": 0.11621454523249779,
+       "eval_loss": 0.213576540350914,
+       "eval_runtime": 242.2848,
+       "eval_samples_per_second": 92.288,
+       "eval_steps_per_second": 11.536,
+       "eval_wer": 0.246385720074449,
+       "step": 5883
+     },
+     {
+       "epoch": 3.0596634370219276,
+       "grad_norm": 0.397499680519104,
+       "learning_rate": 8.624206708975522e-05,
+       "loss": 0.1656,
+       "step": 6000
+     },
+     {
+       "epoch": 3.3146353901070884,
+       "grad_norm": 0.5948652625083923,
+       "learning_rate": 7.490933816863101e-05,
+       "loss": 0.1388,
+       "step": 6500
+     },
+     {
+       "epoch": 3.569607343192249,
+       "grad_norm": 0.5423580408096313,
+       "learning_rate": 6.35766092475068e-05,
+       "loss": 0.1383,
+       "step": 7000
+     },
+     {
+       "epoch": 3.8245792962774097,
+       "grad_norm": 1.2003000974655151,
+       "learning_rate": 5.22438803263826e-05,
+       "loss": 0.1331,
+       "step": 7500
+     },
+     {
+       "epoch": 4.0,
+       "eval_cer": 0.11265216708611207,
+       "eval_loss": 0.20429827272891998,
+       "eval_runtime": 242.3631,
+       "eval_samples_per_second": 92.258,
+       "eval_steps_per_second": 11.532,
+       "eval_wer": 0.22865256897179317,
+       "step": 7844
+     },
+     {
+       "epoch": 4.07955124936257,
+       "grad_norm": 0.8045985102653503,
+       "learning_rate": 4.091115140525839e-05,
+       "loss": 0.123,
+       "step": 8000
+     },
+     {
+       "epoch": 4.3345232024477305,
+       "grad_norm": 0.402927964925766,
+       "learning_rate": 2.957842248413418e-05,
+       "loss": 0.1052,
+       "step": 8500
+     },
+     {
+       "epoch": 4.589495155532891,
+       "grad_norm": 0.42731374502182007,
+       "learning_rate": 1.8245693563009974e-05,
+       "loss": 0.1054,
+       "step": 9000
+     },
+     {
+       "epoch": 4.844467108618052,
+       "grad_norm": 0.2811175286769867,
+       "learning_rate": 6.912964641885767e-06,
+       "loss": 0.1018,
+       "step": 9500
+     },
+     {
+       "epoch": 5.0,
+       "eval_cer": 0.10983672160892569,
+       "eval_loss": 0.2057330459356308,
+       "eval_runtime": 270.0068,
+       "eval_samples_per_second": 82.813,
+       "eval_steps_per_second": 10.352,
+       "eval_wer": 0.21943229191706215,
+       "step": 9805
+     },
+     {
+       "epoch": 5.0,
+       "step": 9805,
+       "total_flos": 5.6911698946882765e+19,
+       "train_loss": 0.39030011520891517,
+       "train_runtime": 4855.0155,
+       "train_samples_per_second": 64.613,
+       "train_steps_per_second": 2.02
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 9805,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 5.6911698946882765e+19,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8477df06fbac59bc0840720a7d0069ac4b2c63dd7a1c33d595f46daf93e07f71
+ size 5368
vocab.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "'": 1,
+   "[PAD]": 29,
+   "[UNK]": 28,
+   "a": 2,
+   "b": 3,
+   "c": 4,
+   "d": 5,
+   "e": 6,
+   "f": 7,
+   "g": 8,
+   "h": 9,
+   "i": 10,
+   "j": 11,
+   "k": 12,
+   "l": 13,
+   "m": 14,
+   "n": 15,
+   "o": 16,
+   "p": 17,
+   "q": 18,
+   "r": 19,
+   "s": 20,
+   "t": 21,
+   "u": 22,
+   "v": 23,
+   "w": 24,
+   "x": 25,
+   "y": 26,
+   "z": 27,
+   "|": 0
+ }