oskarandrsson committed on
Commit
8eff79b
1 parent: 11e4086

Upload tokenizer

Files changed (3)
  1. added_tokens.json +2 -2
  2. tokenizer_config.json +6 -7
  3. vocab.json +6 -3
added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "</s>": 33,
-  "<s>": 32
+  "</s>": 36,
+  "<s>": 35
 }

tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "added_tokens_decoder": {
-    "30": {
+    "33": {
       "content": "[UNK]",
       "lstrip": true,
       "normalized": false,
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": false
     },
-    "31": {
+    "34": {
       "content": "[PAD]",
       "lstrip": true,
       "normalized": false,
@@ -16,18 +16,18 @@
       "single_word": false,
       "special": false
     },
-    "32": {
+    "35": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "33": {
+    "36": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -39,7 +39,6 @@
   "eos_token": "</s>",
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
-  "processor_class": "Wav2Vec2BertProcessor",
   "replace_word_delimiter_char": " ",
   "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",

vocab.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "[PAD]": 31,
-  "[UNK]": 30,
+  "[PAD]": 34,
+  "[UNK]": 33,
   "a": 1,
   "b": 2,
   "c": 3,
@@ -30,5 +30,8 @@
   "|": 0,
   "ä": 27,
   "å": 28,
-  "ö": 29
+  "é": 29,
+  "ô": 30,
+  "ö": 31,
+  "ü": 32
 }
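
To check the commit end to end, the updated files can be loaded with transformers. A minimal sketch, assuming the files are in the current directory and transformers is installed (both are assumptions):

from transformers import Wav2Vec2CTCTokenizer

# Assumption: load from the current directory containing the three updated files.
tok = Wav2Vec2CTCTokenizer.from_pretrained(".")

# Special-token ids after this commit.
print(tok.unk_token_id, tok.pad_token_id, tok.bos_token_id, tok.eos_token_id)  # 33 34 35 36

# The newly added characters now have their own ids instead of falling back to [UNK].
print(tok.convert_tokens_to_ids(["é", "ô", "ö", "ü"]))  # [29, 30, 31, 32]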