Upload tokenizer
- tokenizer_config.json +1 -0
- vocab.json +26 -26
tokenizer_config.json
CHANGED
@@ -6,6 +6,7 @@
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "[PAD]",
   "replace_word_delimiter_char": " ",
+  "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
   "unk_token": "[UNK]",
   "word_delimiter_token": "|"
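
The only change to this file is the new "target_lang" entry, which recent versions of Wav2Vec2CTCTokenizer use to select a per-language vocabulary in multilingual (MMS-style) checkpoints; null keeps the ordinary single-vocabulary behaviour. A minimal sketch of loading the uploaded files, assuming they sit in a local directory named ./tokenizer (the actual Hub repo id is not shown on this page):

# A minimal sketch, assuming tokenizer_config.json and vocab.json were
# downloaded to ./tokenizer (hypothetical path; repo id not shown here).
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

print(tokenizer.pad_token)             # [PAD]
print(tokenizer.unk_token)             # [UNK]
print(tokenizer.word_delimiter_token)  # |
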
vocab.json
CHANGED
@@ -1,32 +1,32 @@
 {
-  "'":
-  "A":
-  "B":
-  "C":
-  "D":
-  "E":
-  "F":
-  "G":
-  "H":
-  "I":
+  "'": 5,
+  "A": 9,
+  "B": 25,
+  "C": 23,
+  "D": 20,
+  "E": 2,
+  "F": 19,
+  "G": 24,
+  "H": 18,
+  "I": 27,
   "J": 21,
-  "K":
+  "K": 22,
   "L": 13,
-  "M":
-  "N":
-  "O":
-  "P":
-  "Q":
-  "R":
-  "S":
-  "T":
-  "U":
-  "V":
-  "W":
-  "X":
-  "Y":
-  "Z":
+  "M": 10,
+  "N": 0,
+  "O": 11,
+  "P": 17,
+  "Q": 14,
+  "R": 1,
+  "S": 26,
+  "T": 3,
+  "U": 8,
+  "V": 15,
+  "W": 6,
+  "X": 7,
+  "Y": 4,
+  "Z": 16,
   "[PAD]": 29,
   "[UNK]": 28,
-  "|":
+  "|": 12
 }
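
The new vocab.json assigns each character a CTC label id: 26 uppercase letters plus the apostrophe, "|" as the word delimiter (decoded back to a space), and [UNK]/[PAD] at ids 28 and 29, for 30 labels in total. A minimal sketch of building the tokenizer straight from this file, assuming the new version shown above is saved locally as vocab.json:

# A minimal sketch, assuming the new vocab.json above is saved locally.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)

ids = tokenizer("THE CAT").input_ids
print(ids)                    # [3, 18, 2, 12, 23, 9, 3] -- "|" (id 12) marks the space
print(tokenizer.decode(ids))  # THE CAT

Note that decode() applies CTC-style collapsing of repeated labels by default, which is why the example avoids words with doubled letters.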