add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
+{"v": 0, ";": 1, "Y": 2, "W": 3, "f": 4, ",": 5, "M": 6, "L": 7, "m": 8, "q": 9, "e": 10, "p": 11, "F": 13, "j": 14, "N": 15, "z": 16, "-": 17, "?": 18, "A": 19, "S": 20, "'": 21, "G": 22, "x": 23, "B": 24, "n": 25, "U": 26, "Q": 27, "g": 28, "J": 29, "o": 30, ":": 31, "b": 32, "c": 33, ".": 34, "C": 35, "H": 36, "D": 37, "w": 38, "t": 39, "\"": 40, "K": 41, "X": 42, "u": 43, "I": 44, "T": 45, "y": 46, "d": 47, "l": 48, "O": 49, "i": 50, "r": 51, "E": 52, "k": 53, "R": 54, "s": 55, "h": 56, "P": 57, "V": 58, "a": 59, "!": 60, "|": 12, "[UNK]": 61, "[PAD]": 62}
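Together these three files are the standard on-disk layout that transformers' Wav2Vec2CTCTokenizer reads: vocab.json maps each character to an id, tokenizer_config.json selects the tokenizer class and the "|" word delimiter, and special_tokens_map.json names the special tokens. A minimal sketch of loading the commit's files back, assuming they sit in a local directory (the "./tokenizer" path below is a placeholder):

```python
from transformers import Wav2Vec2CTCTokenizer

# Load vocab.json, tokenizer_config.json and special_tokens_map.json
# from a local directory (placeholder path, not part of this commit).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

# Encoding maps each character to its id from vocab.json; spaces are
# first replaced by the word delimiter token "|" (id 12).
ids = tokenizer("HI THERE").input_ids

# Decoding applies CTC-style grouping (merge adjacent repeated ids,
# drop [PAD]) and turns "|" back into a space.
print(tokenizer.decode(ids))  # HI THERE
```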