add tokenizer
- tokenizer_config.json +1 -1
- vocab.json +1 -1
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
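
The "-" line above was truncated JSON with the closing brace missing, so the file could not be parsed at all; the "+" line restores it. A minimal check of the fix in Python, where the string literals are copied from the hunk above:

    import json

    # Old content from the "-" line: truncated, the closing "}" is missing.
    old = '{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"'
    # New content from the "+" line: well-formed JSON.
    new = old + "}"

    try:
        json.loads(old)
    except json.JSONDecodeError as err:
        print("old tokenizer_config.json is invalid JSON:", err)

    config = json.loads(new)
    print(config["tokenizer_class"])  # -> Wav2Vec2CTCTokenizer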
vocab.json
CHANGED
@@ -1 +1 @@
-{"
+{"q": 0, "W": 1, "e": 2, "!": 3, "j": 4, "t": 5, "c": 6, "-": 7, "r": 8, "y": 9, "i": 10, "V": 11, "F": 12, "\"": 13, ":": 14, "Y": 15, "w": 16, "z": 17, "u": 18, "J": 19, "?": 20, "m": 21, "H": 22, "s": 23, "a": 24, "P": 25, "n": 26, "R": 27, "L": 28, "M": 29, "k": 30, "o": 31, ",": 33, "f": 34, ".": 35, "v": 36, ";": 37, "G": 38, "D": 39, "B": 40, "l": 41, "C": 42, "h": 43, "'": 44, "O": 45, "N": 46, "X": 47, "S": 48, "d": 49, "Q": 50, "T": 51, "I": 52, "g": 53, "p": 54, "U": 55, "A": 56, "b": 57, "K": 58, "E": 59, "x": 60, "|": 32, "[UNK]": 61, "[PAD]": 62, "<s>": 63, "</s>": 64}
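
With vocab.json no longer truncated and tokenizer_config.json well-formed, the tokenizer loads the usual way through transformers. A minimal sketch, assuming the two files sit in a local directory ./tokenizer (a placeholder path, not part of this commit):

    from transformers import Wav2Vec2CTCTokenizer

    # "./tokenizer" is a hypothetical local path holding vocab.json and
    # tokenizer_config.json; substitute the actual repo id or checkout path.
    tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

    # Spaces map to the word delimiter "|" (id 32 in vocab.json) on encode,
    # and back to " " on decode per replace_word_delimiter_char.
    ids = tokenizer("Hello world").input_ids

    # group_tokens=False disables CTC-style deduplication, so the repeated
    # "l" in "Hello" survives the round trip.
    print(tokenizer.decode(ids, group_tokens=False))  # -> Hello world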