{
  "version": "1.0",
  "truncation": null,
  "padding": null,
  "added_tokens": [
    {
      "id": 0,
      "content": "UNK",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 1,
      "content": "PAD",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 2,
      "content": "BOS",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 3,
      "content": "EOS",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    }
  ],
  "normalizer": {
    "type": "Sequence",
    "normalizers": [
      {
        "type": "Replace",
        "pattern": {
          "String": "\n"
        },
        "content": " UTT_BOUNDARY"
      },
      {
        "type": "Strip",
        "strip_left": true,
        "strip_right": true
      }
    ]
  },
  "pre_tokenizer": {
    "type": "Whitespace"
  },
  "post_processor": null,
  "decoder": null,
  "model": {
    "type": "WordLevel",
    "vocab": {
      "UNK": 0,
      "PAD": 1,
      "BOS": 2,
      "EOS": 3,
      "WORD_BOUNDARY": 4,
      "UTT_BOUNDARY": 5,
      "oɪ": 6,
      "a": 7,
      "ɾ": 8,
      "k": 9,
      "t̠ʃ": 10,
      "i": 11,
      "s̺": 12,
      "l": 13,
      "p": 14,
      "o": 15,
      "r": 16,
      "aɪ": 17,
      "n": 18,
      "m": 19,
      "ð": 20,
      "e": 21,
      "ts̻": 22,
      "β": 23,
      "s̻": 24,
      "ʎ": 25,
      "b": 26,
      "aʊ": 27,
      "t": 28,
      "ɣ": 29,
      "ɡ": 30,
      "c": 31,
      "u": 32,
      "eɪ": 33,
      "d": 34,
      "ts̺": 35,
      "j": 36,
      "ɲ": 37,
      "f": 38,
      "ʃ": 39,
      "ɟ": 40,
      "eʊ": 41,
      "θ": 42,
      "x": 43
    },
    "unk_token": "UNK"
  }
}
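
A minimal sketch of loading and using this config with the HuggingFace tokenizers library, assuming the JSON above is saved locally as tokenizer.json (the actual filename is not given in the source). It illustrates the pipeline defined above: the Replace normalizer turns each newline into " UTT_BOUNDARY", Strip trims both ends, the Whitespace pre-tokenizer splits on whitespace, and the WordLevel model maps each phoneme string to its vocabulary id, falling back to UNK for out-of-vocabulary symbols.

    from tokenizers import Tokenizer

    # Assumed local path; adjust to wherever the JSON config is saved.
    tokenizer = Tokenizer.from_file("tokenizer.json")

    # Two space-separated phoneme utterances joined by a newline; the
    # normalizer rewrites the newline as an UTT_BOUNDARY token.
    encoding = tokenizer.encode("k a t\nm i n")
    print(encoding.tokens)  # ['k', 'a', 't', 'UTT_BOUNDARY', 'm', 'i', 'n']
    print(encoding.ids)     # [9, 7, 28, 5, 19, 11, 18]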