|
""" |
|
|
|
|
|
""" |
|
|
|
import json

from transformers import AutoTokenizer


# Load the tokenizer from the local "tokenizer" directory.
tokenizer = AutoTokenizer.from_pretrained("tokenizer")

print("vocab size:", tokenizer.vocab_size)
|
|
|
|
|
# Round-trip a single character through encode/decode.
tokens = tokenizer.encode("中")
decode_line = tokenizer.decode(tokens)


def id2token(ids):
    """Map token ids to their (byte-level) token strings."""
    return tokenizer.convert_ids_to_tokens(ids)
|
|
|
def test_token():
    # Encode each character separately and decode its token ids one by one to
    # see which characters survive single-token decoding.
    for word in "中国解决方法黑白侗,。！？；":
        encoding = tokenizer.encode(word)
        for token_id in encoding:
            decode_str = tokenizer.decode([token_id])
            token = id2token([token_id])
            print(word, token_id, decode_str, json.dumps(decode_str), token, json.dumps(token))


test_token()
|
|
|
""" |
|
中 655 中 "\u4e2d" ['ä¸Ń'] ["\u00e4\u00b8\u0143"] |
|
国 686 国 "\u56fd" ['åĽ½'] ["\u00e5\u013d\u00bd"] |
|
解 1798 解 "\u89e3" ['解'] ["\u00e8\u00a7\u00a3"] |
|
决 1796 决 "\u51b3" ['åĨ³'] ["\u00e5\u0128\u00b3"] |
|
方 881 方 "\u65b9" ['æĸ¹'] ["\u00e6\u0138\u00b9"] |
|
法 916 法 "\u6cd5" ['æ³ķ'] ["\u00e6\u00b3\u0137"] |
|
黑 3561 黑 "\u9ed1" ['é»ij'] ["\u00e9\u00bb\u0133"] |
|
白 2325 白 "\u767d" ['çĻ½'] ["\u00e7\u013b\u00bd"] |
|
侗 806 � "\ufffd" ['ä¾'] ["\u00e4\u00be"] |
|
侗 235 � "\ufffd" ['Ĺ'] ["\u0139"] |
|
""" |
|
|
|
|
|
|