def SimpleTokenizer():
    """Load the GPT-NeoX-20B tokenizer from the local 20B_tokenizer.json file."""
    import os
    from tokenizers import Tokenizer
    CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
    TOKENIZER_PATH = os.path.join(CURRENT_DIR, "20B_tokenizer.json")
    tokenizer = Tokenizer.from_file(TOKENIZER_PATH)
    # expose vocab_size the way transformers tokenizers do
    tokenizer.vocab_size = tokenizer.get_vocab_size(with_added_tokens=True)
    # vocab_size = len(tokenizer.get_vocab())
    # vocab_size = tokenizer.vocab_size
    return tokenizer
# Tokenizer actually exposed at module level: loaded from the Hugging Face Hub.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
# "tokenizer_type": "HFTokenizer",  # https://github.com/EleutherAI/gpt-neox/blob/v2.0/configs/20B.yml#L107
# tokenizer.vocab_size = tokenizer.get_vocab_size(with_added_tokens=True)
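

# A minimal usage sketch, assuming the standard transformers tokenizer API
# (encode / convert_ids_to_tokens / decode); shown here only for illustration.
if __name__ == "__main__":
    text = "Hello world"
    token_ids = tokenizer.encode(text)                    # list of token ids
    tokens = tokenizer.convert_ids_to_tokens(token_ids)   # id -> token strings
    print(tokenizer.vocab_size, token_ids, tokens)
    print(tokenizer.decode(token_ids))                    # round-trip back to text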