from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer
from pythainlp.tokenize import word_tokenize

# Load the Thai portion of the OSCAR corpus
dataset = load_dataset("oscar", "unshuffled_deduplicated_th", split="train")

# Instantiate a byte-level BPE tokenizer
tokenizer = ByteLevelBPETokenizer()

def th_tokenize(text):
    # Pre-tokenize Thai text into space-separated words with PyThaiNLP's newmm engine
    return " ".join(word_tokenize(text, engine="newmm", keep_whitespace=False))

def batch_iterator(batch_size=1000):
    # Stream the corpus in batches so the full dataset never has to sit in memory
    for i in range(0, len(dataset), batch_size):
        yield [th_tokenize(text) for text in dataset[i : i + batch_size]["text"]]

# Customized training: build the BPE vocabulary from the pre-tokenized batches
tokenizer.train_from_iterator(
    batch_iterator(),
    vocab_size=50265,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)

# Save the trained tokenizer to disk
tokenizer.save("./tokenizer.json")
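
# Usage sketch (not part of the original script): assuming training has finished,
# the saved tokenizer can be reloaded with the generic Tokenizer class and applied
# to Thai text that has been pre-tokenized the same way as during training.
from tokenizers import Tokenizer

reloaded = Tokenizer.from_file("./tokenizer.json")
encoding = reloaded.encode(th_tokenize("สวัสดีครับ"))  # "hello" in Thai
print(encoding.tokens)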