# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Tencent/Tencent-Hunyuan-Large/blob/main/License.docx
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Test tokenizer encode & decode consistency: every line of the test corpus
# must survive an encode -> decode round trip unchanged. Also reports total
# character length vs. total token length (a rough compression measure).
from transformers import AutoTokenizer

# Local tokenizer checkpoint; trust_remote_code is required because the
# Hunyuan tokenizer ships custom tokenizer code with the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(
    '/tokenizer_exp/other_tokenizer_vocab/hy',
    local_files_only=True,
    trust_remote_code=True,
)

# Read the test corpus with an explicit context manager (the original leaked
# the file handle) and an explicit encoding (the original relied on the
# platform default, which can corrupt non-ASCII text on some systems).
# NOTE(review): assumes the corpus is UTF-8 — confirm against the data file.
with open('/tokenizer_exp/data/test.txt', 'r', encoding='utf-8') as f:
    test_data = [line.strip() for line in f]

num_origi_len = 0  # total characters across all test lines
num_token_len = 0  # total tokens produced for all test lines
for d in test_data:
    a = tokenizer.encode(d)
    num_origi_len += len(d)
    num_token_len += len(a)
    b = tokenizer.decode(a)
    # Round-trip check: decode(encode(d)) must reproduce the input exactly.
    # (assert is acceptable here — this file is itself a test script — but it
    # is skipped under `python -O`; run without -O.)
    assert b == d, f"encode & decode not consistent: {d} vs {b}"

print(f" original length: {num_origi_len}")
print(f" token length: {num_token_len}")