{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "101": {
      "content": "¨",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50155": {
      "content": "======",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50256": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50257": {
      "content": "[¨M¨]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "mask_token": "[¨M¨]",
  "model_max_length": 1024,
  "pad_token": "¨",
  "padding_side": "right",
  "sep_token": "======",
  "split_special_tokens": false,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
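
For context, a minimal sketch of how a config like this is consumed. Assumptions not taken from the file itself: it is saved as tokenizer_config.json in a local directory ./my_tokenizer (a hypothetical name) alongside the matching GPT-2 vocab.json and merges.txt, with a transformers version recent enough to read added_tokens_decoder.

from transformers import AutoTokenizer

# Loading the directory applies the special-token mapping above: existing
# GPT-2 vocab entries are reused as the pad token ("¨", id 101) and sep
# token ("======", id 50155), while "[¨M¨]" (id 50257) is an added token
# beyond the base vocabulary, serving as the mask token.
tok = AutoTokenizer.from_pretrained("./my_tokenizer")  # hypothetical path

print(tok.pad_token, tok.pad_token_id)    # expected: ¨ 101
print(tok.sep_token, tok.sep_token_id)    # expected: ====== 50155
print(tok.mask_token, tok.mask_token_id)  # expected: [¨M¨] 50257
print(tok.eos_token, tok.eos_token_id)    # expected: <|endoftext|> 50256

Reusing low-traffic vocabulary entries as pad and sep tokens, rather than appending new ones, keeps the embedding matrix at its original size; only the mask token requires the vocabulary to grow (to 50258 entries).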