from typing import Union

from transformers import (AutoTokenizer, PreTrainedTokenizer,
                          PreTrainedTokenizerFast)

Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]

NUM_SENTINEL_TOKENS: int = 100

def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Adds sentinel tokens and padding token (if missing).

    Expands the tokenizer vocabulary to include sentinel tokens
    used in mixture-of-denoiser tasks as well as a padding token.

    All added tokens are added as special tokens. No tokens are
    added if sentinel tokens and padding token already exist.
    """
    # Register the sentinel tokens as special tokens; `add_tokens` skips
    # any tokens that are already in the vocabulary.
    sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
    # If the tokenizer lacks a padding token, add and register '<pad>'.
    if tokenizer.pad_token is None:
        tokenizer.add_tokens('<pad>', special_tokens=True)
        tokenizer.pad_token = '<pad>'
        assert tokenizer.pad_token_id is not None
    # Cache the sentinel token ids on the tokenizer for downstream use.
    sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
    _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
    tokenizer.sentinel_token_ids = _sentinel_token_ids
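
# A minimal usage sketch for the function above (kept as a comment so that
# importing this module stays side-effect free). The 'gpt2' checkpoint name
# is illustrative, not something this module prescribes:
#
#     tokenizer = AutoTokenizer.from_pretrained('gpt2')
#     adapt_tokenizer_for_denoising(tokenizer)
#     tokenizer.sentinel_token_ids  # ids of <extra_id_0> ... <extra_id_99>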

class AutoTokenizerForMOD(AutoTokenizer):
    """AutoTokenizer + Adaptation for MOD.

    A simple wrapper around AutoTokenizer to make instantiating
    an MOD-adapted tokenizer a bit easier.

    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
    a padding token, and a property to get the token ids of the
    sentinel tokens.
    """

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """See `AutoTokenizer.from_pretrained` docstring."""
        tokenizer = super().from_pretrained(*args, **kwargs)
        adapt_tokenizer_for_denoising(tokenizer)
        return tokenizer
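
if __name__ == '__main__':
    # Minimal demonstration of the wrapper class, assuming `transformers` is
    # installed and the 't5-small' checkpoint is reachable via the Hugging
    # Face Hub (the checkpoint name is illustrative, not required).
    tok = AutoTokenizerForMOD.from_pretrained('t5-small')
    print('pad token id:', tok.pad_token_id)
    print('first sentinel ids:', tok.sentinel_token_ids[:5])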