everdoubling committed on
Commit
1c1d66d
•
1 Parent(s): 258bc45

Upload tokenizer.py


ByT5 Korean Tokenizer

Files changed (1)
  1. tokenizer.py +282 -0
tokenizer.py ADDED
@@ -0,0 +1,282 @@
# coding=utf-8
# Copyright 2021 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model ByT5Korean."""


import warnings
from typing import Dict, List, Optional, Tuple, Union

from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.models.byt5.tokenization_byt5 import ByT5Tokenizer


class ByT5KoreanTokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5Korean tokenizer.
    On top of ByT5's simple raw byte utf-8 encoding, ByT5Korean adds dedicated tokens for Korean jamo.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
    Users should refer to this superclass for more information regarding those methods.

    Args:
        eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The end of sequence token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the :obj:`sep_token`.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        extra_ids (:obj:`int`, `optional`, defaults to 57):
            Add a number of extra ids to the end of the vocabulary for use as sentinels. These tokens are
            accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
            indexed from the end of the vocabulary up to the beginning ("<extra_id_0>" is the last token in the
            vocabulary, like in ByT5 preprocessing; see `here
            <https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
        additional_special_tokens (:obj:`List[str]`, `optional`):
            Additional special tokens used by the tokenizer.
    """

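    # Id layout produced by `_convert_token_to_id` (with the default extra_ids=57):
    #   0..2     : <pad>, </s>, <unk>
    #   3..258   : the 256 raw byte values
    #   259..326 : the 68 Korean jamo slots (19 leads + 21 vowels + 28 tails)
    #   327..383 : the 57 "<extra_id_*>" sentinel tokens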
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=57,
        additional_special_tokens=None,
        **kwargs
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to ByT5KoreanTokenizer. "
                    "In this case the additional_special_tokens must include the extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids

        # Add the special tokens (including extra_ids)
        for token in self.all_special_tokens:
            self.tokens_trie.add(token)
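        # Registering every special token in the trie lets `tokenize()` split the input
        # on them first, so e.g. "<extra_id_0>" stays a single token instead of being
        # broken down into bytes.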

        self._utf_vocab_size = 2 ** 8  # utf is 8 bits
        self._utf_vocab_size += 19 + 21 + 28  # korean jamo
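        # A precomposed Hangul syllable combines one of 19 leading consonants (choseong),
        # one of 21 vowels (jungseong) and one of 28 trailing consonants (jongseong,
        # including "no tail"), hence the 68 extra jamo slots on top of the 256 byte values.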

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A sequence has the following format:

        - single sequence: ``X </s>``
        - pair of sequences: ``A </s> B </s>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

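    # A precomposed Hangul syllable U+AC00..U+D7A3 is decomposed via its syllable index
    # s = code_point - 0xAC00 = (lead * 21 + vowel) * 28 + tail, and each part is emitted
    # as a pseudo-character: chr(256 + lead), chr(256 + 19 + vowel), chr(256 + 19 + 21 + tail).
    # For example '한' (U+D55C): s = 10588 -> lead 18 (ㅎ), vowel 0 (ㅏ), tail 4 (ㄴ),
    # i.e. the tokens chr(274), chr(275), chr(300). Anything else falls back to raw UTF-8 bytes.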
    def _convert_char_to_tokens_Korean(self, c):
        o = ord(c)
        if 44032 <= o <= 55203:  # 44032: 가, 55203: 힣
            o -= 44032
            return [chr(256 + (o // 588)), chr(256 + 19 + ((o % 588) // 28)), chr(256 + 19 + 21 + (o % 28))]
        return [chr(i) for i in c.encode("utf-8")]

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        if text in self.all_special_tokens:
            return [text]
        # return [self.special_tokens_encoder[text]]
        # tokens = [chr(i) for i in text.encode("utf-8")]
        # return tokens
        return sum([self._convert_char_to_tokens_Korean(c) for c in text], [])

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        # else:
        #     token_id = token + self._num_special_tokens
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

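    # Decoding reverses the scheme above: any (lead, vowel, tail) run of jamo
    # pseudo-characters is first recombined into a precomposed syllable via
    # 0xAC00 + (lead * 21 + vowel) * 28 + tail; special tokens are emitted literally
    # and everything else is treated as raw UTF-8 bytes.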
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        ids = [ord(t[0]) for t in tokens]
        for i in range(len(ids) - 2):
            if (
                256 <= ids[i] < 256 + 19
                and 256 + 19 <= ids[i + 1] < 256 + 19 + 21
                and 256 + 19 + 21 <= ids[i + 2] < 256 + 19 + 21 + 28
            ):
                tokens[i] = chr(44032 + (ids[i] - 256) * 21 * 28 + (ids[i + 1] - 256 - 19) * 28 + (ids[i + 2] - 256 - 19 - 21))
                tokens[i + 1] = None
                tokens[i + 2] = None
        for token in tokens:
            if token is None:
                continue
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                if isinstance(token, str) and ord(token) >= 256:
                    tok_string = token.encode("utf-8")
                else:
                    tok_string = bytes([ord(token) if isinstance(token, str) else min(255, token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5KoreanTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()


if __name__ == "__main__":
    tokenizer = ByT5KoreanTokenizer()
    text = "This is a test <extra_id_0> of the 가나힣 안녕하세요 <extra_id_1>."
    tokenized_text = tokenizer.tokenize(text)
    print(tokenized_text)
    print(tokenizer(text))
    print(tokenizer.convert_tokens_to_ids(tokenized_text))
    print(tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(tokenized_text)))
    print(tokenizer.convert_tokens_to_string(tokenized_text))
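    # The final print should round-trip back to `text`, with each Korean syllable
    # reassembled from its three jamo tokens.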