LoneStriker committed
Commit fd3991c
1 parent: f14a75d

Delete tokenization_yi.py

Files changed (1)
  1. tokenization_yi.py +0 -255
tokenization_yi.py DELETED
@@ -1,255 +0,0 @@
-import os
-from shutil import copyfile
-from typing import Any, Dict, List, Optional, Tuple
-
-import sentencepiece as spm
-from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
-from transformers.utils import logging
-
-logger = logging.get_logger(__name__)
-
-VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
-
-PRETRAINED_VOCAB_FILES_MAP = {
-    "vocab_file": {},
-    "tokenizer_file": {},
-}
-PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
-
-
-class YiTokenizer(PreTrainedTokenizer):
-    """
-    Construct a Yi tokenizer. Based on byte-level Byte-Pair-Encoding.
-
-    Args:
-        vocab_file (`str`):
-            Path to the vocabulary file.
-    """
-
-    vocab_files_names = VOCAB_FILES_NAMES
-    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
-    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
-    model_input_names = ["input_ids", "attention_mask"]
-
-    def __init__(
-        self,
-        vocab_file,
-        unk_token="<unk>",
-        bos_token="<|startoftext|>",
-        eos_token="<|im_end|>",
-        pad_token="<unk>",
-        sp_model_kwargs: Optional[Dict[str, Any]] = None,
-        add_bos_token=True,
-        add_eos_token=False,
-        clean_up_tokenization_spaces=False,
-        **kwargs,
-    ):
-        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        bos_token = (
-            AddedToken(bos_token, lstrip=False, rstrip=False)
-            if isinstance(bos_token, str)
-            else bos_token
-        )
-        eos_token = (
-            AddedToken(eos_token, lstrip=False, rstrip=False)
-            if isinstance(eos_token, str)
-            else eos_token
-        )
-        unk_token = (
-            AddedToken(unk_token, lstrip=False, rstrip=False)
-            if isinstance(unk_token, str)
-            else unk_token
-        )
-        pad_token = (
-            AddedToken(pad_token, lstrip=False, rstrip=False)
-            if isinstance(pad_token, str)
-            else pad_token
-        )
-        self.vocab_file = vocab_file
-        self.add_bos_token = add_bos_token
-        self.add_eos_token = add_eos_token
-        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-        self.sp_model.Load(vocab_file)
-        super().__init__(
-            bos_token=bos_token,
-            eos_token=eos_token,
-            unk_token=unk_token,
-            pad_token=pad_token,
-            add_bos_token=add_bos_token,
-            add_eos_token=add_eos_token,
-            sp_model_kwargs=self.sp_model_kwargs,
-            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
-            **kwargs,
-        )
-
-    def __getstate__(self):
-        state = self.__dict__.copy()
-        state["sp_model"] = None
-        return state
-
-    def __setstate__(self, d):
-        self.__dict__ = d
-        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
-        self.sp_model.Load(self.vocab_file)
-
-    @property
-    def vocab_size(self):
-        """Returns vocab size"""
-        return self.sp_model.get_piece_size()
-
-    def get_vocab(self):
-        """Returns vocab as a dict"""
-        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
-        vocab.update(self.added_tokens_encoder)
-        return vocab
-
-    def _tokenize(self, text):
-        """Returns a tokenized string."""
-        return self.sp_model.encode(text, out_type=str)
-
-    def _convert_token_to_id(self, token):
-        """Converts a token (str) in an id using the vocab."""
-        return self.sp_model.piece_to_id(token)
-
-    def _convert_id_to_token(self, index):
-        """Converts an index (integer) in a token (str) using the vocab."""
-        token = self.sp_model.IdToPiece(index)
-        return token
-
-    def convert_tokens_to_string(self, tokens):
-        """Converts a sequence of tokens (string) in a single string."""
-        current_sub_tokens = []
-        out_string = ""
-        prev_is_special = False
-        for i, token in enumerate(tokens):
-            # make sure that special tokens are not decoded using sentencepiece model
-            if token in self.all_special_tokens:
-                if not prev_is_special and i != 0:
-                    out_string += " "
-                out_string += self.sp_model.decode(current_sub_tokens) + token
-                prev_is_special = True
-                current_sub_tokens = []
-            else:
-                current_sub_tokens.append(token)
-                prev_is_special = False
-        out_string += self.sp_model.decode(current_sub_tokens)
-        return out_string
-
-    def save_vocabulary(
-        self, save_directory, filename_prefix: Optional[str] = None
-    ) -> Tuple[str]:
-        """
-        Save the vocabulary and special tokens file to a directory.
-
-        Args:
-            save_directory (`str`):
-                The directory in which to save the vocabulary.
-
-        Returns:
-            `Tuple(str)`: Paths to the files saved.
-        """
-        if not os.path.isdir(save_directory):
-            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
-            return
-        out_vocab_file = os.path.join(
-            save_directory,
-            (filename_prefix + "-" if filename_prefix else "")
-            + VOCAB_FILES_NAMES["vocab_file"],
-        )
-
-        if os.path.abspath(self.vocab_file) != os.path.abspath(
-            out_vocab_file
-        ) and os.path.isfile(self.vocab_file):
-            copyfile(self.vocab_file, out_vocab_file)
-        elif not os.path.isfile(self.vocab_file):
-            with open(out_vocab_file, "wb") as fi:
-                content_spiece_model = self.sp_model.serialized_model_proto()
-                fi.write(content_spiece_model)
-
-        return (out_vocab_file,)
-
-    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-        output = bos_token_id + token_ids_0 + eos_token_id
-
-        if token_ids_1 is not None:
-            output = output + bos_token_id + token_ids_1 + eos_token_id
-
-        return output
-
-    def get_special_tokens_mask(
-        self,
-        token_ids_0: List[int],
-        token_ids_1: Optional[List[int]] = None,
-        already_has_special_tokens: bool = False,
-    ) -> List[int]:
-        """
-        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
-        special tokens using the tokenizer `prepare_for_model` method.
-
-        Args:
-            token_ids_0 (`List[int]`):
-                List of IDs.
-            token_ids_1 (`List[int]`, *optional*):
-                Optional second list of IDs for sequence pairs.
-            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
-                Whether or not the token list is already formatted with special tokens for the model.
-
-        Returns:
-            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
-        """
-        if already_has_special_tokens:
-            return super().get_special_tokens_mask(
-                token_ids_0=token_ids_0,
-                token_ids_1=token_ids_1,
-                already_has_special_tokens=True,
-            )
-
-        bos_token_id = [1] if self.add_bos_token else []
-        eos_token_id = [1] if self.add_eos_token else []
-
-        if token_ids_1 is None:
-            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
-        return (
-            bos_token_id
-            + ([0] * len(token_ids_0))
-            + eos_token_id
-            + bos_token_id
-            + ([0] * len(token_ids_1))
-            + eos_token_id
-        )
-
-    def create_token_type_ids_from_sequences(
-        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
-    ) -> List[int]:
-        """
-        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
-        sequence pair mask has the following format:
-
-        ```
-        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
-        | first sequence    | second sequence |
-        ```
-
-        if token_ids_1 is None, only returns the first portion of the mask (0s).
-
-        Args:
-            token_ids_0 (`List[int]`):
-                List of ids.
-            token_ids_1 (`List[int]`, *optional*):
-                Optional second list of IDs for sequence pairs.
-
-        Returns:
-            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
-        """
-        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
-        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
-
-        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
-
-        if token_ids_1 is not None:
-            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
-
-        return output
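For reference, a remote-code tokenizer like the `YiTokenizer` removed here is normally exercised through `AutoTokenizer` with `trust_remote_code=True`. A minimal sketch, assuming a repo revision that still ships `tokenization_yi.py` alongside its `tokenizer.model` (the `01-ai/Yi-34B` id is used only as an example):

```python
# Illustrative sketch only: loads whichever tokenizer implementation the repo
# ships (the custom YiTokenizer above, if the chosen revision still includes it).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("01-ai/Yi-34B", trust_remote_code=True)

ids = tok("Hello, Yi!").input_ids                 # SentencePiece pieces -> ids; BOS prepended when add_bos_token=True
text = tok.decode(ids, skip_special_tokens=True)  # ids -> string, dropping <|startoftext|> and other special tokens
print(ids, text)
```

Decoding passes through `convert_tokens_to_string` above, which stitches SentencePiece pieces back into text while leaving special tokens untouched.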