Ashrafb and alirezamsh committed on
Commit
4dcb861
0 Parent(s):

Duplicate from alirezamsh/small100


Co-authored-by: Alireza Mohammadshahi <alirezamsh@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +37 -0
  4. tokenization_small100.py +364 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Small100
+ emoji: 📉
+ colorFrom: blue
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.16.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: alirezamsh/small100
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,37 @@
+ import gradio as gr
+ import os
+
+ os.system("pip install transformers sentencepiece torch")
+
+ from transformers import M2M100ForConditionalGeneration
+ from tokenization_small100 import SMALL100Tokenizer
+
+ langs = """Afrikaans (af), Amharic (am), Arabic (ar), Asturian (ast), Azerbaijani (az), Bashkir (ba), Belarusian (be), Bulgarian (bg), Bengali (bn), Breton (br), Bosnian (bs), Catalan; Valencian (ca), Cebuano (ceb), Czech (cs), Welsh (cy), Danish (da), German (de), Greek (el), English (en), Spanish (es), Estonian (et), Persian (fa), Fulah (ff), Finnish (fi), French (fr), Western Frisian (fy), Irish (ga), Gaelic; Scottish Gaelic (gd), Galician (gl), Gujarati (gu), Hausa (ha), Hebrew (he), Hindi (hi), Croatian (hr), Haitian; Haitian Creole (ht), Hungarian (hu), Armenian (hy), Indonesian (id), Igbo (ig), Iloko (ilo), Icelandic (is), Italian (it), Japanese (ja), Javanese (jv), Georgian (ka), Kazakh (kk), Central Khmer (km), Kannada (kn),
+ Korean (ko), Luxembourgish; Letzeburgesch (lb), Ganda (lg), Lingala (ln), Lao (lo), Lithuanian (lt), Latvian (lv), Malagasy (mg), Macedonian (mk), Malayalam (ml), Mongolian (mn), Marathi (mr), Malay (ms), Burmese (my), Nepali (ne), Dutch; Flemish (nl), Norwegian (no), Northern Sotho (ns), Occitan (post 1500) (oc), Oriya (or), Panjabi; Punjabi (pa), Polish (pl), Pushto; Pashto (ps), Portuguese (pt), Romanian; Moldavian; Moldovan (ro), Russian (ru), Sindhi (sd), Sinhala; Sinhalese (si), Slovak (sk),
+ Slovenian (sl), Somali (so), Albanian (sq), Serbian (sr), Swati (ss), Sundanese (su), Swedish (sv), Swahili (sw), Tamil (ta), Thai (th), Tagalog (tl), Tswana (tn),
+ Turkish (tr), Ukrainian (uk), Urdu (ur), Uzbek (uz), Vietnamese (vi), Wolof (wo), Xhosa (xh), Yiddish (yi), Yoruba (yo), Chinese (zh), Zulu (zu)"""
+ lang_list = [lang.strip() for lang in langs.split(',')]
+
+ model = M2M100ForConditionalGeneration.from_pretrained("alirezamsh/small100")
+ tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100")
+
+ description = """This is an official demo for the paper [*SMaLL-100: Introducing Shallow Multilingual Machine Translation Model for Low-Resource Languages*](https://arxiv.org/abs/2210.11621) by Alireza Mohammadshahi, Vassilina Nikoulina, Alexandre Berard, Caroline Brun, James Henderson, and Laurent Besacier.
+
+ In this paper, the authors propose a compact and shallow massively multilingual MT model that achieves results competitive with M2M-100 while being substantially smaller and faster. More details are provided [here](https://huggingface.co/alirezamsh/small100). Currently running on 2 vCPU - 16GB RAM."""
+
+ def small100_tr(lang, text):
+
+     lang = lang.split(" ")[-1][1:-1]
+
+     tokenizer.tgt_lang = lang
+     encoded_text = tokenizer(text, return_tensors="pt")
+     generated_tokens = model.generate(**encoded_text)
+     return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+
+ examples = [["French (fr)", "Life is like a box of chocolates."]]
+
+ output_text = gr.outputs.Textbox()
+ gr.Interface(small100_tr, inputs=[gr.inputs.Dropdown(lang_list, label="Target Language"), 'text'], outputs=output_text, title="SMaLL100: Translate much faster between 100 languages",
+              description=description,
+              examples=examples
+ ).launch()
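
For readers who want to reproduce the translation flow without the Gradio UI, here is a minimal sketch reusing exactly the calls made in app.py above (setting `tokenizer.tgt_lang`, then `generate` and `batch_decode`). The target code `"fr"` and the sample sentence are illustrative choices, not part of the original commit; it assumes `tokenization_small100.py` is on the import path and `transformers`, `sentencepiece`, and `torch` are installed.

```python
# Minimal sketch: SMaLL-100 translation outside the Gradio demo,
# mirroring small100_tr() from app.py above.
from transformers import M2M100ForConditionalGeneration
from tokenization_small100 import SMALL100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("alirezamsh/small100")
tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100")

tokenizer.tgt_lang = "fr"  # target language code, as parsed from the dropdown label
encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**encoded)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```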
tokenization_small100.py ADDED
@@ -0,0 +1,364 @@
+ # Copyright (c) 2022 Idiap Research Institute, http://www.idiap.ch/
+ # Written by Alireza Mohammadshahi <alireza.mohammadshahi@idiap.ch>
+ # This is a modified version of https://github.com/huggingface/transformers/blob/main/src/transformers/models/m2m_100/tokenization_m2m_100.py
+ # which is owned by the Fairseq Authors and The HuggingFace Inc. team.
+ #
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for SMALL100."""
+ import json
+ import os
+ from pathlib import Path
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import sentencepiece
+
+ from transformers.tokenization_utils import BatchEncoding, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ SPIECE_UNDERLINE = "▁"
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "vocab.json",
+     "spm_file": "sentencepiece.bpe.model",
+     "tokenizer_config_file": "tokenizer_config.json",
+ }
+
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {
+         "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/vocab.json",
+     },
+     "spm_file": {
+         "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/sentencepiece.bpe.model",
+     },
+     "tokenizer_config_file": {
+         "alirezamsh/small100": "https://huggingface.co/alirezamsh/small100/resolve/main/tokenizer_config.json",
+     },
+ }
+
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+     "alirezamsh/small100": 1024,
+ }
+
+ # fmt: off
+ FAIRSEQ_LANGUAGE_CODES = {
+     "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"]
+ }
+ # fmt: on
+
+
+ class SMALL100Tokenizer(PreTrainedTokenizer):
+     """
+     Construct a SMALL100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+         spm_file (`str`):
+             Path to a [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+             contains the vocabulary.
+         tgt_lang (`str`, *optional*):
+             A string representing the target language.
+         eos_token (`str`, *optional*, defaults to `"</s>"`):
+             The end of sequence token.
+         sep_token (`str`, *optional*, defaults to `"</s>"`):
+             The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+             sequence classification or for a text and a question for question answering. It is also used as the last
+             token of a sequence built with special tokens.
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         pad_token (`str`, *optional*, defaults to `"<pad>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         language_codes (`str`, *optional*):
+             What language codes to use. Should be `"m2m100"`.
+         sp_model_kwargs (`dict`, *optional*):
+             Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+             SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+             to set:
+             - `enable_sampling`: Enable subword regularization.
+             - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+               - `nbest_size = {0,1}`: No sampling is performed.
+               - `nbest_size > 1`: samples from the nbest_size results.
+               - `nbest_size < 0`: assuming that nbest_size is infinite, samples from all hypotheses (lattice)
+                 using the forward-filtering-and-backward-sampling algorithm.
+             - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+               BPE-dropout.
+     Examples:
+     ```python
+     >>> from tokenization_small100 import SMALL100Tokenizer
+     >>> tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="ro")
+     >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
+     >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
+     >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
+     >>> model(**model_inputs)  # should work
+     ```"""
+     vocab_files_names = VOCAB_FILES_NAMES
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ["input_ids", "attention_mask"]
+
+     prefix_tokens: List[int] = []
+     suffix_tokens: List[int] = []
+
+     def __init__(
+         self,
+         vocab_file,
+         spm_file,
+         tgt_lang=None,
+         bos_token="<s>",
+         eos_token="</s>",
+         sep_token="</s>",
+         pad_token="<pad>",
+         unk_token="<unk>",
+         language_codes="m2m100",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         num_madeup_words=8,
+         **kwargs,
+     ) -> None:
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+         self.language_codes = language_codes
+         fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
+         self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
+
+         kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
+         kwargs["additional_special_tokens"] += [
+             self.get_lang_token(lang_code)
+             for lang_code in fairseq_language_code
+             if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
+         ]
+
+         super().__init__(
+             tgt_lang=tgt_lang,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             sep_token=sep_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             language_codes=language_codes,
+             sp_model_kwargs=self.sp_model_kwargs,
+             num_madeup_words=num_madeup_words,
+             **kwargs,
+         )
+
+         self.vocab_file = vocab_file
+         self.encoder = load_json(vocab_file)
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.spm_file = spm_file
+         self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
+
+         self.encoder_size = len(self.encoder)
+
+         self.lang_token_to_id = {
+             self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
+         }
+         self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
+         self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
+
+         self._tgt_lang = tgt_lang if tgt_lang is not None else "en"
+         self.cur_lang_id = self.get_lang_id(self._tgt_lang)
+         self.set_lang_special_tokens(self._tgt_lang)
+
+         self.num_madeup_words = num_madeup_words
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words
+
+     @property
+     def tgt_lang(self) -> str:
+         return self._tgt_lang
+
+     @tgt_lang.setter
+     def tgt_lang(self, new_tgt_lang: str) -> None:
+         self._tgt_lang = new_tgt_lang
+         self.set_lang_special_tokens(self._tgt_lang)
+
+     def _tokenize(self, text: str) -> List[str]:
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         if token in self.lang_token_to_id:
+             return self.lang_token_to_id[token]
+         return self.encoder.get(token, self.encoder[self.unk_token])
+
+     def _convert_id_to_token(self, index: int) -> str:
+         """Converts an index (integer) to a token (str) using the decoder."""
+         if index in self.id_to_lang_token:
+             return self.id_to_lang_token[index]
+         return self.decoder.get(index, self.unk_token)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         """Converts a sequence of tokens (strings for sub-words) into a single string."""
+         return self.sp_model.decode(tokens)
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         prefix_ones = [1] * len(self.prefix_tokens)
+         suffix_ones = [1] * len(self.suffix_tokens)
+         if token_ids_1 is None:
+             return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+         return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+         adding special tokens. A SMALL100 sequence has the following format, where `X` represents the sequence:
+         - `input_ids` (for encoder) `[tgt_lang_code] X [eos]`
+         - `decoder_input_ids`: (for decoder) `X [eos]`
+         BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
+         separator.
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         if token_ids_1 is None:
+             if self.prefix_tokens is None:
+                 return token_ids_0 + self.suffix_tokens
+             else:
+                 return self.prefix_tokens + token_ids_0 + self.suffix_tokens
+         # We don't expect to process pairs, but leave the pair logic for API consistency
+         if self.prefix_tokens is None:
+             return token_ids_0 + token_ids_1 + self.suffix_tokens
+         else:
+             return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
+
+     def get_vocab(self) -> Dict:
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def __getstate__(self) -> Dict:
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d: Dict) -> None:
+         self.__dict__ = d
+
+         # for backward compatibility
+         if not hasattr(self, "sp_model_kwargs"):
+             self.sp_model_kwargs = {}
+
+         self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         save_dir = Path(save_directory)
+         if not save_dir.is_dir():
+             raise OSError(f"{save_directory} should be a directory")
+         vocab_save_path = save_dir / (
+             (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
+         )
+         spm_save_path = save_dir / (
+             (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
+         )
+
+         save_json(self.encoder, vocab_save_path)
+
+         if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
+             copyfile(self.spm_file, spm_save_path)
+         elif not os.path.isfile(self.spm_file):
+             with open(spm_save_path, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (str(vocab_save_path), str(spm_save_path))
+
+     def prepare_seq2seq_batch(
+         self,
+         src_texts: List[str],
+         tgt_texts: Optional[List[str]] = None,
+         tgt_lang: str = "ro",
+         **kwargs,
+     ) -> BatchEncoding:
+         self.tgt_lang = tgt_lang
+         self.set_lang_special_tokens(self.tgt_lang)
+         return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
+
+     def _build_translation_inputs(self, raw_inputs, tgt_lang: Optional[str], **extra_kwargs):
+         """Used by the translation pipeline to prepare inputs for the generate function"""
+         if tgt_lang is None:
+             raise ValueError("Translation requires a `tgt_lang` for this model")
+         self.tgt_lang = tgt_lang
+         inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
+         return inputs
+
+     def _switch_to_input_mode(self):
+         self.set_lang_special_tokens(self.tgt_lang)
+
+     def _switch_to_target_mode(self):
+         self.prefix_tokens = None
+         self.suffix_tokens = [self.eos_token_id]
+
+     def set_lang_special_tokens(self, src_lang: str) -> None:
+         """Reset the special tokens to the tgt lang setting. Prefix=[tgt_lang_code] and suffix=[eos]."""
+         lang_token = self.get_lang_token(src_lang)
+         self.cur_lang_id = self.lang_token_to_id[lang_token]
+         self.prefix_tokens = [self.cur_lang_id]
+         self.suffix_tokens = [self.eos_token_id]
+
+     def get_lang_token(self, lang: str) -> str:
+         return self.lang_code_to_token[lang]
+
+     def get_lang_id(self, lang: str) -> int:
+         lang_token = self.get_lang_token(lang)
+         return self.lang_token_to_id[lang_token]
+
+
+ def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
+     spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
+     spm.Load(str(path))
+     return spm
+
+
+ def load_json(path: str) -> Union[Dict, List]:
+     with open(path, "r") as f:
+         return json.load(f)
+
+
+ def save_json(data, path: str) -> None:
+     with open(path, "w") as f:
+         json.dump(data, f, indent=2)
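
As a sanity check on the special-token layout implemented by `set_lang_special_tokens` and `build_inputs_with_special_tokens` (target-language token as prefix, `</s>` as suffix), the sketch below inspects an encoded sequence. It is a minimal sketch, assuming the tokenizer loads from the `alirezamsh/small100` checkpoint as in the class docstring; the input text and target code `"fr"` are illustrative.

```python
# Sketch: verify the encoder-side layout [__tgt_lang__] X [</s>]
# produced by SMALL100Tokenizer in input mode.
from tokenization_small100 import SMALL100Tokenizer

tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100", tgt_lang="fr")
ids = tokenizer("hello world")["input_ids"]

assert ids[0] == tokenizer.get_lang_id("fr")  # prefix: target-language token
assert ids[-1] == tokenizer.eos_token_id      # suffix: </s>
print(tokenizer.convert_ids_to_tokens(ids))   # e.g. ['__fr__', '▁hello', '▁world', '</s>']
```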