pere committed on
Commit 6e16ce1 · 1 Parent(s): e13e012
run_streaming_fix.sh ADDED
@@ -0,0 +1,26 @@
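+ # Note: with --adafactor set, the training script below builds optax.adafactor,
+ # so the --adam_beta1/--adam_beta2 and --weight_decay flags are ignored; drop
+ # --adafactor to train with AdamW instead (see the optimizer setup in the script).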
+ ./run_t5_mlm_flax_streaming_fix.py \
+ --output_dir="./" \
+ --model_type="t5" \
+ --config_name="./" \
+ --tokenizer_name="./" \
+ --dataset_name="pere/norwegian_colossal_corpus_v2_short100k" \
+ --max_seq_length="512" \
+ --weight_decay="0.01" \
+ --per_device_train_batch_size="32" \
+ --per_device_eval_batch_size="32" \
+ --learning_rate="8e-3" \
+ --warmup_steps="5000" \
+ --overwrite_output_dir \
+ --cache_dir /mnt/disks/flaxdisk/cache/ \
+ --num_train_epochs="5" \
+ --adam_beta1="0.9" \
+ --adam_beta2="0.98" \
+ --logging_steps="500" \
+ --num_train_steps="1000000" \
+ --num_eval_samples="5000" \
+ --save_steps="5000" \
+ --eval_steps="5000" \
+ --preprocessing_num_workers 96 \
+ --adafactor \
+ --push_to_hub
+
run_t5_mlm_flax_streaming_fix.py ADDED
@@ -0,0 +1,763 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Team All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Pretraining with T5-like span-masked language modeling on a streaming dataset.
+ Here is the full list of checkpoints on the hub that can be pretrained by this script:
+ https://huggingface.co/models?filter=t5
+ """
+ import logging
+ import os
+ import sys
+ import time
+ from collections import defaultdict
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Dict, Optional
+
+ import datasets
+ import numpy as np
+ from datasets import load_dataset
+ from tqdm import tqdm
+
+ import flax
+ import jax
+ import jax.numpy as jnp
+ import optax
+ from flax import jax_utils, traverse_util
+ from flax.training import train_state
+ from flax.training.common_utils import get_metrics, onehot, shard
+ from transformers import (
+     CONFIG_MAPPING,
+     FLAX_MODEL_FOR_MASKED_LM_MAPPING,
+     BatchEncoding,
+     FlaxT5ForConditionalGeneration,
+     HfArgumentParser,
+     PreTrainedTokenizerBase,
+     T5Config,
+     T5TokenizerFast,
+     TrainingArguments,
+     is_tensorboard_available,
+     set_seed,
+ )
+ from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
+
+ # if datasets.__version__ <= "1.8.0":
+ #     raise ValueError("Make sure to upgrade `datasets` to a version >= 1.9.0 to use dataset streaming")
+
+ MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+     """
+
+     model_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The model checkpoint for weights initialization. "
+             "Don't set if you want to train a model from scratch."
+         },
+     )
+     model_type: Optional[str] = field(
+         default=None,
+         metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
+     )
+     dtype: Optional[str] = field(
+         default="float32",
+         metadata={
+             "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
+         },
+     )
+     auth_token: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "Auth token for private repositories on the Huggingface Hub"
+         }
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: Optional[str] = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     max_seq_length: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "The maximum total input sequence length after tokenization and masking. Sequences longer than this will be truncated. Defaults to the max input length of the model."
+         },
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     mlm_probability: float = field(
+         default=0.15, metadata={"help": "Ratio of tokens to mask for span masked language modeling loss"}
+     )
+     mean_noise_span_length: float = field(
+         default=3.0,
+         metadata={"help": "Mean span length of masked tokens"},
+     )
+     text_column_name: str = field(
+         default="text", metadata={"help": "The name of the column to retrieve the training text."}
+     )
+     shuffle_buffer_size: int = field(
+         default=10000, metadata={"help": "The number of examples to pre-load for shuffling."}
+     )
+     num_train_steps: int = field(default=50000, metadata={"help": "The number of training steps."})
+     num_eval_samples: int = field(default=50000, metadata={"help": "The number of samples to be used for evaluation"})
+
+     def __post_init__(self):
+         if self.dataset_name is None:
+             raise ValueError("Need a dataset name for streaming.")
+
+
+ def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
+     """This function is a copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ .
+     Training parameters to avoid padding with random_spans_noise_mask.
+     When training a model with random_spans_noise_mask, we would like to set the other
+     training hyperparameters in a way that avoids padding.
+     This function helps us compute these hyperparameters.
+     We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens,
+     and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens.
+     This function tells us the required number of tokens in the raw example (for split_tokens())
+     as well as the length of the encoded targets. Note that this function assumes
+     the inputs and targets will have EOS appended and includes that in the reported length.
+     Args:
+         inputs_length: an integer - desired length of the tokenized inputs sequence
+         noise_density: a float
+         mean_noise_span_length: a float
+     Returns:
+         tokens_length: length of original text in tokens
+         targets_length: an integer - length in tokens of encoded targets sequence
+     """
+
+     def _tokens_length_to_inputs_length_targets_length(tokens_length):
+         num_noise_tokens = int(round(tokens_length * noise_density))
+         num_nonnoise_tokens = tokens_length - num_noise_tokens
+         num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
+         # inputs contain all nonnoise tokens, sentinels for all noise spans
+         # and one EOS token.
+         _input_length = num_nonnoise_tokens + num_noise_spans + 1
+         _output_length = num_noise_tokens + num_noise_spans + 1
+         return _input_length, _output_length
+
+     tokens_length = inputs_length
+
+     while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length:
+         tokens_length += 1
+
+     inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(tokens_length)
+
+     # minor hack to get the targets length to be equal to inputs length
+     # which is more likely to have been set to a nice round number.
+     if noise_density == 0.5 and targets_length > inputs_length:
+         tokens_length -= 1
+         targets_length -= 1
+     return tokens_length, targets_length
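+
+ # Example: with the flags used in run_streaming_fix.sh (inputs_length=512,
+ # noise_density=0.15, mean_noise_span_length=3.0) this returns (568, 114):
+ # 568 raw tokens contain 85 noise tokens in 28 spans, so the masked input is
+ # 483 non-noise tokens + 28 sentinels + 1 EOS = 512 tokens, and the target is
+ # 85 + 28 + 1 = 114 tokens.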
+
+
+ @flax.struct.dataclass
+ class FlaxDataCollatorForT5MLM:
+     """
+     Data collator used for T5 span-masked language modeling.
+     It is made sure that after masking the inputs are of length `data_args.max_seq_length` and targets are also of fixed length.
+     For more information on how T5 span-masked language modeling works, one can take a look
+     at the `official paper <https://arxiv.org/pdf/1910.10683.pdf>`__
+     or the `official code for preprocessing <https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/data/preprocessors.py>`__ .
+     Args:
+         tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
+             The tokenizer used for encoding the data.
+         noise_density (:obj:`float`):
+             The probability with which to (randomly) mask tokens in the input.
+         mean_noise_span_length (:obj:`float`):
+             The average span length of the masked tokens.
+         input_length (:obj:`int`):
+             The expected input length after masking.
+         target_length (:obj:`int`):
+             The expected target length after masking.
+         pad_token_id (:obj:`int`):
+             The pad token id of the model.
+         decoder_start_token_id (:obj:`int`):
+             The decoder start token id of the model.
+     """
+
+     tokenizer: PreTrainedTokenizerBase
+     noise_density: float
+     mean_noise_span_length: float
+     input_length: int
+     target_length: int
+     pad_token_id: int
+     decoder_start_token_id: int
+
+     def __call__(self, examples: Dict[str, np.ndarray]) -> BatchEncoding:
+
+         batch = BatchEncoding(
+             {k: np.array(examples[k]) for k in examples.keys()}
+         )
+         input_ids = batch['input_ids']
+         batch_size, expanded_input_length = input_ids.shape
+
+         mask_indices = np.asarray([self.random_spans_noise_mask(expanded_input_length) for i in range(batch_size)])
+         labels_mask = ~mask_indices
+
+         input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8))
+         labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8))
+
+         batch["input_ids"] = self.filter_input_ids(input_ids, input_ids_sentinel)
+         batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel)
+
+         if batch["input_ids"].shape[-1] != self.input_length:
+             raise ValueError(
+                 f"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but should be {self.input_length}."
+             )
+
+         if batch["labels"].shape[-1] != self.target_length:
+             raise ValueError(
+                 f"`labels` are incorrectly preprocessed. `labels` length is {batch['labels'].shape[-1]}, but should be {self.target_length}."
+             )
+
+         # to check that tokens are correctly preprocessed, one can run `self.tokenizer.batch_decode(input_ids)` and `self.tokenizer.batch_decode(labels)` here...
+         batch["decoder_input_ids"] = shift_tokens_right(
+             batch["labels"], self.pad_token_id, self.decoder_start_token_id
+         )
+
+         return batch
+
+     def create_sentinel_ids(self, mask_indices):
+         """
+         Sentinel ids creation given the indices that should be masked.
+         The start indices of each mask are replaced by the sentinel ids in increasing
+         order. Consecutive mask indices to be deleted are replaced with `-1`.
+         """
+         start_indices = mask_indices - np.roll(mask_indices, 1, axis=-1) * mask_indices
+         start_indices[:, 0] = mask_indices[:, 0]
+
+         sentinel_ids = np.where(start_indices != 0, np.cumsum(start_indices, axis=-1), start_indices)
+         sentinel_ids = np.where(sentinel_ids != 0, (sentinel_ids + self.tokenizer.vocab_size - 1), 0)
+         sentinel_ids -= mask_indices - start_indices
+
+         return sentinel_ids
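+
+     # Example: for a row with mask_indices [0, 1, 1, 0, 1] (positions 1-2 and 4
+     # masked), start_indices is [0, 1, 0, 0, 1], so the row becomes
+     # [0, vocab_size, -1, 0, vocab_size + 1]: each span start gets the next
+     # sentinel id and span continuations are marked -1, to be dropped by
+     # filter_input_ids below.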
+
+     def filter_input_ids(self, input_ids, sentinel_ids):
+         """
+         Puts the sentinel mask on `input_ids` and fuses consecutive mask tokens into a single mask token by deleting.
+         This will reduce the sequence length from `expanded_inputs_length` to `input_length`.
+         """
+         batch_size = input_ids.shape[0]
+
+         input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids)
+         input_ids = input_ids_full[input_ids_full > 0].reshape((batch_size, -1))
+         input_ids = np.concatenate(
+             [input_ids, np.full((batch_size, 1), self.tokenizer.eos_token_id, dtype=np.int32)], axis=-1
+         )
+         return input_ids
+
+     def random_spans_noise_mask(self, length):
+
+         """This function is a copy of `random_spans_noise_mask <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ .
+         Noise mask consisting of random spans of noise tokens.
+         The number of noise tokens and the number of noise spans and non-noise spans
+         are determined deterministically as follows:
+             num_noise_tokens = round(length * noise_density)
+             num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length)
+         Spans alternate between non-noise and noise, beginning with non-noise.
+         Subject to the above restrictions, all masks are equally likely.
+         Args:
+             length: an int32 scalar (length of the incoming token sequence)
+             noise_density: a float - approximate density of output mask
+             mean_noise_span_length: a number
+         Returns:
+             a boolean tensor with shape [length]
+         """
+
+         orig_length = length
+
+         num_noise_tokens = int(np.round(length * self.noise_density))
+         # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
+         num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
+         num_noise_spans = int(np.round(num_noise_tokens / self.mean_noise_span_length))
+
+         # avoid degeneracy by ensuring positive number of noise spans
+         num_noise_spans = max(num_noise_spans, 1)
+         num_nonnoise_tokens = length - num_noise_tokens
+
+         # pick the lengths of the noise spans and the non-noise spans
+         def _random_segmentation(num_items, num_segments):
+             """Partition a sequence of items randomly into non-empty segments.
+             Args:
+                 num_items: an integer scalar > 0
+                 num_segments: an integer scalar in [1, num_items]
+             Returns:
+                 a Tensor with shape [num_segments] containing positive integers that add
+                 up to num_items
+             """
+             mask_indices = np.arange(num_items - 1) < (num_segments - 1)
+             np.random.shuffle(mask_indices)
+             first_in_segment = np.pad(mask_indices, [[1, 0]])
+             segment_id = np.cumsum(first_in_segment)
+             # count length of sub segments assuming that list is sorted
+             _, segment_length = np.unique(segment_id, return_counts=True)
+             return segment_length
+
+         noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans)
+         nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens, num_noise_spans)
+
+         interleaved_span_lengths = np.reshape(
+             np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2]
+         )
+         span_starts = np.cumsum(interleaved_span_lengths)[:-1]
+         span_start_indicator = np.zeros((length,), dtype=np.int8)
+         span_start_indicator[span_starts] = True
+         span_num = np.cumsum(span_start_indicator)
+         is_noise = np.equal(span_num % 2, 1)
+
+         return is_noise[:orig_length]
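+
+ # Example of the resulting span corruption: for input tokens [A, B, C, D, E, F]
+ # with spans {B, C} and {E} drawn as noise, the collator produces
+ # input_ids [A, <s_0>, D, <s_1>, F, </s>] and labels [<s_0>, B, C, <s_1>, E, </s>],
+ # where <s_i> denotes the i-th sentinel id; decoder_input_ids are the labels
+ # shifted right by shift_tokens_right.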
+
+
+ def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
+     num_samples = len(samples_idx)
+     samples_to_remove = num_samples % batch_size
+
+     if samples_to_remove != 0:
+         samples_idx = samples_idx[:-samples_to_remove]
+     sections_split = num_samples // batch_size
+     batch_idx = np.split(samples_idx, sections_split)
+     return batch_idx
+
+
+ def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length):
+     """
+     The training iterator is advanced so that after grouping the samples,
+     `num_samples` of length `max_seq_length` are returned.
+     """
+     num_total_tokens = max_seq_length * num_samples
+     samples = defaultdict(list)
+
+     i = 0
+     while i < num_total_tokens:
+         tokenized_samples = next(train_iterator)
+         i += len(tokenized_samples["input_ids"])
+
+         # concatenate tokenized samples to list
+         samples = {k: samples[k] + tokenized_samples[k] for k in tokenized_samples.keys()}
+
+     # Concatenated tokens are split into lists of length `max_seq_length`.
+     # Note that the remainder modulo `max_seq_length` is thrown away.
+     def group_texts(examples):
+         result = {
+             k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)]
+             for k, t in examples.items()
+         }
+         return result
+
+     grouped_samples = group_texts(samples)
+     return grouped_samples
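+
+ # Example: per training step this is called with num_samples=train_batch_size and
+ # max_seq_length=expanded_inputs_length (568 tokens for a 512-token model input),
+ # so the stream is advanced until train_batch_size * 568 tokens have accumulated
+ # and the concatenated ids are re-split into train_batch_size chunks of 568 tokens.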
+
+
+ def write_train_metric(summary_writer, train_metrics, train_time, step):
+     summary_writer.scalar("train_time", train_time, step)
+
+     train_metrics = get_metrics(train_metrics)
+     for key, vals in train_metrics.items():
+         tag = f"train_{key}"
+         for i, val in enumerate(vals):
+             summary_writer.scalar(tag, val, step - len(vals) + i + 1)
+
+
+ def write_eval_metric(summary_writer, eval_metrics, step):
+     for metric_name, value in eval_metrics.items():
+         summary_writer.scalar(f"eval_{metric_name}", value, step)
+
+
+ if __name__ == "__main__":
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     if (
+         os.path.exists(training_args.output_dir)
+         and os.listdir(training_args.output_dir)
+         and training_args.do_train
+         and not training_args.overwrite_output_dir
+     ):
+         raise ValueError(
+             f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+             "Use --overwrite_output_dir to overcome."
+         )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         level="INFO",
+         datefmt="[%X]",
+     )
+
+     # Log on each process the small summary:
+     logger = logging.getLogger(__name__)
+     # logger.warning(
+     #     f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+     #     + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     # )
+
+     # Set the verbosity of the Transformers logger to info (on main process only):
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+     # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+     # (the dataset will be downloaded automatically from the datasets Hub).
+     #
+     # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
+     # 'text' is found. You can easily tweak this behavior (see below).
+     if data_args.dataset_name is not None:
+         # Downloading and loading a dataset from the hub.
+         datasets = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             cache_dir=model_args.cache_dir,
+             streaming=True,
+             split="train"
+         )
+
+     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+     # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+     # Load pretrained model and tokenizer
+
+     if model_args.tokenizer_name:
+         tokenizer = T5TokenizerFast.from_pretrained(
+             model_args.tokenizer_name,
+             cache_dir=model_args.cache_dir,
+             use_fast=model_args.use_fast_tokenizer,
+             use_auth_token=model_args.auth_token
+         )
+     elif model_args.model_name_or_path:
+         tokenizer = T5TokenizerFast.from_pretrained(
+             model_args.model_name_or_path,
+             cache_dir=model_args.cache_dir,
+             use_fast=model_args.use_fast_tokenizer,
+             use_auth_token=model_args.auth_token
+         )
+     else:
+         raise ValueError(
+             "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
+             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+         )
+
+     if model_args.config_name:
+         config = T5Config.from_pretrained(
+             model_args.config_name, cache_dir=model_args.cache_dir, vocab_size=len(tokenizer)
+         )
+     elif model_args.model_name_or_path:
+         config = T5Config.from_pretrained(
+             model_args.model_name_or_path, cache_dir=model_args.cache_dir, vocab_size=len(tokenizer)
+         )
+     else:
+         config = CONFIG_MAPPING[model_args.model_type]()
+         logger.warning("You are instantiating a new config instance from scratch.")
+
+     # Preprocessing the datasets.
+     # First we tokenize all the texts.
+
+     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
+
+     # We tokenize every text, then concatenate the tokens before splitting them into smaller parts.
+     # Since we make sure that all sequences are of the same length, no attention_mask is needed.
+     def tokenize_function(examples):
+         return tokenizer(examples[data_args.text_column_name], return_attention_mask=False)
+
+     tokenized_datasets = datasets.map(
+         tokenize_function,
+         batched=True
+     )
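+
+     # With streaming=True, .map() is lazy: tokenize_function runs on the fly as
+     # examples are drawn from the iterator, so no tokenized copy of the corpus is
+     # materialized on disk.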
+
+     # T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
+     # To ensure that the input length is `max_seq_length`, we need to increase the maximum length
+     # according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.
+     expanded_inputs_length, targets_length = compute_input_and_target_lengths(
+         inputs_length=max_seq_length,
+         noise_density=data_args.mlm_probability,
+         mean_noise_span_length=data_args.mean_noise_span_length,
+     )
+
+     shuffle_seed = training_args.seed
+     tokenized_datasets = tokenized_datasets.shuffle(buffer_size=data_args.shuffle_buffer_size, seed=shuffle_seed)
+
+     # Enable tensorboard only on the master node
+     has_tensorboard = is_tensorboard_available()
+     if has_tensorboard and jax.process_index() == 0:
+         try:
+             from flax.metrics.tensorboard import SummaryWriter
+
+             summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
+         except ImportError as ie:
+             has_tensorboard = False
+             logger.warning(
+                 f"Unable to display metrics through TensorBoard because some packages are not installed: {ie}"
+             )
+     else:
+         logger.warning(
+             "Unable to display metrics through TensorBoard because the package is not installed. "
+             "Please run pip install tensorboard to enable."
+         )
+
+     # Initialize our training
+     rng = jax.random.PRNGKey(training_args.seed)
+     dropout_rngs = jax.random.split(rng, jax.local_device_count())
+
+     model = FlaxT5ForConditionalGeneration(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype))
+
+     # if model_args.model_name_or_path:
+     #     model = FlaxT5ForConditionalGeneration.from_pretrained(
+     #         model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+     #     )
+     # else:
+     #     model = FlaxT5ForConditionalGeneration.from_pretrained(
+     #         config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
+     #     )
+
+
+     # Data collator
+     # This one will take care of randomly masking the tokens.
+     data_collator = FlaxDataCollatorForT5MLM(
+         tokenizer=tokenizer,
+         noise_density=data_args.mlm_probability,
+         mean_noise_span_length=data_args.mean_noise_span_length,
+         input_length=max_seq_length,
+         target_length=targets_length,
+         pad_token_id=model.config.pad_token_id,
+         decoder_start_token_id=model.config.decoder_start_token_id,
+     )
+
+     # Store some constants
+     num_epochs = int(training_args.num_train_epochs)
+     train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
+     eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
+
+     num_train_steps = data_args.num_train_steps
+
+     # Create learning rate schedule
+     warmup_fn = optax.linear_schedule(
+         init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
+     )
+     decay_fn = optax.linear_schedule(
+         init_value=training_args.learning_rate,
+         end_value=0,
+         transition_steps=num_train_steps - training_args.warmup_steps,
+     )
+     linear_decay_lr_schedule_fn = optax.join_schedules(
+         schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
+     )
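+
+     # With the flags in run_streaming_fix.sh this schedule rises linearly from 0
+     # to 8e-3 over the first 5000 steps, then decays linearly back to 0 at
+     # step num_train_steps (1000000).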
+
+     # We use Optax's "masking" functionality to not apply weight decay
+     # to bias and LayerNorm scale parameters. decay_mask_fn returns a
+     # mask boolean with the same structure as the parameters.
+     # The mask is True for parameters that should be decayed.
+     def decay_mask_fn(params):
+         flat_params = traverse_util.flatten_dict(params)
+         flat_mask = {
+             path: (path[-1] != "bias" and path[-2:] not in [("layer_norm", "scale"), ("final_layer_norm", "scale")])
+             for path in flat_params
+         }
+         return traverse_util.unflatten_dict(flat_mask)
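+
+     # Example: a path ending in "bias" or in ("layer_norm", "scale") /
+     # ("final_layer_norm", "scale") maps to False (no decay), while e.g. a
+     # (..., "SelfAttention", "q", "kernel") path maps to True (decayed).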
+
+     # Create the optimizer (Adafactor or AdamW)
+     if training_args.adafactor:
+         # We use the default parameters here to initialize adafactor.
+         # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
+         optimizer = optax.adafactor(
+             learning_rate=linear_decay_lr_schedule_fn,
+         )
+     else:
+         optimizer = optax.adamw(
+             learning_rate=linear_decay_lr_schedule_fn,
+             b1=training_args.adam_beta1,
+             b2=training_args.adam_beta2,
+             weight_decay=training_args.weight_decay,
+             mask=decay_mask_fn,
+         )
+
+     # Setup train state
+     state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)
+
+     # Define gradient update step fn
+     def train_step(state, batch, dropout_rng):
+         dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
+
+         def loss_fn(params):
+             labels = batch.pop("labels")
+
+             logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
+
+             # compute loss
+             loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
+
+             return loss
+
+         grad_fn = jax.value_and_grad(loss_fn)
+         loss, grad = grad_fn(state.params)
+         grad = jax.lax.pmean(grad, "batch")
+         new_state = state.apply_gradients(grads=grad)
+
+         metrics = jax.lax.pmean(
+             {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
+         )
+
+         return new_state, metrics, new_dropout_rng
+
+     # Create parallel version of the train step
+     p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
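+
+     # pmap runs train_step on every local device over the leading (device) axis of
+     # the sharded batch; "batch" names the axis reduced by jax.lax.pmean above, and
+     # donate_argnums=(0,) lets XLA reuse the old state's buffers for the new state.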
+
+     # Define eval fn
+     def eval_step(params, batch):
+         labels = batch.pop("labels")
+
+         logits = model(**batch, params=params, train=False)[0]
+
+         # compute loss
+         loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1]))
+
+         # compute accuracy
+         accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels)
+
+         # summarize metrics
+         metrics = {"loss": loss.mean(), "accuracy": accuracy.mean()}
+         metrics = jax.lax.pmean(metrics, axis_name="batch")
+
+         return metrics
+
+     p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))
+
+     # Replicate the train state on each device
+     state = jax_utils.replicate(state)
+
+     train_time = 0
+     train_start = time.time()
+     train_metrics = []
+     eval_metrics = []
+
+     training_iter = iter(tokenized_datasets)
+
+     eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, expanded_inputs_length)
+
+     steps = tqdm(range(num_train_steps), desc="Training...", position=0)
+     for step in range(num_train_steps):
+         # ======================== Training ================================
+         try:
+             samples = advance_iter_and_group_samples(training_iter, train_batch_size, expanded_inputs_length)
+         except StopIteration:
+             # Once the end of the dataset stream is reached, the training iterator
+             # is reinitialized and reshuffled and a new eval dataset is randomly chosen.
+             shuffle_seed += 1
+             tokenized_datasets.set_epoch(shuffle_seed)
+
+             training_iter = iter(tokenized_datasets)
+
+             eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, expanded_inputs_length)
+             samples = advance_iter_and_group_samples(training_iter, train_batch_size, expanded_inputs_length)
+
+         # Model forward
+         model_inputs = data_collator(samples)
+         model_inputs = shard(model_inputs.data)
+         state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
+         train_metrics.append(train_metric)
+
+         if step % training_args.logging_steps == 0 and step > 0:
+             train_metric = jax_utils.unreplicate(train_metric)
+             steps.write(
+                 f"Step... ({step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
+             )
+             train_time += time.time() - train_start
+             if has_tensorboard and jax.process_index() == 0:
+                 write_train_metric(summary_writer, train_metrics, train_time, step)
+             train_metrics = []
+         # ======================== Evaluating ==============================
+         if step % training_args.eval_steps == 0 and step > 0:
+             eval_samples_idx = jnp.arange(data_args.num_eval_samples)
+             eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
+
+             for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=1)):
+                 # process input samples
+                 batch_eval_samples = {k: [v[idx] for idx in batch_idx] for k, v in eval_samples.items()}
+                 model_inputs = data_collator(batch_eval_samples)
+
+                 # Model forward
+                 model_inputs = shard(model_inputs.data)
+                 metrics = p_eval_step(state.params, model_inputs)
+                 eval_metrics.append(metrics)
+
+             # normalize eval metrics
+             eval_metrics = get_metrics(eval_metrics)
+             eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
+
+             # Update progress bar
+             steps.desc = f"Step... ({step + 1}/{num_train_steps} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"
+
+             if has_tensorboard and jax.process_index() == 0:
+                 write_eval_metric(summary_writer, eval_metrics, step)
+             eval_metrics = []
+
+         if step % training_args.save_steps == 0 and step > 0:
+             # save checkpoint after each save_steps and push checkpoint to the hub
+             if jax.process_index() == 0:
+                 params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
+                 model.save_pretrained(
+                     training_args.output_dir,
+                     params=params,
+                     push_to_hub=training_args.push_to_hub,
+                     commit_message=f"Saving weights and logs of step {step+1}",
+                 )
+                 tokenizer.save_pretrained(
+                     training_args.output_dir
+                 )
+
+         # update tqdm bar
+         steps.update(1)
+