stefan-it committed
Commit 393fe18
1 Parent(s): a537654

model: add initial version incl. PyTorch weights

config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 3072,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 36,
+   "num_heads": 12,
+   "num_layers": 36,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "torch_dtype": "float32",
+   "transformers_version": "4.23.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
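
The config above describes a deep T5 variant: 36 encoder and 36 decoder layers at d_model 768. A minimal sketch for loading the checkpoint added in this commit, assuming a local clone of the repository so that config.json, pytorch_model.bin, spiece.model, and tokenizer_config.json sit in the working directory:

    # Sketch: load this checkpoint from a local clone of the repo.
    from transformers import T5ForConditionalGeneration, T5Tokenizer

    model = T5ForConditionalGeneration.from_pretrained("./")
    tokenizer = T5Tokenizer.from_pretrained("./")

    # Matches config.json above: 36 encoder layers, 36 decoder layers, d_model=768.
    print(model.config.num_layers, model.config.num_decoder_layers, model.config.d_model)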
operative_config.gin ADDED
@@ -0,0 +1,373 @@
+ import mesh_tensorflow.optimize
+ import mesh_tensorflow.transformer.dataset as mesh_tensorflow2
+ import mesh_tensorflow.transformer.learning_rate_schedules as mesh_tensorflow3
+ import mesh_tensorflow.transformer.t2t_vocabulary as mesh_tensorflow4
+ import mesh_tensorflow.transformer.transformer as mesh_tensorflow5
+ import mesh_tensorflow.transformer.transformer_layers as mesh_tensorflow6
+ import mesh_tensorflow.transformer.utils as mesh_tensorflow7
+ import t5.models.mesh_transformer
+
+ # Macros:
+ # ==============================================================================
+ d_ff = 3072
+ d_kv = 64
+ d_model = 768
+ dropout_rate = 0.0
+ inputs_length = 512
+ mean_noise_span_length = 3.0
+ MIXTURE_NAME = 'tr_corpus'
+ noise_density = 0.15
+ num_heads = 12
+ num_layers = 36
+
+ # Parameters for adafactor_decay_rate_pow:
+ # ==============================================================================
+ adafactor_decay_rate_pow.exponent = 0.8
+ adafactor_decay_rate_pow.offset = 0
+
+ # Parameters for AdafactorOptimizer:
+ # ==============================================================================
+ AdafactorOptimizer.beta1 = 0.0
+ AdafactorOptimizer.clipping_threshold = 1.0
+ AdafactorOptimizer.decay_rate = None
+ AdafactorOptimizer.epsilon1 = 1e-30
+ AdafactorOptimizer.epsilon2 = 0.001
+ AdafactorOptimizer.exclude_from_parameter_scale = None
+ AdafactorOptimizer.factored = True
+ AdafactorOptimizer.min_dim_size_to_factor = 128
+ AdafactorOptimizer.multiply_by_parameter_scale = True
+ AdafactorOptimizer.stacked_dim_names = None
+
+ # Parameters for Bitransformer:
+ # ==============================================================================
+ Bitransformer.shared_embedding = True
+
+ # Parameters for denoise:
+ # ==============================================================================
+ denoise.passthrough_feature_keys = None
+
+ # Parameters for decoder/DenseReluDense:
+ # ==============================================================================
+ decoder/DenseReluDense.activation = 'relu'
+ decoder/DenseReluDense.dropout_rate = %dropout_rate
+ decoder/DenseReluDense.hidden_size = %d_ff
+ decoder/DenseReluDense.use_bias = False
+
+ # Parameters for encoder/DenseReluDense:
+ # ==============================================================================
+ encoder/DenseReluDense.activation = 'relu'
+ encoder/DenseReluDense.dropout_rate = %dropout_rate
+ encoder/DenseReluDense.hidden_size = %d_ff
+ encoder/DenseReluDense.use_bias = False
+
+ # Parameters for enc_dec_attention:
+ # ==============================================================================
+ # None.
+
+ # Parameters for enc_dec_attention_bias:
+ # ==============================================================================
+ # None.
+
+ # Parameters for decoder/EncDecAttention:
+ # ==============================================================================
+ decoder/EncDecAttention.relative_attention_type = None
+
+ # Parameters for get_variable_dtype:
+ # ==============================================================================
+ get_variable_dtype.activation_dtype = 'bfloat16'
+
+ # Parameters for get_vocab_embedding_cls:
+ # ==============================================================================
+ # None.
+
+ # Parameters for get_vocabulary:
+ # ==============================================================================
+ get_vocabulary.mixture_or_task_name = %MIXTURE_NAME
+
+ # Parameters for decoder/LayerStack:
+ # ==============================================================================
+ decoder/LayerStack.dropout_rate = None
+ decoder/LayerStack.norm_epsilon = None
+ decoder/LayerStack.recompute_grads = False
+ decoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ decoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for encoder/LayerStack:
+ # ==============================================================================
+ encoder/LayerStack.dropout_rate = None
+ encoder/LayerStack.norm_epsilon = None
+ encoder/LayerStack.recompute_grads = False
+ encoder/LayerStack.sublayers_final = \
+     [@transformer.sublayer_rms_norm, @transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_initial = [@transformer.sublayer_dropout]
+ encoder/LayerStack.sublayers_per_layer = \
+     [@transformer.sublayer_rms_norm,
+      @transformer.sublayer_call_layer,
+      @transformer.sublayer_dropout,
+      @transformer.sublayer_residual]
+
+ # Parameters for learning_rate_schedule_noam:
+ # ==============================================================================
+ learning_rate_schedule_noam.linear_decay_fraction = 0.0
+ learning_rate_schedule_noam.multiplier = 1.0
+ learning_rate_schedule_noam.offset = 0
+ learning_rate_schedule_noam.warmup_steps = 10000
+
+ # Parameters for make_bitransformer:
+ # ==============================================================================
+ make_bitransformer.decoder_name = 'decoder'
+ make_bitransformer.encoder_name = 'encoder'
+
+ # Parameters for decoder/make_layer_stack:
+ # ==============================================================================
+ decoder/make_layer_stack.block_scope = True
+ decoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.EncDecAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ decoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for encoder/make_layer_stack:
+ # ==============================================================================
+ encoder/make_layer_stack.block_scope = True
+ encoder/make_layer_stack.layers = \
+     [@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
+      @mesh_tensorflow.transformer.transformer_layers.DenseReluDense]
+ encoder/make_layer_stack.num_layers = %num_layers
+
+ # Parameters for mesh_train_dataset_fn:
+ # ==============================================================================
+ mesh_train_dataset_fn.mixture_or_task_name = %MIXTURE_NAME
+ mesh_train_dataset_fn.pack = True
+ mesh_train_dataset_fn.seed = None
+ mesh_train_dataset_fn.shuffle = True
+ mesh_train_dataset_fn.use_cached = False
+
+ # Parameters for noise_span_to_unique_sentinel:
+ # ==============================================================================
+ # None.
+
+ # Parameters for nonnoise_span_to_unique_sentinel:
+ # ==============================================================================
+ # None.
+
+ # Parameters for pack_dataset:
+ # ==============================================================================
+ pack_dataset.use_custom_ops = False
+
+ # Parameters for pack_or_pad:
+ # ==============================================================================
+ # None.
+
+ # Parameters for random_spans_helper:
+ # ==============================================================================
+ random_spans_helper.verbose = False
+
+ # Parameters for random_spans_noise_mask:
+ # ==============================================================================
+ # None.
+
+ # Parameters for reduce_concat_tokens:
+ # ==============================================================================
+ # None.
+
+ # Parameters for rewrite_stack_variables:
+ # ==============================================================================
+ rewrite_stack_variables.max_combined_variable_size = 536870912
+
+ # Parameters for run:
+ # ==============================================================================
+ run.autostack = True
+ run.batch_size = ('tokens_per_batch', 65536)
+ run.checkpoint_input_pipeline = False
+ run.dataset_split = 'train'
+ run.ensemble_inputs = None
+ run.eval_checkpoint_step = None
+ run.eval_dataset_fn = None
+ run.eval_dir_suffix = None
+ run.eval_summary_dir = None
+ run.export_checkpoint_step = None
+ run.export_path = ''
+ run.init_checkpoint = None
+ run.iterations_per_loop = 100
+ run.keep_checkpoint_max = None
+ run.layout_rules = \
+     'ensemble:ensemble,batch:batch,d_ff:model,heads:model,vocab:model,experts:batch'
+ run.learning_rate_schedule = @learning_rate_schedules.learning_rate_schedule_noam
+ run.mesh_devices = None
+ run.mesh_shape = @mesh_tensorflow.transformer.utils.tpu_mesh_shape()
+ run.mode = 'train'
+ run.model_type = 'bitransformer'
+ run.optimizer = @optimize.AdafactorOptimizer
+ run.output_eval_examples = True
+ run.perplexity_eval_steps = 100
+ run.predict_fn = None
+ run.save_checkpoints_steps = 50000
+ run.seen_data_init_step = 0
+ run.sequence_length = {'inputs': 512, 'targets': 128}
+ run.skip_seen_data = False
+ run.total_run_steps = None
+ run.train_dataset_fn = @t5.models.mesh_transformer.mesh_train_dataset_fn
+ run.train_steps = 524288
+ run.variable_filter = None
+
+ # Parameters for select_random_chunk:
+ # ==============================================================================
+ select_random_chunk.additional_feature_keys = None
+ select_random_chunk.additional_passthrough_keys = None
+ select_random_chunk.min_length = None
+ select_random_chunk.passthrough_feature_keys = None
+ select_random_chunk.sequence_length = None
+ select_random_chunk.uniform_random_start = False
+
+ # Parameters for decoder/SelfAttention:
+ # ==============================================================================
+ decoder/SelfAttention.attention_func = None
+ decoder/SelfAttention.attention_kwargs = None
+ decoder/SelfAttention.combine_dims = True
+ decoder/SelfAttention.dropout_rate = %dropout_rate
+ decoder/SelfAttention.fold_scaling_into_initializer = True
+ decoder/SelfAttention.hyperprompt_hidden_dim = None
+ decoder/SelfAttention.hyperprompt_length_decoder = None
+ decoder/SelfAttention.hyperprompt_length_encoder = None
+ decoder/SelfAttention.hyperprompt_mtlshare = False
+ decoder/SelfAttention.hyperprompt_task_num = 8
+ decoder/SelfAttention.keep_query_heads_dims = False
+ decoder/SelfAttention.key_value_size = %d_kv
+ decoder/SelfAttention.num_heads = %num_heads
+ decoder/SelfAttention.num_memory_heads = 0
+ decoder/SelfAttention.relative_attention_num_buckets = 32
+ decoder/SelfAttention.relative_attention_type = 'bias_shared'
+ decoder/SelfAttention.shared_kv = False
+ decoder/SelfAttention.use_hyperprompt = False
+ decoder/SelfAttention.z_loss_coeff = None
+
+ # Parameters for encoder/SelfAttention:
+ # ==============================================================================
+ encoder/SelfAttention.attention_func = None
+ encoder/SelfAttention.attention_kwargs = None
+ encoder/SelfAttention.combine_dims = True
+ encoder/SelfAttention.dropout_rate = %dropout_rate
+ encoder/SelfAttention.fold_scaling_into_initializer = True
+ encoder/SelfAttention.hyperprompt_hidden_dim = None
+ encoder/SelfAttention.hyperprompt_length_decoder = None
+ encoder/SelfAttention.hyperprompt_length_encoder = None
+ encoder/SelfAttention.hyperprompt_mtlshare = False
+ encoder/SelfAttention.hyperprompt_task_num = 8
+ encoder/SelfAttention.keep_query_heads_dims = False
+ encoder/SelfAttention.key_value_size = %d_kv
+ encoder/SelfAttention.num_heads = %num_heads
+ encoder/SelfAttention.num_memory_heads = 0
+ encoder/SelfAttention.relative_attention_num_buckets = 32
+ encoder/SelfAttention.relative_attention_type = 'bias_shared'
+ encoder/SelfAttention.shared_kv = False
+ encoder/SelfAttention.use_hyperprompt = False
+ encoder/SelfAttention.z_loss_coeff = None
+
+ # Parameters for sentinel_id:
+ # ==============================================================================
+ sentinel_id.return_value = None
+
+ # Parameters for serialize_num_microbatches:
+ # ==============================================================================
+ serialize_num_microbatches.tokens_per_microbatch_per_replica = 8192
+
+ # Parameters for SimdMeshImpl:
+ # ==============================================================================
+ SimdMeshImpl.allreduce_in_bfloat16_max_group_size = 8
+
+ # Parameters for split_tokens:
+ # ==============================================================================
+ split_tokens.additional_feature_keys = None
+ split_tokens.num_parallel_calls = -1
+ split_tokens.passthrough_feature_keys = None
+
+ # Parameters for sublayer_call_layer:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_dropout:
+ # ==============================================================================
+ sublayer_dropout.dropout_rate = %dropout_rate
+
+ # Parameters for sublayer_mask_padding:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_residual:
+ # ==============================================================================
+ # None.
+
+ # Parameters for sublayer_rms_norm:
+ # ==============================================================================
+ sublayer_rms_norm.epsilon = 1e-06
+ sublayer_rms_norm.name = 'rms_norm'
+
+ # Parameters for tpu_estimator_model_fn:
+ # ==============================================================================
+ tpu_estimator_model_fn.hierarchical_tiling_spec = None
+ tpu_estimator_model_fn.init_variable_filter = ''
+ tpu_estimator_model_fn.model_info_file = ''
+ tpu_estimator_model_fn.outer_batch_size = 1
+ tpu_estimator_model_fn.tpu_summaries = False
+ tpu_estimator_model_fn.weight_decay_checkpoint = None
+
+ # Parameters for tpu_mesh_shape:
+ # ==============================================================================
+ tpu_mesh_shape.ensemble_parallelism = None
+ tpu_mesh_shape.model_parallelism = 4
+ tpu_mesh_shape.tpu_topology = 'v3-32'
+
+ # Parameters for unit_scaling_convention:
+ # ==============================================================================
+ unit_scaling_convention.value = False
+
+ # Parameters for decoder/Unitransformer:
+ # ==============================================================================
+ decoder/Unitransformer.d_model = %d_model
+ decoder/Unitransformer.ensemble = None
+ decoder/Unitransformer.input_full_attention = False
+ decoder/Unitransformer.label_smoothing = 0.0
+ decoder/Unitransformer.loss_denominator = None
+ decoder/Unitransformer.loss_fn = None
+ decoder/Unitransformer.loss_on_targets_only = False
+ decoder/Unitransformer.max_length = 512
+ decoder/Unitransformer.positional_embedding = False
+ decoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ decoder/Unitransformer.sinusoid_positional_embedding = False
+ decoder/Unitransformer.token_dropout_rate = 0.0
+ decoder/Unitransformer.vocab_divisor = 128
+ decoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for encoder/Unitransformer:
+ # ==============================================================================
+ encoder/Unitransformer.d_model = %d_model
+ encoder/Unitransformer.ensemble = None
+ encoder/Unitransformer.input_full_attention = False
+ encoder/Unitransformer.label_smoothing = 0.0
+ encoder/Unitransformer.loss_denominator = None
+ encoder/Unitransformer.loss_fn = None
+ encoder/Unitransformer.loss_on_targets_only = False
+ encoder/Unitransformer.max_length = 512
+ encoder/Unitransformer.positional_embedding = False
+ encoder/Unitransformer.shared_embedding_and_softmax_weights = True
+ encoder/Unitransformer.sinusoid_positional_embedding = False
+ encoder/Unitransformer.token_dropout_rate = 0.0
+ encoder/Unitransformer.vocab_divisor = 128
+ encoder/Unitransformer.z_loss = 0.0001
+
+ # Parameters for VarianceScalingInitializer:
+ # ==============================================================================
+ VarianceScalingInitializer.distribution = 'normal'
+ VarianceScalingInitializer.mode = 'fan_in'
+ VarianceScalingInitializer.scale = 1.0
+
+ # Parameters for VocabEmbedding:
+ # ==============================================================================
+ VocabEmbedding.scale_variable_like_classifier_weights = False
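
The gin file records the pre-training recipe: Adafactor with the learning_rate_schedule_noam settings above (warmup_steps 10000, multiplier 1.0, linear decay disabled) for 524288 steps at 65536 tokens per batch on a v3-32 TPU. A sketch of that schedule in Python, assuming the T5-style inverse-square-root form rsqrt(max(step, warmup_steps)) used by mesh_tensorflow's learning_rate_schedules (worth checking against the library source):

    import math

    def noam_lr(step: int, warmup_steps: int = 10000, multiplier: float = 1.0) -> float:
        # Flat at 1/sqrt(warmup_steps) through warmup, then decays as 1/sqrt(step);
        # linear_decay_fraction = 0.0 above, so no final linear decay is applied.
        return multiplier / math.sqrt(max(step, warmup_steps))

    print(noam_lr(1))        # 0.01 (constant through warmup)
    print(noam_lr(100000))   # ~0.00316
    print(noam_lr(524288))   # ~0.00138 at the final pre-training step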
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4351be219b3e159e546310ba6481a1ac26b3b6bed18e056528a78498628b206c
+ size 2477677391
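
The weights are stored as a Git LFS pointer: per the LFS spec, oid is the SHA-256 of the actual object and size its byte length. A small sketch for checking a downloaded copy against this pointer (the file name and chunk size are illustrative):

    import hashlib

    def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
        # Stream the file in 1 MiB chunks so the ~2.4 GB checkpoint
        # never has to fit in memory.
        digest = hashlib.sha256()
        total = 0
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
                total += len(chunk)
        return total == expected_size and digest.hexdigest() == expected_oid

    assert verify_lfs_object(
        "pytorch_model.bin",
        "4351be219b3e159e546310ba6481a1ac26b3b6bed18e056528a78498628b206c",
        2477677391,
    )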
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ca0ade8cf006bd16c9d7dafebce0f7fad8ba4018f3e70947e972cf10a2b3b03
+ size 839200
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "special_tokens_map_file": null, "name_or_path": "./", "sp_model_kwargs": {}, "tokenizer_class": "T5Tokenizer"}