KublaiKhan1 committed
Commit 5312231 · verified · 1 Parent(s): 4b59bd9

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -72,3 +72,7 @@ v8smooth/chkpts/checkpoint/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
  v8smooth/chkpts/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
  v8smooth/chkpts/checkpointbest.tmp/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
  v8smooth/chkpts/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
+ v32-smooth/jax-vqvae-vqgan/chkpts/checkpoint/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
+ v32-smooth/jax-vqvae-vqgan/chkpts/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
+ v32-smooth/jax-vqvae-vqgan/chkpts/checkpointbest.tmp/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
+ v32-smooth/jax-vqvae-vqgan/chkpts/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
v32-smooth/jax-vqvae-vqgan/chkpts/checkpoint.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1144908eaa949b6a0f58e2b6632de7f4c46dd246b749aad6f069bd9aaf41a764
+ size 1543746552
v32-smooth/jax-vqvae-vqgan/chkpts/checkpoint/checkpoint.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c447998b3c58130641c8fda7c71069a6be4763b2f6c661be9ea166233813d6a7
+ size 1543746552
v32-smooth/jax-vqvae-vqgan/chkpts/checkpointbest.tmp.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acb709dda710cc72acbf965ccc5603e1771279cb745b7cd9e0c3915919f6be86
+ size 1543746552
v32-smooth/jax-vqvae-vqgan/chkpts/checkpointbest.tmp/checkpointbest.tmp.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19f27343b99ecb964d812c18b1d6ddc29ac9c9225f88c060ea6bf8eb01327488
+ size 1543746552
v32-smooth/train.py ADDED
@@ -0,0 +1,462 @@
+ try:  # For debugging
+     from localutils.debugger import enable_debug
+     enable_debug()
+ except ImportError:
+     pass
+
+ import flax.linen as nn
+ import jax.numpy as jnp
+ from absl import app, flags
+ from functools import partial
+ import numpy as np
+ import tqdm
+ import jax
+ import flax
+ import optax
+ import wandb
+ from ml_collections import config_flags
+ import ml_collections
+ import tensorflow_datasets as tfds
+ import tensorflow as tf
+ tf.config.set_visible_devices([], "GPU")  # Keep TF off the accelerators; JAX owns them.
+ tf.config.set_visible_devices([], "TPU")
+ import matplotlib.pyplot as plt
+ from typing import Any
+ import os
+
+ from utils.wandb import setup_wandb, default_wandb_config
+ from utils.train_state import TrainState, target_update
+ from utils.checkpoint import Checkpoint
+ from utils.pretrained_resnet import get_pretrained_embs, get_pretrained_model
+ from utils.fid import get_fid_network, fid_from_stats
+ from models.vqvae import VQVAE
+ from models.discriminator import Discriminator
+ FLAGS = flags.FLAGS
+ flags.DEFINE_string('dataset_name', 'imagenet256', 'Dataset name.')
+ flags.DEFINE_string('save_dir', "/home/lambda/nfs_share/jax-vqvae-vqgan/chkpts/checkpoint", 'Save dir (if not None, save params).')
+ flags.DEFINE_string('load_dir', "/home/lambda/nfs_share/jax-vqvae-vqgan/chkpts/checkpoint.tmp", 'Load dir (if not None, load params from here).')
+ flags.DEFINE_integer('seed', 0, 'Random seed.')
+ flags.DEFINE_integer('log_interval', 1000, 'Logging interval.')
+ flags.DEFINE_integer('eval_interval', 1000, 'Eval interval.')
+ flags.DEFINE_integer('save_interval', 1000, 'Save interval.')
+ flags.DEFINE_integer('batch_size', 128, 'Total batch size.')
+ flags.DEFINE_integer('max_steps', int(1_000_000), 'Number of training steps.')
+
+ model_config = ml_collections.ConfigDict({
+     # VQVAE
+     'lr': 0.0001,
+     'beta1': 0.0,  # The reference implementation uses 0.5.
+     'beta2': 0.99,  # The reference implementation uses 0.9.
+     'lr_warmup_steps': 2000,
+     'lr_decay_steps': 500_000,  # The reference implementation uses a 'lambdalr' schedule.
+     'filters': 128,
+     'num_res_blocks': 2,
+     'channel_multipliers': (1, 2, 4, 4),  # Appears to match the reference architecture.
+     'embedding_dim': 4,  # For FSQ, a good default is 4.
+     'norm_type': 'GN',
+     'weight_decay': 0.05,  # Possibly should be None.
+     'clip_gradient': 1.0,
+     'l2_loss_weight': 1.0,  # The reference implementation actually uses an L1 loss.
+     'eps_update_rate': 0.9999,
+     # Quantizer
+     'quantizer_type': 'kl',  # One of 'vq', 'fsq', 'kl'.
+     # Quantizer (VQ)
+     'quantizer_loss_ratio': 1,
+     'codebook_size': 1024,
+     'entropy_loss_ratio': 0.1,
+     'entropy_loss_type': 'softmax',
+     'entropy_temperature': 0.01,
+     'commitment_cost': 0.25,
+     # Quantizer (FSQ)
+     'fsq_levels': 5,  # Bins per dimension.
+     # Quantizer (KL)
+     'kl_weight': 0.000001,  # The reference implementation uses 1e-6; 0.001 is the usual default.
+     # GAN
+     'g_adversarial_loss_weight': 0.5,
+     'g_grad_penalty_cost': 10,
+     'perceptual_loss_weight': 0.5,
+     'gan_warmup_steps': 25000,
+     'pl_decay': .01,
+     'pl_weight': 2,
+ })
+
+ wandb_config = default_wandb_config()
+ wandb_config.update({
+     'project': 'vqvae',
+     'name': 'vqvae_{dataset_name}',
+ })
+
+ config_flags.DEFINE_config_dict('wandb', wandb_config, lock_config=False)
+ config_flags.DEFINE_config_dict('model', model_config, lock_config=False)
+
+ ##############################################
+ ## Model Definitions.
+ ##############################################
+
+ @jax.vmap
+ def sigmoid_cross_entropy_with_logits(*, labels: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
+     """Numerically stable sigmoid cross-entropy.
+     https://github.com/google-research/maskgit/blob/main/maskgit/libml/losses.py
+     """
+     zeros = jnp.zeros_like(logits, dtype=logits.dtype)
+     condition = (logits >= zeros)
+     relu_logits = jnp.where(condition, logits, zeros)
+     neg_abs_logits = jnp.where(condition, -logits, logits)
+     return relu_logits - logits * labels + jnp.log1p(jnp.exp(neg_abs_logits))
+
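+ # Note: the expression above is the standard numerically stable form
+ # max(x, 0) - x*z + log(1 + exp(-|x|)) of the sigmoid cross-entropy
+ # -[z log s(x) + (1-z) log(1-s(x))], which avoids overflow in exp().
+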
+ class VQGANModel(flax.struct.PyTreeNode):
+     rng: Any
+     config: dict = flax.struct.field(pytree_node=False)
+     vqvae: TrainState
+     vqvae_eps: TrainState
+     discriminator: TrainState
+
+     # Train G and D.
+     @partial(jax.pmap, axis_name='data', in_axes=(0, 0))
+     def update(self, images, pmap_axis='data'):
+         new_rng, curr_key = jax.random.split(self.rng, 2)
+
+         resnet, resnet_params = get_pretrained_model('resnet50', '/home/lambda/nfs_share/jax-vqvae-vqgan/data/resnet_pretrained.npy')
+
+         # GAN losses are scaled to zero until gan_warmup_steps have elapsed.
+         is_gan_training = 1.0 - (self.vqvae.step < self.config['gan_warmup_steps']).astype(jnp.float32)
+
+         def loss_fn(params_vqvae, params_disc):
+             # Reconstruct image.
+             print("calling self.vqvae")  # Trace-time debug print.
+             reconstructed_images, result_dict = self.vqvae(images, params=params_vqvae, rngs={'noise': curr_key})
+             print("Reconstructed images shape", reconstructed_images.shape)
+             print("Input images shape", images.shape)
+             assert reconstructed_images.shape == images.shape
+
+             # Path length regularization on the decoder (pl_mean is the running mean
+             # tracked on the vqvae TrainState).
+             def path_reg_loss(latents, targets):
+                 pl_noise = jax.random.normal(new_rng, shape=targets.shape) / jnp.sqrt(targets.shape[2] * targets.shape[3])
+                 def grad_sum(latents, pl_noise):
+                     # The decode method isn't directly accessible here, so call the module
+                     # with method="decode" rather than self.vqvae.decode(latents).
+                     # NOTE: it is not fully verified that this construction is correct.
+                     my_sum = self.vqvae(latents, params=params_vqvae, method="decode", rngs={'noise': curr_key}) * pl_noise
+                     print("Decode shape", my_sum.shape)
+                     return jnp.sum(my_sum)
+
+                 decode_grad_fn = jax.grad(grad_sum)
+                 pl_grads = decode_grad_fn(latents, pl_noise)
+                 pl_lengths = jnp.sqrt(jnp.mean(jnp.sum(jnp.square(pl_grads), axis=[2, 3]), axis=1))
+                 pl_mean = self.vqvae.pl_mean + self.config.pl_decay * (jnp.mean(pl_lengths) - self.vqvae.pl_mean)
+                 pl_penalty = jnp.square(pl_lengths - pl_mean)
+                 loss = jnp.mean(pl_penalty)
+                 return loss, pl_mean
+
+             smooth_loss, pl_mean = path_reg_loss(result_dict["latents"], reconstructed_images)
+             print("Smooth loss is", smooth_loss)
+             # GAN loss on VQVAE output.
+             discriminator_fn = lambda x: self.discriminator(x, params=params_disc)
+             real_logit, vjp_fn = jax.vjp(discriminator_fn, images, has_aux=False)
+             gradient = vjp_fn(jnp.ones_like(real_logit))[0]  # Gradient of discriminator output wrt. real images.
+             gradient = gradient.reshape((images.shape[0], -1))
+             gradient = jnp.asarray(gradient, jnp.float32)
+             penalty = jnp.sum(jnp.square(gradient), axis=-1)
+             penalty = jnp.mean(penalty)  # Gradient penalty (on real images) for training D.
+             fake_logit = discriminator_fn(reconstructed_images)
+             d_loss_real = sigmoid_cross_entropy_with_logits(labels=jnp.ones_like(real_logit), logits=real_logit).mean()
+             d_loss_fake = sigmoid_cross_entropy_with_logits(labels=jnp.zeros_like(fake_logit), logits=fake_logit).mean()
+             loss_d = d_loss_real + d_loss_fake + (penalty * self.config['g_grad_penalty_cost'])
+
+             # Non-saturating generator loss: the VAE is rewarded when D labels its outputs real.
+             d_loss_for_vae = sigmoid_cross_entropy_with_logits(labels=jnp.ones_like(fake_logit), logits=fake_logit).mean()
+             d_loss_for_vae = d_loss_for_vae * is_gan_training
+
+             # Perceptual loss from pretrained ResNet pool features.
+             real_pools, _ = get_pretrained_embs(resnet_params, resnet, images=images)
+             fake_pools, _ = get_pretrained_embs(resnet_params, resnet, images=reconstructed_images)
+             perceptual_loss = jnp.mean((real_pools - fake_pools)**2)
+
+             l2_loss = jnp.mean((reconstructed_images - images) ** 2)
+             quantizer_loss = result_dict['quantizer_loss'] if 'quantizer_loss' in result_dict else 0.0
+             if self.config['quantizer_type'] in ('kl', 'kl_two'):
+                 quantizer_loss = quantizer_loss * self.config['kl_weight']
+             loss_vae = (l2_loss * FLAGS.model['l2_loss_weight']) \
+                 + (quantizer_loss * FLAGS.model['quantizer_loss_ratio']) \
+                 + (d_loss_for_vae * FLAGS.model['g_adversarial_loss_weight']) \
+                 + (perceptual_loss * FLAGS.model['perceptual_loss_weight']) \
+                 + smooth_loss
+             codebook_usage = result_dict['usage'] if 'usage' in result_dict else 0.0
+             return (loss_vae, loss_d), {
+                 'loss_vae': loss_vae,
+                 'loss_d': loss_d,
+                 'l2_loss': l2_loss,
+                 'd_loss_for_vae': d_loss_for_vae,
+                 'smooth_loss': smooth_loss,
+                 'perceptual_loss': perceptual_loss,
+                 'quantizer_loss': quantizer_loss,
+                 'codebook_usage': codebook_usage,
+                 'pl_mean': pl_mean,
+             }
+
+         # This is a fancy way to do 'jax.grad' so (loss_vae, params_vqvae) and (loss_d, params_disc) are differentiated.
+         _, grad_fn, info = jax.vjp(loss_fn, self.vqvae.params, self.discriminator.params, has_aux=True)
+         vae_grads, _ = grad_fn((1., 0.))
+         _, d_grads = grad_fn((0., 1.))
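+         # jax.vjp returns a function from output cotangents to input gradients: seeding
+         # with (1., 0.) extracts d(loss_vae)/d(params_vqvae), and (0., 1.) extracts
+         # d(loss_d)/d(params_disc), reusing one linearization for both optimizers.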
+
+         vae_grads = jax.lax.pmean(vae_grads, axis_name=pmap_axis)
+         d_grads = jax.lax.pmean(d_grads, axis_name=pmap_axis)
+         d_grads = jax.tree_map(lambda x: x * is_gan_training, d_grads)  # Zero D grads during warmup.
+
+         info = jax.lax.pmean(info, axis_name=pmap_axis)
+         if self.config['quantizer_type'] == 'fsq':
+             # Report the fraction of codebook entries with nonzero usage.
+             info['codebook_usage'] = jnp.sum(info['codebook_usage'] > 0) / info['codebook_usage'].shape[-1]
+
+         # Update the VQVAE (also storing the new path length running mean).
+         updates, new_opt_state = self.vqvae.tx.update(vae_grads, self.vqvae.opt_state, self.vqvae.params)
+         new_params = optax.apply_updates(self.vqvae.params, updates)
+         new_vqvae = self.vqvae.replace(step=self.vqvae.step + 1, params=new_params, opt_state=new_opt_state, pl_mean=info["pl_mean"])
+
+         # Update the discriminator.
+         updates, new_opt_state = self.discriminator.tx.update(d_grads, self.discriminator.opt_state, self.discriminator.params)
+         new_params = optax.apply_updates(self.discriminator.params, updates)
+         new_discriminator = self.discriminator.replace(step=self.discriminator.step + 1, params=new_params, opt_state=new_opt_state)
+
+         info['grad_norm_vae'] = optax.global_norm(vae_grads)
+         info['grad_norm_d'] = optax.global_norm(d_grads)
+         info['update_norm'] = optax.global_norm(updates)  # Note: these refer to the discriminator,
+         info['param_norm'] = optax.global_norm(new_params)  # the last-assigned updates/new_params.
+         info['is_gan_training'] = is_gan_training
+
+         new_vqvae_eps = target_update(new_vqvae, self.vqvae_eps, 1 - self.config['eps_update_rate'])
+
+         new_model = self.replace(rng=new_rng, vqvae=new_vqvae, vqvae_eps=new_vqvae_eps, discriminator=new_discriminator)
+         return new_model, info
+
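+     # Eval-time reconstruction runs through vqvae_eps, the exponential moving average
+     # (EMA) copy of the VQVAE weights maintained above by target_update at rate
+     # 1 - eps_update_rate per step.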
+     @partial(jax.pmap, axis_name='data', in_axes=(0, 0))
+     def reconstruction(self, images, pmap_axis='data'):
+         reconstructed_images, _ = self.vqvae_eps(images)
+         reconstructed_images = jnp.clip(reconstructed_images, 0, 1)
+         return reconstructed_images
+
+ ##############################################
+ ## Training Code.
+ ##############################################
+ def main(_):
+     np.random.seed(FLAGS.seed)
+     print("Using devices", jax.local_devices())
+     device_count = len(jax.local_devices())
+     global_device_count = jax.device_count()
+     local_batch_size = FLAGS.batch_size // (global_device_count // device_count)
+     print("Device count", device_count)
+     print("Global device count", global_device_count)
+     print("Global Batch: ", FLAGS.batch_size)
+     print("Node Batch: ", local_batch_size)
+     print("Device Batch:", local_batch_size // device_count)
+
+     # Create wandb logger
+     if jax.process_index() == 0:
+         setup_wandb(FLAGS.model.to_dict(), **FLAGS.wandb)
+
+     def get_dataset(is_train):
+         if 'imagenet' in FLAGS.dataset_name:
+             def deserialization_fn(data):
+                 image = data['image']
+                 min_side = tf.minimum(tf.shape(image)[0], tf.shape(image)[1])
+                 image = tf.image.resize_with_crop_or_pad(image, min_side, min_side)  # Center-crop to a square.
+                 if 'imagenet256' in FLAGS.dataset_name:
+                     image = tf.image.resize(image, (256, 256))
+                 elif 'imagenet128' in FLAGS.dataset_name:
+                     image = tf.image.resize(image, (128, 128))
+                 else:
+                     raise ValueError(f"Unknown dataset {FLAGS.dataset_name}")
+                 if is_train:
+                     image = tf.image.random_flip_left_right(image)
+                 image = tf.cast(image, tf.float32) / 255.0
+                 return image
+
+             split = tfds.split_for_jax_process('train' if is_train else 'validation', drop_remainder=True)
+             print(split)
+             dataset = tfds.load('imagenet2012', split=split, data_dir="/dev/shm")
+             dataset = dataset.map(deserialization_fn, num_parallel_calls=tf.data.AUTOTUNE)
+             dataset = dataset.shuffle(10000, seed=42, reshuffle_each_iteration=True)
+             dataset = dataset.repeat()
+             dataset = dataset.batch(local_batch_size)
+             dataset = dataset.prefetch(tf.data.AUTOTUNE)
+             dataset = tfds.as_numpy(dataset)
+             dataset = iter(dataset)
+             return dataset
+         else:
+             raise ValueError(f"Unknown dataset {FLAGS.dataset_name}")
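+     # tfds.split_for_jax_process above hands each host a disjoint shard of the split,
+     # so every host independently pulls its own local_batch_size worth of images.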
+
+     dataset = get_dataset(is_train=True)
+     dataset_valid = get_dataset(is_train=False)
+     example_obs = next(dataset)[:1]
+
+     print(os.getcwd())
+     get_fid_activations = get_fid_network()
+     # if not os.path.exists('./data/imagenet256_fidstats_openai.npz'):
+     #     raise ValueError("Please download the FID stats file! See the README.")
+     # truth_fid_stats = np.load('data/imagenet256_fidstats_openai.npz')
+     truth_fid_stats = np.load("/home/lambda/nfs_share/jax-vqvae-vqgan/base_stats.npz")
+
+     rng = jax.random.PRNGKey(FLAGS.seed)
+     rng, param_key = jax.random.split(rng)
+     print("Total Memory on device:", float(jax.local_devices()[0].memory_stats()['bytes_limit']) / 1024**3, "GB")
+
+     ###################################
+     # Creating Model and put on devices.
+     ###################################
+     FLAGS.model.image_channels = example_obs.shape[-1]
+     FLAGS.model.image_size = example_obs.shape[1]
+     vqvae_def = VQVAE(FLAGS.model, train=True)
+     vqvae_params = vqvae_def.init({'params': param_key, 'noise': param_key}, example_obs)['params']
+     tx = optax.adam(learning_rate=FLAGS.model['lr'], b1=FLAGS.model['beta1'], b2=FLAGS.model['beta2'])
+     vqvae_ts = TrainState.create(vqvae_def, vqvae_params, tx=tx)
+     vqvae_def_eps = VQVAE(FLAGS.model, train=False)
+     vqvae_eps_ts = TrainState.create(vqvae_def_eps, vqvae_params)  # EMA copy starts from the same params.
+     print("Total num of VQVAE parameters:", sum(x.size for x in jax.tree_util.tree_leaves(vqvae_params)))
+
+     discriminator_def = Discriminator(FLAGS.model)
+     discriminator_params = discriminator_def.init(param_key, example_obs)['params']
+     tx = optax.adam(learning_rate=FLAGS.model['lr'], b1=FLAGS.model['beta1'], b2=FLAGS.model['beta2'])
+     discriminator_ts = TrainState.create(discriminator_def, discriminator_params, tx=tx)
+     print("Total num of Discriminator parameters:", sum(x.size for x in jax.tree_util.tree_leaves(discriminator_params)))
+
+     model = VQGANModel(rng=rng, vqvae=vqvae_ts, vqvae_eps=vqvae_eps_ts, discriminator=discriminator_ts, config=FLAGS.model)
+
+     if FLAGS.load_dir is not None:
+         try:
+             cp = Checkpoint(FLAGS.load_dir)
+             model = cp.load_model(model)
+             print("Loaded model with step", model.vqvae.step)
+         except Exception:
+             print("Random init")
+     else:
+         print("Random init")
+
+     model = flax.jax_utils.replicate(model, devices=jax.local_devices())
+     jax.debug.visualize_array_sharding(model.vqvae.params['decoder']['Conv_0']['bias'])
+
+     ###################################
+     # Train Loop
+     ###################################
+
+     best_fid = 100000
+
+     for i in tqdm.tqdm(range(1, FLAGS.max_steps + 1),
+                        smoothing=0.1,
+                        dynamic_ncols=True):
+
+         batch_images = next(dataset)
+         batch_images = batch_images.reshape((len(jax.local_devices()), -1, *batch_images.shape[1:]))  # [devices, batch//devices, ...]
+
+         model, update_info = model.update(batch_images)
+
+         if i % FLAGS.log_interval == 0:
+             update_info = jax.tree_map(lambda x: x.mean(), update_info)
+             train_metrics = {f'training/{k}': v for k, v in update_info.items()}
+             if jax.process_index() == 0:
+                 wandb.log(train_metrics, step=i)
+
+         if i % FLAGS.eval_interval == 0:
+             # Log some reconstructions.
+             reconstructed_images = model.reconstruction(batch_images)  # [devices, batch//devices, 256, 256, 3]
+             valid_images = next(dataset_valid)
+             valid_images = valid_images.reshape((len(jax.local_devices()), -1, *valid_images.shape[1:]))  # [devices, batch//devices, ...]
+             valid_reconstructed_images = model.reconstruction(valid_images)
+
+             if jax.process_index() == 0:
+                 wandb.log({'batch_image_mean': batch_images.mean()}, step=i)
+                 wandb.log({'reconstructed_images_mean': reconstructed_images.mean()}, step=i)
+                 wandb.log({'batch_image_std': batch_images.std()}, step=i)
+                 wandb.log({'reconstructed_images_std': reconstructed_images.std()}, step=i)
+
+                 # Plot comparisons with matplotlib, originals above reconstructions.
+                 # The 2x8 grid was sized for 8 devices (e.g. a TPUv3 host); with 4 local
+                 # devices only the first 4 columns are filled.
+                 fig, axs = plt.subplots(2, 8, figsize=(30, 15))
+                 for j in range(4):
+                     axs[0, j].imshow(batch_images[j, 0], vmin=0, vmax=1)
+                     axs[1, j].imshow(reconstructed_images[j, 0], vmin=0, vmax=1)
+                 wandb.log({'reconstruction': wandb.Image(fig)}, step=i)
+                 plt.close(fig)
+                 fig, axs = plt.subplots(2, 8, figsize=(30, 15))
+                 for j in range(4):
+                     axs[0, j].imshow(valid_images[j, 0], vmin=0, vmax=1)
+                     axs[1, j].imshow(valid_reconstructed_images[j, 0], vmin=0, vmax=1)
+                 wandb.log({'reconstruction_valid': wandb.Image(fig)}, step=i)
+                 plt.close(fig)
+
+             # Validation losses: reuse update() for its metrics and discard the updated model.
+             _, valid_update_info = model.update(valid_images)
+             valid_update_info = jax.tree_map(lambda x: x.mean(), valid_update_info)
+             valid_metrics = {f'validation/{k}': v for k, v in valid_update_info.items()}
+             if jax.process_index() == 0:
+                 wandb.log(valid_metrics, step=i)
+
+             # FID measurement.
+             activations = []
+             activations2 = []
+             for _ in range(780):  # Approximately 40k validation images.
+                 valid_images = next(dataset_valid)
+                 valid_images = valid_images.reshape((len(jax.local_devices()), -1, *valid_images.shape[1:]))  # [devices, batch//devices, ...]
+                 valid_reconstructed_images = model.reconstruction(valid_images)
+
+                 valid_reconstructed_images = jax.image.resize(valid_reconstructed_images, (valid_images.shape[0], valid_images.shape[1], 299, 299, 3),
+                                                               method='bilinear', antialias=False)
+                 valid_reconstructed_images = 2 * valid_reconstructed_images - 1
+                 activations += [np.array(get_fid_activations(valid_reconstructed_images))[..., 0, 0, :]]
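+                 # The FID Inception network expects 299x299 inputs in [-1, 1], hence the
+                 # resize and rescale above; [..., 0, 0, :] squeezes the 1x1 spatial dims
+                 # of the pooled activations.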
+
+                 # Only needed when saving reference stats for the ground-truth images:
+                 # valid_reconstructed_images = jax.image.resize(valid_images, (valid_images.shape[0], valid_images.shape[1], 299, 299, 3),
+                 #                                               method='bilinear', antialias=False)
+                 # valid_reconstructed_images = 2 * valid_reconstructed_images - 1
+                 # activations2 += [np.array(get_fid_activations(valid_reconstructed_images))[..., 0, 0, :]]
+
+             # TODO: use all_gather to collect activations from all hosts; currently only
+             # this host's activations contribute to the statistics.
+             activations = np.concatenate(activations, axis=0)
+             activations = activations.reshape((-1, activations.shape[-1]))
+
+             # activations2 = np.concatenate(activations2, axis=0)
+             # activations2 = activations2.reshape((-1, activations2.shape[-1]))
+
+             print("FID activations shape", activations.shape)  # e.g. (N, 2048) Inception features.
+             mu1 = np.mean(activations, axis=0)
+             sigma1 = np.cov(activations, rowvar=False)
+             fid = fid_from_stats(mu1, sigma1, truth_fid_stats['mu'], truth_fid_stats['sigma'])
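+             # FID recap: the Frechet distance between Gaussians fitted to the two
+             # activation sets, ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 (sigma1 sigma2)^(1/2)).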
+
+             # mu2 = np.mean(activations2, axis=0)
+             # sigma2 = np.cov(activations2, rowvar=False)
+
+             # To regenerate the reference stats, save mu2/sigma2 and exit:
+             # np.savez("base.npz", mu=mu2, sigma=sigma2)
+             # exit()
+
+             # Used when comparing against freshly computed base stats:
+             # fid = fid_from_stats(mu1, sigma1, mu2, sigma2)
+
+             if jax.process_index() == 0:
+                 wandb.log({'validation/fid': fid}, step=i)
+                 print("validation FID at step", i, fid)
+                 # If this FID beats the previous best, save a 'best' checkpoint.
+                 if fid < best_fid:
+                     model_single = flax.jax_utils.unreplicate(model)
+                     cp = Checkpoint(FLAGS.save_dir + "best.tmp")
+                     cp.set_model(model_single)
+                     cp.save()
+                     best_fid = fid
+
+         if (i % FLAGS.save_interval == 0) and (FLAGS.save_dir is not None):
+             if jax.process_index() == 0:
+                 model_single = flax.jax_utils.unreplicate(model)
+                 cp = Checkpoint(FLAGS.save_dir)
+                 cp.set_model(model_single)
+                 cp.save()
+
+ if __name__ == '__main__':
+     app.run(main)