boris committed on
Commit
35406cd
2 Parent(s): cfb03b3 99ed4c6

Merge pull request #58 from pcuenca/main

Files changed (1)
  1. examples/vqgan-jax-encoding-howto.py +124 -0
examples/vqgan-jax-encoding-howto.py ADDED
@@ -0,0 +1,124 @@
#!/usr/bin/env python
# coding: utf-8

# VQGAN-JAX - Encoding HowTo

import numpy as np

# For data loading
import torch
import torchvision.transforms.functional as TF
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets.folder import default_loader
from torchvision.transforms import InterpolationMode

# For data saving
from pathlib import Path
import pandas as pd
from tqdm import tqdm

import jax
from jax import pmap

from vqgan_jax.modeling_flax_vqgan import VQModel

## Params and arguments

image_list = '/sddata/dalle-mini/CC12M/10k.tsv'  # List of paths containing images to encode
output_tsv = 'output.tsv'  # Encoded results
batch_size = 64
num_workers = 4  # TPU v3-8 hosts have 96 CPU cores, so feel free to increase this number when necessary

# Load model
model = VQModel.from_pretrained("flax-community/vqgan_f16_16384")
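
# Optional smoke test, as a hedged sketch: run a dummy image through the encoder.
# The (1, 256) output shape assumes the f16 checkpoint at 256x256 inputs,
# i.e. (256 / 16) ** 2 = 256 tokens per image; `_smoke_test_encoder` is a
# hypothetical helper, not used by the pipeline below.
def _smoke_test_encoder():
    dummy = np.zeros((1, 256, 256, 3), dtype=np.float32)
    _, indices = model.encode(dummy)
    print(indices.shape)  # expected: (1, 256)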

## Data Loading

# Simple torch Dataset to load images from paths.
# You can use your own pipeline instead.
class ImageDataset(Dataset):
    def __init__(self, image_list_path: str, image_size: int, max_items=None):
        """
        :param image_list_path: Path to a file containing a list of all images. We assume absolute paths for now.
        :param image_size: Image size. Source images will be resized and center-cropped.
        :param max_items: Limit dataset size for debugging.
        """
        self.image_list = pd.read_csv(image_list_path, sep='\t', header=None)
        if max_items is not None:
            self.image_list = self.image_list[:max_items]
        self.image_size = image_size

    def __len__(self):
        return len(self.image_list)

    def _get_raw_image(self, i):
        image_path = Path(self.image_list.iloc[i][0])
        return default_loader(image_path)

    def resize_image(self, image):
        # Resize so the shorter side matches `image_size`, then center-crop to a square.
        s = min(image.size)
        r = self.image_size / s
        s = (round(r * image.size[1]), round(r * image.size[0]))  # (height, width)
        image = TF.resize(image, s, interpolation=InterpolationMode.LANCZOS)
        image = TF.center_crop(image, output_size=2 * [self.image_size])
        image = np.expand_dims(np.array(image), axis=0)
        return image

    def __getitem__(self, i):
        image = self._get_raw_image(i)
        return self.resize_image(image)
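
# Example usage, assuming `image_list` points at a one-column TSV of absolute
# image paths as configured above. Each item is a numpy array with a leading
# singleton axis that `superbatch_generator` below squeezes away;
# `_demo_dataset_item` is a hypothetical helper, not used by the pipeline.
def _demo_dataset_item():
    ds = ImageDataset(image_list, image_size=256, max_items=2)
    item = ds[0]
    print(item.shape)  # (1, 256, 256, 3), uint8 pixels in [0, 255]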

## Encoding

# Encoding function to be parallelized with `pmap`.
# Note: images have to be square.
def encode(model, batch):
    _, indices = model.encode(batch)
    return indices
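
# Shape sketch: `pmap` maps `encode` over the leading (device) axis, so a
# superbatch of shape (num_devices, per_device_batch, 256, 256, 3) comes back
# as indices of shape (num_devices, per_device_batch, 256) for this checkpoint.
# `_demo_pmap_shapes` is a hypothetical helper; zeros stand in for real images.
def _demo_pmap_shapes():
    p_encode = pmap(lambda batch: encode(model, batch))
    superbatch = np.zeros((jax.device_count(), 8, 256, 256, 3), dtype=np.float32)
    indices = p_encode(superbatch)  # `encode` runs once per device, in parallel
    print(indices.shape)  # expected: (jax.device_count(), 8, 256)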

# Alternative: create a batch with num_tpus*batch_size and use `shard` to distribute
# (see the sketch after this function).
def superbatch_generator(dataloader, num_tpus):
    iter_loader = iter(dataloader)
    for batch in iter_loader:
        superbatch = [batch.squeeze(1)]
        try:
            for _ in range(num_tpus - 1):
                batch = next(iter_loader)
                # Skip incomplete last batch
                if batch.shape[0] == dataloader.batch_size:
                    superbatch.append(batch.squeeze(1))
        except StopIteration:
            pass
        superbatch = torch.stack(superbatch, dim=0)
        yield superbatch
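
# The `shard` alternative mentioned above, sketched under the assumption that the
# DataLoader is built with batch_size = num_tpus * batch_size. Flax's
# `flax.training.common_utils.shard` reshapes the leading axis into
# (device_count, -1, ...); `sharded_batches` is a hypothetical counterpart to
# `superbatch_generator`, not used by the pipeline below.
def sharded_batches(dataloader):
    from flax.training.common_utils import shard
    for batch in dataloader:
        batch = batch.squeeze(1).numpy()
        # Skip a ragged final batch that cannot be split evenly across devices.
        if batch.shape[0] % jax.device_count() == 0:
            yield shard(batch)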

def encode_dataset(dataset, batch_size=32):
    num_tpus = jax.device_count()
    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
    superbatches = superbatch_generator(dataloader, num_tpus=num_tpus)

    p_encoder = pmap(lambda batch: encode(model, batch))

    # We save each superbatch as we process it to avoid reallocation of buffers.
    # We keep the file open to prevent excessive file seeks.
    with open(output_tsv, "w") as file:
        iterations = len(dataset) // (batch_size * num_tpus)
        for n in tqdm(range(iterations)):
            superbatch = next(superbatches)
            encoded = p_encoder(superbatch.numpy())
            encoded = encoded.reshape(-1, encoded.shape[-1])

            # Extract paths from the dataset, and save paths and encodings (as strings) to disk
            start_index = n * batch_size * num_tpus
            end_index = (n + 1) * batch_size * num_tpus
            paths = dataset.image_list[start_index:end_index][0].values
            encoded_as_string = list(map(lambda item: np.array2string(item, separator=',', max_line_width=50000, formatter={'int': lambda x: str(x)}), encoded))
            batch_df = pd.DataFrame.from_dict({"image_file": paths, "encoding": encoded_as_string})
            batch_df.to_csv(file, sep='\t', header=(n == 0), index=False)

dataset = ImageDataset(image_list, image_size=256)
encode_dataset(dataset, batch_size=batch_size)  # writes encodings to `output_tsv`
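
# Reading the results back, assuming the format written above: one row per image,
# with each encoding serialized by `np.array2string` (so `ast.literal_eval` can
# parse it back into a Python list). `_read_encodings` is a hypothetical helper.
def _read_encodings(path=output_tsv):
    import ast
    df = pd.read_csv(path, sep='\t')
    first = np.array(ast.literal_eval(df["encoding"].iloc[0]), dtype=np.int32)
    print(first.shape)  # (256,) codebook indices for the first image
    return df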