from glob import glob

import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset

def make_transform(
    smaller_edge_size: int,
    patch_size: int,
    center_crop: bool = False,
    max_edge_size: int = 812,
) -> transforms.Compose:
    """Resize, optionally center-crop, normalize, and trim both spatial
    dimensions to a multiple of patch_size (capped at max_edge_size)."""
    IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
    IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
    interpolation_mode = transforms.InterpolationMode.BICUBIC
    assert smaller_edge_size > 0

    steps = [
        # Resize so the smaller image edge matches smaller_edge_size.
        transforms.Resize(
            size=smaller_edge_size,
            interpolation=interpolation_mode,
            antialias=True,
        ),
    ]
    if center_crop:
        # Square crop for fixed-resolution (e.g. training) batches.
        steps.append(transforms.CenterCrop(smaller_edge_size))
    steps += [
        transforms.ToTensor(),
        transforms.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
        # Trim height and width down to the nearest multiple of patch_size,
        # never exceeding max_edge_size, so the image tiles into whole patches.
        transforms.Lambda(
            lambda img: img[
                :,
                : min(max_edge_size, img.shape[1] - img.shape[1] % patch_size),
                : min(max_edge_size, img.shape[2] - img.shape[2] % patch_size),
            ]
        ),
    ]
    return transforms.Compose(steps)
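

# Illustrative sanity check of the patch-alignment crop above (assumed values,
# not taken from this repo; 518 and 14 are common ViT settings):
#
#     t = make_transform(smaller_edge_size=518, patch_size=14)
#     x = t(Image.new('RGB', (640, 480)))  # resized to ~518x690, cropped to 518x686
#     assert x.shape[1] % 14 == 0 and x.shape[2] % 14 == 0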


class VisualDataset(Dataset):
    """Fixed (or user-supplied) set of images for qualitative visualization."""

    def __init__(self, transform, imgs=None):
        self.transform = transform
        if imgs is None:
            # Default demo images shipped with the repo.
            self.files = [
                'resources/example.jpg',
                'resources/villa.png',
                'resources/000000037740.jpg',
                'resources/000000064359.jpg',
                'resources/000000066635.jpg',
                'resources/000000078420.jpg',
            ]
        else:
            self.files = imgs

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        img = Image.open(self.files[index]).convert('RGB')
        if self.transform:
            img = self.transform(img)
        return img


class ImageNetDataset(Dataset):
    """Evenly subsampled slice of the ImageNet-1k train split."""

    def __init__(self, transform, num_train_max=1000000):
        self.transform = transform
        # Sort for a deterministic subsample; glob order is filesystem-dependent.
        self.files = sorted(glob('data/imagenet/train/*/*.JPEG'))
        # Stride evenly through the list so at most num_train_max images remain.
        # max(1, ...) avoids a zero step (a ValueError in the slice) when there
        # are fewer files than num_train_max, and the trailing slice enforces
        # the cap, since striding alone can keep slightly more than the limit.
        step = max(1, len(self.files) // num_train_max)
        self.files = self.files[::step][:num_train_max]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        img = Image.open(self.files[index]).convert('RGB')
        img = self.transform(img)
        return img
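

# Worked example of the subsampling arithmetic (hypothetical run): the standard
# ImageNet-1k train split has 1,281,167 images, so with num_train_max=100,000
# the stride is 1281167 // 100000 = 12, files[::12] keeps 106,764 paths, and
# the trailing [:100000] trims down to the cap.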


def load_data(args, model):
    """Training dataset: center-cropped ImageNet images at args.resolution."""
    transform = make_transform(
        args.resolution, model.patch_size, center_crop=True
    )
    return ImageNetDataset(transform=transform, num_train_max=args.num_train_max)


def load_visual_data(args, model):
    """Visualization dataset: aspect-preserving resize with a larger edge cap."""
    transform = make_transform(
        args.visual_size, model.patch_size, max_edge_size=1792
    )
    # vars(args).get('imgs') tolerates args namespaces without an 'imgs' field.
    return VisualDataset(transform=transform, imgs=vars(args).get('imgs'))
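

# Minimal usage sketch, not part of the original module: the argument fields
# (visual_size, imgs) and model.patch_size mirror how load_visual_data reads
# them above; the SimpleNamespace stand-ins are assumptions, not the real CLI
# args or model.
if __name__ == '__main__':
    from types import SimpleNamespace

    from torch.utils.data import DataLoader

    args = SimpleNamespace(visual_size=518, imgs=None)
    model = SimpleNamespace(patch_size=14)

    dataset = load_visual_data(args, model)
    # batch_size=1 because each image keeps its own patch-aligned resolution,
    # so samples generally cannot be stacked into one batch tensor.
    loader = DataLoader(dataset, batch_size=1)
    for batch in loader:
        print(batch.shape)  # (1, 3, H, W) with H and W multiples of patch_size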