Spaces: Runtime error

tjxj committed • Commit 6f7f0bf
Parent(s): 2a8871c

basicsr

This view is limited to 50 files because it contains too many changes. See raw diff.
- basicsr/__init__.py +12 -0
- basicsr/archs/__init__.py +25 -0
- basicsr/archs/arch_util.py +318 -0
- basicsr/archs/basicvsr_arch.py +336 -0
- basicsr/archs/dfdnet_arch.py +169 -0
- basicsr/archs/dfdnet_util.py +162 -0
- basicsr/archs/discriminator_arch.py +85 -0
- basicsr/archs/duf_arch.py +277 -0
- basicsr/archs/ecbsr_arch.py +274 -0
- basicsr/archs/edsr_arch.py +61 -0
- basicsr/archs/edvr_arch.py +383 -0
- basicsr/archs/hifacegan_arch.py +259 -0
- basicsr/archs/hifacegan_util.py +255 -0
- basicsr/archs/inception.py +307 -0
- basicsr/archs/rcan_arch.py +135 -0
- basicsr/archs/ridnet_arch.py +184 -0
- basicsr/archs/rrdbnet_arch.py +119 -0
- basicsr/archs/spynet_arch.py +96 -0
- basicsr/archs/srresnet_arch.py +65 -0
- basicsr/archs/stylegan2_arch.py +799 -0
- basicsr/archs/swinir_arch.py +956 -0
- basicsr/archs/tof_arch.py +172 -0
- basicsr/archs/vgg_arch.py +161 -0
- basicsr/data/__init__.py +101 -0
- basicsr/data/data_sampler.py +48 -0
- basicsr/data/data_util.py +313 -0
- basicsr/data/degradations.py +765 -0
- basicsr/data/ffhq_dataset.py +80 -0
- basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt +0 -0
- basicsr/data/meta_info/meta_info_REDS4_test_GT.txt +4 -0
- basicsr/data/meta_info/meta_info_REDS_GT.txt +270 -0
- basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt +4 -0
- basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt +30 -0
- basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt +0 -0
- basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt +1225 -0
- basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt +0 -0
- basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt +1613 -0
- basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt +0 -0
- basicsr/data/paired_image_dataset.py +109 -0
- basicsr/data/prefetch_dataloader.py +125 -0
- basicsr/data/reds_dataset.py +360 -0
- basicsr/data/single_image_dataset.py +69 -0
- basicsr/data/transforms.py +179 -0
- basicsr/data/video_test_dataset.py +287 -0
- basicsr/data/vimeo90k_dataset.py +192 -0
- basicsr/losses/__init__.py +26 -0
- basicsr/losses/loss_util.py +95 -0
- basicsr/losses/losses.py +492 -0
- basicsr/metrics/__init__.py +20 -0
- basicsr/metrics/fid.py +93 -0
basicsr/__init__.py
ADDED
@@ -0,0 +1,12 @@
# https://github.com/xinntao/BasicSR
# flake8: noqa
from .archs import *
from .data import *
from .losses import *
from .metrics import *
from .models import *
from .ops import *
from .test import *
from .train import *
from .utils import *
from .version import __gitsha__, __version__
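
Not part of the commit — a minimal import check, assuming the optional compiled ops under basicsr.ops and the generated basicsr/version.py are available in the environment:

import basicsr  # the star imports above populate the arch/data/model registries as a side effect

print(basicsr.__version__)  # exposed by the `from .version import ...` line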
basicsr/archs/__init__.py
ADDED
@@ -0,0 +1,25 @@
import importlib
from copy import deepcopy
from os import path as osp

from basicsr.utils import get_root_logger, scandir
from basicsr.utils.registry import ARCH_REGISTRY

__all__ = ['build_network']

# automatically scan and import arch modules for registry
# scan all the files under the 'archs' folder and collect files ending with
# '_arch.py'
arch_folder = osp.dirname(osp.abspath(__file__))
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
# import all the arch modules
_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames]


def build_network(opt):
    opt = deepcopy(opt)
    network_type = opt.pop('type')
    net = ARCH_REGISTRY.get(network_type)(**opt)
    logger = get_root_logger()
    logger.info(f'Network [{net.__class__.__name__}] is created.')
    return net
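
For orientation (not part of the commit): build_network() pops the 'type' key, looks the class up in ARCH_REGISTRY, and passes the remaining keys to its constructor. A sketch, assuming the MSRResNet architecture from srresnet_arch.py in this commit is registered with these constructor arguments:

from basicsr.archs import build_network

# 'type' selects the registered class; everything else becomes a keyword argument.
opt = dict(type='MSRResNet', num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4)
net = build_network(opt)
print(net.__class__.__name__)  # 'MSRResNet'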
basicsr/archs/arch_util.py
ADDED
@@ -0,0 +1,318 @@
import collections.abc
import math
import torch
import torchvision
import warnings
from distutils.version import LooseVersion
from itertools import repeat
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm

from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv
from basicsr.utils import get_root_logger


@torch.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
    """Initialize network weights.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments for initialization function.
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)


def make_layer(basic_block, num_basic_block, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        basic_block (nn.module): nn.module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)


class ResidualBlockNoBN(nn.Module):
    """Residual block without BN.

    It has a style of:
        ---Conv-ReLU-Conv-+-
         |________________|

    Args:
        num_feat (int): Channel number of intermediate features.
            Default: 64.
        res_scale (float): Residual scale. Default: 1.
        pytorch_init (bool): If set to True, use pytorch default init,
            otherwise, use default_init_weights. Default: False.
    """

    def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
        super(ResidualBlockNoBN, self).__init__()
        self.res_scale = res_scale
        self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
        self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
        self.relu = nn.ReLU(inplace=True)

        if not pytorch_init:
            default_init_weights([self.conv1, self.conv2], 0.1)

    def forward(self, x):
        identity = x
        out = self.conv2(self.relu(self.conv1(x)))
        return identity + out * self.res_scale


class Upsample(nn.Sequential):
    """Upsample module.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                m.append(nn.PixelShuffle(2))
        elif scale == 3:
            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            m.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)


def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
    """Warp an image or feature map with optical flow.

    Args:
        x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2), normal value.
        interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
        padding_mode (str): 'zeros' or 'border' or 'reflection'.
            Default: 'zeros'.
        align_corners (bool): Before pytorch 1.3, the default value is
            align_corners=True. After pytorch 1.3, the default value is
            align_corners=False. Here, we use the True as default.

    Returns:
        Tensor: Warped image or feature map.
    """
    assert x.size()[-2:] == flow.size()[1:3]
    _, _, h, w = x.size()
    # create mesh grid
    grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2
    grid.requires_grad = False

    vgrid = grid + flow
    # scale grid to [-1,1]
    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)

    # TODO, what if align_corners=False
    return output


def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
    """Resize a flow according to ratio or shape.

    Args:
        flow (Tensor): Precomputed flow. shape [N, 2, H, W].
        size_type (str): 'ratio' or 'shape'.
        sizes (list[int | float]): the ratio for resizing or the final output
            shape.
            1) The order of ratio should be [ratio_h, ratio_w]. For
            downsampling, the ratio should be smaller than 1.0 (i.e., ratio
            < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
            ratio > 1.0).
            2) The order of output_size should be [out_h, out_w].
        interp_mode (str): The mode of interpolation for resizing.
            Default: 'bilinear'.
        align_corners (bool): Whether align corners. Default: False.

    Returns:
        Tensor: Resized flow.
    """
    _, _, flow_h, flow_w = flow.size()
    if size_type == 'ratio':
        output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
    elif size_type == 'shape':
        output_h, output_w = sizes[0], sizes[1]
    else:
        raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')

    input_flow = flow.clone()
    ratio_h = output_h / flow_h
    ratio_w = output_w / flow_w
    input_flow[:, 0, :, :] *= ratio_w
    input_flow[:, 1, :, :] *= ratio_h
    resized_flow = F.interpolate(
        input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
    return resized_flow


# TODO: may write a cpp file
def pixel_unshuffle(x, scale):
    """ Pixel unshuffle.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    out_channel = c * (scale**2)
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)


class DCNv2Pack(ModulatedDeformConvPack):
    """Modulated deformable conv for deformable alignment.

    Different from the official DCNv2Pack, which generates offsets and masks
    from the preceding features, this DCNv2Pack takes another different
    features to generate offsets and masks.

    Ref:
        Delving Deep into Deformable Alignment in Video Super-Resolution.
    """

    def forward(self, x, feat):
        out = self.conv_offset(feat)
        o1, o2, mask = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)

        offset_absmean = torch.mean(torch.abs(offset))
        if offset_absmean > 50:
            logger = get_root_logger()
            logger.warning(f'Offset abs mean is {offset_absmean}, larger than 50.')

        if LooseVersion(torchvision.__version__) >= LooseVersion('0.9.0'):
            return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
                                                 self.dilation, mask)
        else:
            return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding,
                                         self.dilation, self.groups, self.deformable_groups)


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
            'The distribution of values may be incorrect.',
            stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        low = norm_cdf((a - mean) / std)
        up = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [low, up], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * low - 1, 2 * up - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution.

    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py

    The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)


# From PyTorch
def _ntuple(n):

    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))

    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
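
A quick shape check for two of the helpers above (a sketch, not part of the commit; it assumes basicsr.ops.dcn imports cleanly, since arch_util pulls it in at module level): pixel_unshuffle trades spatial resolution for channels, and Upsample reverses that with PixelShuffle stages.

import torch
from basicsr.archs.arch_util import Upsample, pixel_unshuffle

x = torch.randn(1, 3, 32, 32)
print(pixel_unshuffle(x, scale=2).shape)      # torch.Size([1, 12, 16, 16]): c*scale^2, h/scale, w/scale

up = Upsample(scale=4, num_feat=16)           # two conv + PixelShuffle(2) stages for 2^n scales
print(up(torch.randn(1, 16, 24, 24)).shape)   # torch.Size([1, 16, 96, 96])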
basicsr/archs/basicvsr_arch.py
ADDED
@@ -0,0 +1,336 @@
import torch
from torch import nn as nn
from torch.nn import functional as F

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import ResidualBlockNoBN, flow_warp, make_layer
from .edvr_arch import PCDAlignment, TSAFusion
from .spynet_arch import SpyNet


@ARCH_REGISTRY.register()
class BasicVSR(nn.Module):
    """A recurrent network for video SR. Now only x4 is supported.

    Args:
        num_feat (int): Number of channels. Default: 64.
        num_block (int): Number of residual blocks for each branch. Default: 15
        spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
    """

    def __init__(self, num_feat=64, num_block=15, spynet_path=None):
        super().__init__()
        self.num_feat = num_feat

        # alignment
        self.spynet = SpyNet(spynet_path)

        # propagation
        self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
        self.forward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)

        # reconstruction
        self.fusion = nn.Conv2d(num_feat * 2, num_feat, 1, 1, 0, bias=True)
        self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
        self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)

        self.pixel_shuffle = nn.PixelShuffle(2)

        # activation functions
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def get_flow(self, x):
        b, n, c, h, w = x.size()

        x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
        x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)

        flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
        flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)

        return flows_forward, flows_backward

    def forward(self, x):
        """Forward function of BasicVSR.

        Args:
            x: Input frames with shape (b, n, c, h, w). n is the temporal dimension / number of frames.
        """
        flows_forward, flows_backward = self.get_flow(x)
        b, n, _, h, w = x.size()

        # backward branch
        out_l = []
        feat_prop = x.new_zeros(b, self.num_feat, h, w)
        for i in range(n - 1, -1, -1):
            x_i = x[:, i, :, :, :]
            if i < n - 1:
                flow = flows_backward[:, i, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
            feat_prop = torch.cat([x_i, feat_prop], dim=1)
            feat_prop = self.backward_trunk(feat_prop)
            out_l.insert(0, feat_prop)

        # forward branch
        feat_prop = torch.zeros_like(feat_prop)
        for i in range(0, n):
            x_i = x[:, i, :, :, :]
            if i > 0:
                flow = flows_forward[:, i - 1, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))

            feat_prop = torch.cat([x_i, feat_prop], dim=1)
            feat_prop = self.forward_trunk(feat_prop)

            # upsample
            out = torch.cat([out_l[i], feat_prop], dim=1)
            out = self.lrelu(self.fusion(out))
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
            out = self.lrelu(self.conv_hr(out))
            out = self.conv_last(out)
            base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
            out += base
            out_l[i] = out

        return torch.stack(out_l, dim=1)


class ConvResidualBlocks(nn.Module):
    """Conv and residual block used in BasicVSR.

    Args:
        num_in_ch (int): Number of input channels. Default: 3.
        num_out_ch (int): Number of output channels. Default: 64.
        num_block (int): Number of residual blocks. Default: 15.
    """

    def __init__(self, num_in_ch=3, num_out_ch=64, num_block=15):
        super().__init__()
        self.main = nn.Sequential(
            nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1, bias=True), nn.LeakyReLU(negative_slope=0.1, inplace=True),
            make_layer(ResidualBlockNoBN, num_block, num_feat=num_out_ch))

    def forward(self, fea):
        return self.main(fea)


@ARCH_REGISTRY.register()
class IconVSR(nn.Module):
    """IconVSR, proposed also in the BasicVSR paper.

    Args:
        num_feat (int): Number of channels. Default: 64.
        num_block (int): Number of residual blocks for each branch. Default: 15.
        keyframe_stride (int): Keyframe stride. Default: 5.
        temporal_padding (int): Temporal padding. Default: 2.
        spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
        edvr_path (str): Path to the pretrained EDVR model. Default: None.
    """

    def __init__(self,
                 num_feat=64,
                 num_block=15,
                 keyframe_stride=5,
                 temporal_padding=2,
                 spynet_path=None,
                 edvr_path=None):
        super().__init__()

        self.num_feat = num_feat
        self.temporal_padding = temporal_padding
        self.keyframe_stride = keyframe_stride

        # keyframe_branch
        self.edvr = EDVRFeatureExtractor(temporal_padding * 2 + 1, num_feat, edvr_path)
        # alignment
        self.spynet = SpyNet(spynet_path)

        # propagation
        self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
        self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)

        self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
        self.forward_trunk = ConvResidualBlocks(2 * num_feat + 3, num_feat, num_block)

        # reconstruction
        self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
        self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
        self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)

        self.pixel_shuffle = nn.PixelShuffle(2)

        # activation functions
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def pad_spatial(self, x):
        """Apply padding spatially.

        Since the PCD module in EDVR requires that the resolution is a multiple
        of 4, we apply padding to the input LR images if their resolution is
        not divisible by 4.

        Args:
            x (Tensor): Input LR sequence with shape (n, t, c, h, w).
        Returns:
            Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).
        """
        n, t, c, h, w = x.size()

        pad_h = (4 - h % 4) % 4
        pad_w = (4 - w % 4) % 4

        # padding
        x = x.view(-1, c, h, w)
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')

        return x.view(n, t, c, h + pad_h, w + pad_w)

    def get_flow(self, x):
        b, n, c, h, w = x.size()

        x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
        x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)

        flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
        flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)

        return flows_forward, flows_backward

    def get_keyframe_feature(self, x, keyframe_idx):
        if self.temporal_padding == 2:
            x = [x[:, [4, 3]], x, x[:, [-4, -5]]]
        elif self.temporal_padding == 3:
            x = [x[:, [6, 5, 4]], x, x[:, [-5, -6, -7]]]
        x = torch.cat(x, dim=1)

        num_frames = 2 * self.temporal_padding + 1
        feats_keyframe = {}
        for i in keyframe_idx:
            feats_keyframe[i] = self.edvr(x[:, i:i + num_frames].contiguous())
        return feats_keyframe

    def forward(self, x):
        b, n, _, h_input, w_input = x.size()

        x = self.pad_spatial(x)
        h, w = x.shape[3:]

        keyframe_idx = list(range(0, n, self.keyframe_stride))
        if keyframe_idx[-1] != n - 1:
            keyframe_idx.append(n - 1)  # last frame is a keyframe

        # compute flow and keyframe features
        flows_forward, flows_backward = self.get_flow(x)
        feats_keyframe = self.get_keyframe_feature(x, keyframe_idx)

        # backward branch
        out_l = []
        feat_prop = x.new_zeros(b, self.num_feat, h, w)
        for i in range(n - 1, -1, -1):
            x_i = x[:, i, :, :, :]
            if i < n - 1:
                flow = flows_backward[:, i, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
            if i in keyframe_idx:
                feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
                feat_prop = self.backward_fusion(feat_prop)
            feat_prop = torch.cat([x_i, feat_prop], dim=1)
            feat_prop = self.backward_trunk(feat_prop)
            out_l.insert(0, feat_prop)

        # forward branch
        feat_prop = torch.zeros_like(feat_prop)
        for i in range(0, n):
            x_i = x[:, i, :, :, :]
            if i > 0:
                flow = flows_forward[:, i - 1, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
            if i in keyframe_idx:
                feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
                feat_prop = self.forward_fusion(feat_prop)

            feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)
            feat_prop = self.forward_trunk(feat_prop)

            # upsample
            out = self.lrelu(self.pixel_shuffle(self.upconv1(feat_prop)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
            out = self.lrelu(self.conv_hr(out))
            out = self.conv_last(out)
            base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
            out += base
            out_l[i] = out

        return torch.stack(out_l, dim=1)[..., :4 * h_input, :4 * w_input]


class EDVRFeatureExtractor(nn.Module):
    """EDVR feature extractor used in IconVSR.

    Args:
        num_input_frame (int): Number of input frames.
        num_feat (int): Number of feature channels
        load_path (str): Path to the pretrained weights of EDVR. Default: None.
    """

    def __init__(self, num_input_frame, num_feat, load_path):

        super(EDVRFeatureExtractor, self).__init__()

        self.center_frame_idx = num_input_frame // 2

        # extract pyramid features
        self.conv_first = nn.Conv2d(3, num_feat, 3, 1, 1)
        self.feature_extraction = make_layer(ResidualBlockNoBN, 5, num_feat=num_feat)
        self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
        self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
        self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

        # pcd and tsa module
        self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=8)
        self.fusion = TSAFusion(num_feat=num_feat, num_frame=num_input_frame, center_frame_idx=self.center_frame_idx)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        if load_path:
            self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])

    def forward(self, x):
        b, n, c, h, w = x.size()

        # extract features for each frame
        # L1
        feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
        feat_l1 = self.feature_extraction(feat_l1)
        # L2
        feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
        feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
        # L3
        feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
        feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))

        feat_l1 = feat_l1.view(b, n, -1, h, w)
        feat_l2 = feat_l2.view(b, n, -1, h // 2, w // 2)
        feat_l3 = feat_l3.view(b, n, -1, h // 4, w // 4)

        # PCD alignment
        ref_feat_l = [  # reference feature list
            feat_l1[:, self.center_frame_idx, :, :, :].clone(), feat_l2[:, self.center_frame_idx, :, :, :].clone(),
            feat_l3[:, self.center_frame_idx, :, :, :].clone()
        ]
        aligned_feat = []
        for i in range(n):
            nbr_feat_l = [  # neighboring feature list
                feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(), feat_l3[:, i, :, :, :].clone()
            ]
            aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
        aligned_feat = torch.stack(aligned_feat, dim=1)  # (b, t, c, h, w)

        # TSA fusion
        return self.fusion(aligned_feat)
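
A forward-pass sketch for BasicVSR (not part of the commit). With spynet_path=None the flow estimator starts from random weights, so this only exercises the shapes: each of the n input frames is upsampled x4.

import torch
from basicsr.archs.basicvsr_arch import BasicVSR

model = BasicVSR(num_feat=64, num_block=2, spynet_path=None).eval()
lr_clip = torch.randn(1, 5, 3, 64, 64)   # (b, n, c, h, w)
with torch.no_grad():
    sr_clip = model(lr_clip)
print(sr_clip.shape)                     # torch.Size([1, 5, 3, 256, 256])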
basicsr/archs/dfdnet_arch.py
ADDED
@@ -0,0 +1,169 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.spectral_norm import spectral_norm

from basicsr.utils.registry import ARCH_REGISTRY
from .dfdnet_util import AttentionBlock, Blur, MSDilationBlock, UpResBlock, adaptive_instance_normalization
from .vgg_arch import VGGFeatureExtractor


class SFTUpBlock(nn.Module):
    """Spatial feature transform (SFT) with upsampling block.

    Args:
        in_channel (int): Number of input channels.
        out_channel (int): Number of output channels.
        kernel_size (int): Kernel size in convolutions. Default: 3.
        padding (int): Padding in convolutions. Default: 1.
    """

    def __init__(self, in_channel, out_channel, kernel_size=3, padding=1):
        super(SFTUpBlock, self).__init__()
        self.conv1 = nn.Sequential(
            Blur(in_channel),
            spectral_norm(nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding)),
            nn.LeakyReLU(0.04, True),
            # The official codes use two LeakyReLU here, so 0.04 for equivalent
        )
        self.convup = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            spectral_norm(nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding)),
            nn.LeakyReLU(0.2, True),
        )

        # for SFT scale and shift
        self.scale_block = nn.Sequential(
            spectral_norm(nn.Conv2d(in_channel, out_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(out_channel, out_channel, 3, 1, 1)))
        self.shift_block = nn.Sequential(
            spectral_norm(nn.Conv2d(in_channel, out_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
            spectral_norm(nn.Conv2d(out_channel, out_channel, 3, 1, 1)), nn.Sigmoid())
        # The official codes use sigmoid for shift block, do not know why

    def forward(self, x, updated_feat):
        out = self.conv1(x)
        # SFT
        scale = self.scale_block(updated_feat)
        shift = self.shift_block(updated_feat)
        out = out * scale + shift
        # upsample
        out = self.convup(out)
        return out


@ARCH_REGISTRY.register()
class DFDNet(nn.Module):
    """DFDNet: Deep Face Dictionary Network.

    It only processes faces with 512x512 size.

    Args:
        num_feat (int): Number of feature channels.
        dict_path (str): Path to the facial component dictionary.
    """

    def __init__(self, num_feat, dict_path):
        super().__init__()
        self.parts = ['left_eye', 'right_eye', 'nose', 'mouth']
        # part_sizes: [80, 80, 50, 110]
        channel_sizes = [128, 256, 512, 512]
        self.feature_sizes = np.array([256, 128, 64, 32])
        self.vgg_layers = ['relu2_2', 'relu3_4', 'relu4_4', 'conv5_4']
        self.flag_dict_device = False

        # dict
        self.dict = torch.load(dict_path)

        # vgg face extractor
        self.vgg_extractor = VGGFeatureExtractor(
            layer_name_list=self.vgg_layers,
            vgg_type='vgg19',
            use_input_norm=True,
            range_norm=True,
            requires_grad=False)

        # attention block for fusing dictionary features and input features
        self.attn_blocks = nn.ModuleDict()
        for idx, feat_size in enumerate(self.feature_sizes):
            for name in self.parts:
                self.attn_blocks[f'{name}_{feat_size}'] = AttentionBlock(channel_sizes[idx])

        # multi scale dilation block
        self.multi_scale_dilation = MSDilationBlock(num_feat * 8, dilation=[4, 3, 2, 1])

        # upsampling and reconstruction
        self.upsample0 = SFTUpBlock(num_feat * 8, num_feat * 8)
        self.upsample1 = SFTUpBlock(num_feat * 8, num_feat * 4)
        self.upsample2 = SFTUpBlock(num_feat * 4, num_feat * 2)
        self.upsample3 = SFTUpBlock(num_feat * 2, num_feat)
        self.upsample4 = nn.Sequential(
            spectral_norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1)), nn.LeakyReLU(0.2, True), UpResBlock(num_feat),
            UpResBlock(num_feat), nn.Conv2d(num_feat, 3, kernel_size=3, stride=1, padding=1), nn.Tanh())

    def swap_feat(self, vgg_feat, updated_feat, dict_feat, location, part_name, f_size):
        """swap the features from the dictionary."""
        # get the original vgg features
        part_feat = vgg_feat[:, :, location[1]:location[3], location[0]:location[2]].clone()
        # resize original vgg features
        part_resize_feat = F.interpolate(part_feat, dict_feat.size()[2:4], mode='bilinear', align_corners=False)
        # use adaptive instance normalization to adjust color and illuminations
        dict_feat = adaptive_instance_normalization(dict_feat, part_resize_feat)
        # get similarity scores
        similarity_score = F.conv2d(part_resize_feat, dict_feat)
        similarity_score = F.softmax(similarity_score.view(-1), dim=0)
        # select the most similar features in the dict (after norm)
        select_idx = torch.argmax(similarity_score)
        swap_feat = F.interpolate(dict_feat[select_idx:select_idx + 1], part_feat.size()[2:4])
        # attention
        attn = self.attn_blocks[f'{part_name}_' + str(f_size)](swap_feat - part_feat)
        attn_feat = attn * swap_feat
        # update features
        updated_feat[:, :, location[1]:location[3], location[0]:location[2]] = attn_feat + part_feat
        return updated_feat

    def put_dict_to_device(self, x):
        if self.flag_dict_device is False:
            for k, v in self.dict.items():
                for kk, vv in v.items():
                    self.dict[k][kk] = vv.to(x)
            self.flag_dict_device = True

    def forward(self, x, part_locations):
        """
        Now only support testing with batch size = 0.

        Args:
            x (Tensor): Input faces with shape (b, c, 512, 512).
            part_locations (list[Tensor]): Part locations.
        """
        self.put_dict_to_device(x)
        # extract vggface features
        vgg_features = self.vgg_extractor(x)
        # update vggface features using the dictionary for each part
        updated_vgg_features = []
        batch = 0  # only supports testing with batch size = 0
        for vgg_layer, f_size in zip(self.vgg_layers, self.feature_sizes):
            dict_features = self.dict[f'{f_size}']
            vgg_feat = vgg_features[vgg_layer]
            updated_feat = vgg_feat.clone()

            # swap features from dictionary
            for part_idx, part_name in enumerate(self.parts):
                location = (part_locations[part_idx][batch] // (512 / f_size)).int()
                updated_feat = self.swap_feat(vgg_feat, updated_feat, dict_features[part_name], location, part_name,
                                              f_size)

            updated_vgg_features.append(updated_feat)

        vgg_feat_dilation = self.multi_scale_dilation(vgg_features['conv5_4'])
        # use updated vgg features to modulate the upsampled features with
        # SFT (Spatial Feature Transform) scaling and shifting manner.
        upsampled_feat = self.upsample0(vgg_feat_dilation, updated_vgg_features[3])
        upsampled_feat = self.upsample1(upsampled_feat, updated_vgg_features[2])
        upsampled_feat = self.upsample2(upsampled_feat, updated_vgg_features[1])
        upsampled_feat = self.upsample3(upsampled_feat, updated_vgg_features[0])
        out = self.upsample4(upsampled_feat)

        return out
basicsr/archs/dfdnet_util.py
ADDED
@@ -0,0 +1,162 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.nn.utils.spectral_norm import spectral_norm


class BlurFunctionBackward(Function):

    @staticmethod
    def forward(ctx, grad_output, kernel, kernel_flip):
        ctx.save_for_backward(kernel, kernel_flip)
        grad_input = F.conv2d(grad_output, kernel_flip, padding=1, groups=grad_output.shape[1])
        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_output):
        kernel, _ = ctx.saved_tensors
        grad_input = F.conv2d(gradgrad_output, kernel, padding=1, groups=gradgrad_output.shape[1])
        return grad_input, None, None


class BlurFunction(Function):

    @staticmethod
    def forward(ctx, x, kernel, kernel_flip):
        ctx.save_for_backward(kernel, kernel_flip)
        output = F.conv2d(x, kernel, padding=1, groups=x.shape[1])
        return output

    @staticmethod
    def backward(ctx, grad_output):
        kernel, kernel_flip = ctx.saved_tensors
        grad_input = BlurFunctionBackward.apply(grad_output, kernel, kernel_flip)
        return grad_input, None, None


blur = BlurFunction.apply


class Blur(nn.Module):

    def __init__(self, channel):
        super().__init__()
        kernel = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=torch.float32)
        kernel = kernel.view(1, 1, 3, 3)
        kernel = kernel / kernel.sum()
        kernel_flip = torch.flip(kernel, [2, 3])

        self.kernel = kernel.repeat(channel, 1, 1, 1)
        self.kernel_flip = kernel_flip.repeat(channel, 1, 1, 1)

    def forward(self, x):
        return blur(x, self.kernel.type_as(x), self.kernel_flip.type_as(x))


def calc_mean_std(feat, eps=1e-5):
    """Calculate mean and std for adaptive_instance_normalization.

    Args:
        feat (Tensor): 4D tensor.
        eps (float): A small value added to the variance to avoid
            divide-by-zero. Default: 1e-5.
    """
    size = feat.size()
    assert len(size) == 4, 'The input feature should be 4D tensor.'
    n, c = size[:2]
    feat_var = feat.view(n, c, -1).var(dim=2) + eps
    feat_std = feat_var.sqrt().view(n, c, 1, 1)
    feat_mean = feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1)
    return feat_mean, feat_std


def adaptive_instance_normalization(content_feat, style_feat):
    """Adaptive instance normalization.

    Adjust the reference features to have the similar color and illuminations
    as those in the degradate features.

    Args:
        content_feat (Tensor): The reference feature.
        style_feat (Tensor): The degradate features.
    """
    size = content_feat.size()
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
    return normalized_feat * style_std.expand(size) + style_mean.expand(size)


def AttentionBlock(in_channel):
    return nn.Sequential(
        spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
        spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)))


def conv_block(in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=True):
    """Conv block used in MSDilationBlock."""

    return nn.Sequential(
        spectral_norm(
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                padding=((kernel_size - 1) // 2) * dilation,
                bias=bias)),
        nn.LeakyReLU(0.2),
        spectral_norm(
            nn.Conv2d(
                out_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                padding=((kernel_size - 1) // 2) * dilation,
                bias=bias)),
    )


class MSDilationBlock(nn.Module):
    """Multi-scale dilation block."""

    def __init__(self, in_channels, kernel_size=3, dilation=(1, 1, 1, 1), bias=True):
        super(MSDilationBlock, self).__init__()

        self.conv_blocks = nn.ModuleList()
        for i in range(4):
            self.conv_blocks.append(conv_block(in_channels, in_channels, kernel_size, dilation=dilation[i], bias=bias))
        self.conv_fusion = spectral_norm(
            nn.Conv2d(
                in_channels * 4,
                in_channels,
                kernel_size=kernel_size,
                stride=1,
                padding=(kernel_size - 1) // 2,
                bias=bias))

    def forward(self, x):
        out = []
        for i in range(4):
            out.append(self.conv_blocks[i](x))
        out = torch.cat(out, 1)
        out = self.conv_fusion(out) + x
        return out


class UpResBlock(nn.Module):

    def __init__(self, in_channel):
        super(UpResBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
        )

    def forward(self, x):
        out = x + self.body(x)
        return out
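
To see what adaptive_instance_normalization does, a small check (not part of the commit): the content features are re-normalized so that their per-channel mean and std match those of the style features.

import torch
from basicsr.archs.dfdnet_util import adaptive_instance_normalization, calc_mean_std

content = torch.randn(2, 8, 16, 16) * 3.0 + 1.0
style = torch.randn(2, 8, 16, 16) * 0.5 - 2.0

out = adaptive_instance_normalization(content, style)
out_mean, out_std = calc_mean_std(out)
style_mean, style_std = calc_mean_std(style)
print(torch.allclose(out_mean, style_mean, atol=1e-4))  # True
print(torch.allclose(out_std, style_std, atol=1e-3))    # True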
basicsr/archs/discriminator_arch.py
ADDED
@@ -0,0 +1,85 @@
from torch import nn as nn

from basicsr.utils.registry import ARCH_REGISTRY


@ARCH_REGISTRY.register()
class VGGStyleDiscriminator(nn.Module):
    """VGG style discriminator with input size 128 x 128 or 256 x 256.

    It is used to train SRGAN, ESRGAN, and VideoGAN.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_feat (int): Channel number of base intermediate features. Default: 64.
    """

    def __init__(self, num_in_ch, num_feat, input_size=128):
        super(VGGStyleDiscriminator, self).__init__()
        self.input_size = input_size
        assert self.input_size == 128 or self.input_size == 256, (
            f'input size must be 128 or 256, but received {input_size}')

        self.conv0_0 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1, bias=True)
        self.conv0_1 = nn.Conv2d(num_feat, num_feat, 4, 2, 1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(num_feat, affine=True)

        self.conv1_0 = nn.Conv2d(num_feat, num_feat * 2, 3, 1, 1, bias=False)
        self.bn1_0 = nn.BatchNorm2d(num_feat * 2, affine=True)
        self.conv1_1 = nn.Conv2d(num_feat * 2, num_feat * 2, 4, 2, 1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(num_feat * 2, affine=True)

        self.conv2_0 = nn.Conv2d(num_feat * 2, num_feat * 4, 3, 1, 1, bias=False)
        self.bn2_0 = nn.BatchNorm2d(num_feat * 4, affine=True)
        self.conv2_1 = nn.Conv2d(num_feat * 4, num_feat * 4, 4, 2, 1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(num_feat * 4, affine=True)

        self.conv3_0 = nn.Conv2d(num_feat * 4, num_feat * 8, 3, 1, 1, bias=False)
        self.bn3_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
        self.conv3_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
        self.bn3_1 = nn.BatchNorm2d(num_feat * 8, affine=True)

        self.conv4_0 = nn.Conv2d(num_feat * 8, num_feat * 8, 3, 1, 1, bias=False)
        self.bn4_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
        self.conv4_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
        self.bn4_1 = nn.BatchNorm2d(num_feat * 8, affine=True)

        if self.input_size == 256:
            self.conv5_0 = nn.Conv2d(num_feat * 8, num_feat * 8, 3, 1, 1, bias=False)
            self.bn5_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
            self.conv5_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
            self.bn5_1 = nn.BatchNorm2d(num_feat * 8, affine=True)

        self.linear1 = nn.Linear(num_feat * 8 * 4 * 4, 100)
        self.linear2 = nn.Linear(100, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        assert x.size(2) == self.input_size, (f'Input size must be identical to input_size, but received {x.size()}.')

        feat = self.lrelu(self.conv0_0(x))
        feat = self.lrelu(self.bn0_1(self.conv0_1(feat)))  # output spatial size: /2

        feat = self.lrelu(self.bn1_0(self.conv1_0(feat)))
        feat = self.lrelu(self.bn1_1(self.conv1_1(feat)))  # output spatial size: /4

        feat = self.lrelu(self.bn2_0(self.conv2_0(feat)))
        feat = self.lrelu(self.bn2_1(self.conv2_1(feat)))  # output spatial size: /8

        feat = self.lrelu(self.bn3_0(self.conv3_0(feat)))
        feat = self.lrelu(self.bn3_1(self.conv3_1(feat)))  # output spatial size: /16

        feat = self.lrelu(self.bn4_0(self.conv4_0(feat)))
        feat = self.lrelu(self.bn4_1(self.conv4_1(feat)))  # output spatial size: /32

        if self.input_size == 256:
            feat = self.lrelu(self.bn5_0(self.conv5_0(feat)))
            feat = self.lrelu(self.bn5_1(self.conv5_1(feat)))  # output spatial size: / 64

        # spatial size: (4, 4)
        feat = feat.view(feat.size(0), -1)
        feat = self.lrelu(self.linear1(feat))
        out = self.linear2(feat)
        return out
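
A shape sanity check for the discriminator (not part of the commit): with the default 128x128 input, five stride-2 stages reduce the feature map to 4x4 before the two linear layers, so the output is one logit per image.

import torch
from basicsr.archs.discriminator_arch import VGGStyleDiscriminator

d = VGGStyleDiscriminator(num_in_ch=3, num_feat=64, input_size=128)
logits = d(torch.randn(4, 3, 128, 128))
print(logits.shape)  # torch.Size([4, 1])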
basicsr/archs/duf_arch.py
ADDED
@@ -0,0 +1,277 @@
1 |
+
import numpy as np
|
2 |
+
import torch
|
3 |
+
from torch import nn as nn
|
4 |
+
from torch.nn import functional as F
|
5 |
+
|
6 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
7 |
+
|
8 |
+
|
9 |
+
class DenseBlocksTemporalReduce(nn.Module):
|
10 |
+
"""A concatenation of 3 dense blocks with reduction in temporal dimension.
|
11 |
+
|
12 |
+
Note that the output temporal dimension is 6 fewer the input temporal dimension, since there are 3 blocks.
|
13 |
+
|
14 |
+
Args:
|
15 |
+
num_feat (int): Number of channels in the blocks. Default: 64.
|
16 |
+
num_grow_ch (int): Growing factor of the dense blocks. Default: 32
|
17 |
+
adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation.
|
18 |
+
Set to false if you want to train from scratch. Default: False.
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, num_feat=64, num_grow_ch=32, adapt_official_weights=False):
|
22 |
+
super(DenseBlocksTemporalReduce, self).__init__()
|
23 |
+
if adapt_official_weights:
|
24 |
+
eps = 1e-3
|
25 |
+
momentum = 1e-3
|
26 |
+
else: # pytorch default values
|
27 |
+
eps = 1e-05
|
28 |
+
momentum = 0.1
|
29 |
+
|
30 |
+
self.temporal_reduce1 = nn.Sequential(
|
31 |
+
nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
32 |
+
nn.Conv3d(num_feat, num_feat, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True),
|
33 |
+
nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
34 |
+
nn.Conv3d(num_feat, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
|
35 |
+
|
36 |
+
self.temporal_reduce2 = nn.Sequential(
|
37 |
+
nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
38 |
+
nn.Conv3d(
|
39 |
+
num_feat + num_grow_ch,
|
40 |
+
num_feat + num_grow_ch, (1, 1, 1),
|
41 |
+
stride=(1, 1, 1),
|
42 |
+
padding=(0, 0, 0),
|
43 |
+
bias=True), nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
44 |
+
nn.Conv3d(num_feat + num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
|
45 |
+
|
46 |
+
self.temporal_reduce3 = nn.Sequential(
|
47 |
+
nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
48 |
+
nn.Conv3d(
|
49 |
+
num_feat + 2 * num_grow_ch,
|
50 |
+
num_feat + 2 * num_grow_ch, (1, 1, 1),
|
51 |
+
stride=(1, 1, 1),
|
52 |
+
padding=(0, 0, 0),
|
53 |
+
bias=True), nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum),
|
54 |
+
nn.ReLU(inplace=True),
|
55 |
+
nn.Conv3d(
|
56 |
+
num_feat + 2 * num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
|
57 |
+
|
58 |
+
def forward(self, x):
|
59 |
+
"""
|
60 |
+
Args:
|
61 |
+
x (Tensor): Input tensor with shape (b, num_feat, t, h, w).
|
62 |
+
|
63 |
+
Returns:
|
64 |
+
Tensor: Output with shape (b, num_feat + num_grow_ch * 3, 1, h, w).
|
65 |
+
"""
|
66 |
+
x1 = self.temporal_reduce1(x)
|
67 |
+
x1 = torch.cat((x[:, :, 1:-1, :, :], x1), 1)
|
68 |
+
|
69 |
+
x2 = self.temporal_reduce2(x1)
|
70 |
+
x2 = torch.cat((x1[:, :, 1:-1, :, :], x2), 1)
|
71 |
+
|
72 |
+
x3 = self.temporal_reduce3(x2)
|
73 |
+
x3 = torch.cat((x2[:, :, 1:-1, :, :], x3), 1)
|
74 |
+
|
75 |
+
return x3
|
76 |
+
|
77 |
+
|
78 |
+
class DenseBlocks(nn.Module):
|
79 |
+
""" A concatenation of N dense blocks.
|
80 |
+
|
81 |
+
Args:
|
82 |
+
num_feat (int): Number of channels in the blocks. Default: 64.
|
83 |
+
num_grow_ch (int): Growing factor of the dense blocks. Default: 32.
|
84 |
+
num_block (int): Number of dense blocks. The values are:
|
85 |
+
DUF-S (16 layers): 3
|
86 |
+
DUF-M (28 layers): 9
|
87 |
+
DUF-L (52 layers): 21
|
88 |
+
adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation.
|
89 |
+
Set to false if you want to train from scratch. Default: False.
|
90 |
+
"""
|
91 |
+
|
92 |
+
def __init__(self, num_block, num_feat=64, num_grow_ch=16, adapt_official_weights=False):
|
93 |
+
super(DenseBlocks, self).__init__()
|
94 |
+
if adapt_official_weights:
|
95 |
+
eps = 1e-3
|
96 |
+
momentum = 1e-3
|
97 |
+
else: # pytorch default values
|
98 |
+
eps = 1e-05
|
99 |
+
momentum = 0.1
|
100 |
+
|
101 |
+
self.dense_blocks = nn.ModuleList()
|
102 |
+
for i in range(0, num_block):
|
103 |
+
self.dense_blocks.append(
|
104 |
+
nn.Sequential(
|
105 |
+
nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
|
106 |
+
nn.Conv3d(
|
107 |
+
num_feat + i * num_grow_ch,
|
108 |
+
num_feat + i * num_grow_ch, (1, 1, 1),
|
109 |
+
stride=(1, 1, 1),
|
110 |
+
padding=(0, 0, 0),
|
111 |
+
bias=True), nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum),
|
112 |
+
nn.ReLU(inplace=True),
|
113 |
+
nn.Conv3d(
|
114 |
+
num_feat + i * num_grow_ch,
|
115 |
+
num_grow_ch, (3, 3, 3),
|
116 |
+
stride=(1, 1, 1),
|
117 |
+
padding=(1, 1, 1),
|
118 |
+
bias=True)))
|
119 |
+
|
120 |
+
def forward(self, x):
|
121 |
+
"""
|
122 |
+
Args:
|
123 |
+
x (Tensor): Input tensor with shape (b, num_feat, t, h, w).
|
124 |
+
|
125 |
+
Returns:
|
126 |
+
Tensor: Output with shape (b, num_feat + num_block * num_grow_ch, t, h, w).
|
127 |
+
"""
|
128 |
+
for i in range(0, len(self.dense_blocks)):
|
129 |
+
y = self.dense_blocks[i](x)
|
130 |
+
x = torch.cat((x, y), 1)
|
131 |
+
return x
|
132 |
+
|
133 |
+
|
134 |
+
class DynamicUpsamplingFilter(nn.Module):
|
135 |
+
"""Dynamic upsampling filter used in DUF.
|
136 |
+
|
137 |
+
Ref: https://github.com/yhjo09/VSR-DUF.
|
138 |
+
It only supports inputs with 3 channels, and it applies the same filters to all 3 channels.
|
139 |
+
|
140 |
+
Args:
|
141 |
+
filter_size (tuple): Filter size of generated filters. The shape is (kh, kw). Default: (5, 5).
|
142 |
+
"""
|
143 |
+
|
144 |
+
def __init__(self, filter_size=(5, 5)):
|
145 |
+
super(DynamicUpsamplingFilter, self).__init__()
|
146 |
+
if not isinstance(filter_size, tuple):
|
147 |
+
raise TypeError(f'The type of filter_size must be tuple, but got {type(filter_size)}')
|
148 |
+
if len(filter_size) != 2:
|
149 |
+
raise ValueError(f'The length of filter size must be 2, but got {len(filter_size)}.')
|
150 |
+
# generate a local expansion filter, similar to im2col
|
151 |
+
self.filter_size = filter_size
|
152 |
+
filter_prod = np.prod(filter_size)
|
153 |
+
expansion_filter = torch.eye(int(filter_prod)).view(filter_prod, 1, *filter_size) # (kh*kw, 1, kh, kw)
|
154 |
+
self.expansion_filter = expansion_filter.repeat(3, 1, 1, 1) # repeat for all the 3 channels
|
155 |
+
|
156 |
+
def forward(self, x, filters):
|
157 |
+
"""Forward function for DynamicUpsamplingFilter.
|
158 |
+
|
159 |
+
Args:
|
160 |
+
x (Tensor): Input image with 3 channels. The shape is (n, 3, h, w).
|
161 |
+
filters (Tensor): Generated dynamic filters.
|
162 |
+
The shape is (n, filter_prod, upsampling_square, h, w).
|
163 |
+
filter_prod: prod of filter kernel size, e.g., 1*5*5=25.
|
164 |
+
upsampling_square: similar to pixel shuffle,
|
165 |
+
upsampling_square = upsampling * upsampling
|
166 |
+
e.g., for x 4 upsampling, upsampling_square= 4*4 = 16
|
167 |
+
|
168 |
+
Returns:
|
169 |
+
Tensor: Filtered image with shape (n, 3*upsampling_square, h, w)
|
170 |
+
"""
|
171 |
+
n, filter_prod, upsampling_square, h, w = filters.size()
|
172 |
+
kh, kw = self.filter_size
|
173 |
+
expanded_input = F.conv2d(
|
174 |
+
x, self.expansion_filter.to(x), padding=(kh // 2, kw // 2), groups=3) # (n, 3*filter_prod, h, w)
|
175 |
+
expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(0, 3, 4, 1,
|
176 |
+
2) # (n, h, w, 3, filter_prod)
|
177 |
+
filters = filters.permute(0, 3, 4, 1, 2)  # (n, h, w, filter_prod, upsampling_square)
|
178 |
+
out = torch.matmul(expanded_input, filters) # (n, h, w, 3, upsampling_square)
|
179 |
+
return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)
|
180 |
+
|
181 |
+
|
182 |
+
@ARCH_REGISTRY.register()
|
183 |
+
class DUF(nn.Module):
|
184 |
+
"""Network architecture for DUF
|
185 |
+
|
186 |
+
Paper: Jo et al. Deep Video Super-Resolution Network Using Dynamic
|
187 |
+
Upsampling Filters Without Explicit Motion Compensation, CVPR, 2018
|
188 |
+
Code reference:
|
189 |
+
https://github.com/yhjo09/VSR-DUF
|
190 |
+
For all the models below, 'adapt_official_weights' is only necessary when
|
191 |
+
loading the weights converted from the official TensorFlow weights.
|
192 |
+
Please set it to False if you are training the model from scratch.
|
193 |
+
|
194 |
+
There are three models with different model sizes: DUF16Layers, DUF28Layers,
|
195 |
+
and DUF52Layers. This class is the base class for these models.
|
196 |
+
|
197 |
+
Args:
|
198 |
+
scale (int): The upsampling factor. Default: 4.
|
199 |
+
num_layer (int): The number of layers. Default: 52.
|
200 |
+
adapt_official_weights (bool): Whether to adapt the weights
|
201 |
+
translated from the official implementation. Set to false if you
|
202 |
+
want to train from scratch. Default: False.
|
203 |
+
"""
|
204 |
+
|
205 |
+
def __init__(self, scale=4, num_layer=52, adapt_official_weights=False):
|
206 |
+
super(DUF, self).__init__()
|
207 |
+
self.scale = scale
|
208 |
+
if adapt_official_weights:
|
209 |
+
eps = 1e-3
|
210 |
+
momentum = 1e-3
|
211 |
+
else: # pytorch default values
|
212 |
+
eps = 1e-05
|
213 |
+
momentum = 0.1
|
214 |
+
|
215 |
+
self.conv3d1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
|
216 |
+
self.dynamic_filter = DynamicUpsamplingFilter((5, 5))
|
217 |
+
|
218 |
+
if num_layer == 16:
|
219 |
+
num_block = 3
|
220 |
+
num_grow_ch = 32
|
221 |
+
elif num_layer == 28:
|
222 |
+
num_block = 9
|
223 |
+
num_grow_ch = 16
|
224 |
+
elif num_layer == 52:
|
225 |
+
num_block = 21
|
226 |
+
num_grow_ch = 16
|
227 |
+
else:
|
228 |
+
raise ValueError(f'Only supported (16, 28, 52) layers, but got {num_layer}.')
|
229 |
+
|
230 |
+
self.dense_block1 = DenseBlocks(
|
231 |
+
num_block=num_block, num_feat=64, num_grow_ch=num_grow_ch,
|
232 |
+
adapt_official_weights=adapt_official_weights) # T = 7
|
233 |
+
self.dense_block2 = DenseBlocksTemporalReduce(
|
234 |
+
64 + num_grow_ch * num_block, num_grow_ch, adapt_official_weights=adapt_official_weights) # T = 1
|
235 |
+
channels = 64 + num_grow_ch * num_block + num_grow_ch * 3
|
236 |
+
self.bn3d2 = nn.BatchNorm3d(channels, eps=eps, momentum=momentum)
|
237 |
+
self.conv3d2 = nn.Conv3d(channels, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
|
238 |
+
|
239 |
+
self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
|
240 |
+
self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
|
241 |
+
|
242 |
+
self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
|
243 |
+
self.conv3d_f2 = nn.Conv3d(
|
244 |
+
512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
|
245 |
+
|
246 |
+
def forward(self, x):
|
247 |
+
"""
|
248 |
+
Args:
|
249 |
+
x (Tensor): Input with shape (b, 7, c, h, w)
|
250 |
+
|
251 |
+
Returns:
|
252 |
+
Tensor: Output with shape (b, c, h * scale, w * scale)
|
253 |
+
"""
|
254 |
+
num_batches, num_imgs, _, h, w = x.size()
|
255 |
+
|
256 |
+
x = x.permute(0, 2, 1, 3, 4) # (b, c, 7, h, w) for Conv3D
|
257 |
+
x_center = x[:, :, num_imgs // 2, :, :]
|
258 |
+
|
259 |
+
x = self.conv3d1(x)
|
260 |
+
x = self.dense_block1(x)
|
261 |
+
x = self.dense_block2(x)
|
262 |
+
x = F.relu(self.bn3d2(x), inplace=True)
|
263 |
+
x = F.relu(self.conv3d2(x), inplace=True)
|
264 |
+
|
265 |
+
# residual image
|
266 |
+
res = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True))
|
267 |
+
|
268 |
+
# filter
|
269 |
+
filter_ = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True))
|
270 |
+
filter_ = F.softmax(filter_.view(num_batches, 25, self.scale**2, h, w), dim=1)
|
271 |
+
|
272 |
+
# dynamic filter
|
273 |
+
out = self.dynamic_filter(x_center, filter_)
|
274 |
+
out += res.squeeze_(2)
|
275 |
+
out = F.pixel_shuffle(out, self.scale)
|
276 |
+
|
277 |
+
return out
|
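A minimal usage sketch for the DUF arch added above (not part of the commit), assuming basicsr and its dependencies are installed; shapes follow the forward() docstring:
import torch
from basicsr.archs.duf_arch import DUF

# Hypothetical example: 52-layer DUF with x4 upscaling on a dummy 7-frame clip.
model = DUF(scale=4, num_layer=52, adapt_official_weights=False).eval()
frames = torch.rand(1, 7, 3, 32, 32)  # (b, t, c, h, w) with t = 7 input frames
with torch.no_grad():
    sr = model(frames)
print(sr.shape)  # expected: torch.Size([1, 3, 128, 128])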
basicsr/archs/ecbsr_arch.py
ADDED
@@ -0,0 +1,274 @@
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
import torch.nn.functional as F
|
4 |
+
|
5 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
6 |
+
|
7 |
+
|
8 |
+
class SeqConv3x3(nn.Module):
|
9 |
+
"""The re-parameterizable block used in the ECBSR architecture.
|
10 |
+
|
11 |
+
Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
|
12 |
+
Ref git repo: https://github.com/xindongzhang/ECBSR
|
13 |
+
|
14 |
+
Args:
|
15 |
+
seq_type (str): Sequence type, option: conv1x1-conv3x3 | conv1x1-sobelx | conv1x1-sobely | conv1x1-laplacian.
|
16 |
+
in_channels (int): Channel number of input.
|
17 |
+
out_channels (int): Channel number of output.
|
18 |
+
depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, seq_type, in_channels, out_channels, depth_multiplier=1):
|
22 |
+
super(SeqConv3x3, self).__init__()
|
23 |
+
self.seq_type = seq_type
|
24 |
+
self.in_channels = in_channels
|
25 |
+
self.out_channels = out_channels
|
26 |
+
|
27 |
+
if self.seq_type == 'conv1x1-conv3x3':
|
28 |
+
self.mid_planes = int(out_channels * depth_multiplier)
|
29 |
+
conv0 = torch.nn.Conv2d(self.in_channels, self.mid_planes, kernel_size=1, padding=0)
|
30 |
+
self.k0 = conv0.weight
|
31 |
+
self.b0 = conv0.bias
|
32 |
+
|
33 |
+
conv1 = torch.nn.Conv2d(self.mid_planes, self.out_channels, kernel_size=3)
|
34 |
+
self.k1 = conv1.weight
|
35 |
+
self.b1 = conv1.bias
|
36 |
+
|
37 |
+
elif self.seq_type == 'conv1x1-sobelx':
|
38 |
+
conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
|
39 |
+
self.k0 = conv0.weight
|
40 |
+
self.b0 = conv0.bias
|
41 |
+
|
42 |
+
# init scale and bias
|
43 |
+
scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
|
44 |
+
self.scale = nn.Parameter(scale)
|
45 |
+
bias = torch.randn(self.out_channels) * 1e-3
|
46 |
+
bias = torch.reshape(bias, (self.out_channels, ))
|
47 |
+
self.bias = nn.Parameter(bias)
|
48 |
+
# init mask
|
49 |
+
self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
|
50 |
+
for i in range(self.out_channels):
|
51 |
+
self.mask[i, 0, 0, 0] = 1.0
|
52 |
+
self.mask[i, 0, 1, 0] = 2.0
|
53 |
+
self.mask[i, 0, 2, 0] = 1.0
|
54 |
+
self.mask[i, 0, 0, 2] = -1.0
|
55 |
+
self.mask[i, 0, 1, 2] = -2.0
|
56 |
+
self.mask[i, 0, 2, 2] = -1.0
|
57 |
+
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
|
58 |
+
|
59 |
+
elif self.seq_type == 'conv1x1-sobely':
|
60 |
+
conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
|
61 |
+
self.k0 = conv0.weight
|
62 |
+
self.b0 = conv0.bias
|
63 |
+
|
64 |
+
# init scale and bias
|
65 |
+
scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
|
66 |
+
self.scale = nn.Parameter(torch.FloatTensor(scale))
|
67 |
+
bias = torch.randn(self.out_channels) * 1e-3
|
68 |
+
bias = torch.reshape(bias, (self.out_channels, ))
|
69 |
+
self.bias = nn.Parameter(torch.FloatTensor(bias))
|
70 |
+
# init mask
|
71 |
+
self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
|
72 |
+
for i in range(self.out_channels):
|
73 |
+
self.mask[i, 0, 0, 0] = 1.0
|
74 |
+
self.mask[i, 0, 0, 1] = 2.0
|
75 |
+
self.mask[i, 0, 0, 2] = 1.0
|
76 |
+
self.mask[i, 0, 2, 0] = -1.0
|
77 |
+
self.mask[i, 0, 2, 1] = -2.0
|
78 |
+
self.mask[i, 0, 2, 2] = -1.0
|
79 |
+
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
|
80 |
+
|
81 |
+
elif self.seq_type == 'conv1x1-laplacian':
|
82 |
+
conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
|
83 |
+
self.k0 = conv0.weight
|
84 |
+
self.b0 = conv0.bias
|
85 |
+
|
86 |
+
# init scale and bias
|
87 |
+
scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
|
88 |
+
self.scale = nn.Parameter(torch.FloatTensor(scale))
|
89 |
+
bias = torch.randn(self.out_channels) * 1e-3
|
90 |
+
bias = torch.reshape(bias, (self.out_channels, ))
|
91 |
+
self.bias = nn.Parameter(torch.FloatTensor(bias))
|
92 |
+
# init mask
|
93 |
+
self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
|
94 |
+
for i in range(self.out_channels):
|
95 |
+
self.mask[i, 0, 0, 1] = 1.0
|
96 |
+
self.mask[i, 0, 1, 0] = 1.0
|
97 |
+
self.mask[i, 0, 1, 2] = 1.0
|
98 |
+
self.mask[i, 0, 2, 1] = 1.0
|
99 |
+
self.mask[i, 0, 1, 1] = -4.0
|
100 |
+
self.mask = nn.Parameter(data=self.mask, requires_grad=False)
|
101 |
+
else:
|
102 |
+
raise ValueError('The type of seqconv is not supported!')
|
103 |
+
|
104 |
+
def forward(self, x):
|
105 |
+
if self.seq_type == 'conv1x1-conv3x3':
|
106 |
+
# conv-1x1
|
107 |
+
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
|
108 |
+
# explicitly padding with bias
|
109 |
+
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
|
110 |
+
b0_pad = self.b0.view(1, -1, 1, 1)
|
111 |
+
y0[:, :, 0:1, :] = b0_pad
|
112 |
+
y0[:, :, -1:, :] = b0_pad
|
113 |
+
y0[:, :, :, 0:1] = b0_pad
|
114 |
+
y0[:, :, :, -1:] = b0_pad
|
115 |
+
# conv-3x3
|
116 |
+
y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1)
|
117 |
+
else:
|
118 |
+
y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
|
119 |
+
# explicitly padding with bias
|
120 |
+
y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
|
121 |
+
b0_pad = self.b0.view(1, -1, 1, 1)
|
122 |
+
y0[:, :, 0:1, :] = b0_pad
|
123 |
+
y0[:, :, -1:, :] = b0_pad
|
124 |
+
y0[:, :, :, 0:1] = b0_pad
|
125 |
+
y0[:, :, :, -1:] = b0_pad
|
126 |
+
# conv-3x3
|
127 |
+
y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=self.bias, stride=1, groups=self.out_channels)
|
128 |
+
return y1
|
129 |
+
|
130 |
+
def rep_params(self):
|
131 |
+
device = self.k0.get_device()
|
132 |
+
if device < 0:
|
133 |
+
device = None
|
134 |
+
|
135 |
+
if self.seq_type == 'conv1x1-conv3x3':
|
136 |
+
# re-param conv kernel
|
137 |
+
rep_weight = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3))
|
138 |
+
# re-param conv bias
|
139 |
+
rep_bias = torch.ones(1, self.mid_planes, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
|
140 |
+
rep_bias = F.conv2d(input=rep_bias, weight=self.k1).view(-1, ) + self.b1
|
141 |
+
else:
|
142 |
+
tmp = self.scale * self.mask
|
143 |
+
k1 = torch.zeros((self.out_channels, self.out_channels, 3, 3), device=device)
|
144 |
+
for i in range(self.out_channels):
|
145 |
+
k1[i, i, :, :] = tmp[i, 0, :, :]
|
146 |
+
b1 = self.bias
|
147 |
+
# re-param conv kernel
|
148 |
+
rep_weight = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3))
|
149 |
+
# re-param conv bias
|
150 |
+
rep_bias = torch.ones(1, self.out_channels, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
|
151 |
+
rep_bias = F.conv2d(input=rep_bias, weight=k1).view(-1, ) + b1
|
152 |
+
return rep_weight, rep_bias
|
153 |
+
|
154 |
+
|
155 |
+
class ECB(nn.Module):
|
156 |
+
"""The ECB block used in the ECBSR architecture.
|
157 |
+
|
158 |
+
Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
|
159 |
+
Ref git repo: https://github.com/xindongzhang/ECBSR
|
160 |
+
|
161 |
+
Args:
|
162 |
+
in_channels (int): Channel number of input.
|
163 |
+
out_channels (int): Channel number of output.
|
164 |
+
depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
|
165 |
+
act_type (str): Activation type. Option: prelu | relu | rrelu | softplus | linear. Default: prelu.
|
166 |
+
with_idt (bool): Whether to use identity connection. Default: False.
|
167 |
+
"""
|
168 |
+
|
169 |
+
def __init__(self, in_channels, out_channels, depth_multiplier, act_type='prelu', with_idt=False):
|
170 |
+
super(ECB, self).__init__()
|
171 |
+
|
172 |
+
self.depth_multiplier = depth_multiplier
|
173 |
+
self.in_channels = in_channels
|
174 |
+
self.out_channels = out_channels
|
175 |
+
self.act_type = act_type
|
176 |
+
|
177 |
+
if with_idt and (self.in_channels == self.out_channels):
|
178 |
+
self.with_idt = True
|
179 |
+
else:
|
180 |
+
self.with_idt = False
|
181 |
+
|
182 |
+
self.conv3x3 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, padding=1)
|
183 |
+
self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.in_channels, self.out_channels, self.depth_multiplier)
|
184 |
+
self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.in_channels, self.out_channels)
|
185 |
+
self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.in_channels, self.out_channels)
|
186 |
+
self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.in_channels, self.out_channels)
|
187 |
+
|
188 |
+
if self.act_type == 'prelu':
|
189 |
+
self.act = nn.PReLU(num_parameters=self.out_channels)
|
190 |
+
elif self.act_type == 'relu':
|
191 |
+
self.act = nn.ReLU(inplace=True)
|
192 |
+
elif self.act_type == 'rrelu':
|
193 |
+
self.act = nn.RReLU(lower=-0.05, upper=0.05)
|
194 |
+
elif self.act_type == 'softplus':
|
195 |
+
self.act = nn.Softplus()
|
196 |
+
elif self.act_type == 'linear':
|
197 |
+
pass
|
198 |
+
else:
|
199 |
+
raise ValueError('The type of activation is not supported!')
|
200 |
+
|
201 |
+
def forward(self, x):
|
202 |
+
if self.training:
|
203 |
+
y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x) + self.conv1x1_sby(x) + self.conv1x1_lpl(x)
|
204 |
+
if self.with_idt:
|
205 |
+
y += x
|
206 |
+
else:
|
207 |
+
rep_weight, rep_bias = self.rep_params()
|
208 |
+
y = F.conv2d(input=x, weight=rep_weight, bias=rep_bias, stride=1, padding=1)
|
209 |
+
if self.act_type != 'linear':
|
210 |
+
y = self.act(y)
|
211 |
+
return y
|
212 |
+
|
213 |
+
def rep_params(self):
|
214 |
+
weight0, bias0 = self.conv3x3.weight, self.conv3x3.bias
|
215 |
+
weight1, bias1 = self.conv1x1_3x3.rep_params()
|
216 |
+
weight2, bias2 = self.conv1x1_sbx.rep_params()
|
217 |
+
weight3, bias3 = self.conv1x1_sby.rep_params()
|
218 |
+
weight4, bias4 = self.conv1x1_lpl.rep_params()
|
219 |
+
rep_weight, rep_bias = (weight0 + weight1 + weight2 + weight3 + weight4), (
|
220 |
+
bias0 + bias1 + bias2 + bias3 + bias4)
|
221 |
+
|
222 |
+
if self.with_idt:
|
223 |
+
device = rep_weight.get_device()
|
224 |
+
if device < 0:
|
225 |
+
device = None
|
226 |
+
weight_idt = torch.zeros(self.out_channels, self.out_channels, 3, 3, device=device)
|
227 |
+
for i in range(self.out_channels):
|
228 |
+
weight_idt[i, i, 1, 1] = 1.0
|
229 |
+
bias_idt = 0.0
|
230 |
+
rep_weight, rep_bias = rep_weight + weight_idt, rep_bias + bias_idt
|
231 |
+
return rep_weight, rep_bias
|
232 |
+
|
233 |
+
|
234 |
+
@ARCH_REGISTRY.register()
|
235 |
+
class ECBSR(nn.Module):
|
236 |
+
"""ECBSR architecture.
|
237 |
+
|
238 |
+
Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
|
239 |
+
Ref git repo: https://github.com/xindongzhang/ECBSR
|
240 |
+
|
241 |
+
Args:
|
242 |
+
num_in_ch (int): Channel number of inputs.
|
243 |
+
num_out_ch (int): Channel number of outputs.
|
244 |
+
num_block (int): Block number in the trunk network.
|
245 |
+
num_channel (int): Channel number.
|
246 |
+
with_idt (bool): Whether to use identity connections in the convolution layers.
|
247 |
+
act_type (str): Activation type.
|
248 |
+
scale (int): Upsampling factor.
|
249 |
+
"""
|
250 |
+
|
251 |
+
def __init__(self, num_in_ch, num_out_ch, num_block, num_channel, with_idt, act_type, scale):
|
252 |
+
super(ECBSR, self).__init__()
|
253 |
+
self.num_in_ch = num_in_ch
|
254 |
+
self.scale = scale
|
255 |
+
|
256 |
+
backbone = []
|
257 |
+
backbone += [ECB(num_in_ch, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
|
258 |
+
for _ in range(num_block):
|
259 |
+
backbone += [ECB(num_channel, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
|
260 |
+
backbone += [
|
261 |
+
ECB(num_channel, num_out_ch * scale * scale, depth_multiplier=2.0, act_type='linear', with_idt=with_idt)
|
262 |
+
]
|
263 |
+
|
264 |
+
self.backbone = nn.Sequential(*backbone)
|
265 |
+
self.upsampler = nn.PixelShuffle(scale)
|
266 |
+
|
267 |
+
def forward(self, x):
|
268 |
+
if self.num_in_ch > 1:
|
269 |
+
shortcut = torch.repeat_interleave(x, self.scale * self.scale, dim=1)
|
270 |
+
else:
|
271 |
+
shortcut = x  # broadcasting in the addition below effectively repeats the single input channel scale * scale times
|
272 |
+
y = self.backbone(x) + shortcut
|
273 |
+
y = self.upsampler(y)
|
274 |
+
return y
|
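A small sketch (not part of the commit) of the re-parameterization idea above: in training mode an ECB block sums its multi-branch outputs, while in eval mode it runs one fused 3x3 convolution built by rep_params(); with a linear activation both paths should agree numerically:
import torch
from basicsr.archs.ecbsr_arch import ECB

# Hypothetical check of train/eval equivalence for a single ECB block.
block = ECB(in_channels=8, out_channels=8, depth_multiplier=2, act_type='linear', with_idt=True)
x = torch.rand(1, 8, 16, 16)
block.train()
y_multi_branch = block(x)       # conv3x3 + four SeqConv3x3 branches + identity
block.eval()
with torch.no_grad():
    y_fused = block(x)          # single conv2d using the rep_params() weight and bias
print(torch.allclose(y_multi_branch, y_fused, atol=1e-5))  # expected: True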
basicsr/archs/edsr_arch.py
ADDED
@@ -0,0 +1,61 @@
1 |
+
import torch
|
2 |
+
from torch import nn as nn
|
3 |
+
|
4 |
+
from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer
|
5 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
6 |
+
|
7 |
+
|
8 |
+
@ARCH_REGISTRY.register()
|
9 |
+
class EDSR(nn.Module):
|
10 |
+
"""EDSR network structure.
|
11 |
+
|
12 |
+
Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
|
13 |
+
Ref git repo: https://github.com/thstkdgus35/EDSR-PyTorch
|
14 |
+
|
15 |
+
Args:
|
16 |
+
num_in_ch (int): Channel number of inputs.
|
17 |
+
num_out_ch (int): Channel number of outputs.
|
18 |
+
num_feat (int): Channel number of intermediate features.
|
19 |
+
Default: 64.
|
20 |
+
num_block (int): Block number in the trunk network. Default: 16.
|
21 |
+
upscale (int): Upsampling factor. Support 2^n and 3.
|
22 |
+
Default: 4.
|
23 |
+
res_scale (float): Used to scale the residual in residual block.
|
24 |
+
Default: 1.
|
25 |
+
img_range (float): Image range. Default: 255.
|
26 |
+
rgb_mean (tuple[float]): Image mean in RGB orders.
|
27 |
+
Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
|
28 |
+
"""
|
29 |
+
|
30 |
+
def __init__(self,
|
31 |
+
num_in_ch,
|
32 |
+
num_out_ch,
|
33 |
+
num_feat=64,
|
34 |
+
num_block=16,
|
35 |
+
upscale=4,
|
36 |
+
res_scale=1,
|
37 |
+
img_range=255.,
|
38 |
+
rgb_mean=(0.4488, 0.4371, 0.4040)):
|
39 |
+
super(EDSR, self).__init__()
|
40 |
+
|
41 |
+
self.img_range = img_range
|
42 |
+
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
|
43 |
+
|
44 |
+
self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
|
45 |
+
self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True)
|
46 |
+
self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
47 |
+
self.upsample = Upsample(upscale, num_feat)
|
48 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
49 |
+
|
50 |
+
def forward(self, x):
|
51 |
+
self.mean = self.mean.type_as(x)
|
52 |
+
|
53 |
+
x = (x - self.mean) * self.img_range
|
54 |
+
x = self.conv_first(x)
|
55 |
+
res = self.conv_after_body(self.body(x))
|
56 |
+
res += x
|
57 |
+
|
58 |
+
x = self.conv_last(self.upsample(res))
|
59 |
+
x = x / self.img_range + self.mean
|
60 |
+
|
61 |
+
return x
|
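A minimal usage sketch for EDSR (not part of the commit), assuming basicsr is installed:
import torch
from basicsr.archs.edsr_arch import EDSR

# Hypothetical example: default x4 EDSR on a dummy low-resolution RGB image in [0, 1].
model = EDSR(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4).eval()
lq = torch.rand(1, 3, 48, 48)
with torch.no_grad():
    sr = model(lq)
print(sr.shape)  # expected: torch.Size([1, 3, 192, 192])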
basicsr/archs/edvr_arch.py
ADDED
@@ -0,0 +1,383 @@
1 |
+
import torch
|
2 |
+
from torch import nn as nn
|
3 |
+
from torch.nn import functional as F
|
4 |
+
|
5 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
6 |
+
from .arch_util import DCNv2Pack, ResidualBlockNoBN, make_layer
|
7 |
+
|
8 |
+
|
9 |
+
class PCDAlignment(nn.Module):
|
10 |
+
"""Alignment module using Pyramid, Cascading and Deformable convolution
|
11 |
+
(PCD). It is used in EDVR.
|
12 |
+
|
13 |
+
Ref:
|
14 |
+
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
|
15 |
+
|
16 |
+
Args:
|
17 |
+
num_feat (int): Channel number of middle features. Default: 64.
|
18 |
+
deformable_groups (int): Deformable groups. Defaults: 8.
|
19 |
+
"""
|
20 |
+
|
21 |
+
def __init__(self, num_feat=64, deformable_groups=8):
|
22 |
+
super(PCDAlignment, self).__init__()
|
23 |
+
|
24 |
+
# Pyramid has three levels:
|
25 |
+
# L3: level 3, 1/4 spatial size
|
26 |
+
# L2: level 2, 1/2 spatial size
|
27 |
+
# L1: level 1, original spatial size
|
28 |
+
self.offset_conv1 = nn.ModuleDict()
|
29 |
+
self.offset_conv2 = nn.ModuleDict()
|
30 |
+
self.offset_conv3 = nn.ModuleDict()
|
31 |
+
self.dcn_pack = nn.ModuleDict()
|
32 |
+
self.feat_conv = nn.ModuleDict()
|
33 |
+
|
34 |
+
# Pyramids
|
35 |
+
for i in range(3, 0, -1):
|
36 |
+
level = f'l{i}'
|
37 |
+
self.offset_conv1[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
|
38 |
+
if i == 3:
|
39 |
+
self.offset_conv2[level] = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
40 |
+
else:
|
41 |
+
self.offset_conv2[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
|
42 |
+
self.offset_conv3[level] = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
43 |
+
self.dcn_pack[level] = DCNv2Pack(num_feat, num_feat, 3, padding=1, deformable_groups=deformable_groups)
|
44 |
+
|
45 |
+
if i < 3:
|
46 |
+
self.feat_conv[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
|
47 |
+
|
48 |
+
# Cascading dcn
|
49 |
+
self.cas_offset_conv1 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
|
50 |
+
self.cas_offset_conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
51 |
+
self.cas_dcnpack = DCNv2Pack(num_feat, num_feat, 3, padding=1, deformable_groups=deformable_groups)
|
52 |
+
|
53 |
+
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
|
54 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
|
55 |
+
|
56 |
+
def forward(self, nbr_feat_l, ref_feat_l):
|
57 |
+
"""Align neighboring frame features to the reference frame features.
|
58 |
+
|
59 |
+
Args:
|
60 |
+
nbr_feat_l (list[Tensor]): Neighboring feature list. It
|
61 |
+
contains three pyramid levels (L1, L2, L3),
|
62 |
+
each with shape (b, c, h, w).
|
63 |
+
ref_feat_l (list[Tensor]): Reference feature list. It
|
64 |
+
contains three pyramid levels (L1, L2, L3),
|
65 |
+
each with shape (b, c, h, w).
|
66 |
+
|
67 |
+
Returns:
|
68 |
+
Tensor: Aligned features.
|
69 |
+
"""
|
70 |
+
# Pyramids
|
71 |
+
upsampled_offset, upsampled_feat = None, None
|
72 |
+
for i in range(3, 0, -1):
|
73 |
+
level = f'l{i}'
|
74 |
+
offset = torch.cat([nbr_feat_l[i - 1], ref_feat_l[i - 1]], dim=1)
|
75 |
+
offset = self.lrelu(self.offset_conv1[level](offset))
|
76 |
+
if i == 3:
|
77 |
+
offset = self.lrelu(self.offset_conv2[level](offset))
|
78 |
+
else:
|
79 |
+
offset = self.lrelu(self.offset_conv2[level](torch.cat([offset, upsampled_offset], dim=1)))
|
80 |
+
offset = self.lrelu(self.offset_conv3[level](offset))
|
81 |
+
|
82 |
+
feat = self.dcn_pack[level](nbr_feat_l[i - 1], offset)
|
83 |
+
if i < 3:
|
84 |
+
feat = self.feat_conv[level](torch.cat([feat, upsampled_feat], dim=1))
|
85 |
+
if i > 1:
|
86 |
+
feat = self.lrelu(feat)
|
87 |
+
|
88 |
+
if i > 1: # upsample offset and features
|
89 |
+
# x2: when we upsample the offset, we should also enlarge
|
90 |
+
# the magnitude.
|
91 |
+
upsampled_offset = self.upsample(offset) * 2
|
92 |
+
upsampled_feat = self.upsample(feat)
|
93 |
+
|
94 |
+
# Cascading
|
95 |
+
offset = torch.cat([feat, ref_feat_l[0]], dim=1)
|
96 |
+
offset = self.lrelu(self.cas_offset_conv2(self.lrelu(self.cas_offset_conv1(offset))))
|
97 |
+
feat = self.lrelu(self.cas_dcnpack(feat, offset))
|
98 |
+
return feat
|
99 |
+
|
100 |
+
|
101 |
+
class TSAFusion(nn.Module):
|
102 |
+
"""Temporal Spatial Attention (TSA) fusion module.
|
103 |
+
|
104 |
+
Temporal: Calculate the correlation between center frame and
|
105 |
+
neighboring frames;
|
106 |
+
Spatial: It has 3 pyramid levels, the attention is similar to SFT.
|
107 |
+
(SFT: Recovering realistic texture in image super-resolution by deep
|
108 |
+
spatial feature transform.)
|
109 |
+
|
110 |
+
Args:
|
111 |
+
num_feat (int): Channel number of middle features. Default: 64.
|
112 |
+
num_frame (int): Number of frames. Default: 5.
|
113 |
+
center_frame_idx (int): The index of center frame. Default: 2.
|
114 |
+
"""
|
115 |
+
|
116 |
+
def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2):
|
117 |
+
super(TSAFusion, self).__init__()
|
118 |
+
self.center_frame_idx = center_frame_idx
|
119 |
+
# temporal attention (before fusion conv)
|
120 |
+
self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
121 |
+
self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
122 |
+
self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
|
123 |
+
|
124 |
+
# spatial attention (after fusion conv)
|
125 |
+
self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
|
126 |
+
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
|
127 |
+
self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1)
|
128 |
+
self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1)
|
129 |
+
self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
130 |
+
self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1)
|
131 |
+
self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
132 |
+
self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1)
|
133 |
+
self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
|
134 |
+
self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
135 |
+
self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1)
|
136 |
+
self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1)
|
137 |
+
|
138 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
|
139 |
+
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
|
140 |
+
|
141 |
+
def forward(self, aligned_feat):
|
142 |
+
"""
|
143 |
+
Args:
|
144 |
+
aligned_feat (Tensor): Aligned features with shape (b, t, c, h, w).
|
145 |
+
|
146 |
+
Returns:
|
147 |
+
Tensor: Features after TSA with the shape (b, c, h, w).
|
148 |
+
"""
|
149 |
+
b, t, c, h, w = aligned_feat.size()
|
150 |
+
# temporal attention
|
151 |
+
embedding_ref = self.temporal_attn1(aligned_feat[:, self.center_frame_idx, :, :, :].clone())
|
152 |
+
embedding = self.temporal_attn2(aligned_feat.view(-1, c, h, w))
|
153 |
+
embedding = embedding.view(b, t, -1, h, w) # (b, t, c, h, w)
|
154 |
+
|
155 |
+
corr_l = [] # correlation list
|
156 |
+
for i in range(t):
|
157 |
+
emb_neighbor = embedding[:, i, :, :, :]
|
158 |
+
corr = torch.sum(emb_neighbor * embedding_ref, 1) # (b, h, w)
|
159 |
+
corr_l.append(corr.unsqueeze(1)) # (b, 1, h, w)
|
160 |
+
corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1)) # (b, t, h, w)
|
161 |
+
corr_prob = corr_prob.unsqueeze(2).expand(b, t, c, h, w)
|
162 |
+
corr_prob = corr_prob.contiguous().view(b, -1, h, w) # (b, t*c, h, w)
|
163 |
+
aligned_feat = aligned_feat.view(b, -1, h, w) * corr_prob
|
164 |
+
|
165 |
+
# fusion
|
166 |
+
feat = self.lrelu(self.feat_fusion(aligned_feat))
|
167 |
+
|
168 |
+
# spatial attention
|
169 |
+
attn = self.lrelu(self.spatial_attn1(aligned_feat))
|
170 |
+
attn_max = self.max_pool(attn)
|
171 |
+
attn_avg = self.avg_pool(attn)
|
172 |
+
attn = self.lrelu(self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1)))
|
173 |
+
# pyramid levels
|
174 |
+
attn_level = self.lrelu(self.spatial_attn_l1(attn))
|
175 |
+
attn_max = self.max_pool(attn_level)
|
176 |
+
attn_avg = self.avg_pool(attn_level)
|
177 |
+
attn_level = self.lrelu(self.spatial_attn_l2(torch.cat([attn_max, attn_avg], dim=1)))
|
178 |
+
attn_level = self.lrelu(self.spatial_attn_l3(attn_level))
|
179 |
+
attn_level = self.upsample(attn_level)
|
180 |
+
|
181 |
+
attn = self.lrelu(self.spatial_attn3(attn)) + attn_level
|
182 |
+
attn = self.lrelu(self.spatial_attn4(attn))
|
183 |
+
attn = self.upsample(attn)
|
184 |
+
attn = self.spatial_attn5(attn)
|
185 |
+
attn_add = self.spatial_attn_add2(self.lrelu(self.spatial_attn_add1(attn)))
|
186 |
+
attn = torch.sigmoid(attn)
|
187 |
+
|
188 |
+
# after initialization, * 2 makes (attn * 2) to be close to 1.
|
189 |
+
feat = feat * attn * 2 + attn_add
|
190 |
+
return feat
|
191 |
+
|
192 |
+
|
193 |
+
class PredeblurModule(nn.Module):
|
194 |
+
"""Pre-dublur module.
|
195 |
+
|
196 |
+
Args:
|
197 |
+
num_in_ch (int): Channel number of input image. Default: 3.
|
198 |
+
num_feat (int): Channel number of intermediate features. Default: 64.
|
199 |
+
hr_in (bool): Whether the input has high resolution. Default: False.
|
200 |
+
"""
|
201 |
+
|
202 |
+
def __init__(self, num_in_ch=3, num_feat=64, hr_in=False):
|
203 |
+
super(PredeblurModule, self).__init__()
|
204 |
+
self.hr_in = hr_in
|
205 |
+
|
206 |
+
self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
|
207 |
+
if self.hr_in:
|
208 |
+
# downsample x4 by stride conv
|
209 |
+
self.stride_conv_hr1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
210 |
+
self.stride_conv_hr2 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
211 |
+
|
212 |
+
# generate feature pyramid
|
213 |
+
self.stride_conv_l2 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
214 |
+
self.stride_conv_l3 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
215 |
+
|
216 |
+
self.resblock_l3 = ResidualBlockNoBN(num_feat=num_feat)
|
217 |
+
self.resblock_l2_1 = ResidualBlockNoBN(num_feat=num_feat)
|
218 |
+
self.resblock_l2_2 = ResidualBlockNoBN(num_feat=num_feat)
|
219 |
+
self.resblock_l1 = nn.ModuleList([ResidualBlockNoBN(num_feat=num_feat) for i in range(5)])
|
220 |
+
|
221 |
+
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
|
222 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
|
223 |
+
|
224 |
+
def forward(self, x):
|
225 |
+
feat_l1 = self.lrelu(self.conv_first(x))
|
226 |
+
if self.hr_in:
|
227 |
+
feat_l1 = self.lrelu(self.stride_conv_hr1(feat_l1))
|
228 |
+
feat_l1 = self.lrelu(self.stride_conv_hr2(feat_l1))
|
229 |
+
|
230 |
+
# generate feature pyramid
|
231 |
+
feat_l2 = self.lrelu(self.stride_conv_l2(feat_l1))
|
232 |
+
feat_l3 = self.lrelu(self.stride_conv_l3(feat_l2))
|
233 |
+
|
234 |
+
feat_l3 = self.upsample(self.resblock_l3(feat_l3))
|
235 |
+
feat_l2 = self.resblock_l2_1(feat_l2) + feat_l3
|
236 |
+
feat_l2 = self.upsample(self.resblock_l2_2(feat_l2))
|
237 |
+
|
238 |
+
for i in range(2):
|
239 |
+
feat_l1 = self.resblock_l1[i](feat_l1)
|
240 |
+
feat_l1 = feat_l1 + feat_l2
|
241 |
+
for i in range(2, 5):
|
242 |
+
feat_l1 = self.resblock_l1[i](feat_l1)
|
243 |
+
return feat_l1
|
244 |
+
|
245 |
+
|
246 |
+
@ARCH_REGISTRY.register()
|
247 |
+
class EDVR(nn.Module):
|
248 |
+
"""EDVR network structure for video super-resolution.
|
249 |
+
|
250 |
+
Now only support X4 upsampling factor.
|
251 |
+
Paper:
|
252 |
+
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
|
253 |
+
|
254 |
+
Args:
|
255 |
+
num_in_ch (int): Channel number of input image. Default: 3.
|
256 |
+
num_out_ch (int): Channel number of output image. Default: 3.
|
257 |
+
num_feat (int): Channel number of intermediate features. Default: 64.
|
258 |
+
num_frame (int): Number of input frames. Default: 5.
|
259 |
+
deformable_groups (int): Deformable groups. Defaults: 8.
|
260 |
+
num_extract_block (int): Number of blocks for feature extraction.
|
261 |
+
Default: 5.
|
262 |
+
num_reconstruct_block (int): Number of blocks for reconstruction.
|
263 |
+
Default: 10.
|
264 |
+
center_frame_idx (int): The index of center frame. Frame counting from
|
265 |
+
0. Default: Middle of input frames.
|
266 |
+
hr_in (bool): Whether the input has high resolution. Default: False.
|
267 |
+
with_predeblur (bool): Whether to use the predeblur module.
|
268 |
+
Default: False.
|
269 |
+
with_tsa (bool): Whether to use the TSA module. Default: True.
|
270 |
+
"""
|
271 |
+
|
272 |
+
def __init__(self,
|
273 |
+
num_in_ch=3,
|
274 |
+
num_out_ch=3,
|
275 |
+
num_feat=64,
|
276 |
+
num_frame=5,
|
277 |
+
deformable_groups=8,
|
278 |
+
num_extract_block=5,
|
279 |
+
num_reconstruct_block=10,
|
280 |
+
center_frame_idx=None,
|
281 |
+
hr_in=False,
|
282 |
+
with_predeblur=False,
|
283 |
+
with_tsa=True):
|
284 |
+
super(EDVR, self).__init__()
|
285 |
+
if center_frame_idx is None:
|
286 |
+
self.center_frame_idx = num_frame // 2
|
287 |
+
else:
|
288 |
+
self.center_frame_idx = center_frame_idx
|
289 |
+
self.hr_in = hr_in
|
290 |
+
self.with_predeblur = with_predeblur
|
291 |
+
self.with_tsa = with_tsa
|
292 |
+
|
293 |
+
# extract features for each frame
|
294 |
+
if self.with_predeblur:
|
295 |
+
self.predeblur = PredeblurModule(num_feat=num_feat, hr_in=self.hr_in)
|
296 |
+
self.conv_1x1 = nn.Conv2d(num_feat, num_feat, 1, 1)
|
297 |
+
else:
|
298 |
+
self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
|
299 |
+
|
300 |
+
# extract pyramid features
|
301 |
+
self.feature_extraction = make_layer(ResidualBlockNoBN, num_extract_block, num_feat=num_feat)
|
302 |
+
self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
303 |
+
self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
304 |
+
self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
|
305 |
+
self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
306 |
+
|
307 |
+
# pcd and tsa module
|
308 |
+
self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=deformable_groups)
|
309 |
+
if self.with_tsa:
|
310 |
+
self.fusion = TSAFusion(num_feat=num_feat, num_frame=num_frame, center_frame_idx=self.center_frame_idx)
|
311 |
+
else:
|
312 |
+
self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
|
313 |
+
|
314 |
+
# reconstruction
|
315 |
+
self.reconstruction = make_layer(ResidualBlockNoBN, num_reconstruct_block, num_feat=num_feat)
|
316 |
+
# upsample
|
317 |
+
self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
|
318 |
+
self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
|
319 |
+
self.pixel_shuffle = nn.PixelShuffle(2)
|
320 |
+
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
|
321 |
+
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
|
322 |
+
|
323 |
+
# activation function
|
324 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
|
325 |
+
|
326 |
+
def forward(self, x):
|
327 |
+
b, t, c, h, w = x.size()
|
328 |
+
if self.hr_in:
|
329 |
+
assert h % 16 == 0 and w % 16 == 0, ('The height and width must be multiples of 16.')
|
330 |
+
else:
|
331 |
+
assert h % 4 == 0 and w % 4 == 0, ('The height and width must be multiples of 4.')
|
332 |
+
|
333 |
+
x_center = x[:, self.center_frame_idx, :, :, :].contiguous()
|
334 |
+
|
335 |
+
# extract features for each frame
|
336 |
+
# L1
|
337 |
+
if self.with_predeblur:
|
338 |
+
feat_l1 = self.conv_1x1(self.predeblur(x.view(-1, c, h, w)))
|
339 |
+
if self.hr_in:
|
340 |
+
h, w = h // 4, w // 4
|
341 |
+
else:
|
342 |
+
feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
|
343 |
+
|
344 |
+
feat_l1 = self.feature_extraction(feat_l1)
|
345 |
+
# L2
|
346 |
+
feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
|
347 |
+
feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
|
348 |
+
# L3
|
349 |
+
feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
|
350 |
+
feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))
|
351 |
+
|
352 |
+
feat_l1 = feat_l1.view(b, t, -1, h, w)
|
353 |
+
feat_l2 = feat_l2.view(b, t, -1, h // 2, w // 2)
|
354 |
+
feat_l3 = feat_l3.view(b, t, -1, h // 4, w // 4)
|
355 |
+
|
356 |
+
# PCD alignment
|
357 |
+
ref_feat_l = [ # reference feature list
|
358 |
+
feat_l1[:, self.center_frame_idx, :, :, :].clone(), feat_l2[:, self.center_frame_idx, :, :, :].clone(),
|
359 |
+
feat_l3[:, self.center_frame_idx, :, :, :].clone()
|
360 |
+
]
|
361 |
+
aligned_feat = []
|
362 |
+
for i in range(t):
|
363 |
+
nbr_feat_l = [ # neighboring feature list
|
364 |
+
feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(), feat_l3[:, i, :, :, :].clone()
|
365 |
+
]
|
366 |
+
aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
|
367 |
+
aligned_feat = torch.stack(aligned_feat, dim=1) # (b, t, c, h, w)
|
368 |
+
|
369 |
+
if not self.with_tsa:
|
370 |
+
aligned_feat = aligned_feat.view(b, -1, h, w)
|
371 |
+
feat = self.fusion(aligned_feat)
|
372 |
+
|
373 |
+
out = self.reconstruction(feat)
|
374 |
+
out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
|
375 |
+
out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
|
376 |
+
out = self.lrelu(self.conv_hr(out))
|
377 |
+
out = self.conv_last(out)
|
378 |
+
if self.hr_in:
|
379 |
+
base = x_center
|
380 |
+
else:
|
381 |
+
base = F.interpolate(x_center, scale_factor=4, mode='bilinear', align_corners=False)
|
382 |
+
out += base
|
383 |
+
return out
|
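A usage sketch for EDVR (not part of the commit); PCD alignment relies on DCNv2Pack, so this assumes the deformable convolution op from basicsr.ops is available in the current build:
import torch
from basicsr.archs.edvr_arch import EDVR

# Hypothetical example: 5-frame clip with the fixed x4 upscaling; h and w must be multiples of 4.
model = EDVR(num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5, with_tsa=True).eval()
clip = torch.rand(1, 5, 3, 64, 64)  # (b, t, c, h, w)
with torch.no_grad():
    sr = model(clip)
print(sr.shape)  # expected: torch.Size([1, 3, 256, 256])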
basicsr/archs/hifacegan_arch.py
ADDED
@@ -0,0 +1,259 @@
1 |
+
import numpy as np
|
2 |
+
import torch
|
3 |
+
import torch.nn as nn
|
4 |
+
import torch.nn.functional as F
|
5 |
+
|
6 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
7 |
+
from .hifacegan_util import BaseNetwork, LIPEncoder, SPADEResnetBlock, get_nonspade_norm_layer
|
8 |
+
|
9 |
+
|
10 |
+
class SPADEGenerator(BaseNetwork):
|
11 |
+
"""Generator with SPADEResBlock"""
|
12 |
+
|
13 |
+
def __init__(self,
|
14 |
+
num_in_ch=3,
|
15 |
+
num_feat=64,
|
16 |
+
use_vae=False,
|
17 |
+
z_dim=256,
|
18 |
+
crop_size=512,
|
19 |
+
norm_g='spectralspadesyncbatch3x3',
|
20 |
+
is_train=True,
|
21 |
+
init_train_phase=3): # progressive training disabled
|
22 |
+
super().__init__()
|
23 |
+
self.nf = num_feat
|
24 |
+
self.input_nc = num_in_ch
|
25 |
+
self.is_train = is_train
|
26 |
+
self.train_phase = init_train_phase
|
27 |
+
|
28 |
+
self.scale_ratio = 5 # hardcoded now
|
29 |
+
self.sw = crop_size // (2**self.scale_ratio)
|
30 |
+
self.sh = self.sw # 20210519: By default use square image, aspect_ratio = 1.0
|
31 |
+
|
32 |
+
if use_vae:
|
33 |
+
# In case of VAE, we will sample from random z vector
|
34 |
+
self.fc = nn.Linear(z_dim, 16 * self.nf * self.sw * self.sh)
|
35 |
+
else:
|
36 |
+
# Otherwise, we make the network deterministic by starting with
|
37 |
+
# downsampled segmentation map instead of random z
|
38 |
+
self.fc = nn.Conv2d(num_in_ch, 16 * self.nf, 3, padding=1)
|
39 |
+
|
40 |
+
self.head_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
|
41 |
+
|
42 |
+
self.g_middle_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
|
43 |
+
self.g_middle_1 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
|
44 |
+
|
45 |
+
self.ups = nn.ModuleList([
|
46 |
+
SPADEResnetBlock(16 * self.nf, 8 * self.nf, norm_g),
|
47 |
+
SPADEResnetBlock(8 * self.nf, 4 * self.nf, norm_g),
|
48 |
+
SPADEResnetBlock(4 * self.nf, 2 * self.nf, norm_g),
|
49 |
+
SPADEResnetBlock(2 * self.nf, 1 * self.nf, norm_g)
|
50 |
+
])
|
51 |
+
|
52 |
+
self.to_rgbs = nn.ModuleList([
|
53 |
+
nn.Conv2d(8 * self.nf, 3, 3, padding=1),
|
54 |
+
nn.Conv2d(4 * self.nf, 3, 3, padding=1),
|
55 |
+
nn.Conv2d(2 * self.nf, 3, 3, padding=1),
|
56 |
+
nn.Conv2d(1 * self.nf, 3, 3, padding=1)
|
57 |
+
])
|
58 |
+
|
59 |
+
self.up = nn.Upsample(scale_factor=2)
|
60 |
+
|
61 |
+
def encode(self, input_tensor):
|
62 |
+
"""
|
63 |
+
Encode input_tensor into feature maps; can be overridden in derived classes.
|
64 |
+
Default: nearest downsampling of 2**5 = 32 times
|
65 |
+
"""
|
66 |
+
h, w = input_tensor.size()[-2:]
|
67 |
+
sh, sw = h // 2**self.scale_ratio, w // 2**self.scale_ratio
|
68 |
+
x = F.interpolate(input_tensor, size=(sh, sw))
|
69 |
+
return self.fc(x)
|
70 |
+
|
71 |
+
def forward(self, x):
|
72 |
+
# In the original SPADE, seg means a segmentation map, but here we use x instead.
|
73 |
+
seg = x
|
74 |
+
|
75 |
+
x = self.encode(x)
|
76 |
+
x = self.head_0(x, seg)
|
77 |
+
|
78 |
+
x = self.up(x)
|
79 |
+
x = self.g_middle_0(x, seg)
|
80 |
+
x = self.g_middle_1(x, seg)
|
81 |
+
|
82 |
+
if self.is_train:
|
83 |
+
phase = self.train_phase + 1
|
84 |
+
else:
|
85 |
+
phase = len(self.to_rgbs)
|
86 |
+
|
87 |
+
for i in range(phase):
|
88 |
+
x = self.up(x)
|
89 |
+
x = self.ups[i](x, seg)
|
90 |
+
|
91 |
+
x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1))
|
92 |
+
x = torch.tanh(x)
|
93 |
+
|
94 |
+
return x
|
95 |
+
|
96 |
+
def mixed_guidance_forward(self, input_x, seg=None, n=0, mode='progressive'):
|
97 |
+
"""
|
98 |
+
A helper method for subspace visualization. input_x and seg are different images.
|
99 |
+
For the first n levels (including the encoder) we use input_x; for the rest we use seg.
|
100 |
+
|
101 |
+
If mode = 'progressive', the output's like: AAABBB
|
102 |
+
If mode = 'one_plug', the output's like: AAABAA
|
103 |
+
If mode = 'one_ablate', the output's like: BBBABB
|
104 |
+
"""
|
105 |
+
|
106 |
+
if seg is None:
|
107 |
+
return self.forward(input_x)
|
108 |
+
|
109 |
+
if self.is_train:
|
110 |
+
phase = self.train_phase + 1
|
111 |
+
else:
|
112 |
+
phase = len(self.to_rgbs)
|
113 |
+
|
114 |
+
if mode == 'progressive':
|
115 |
+
n = max(min(n, 4 + phase), 0)
|
116 |
+
guide_list = [input_x] * n + [seg] * (4 + phase - n)
|
117 |
+
elif mode == 'one_plug':
|
118 |
+
n = max(min(n, 4 + phase - 1), 0)
|
119 |
+
guide_list = [seg] * (4 + phase)
|
120 |
+
guide_list[n] = input_x
|
121 |
+
elif mode == 'one_ablate':
|
122 |
+
if n > 3 + phase:
|
123 |
+
return self.forward(input_x)
|
124 |
+
guide_list = [input_x] * (4 + phase)
|
125 |
+
guide_list[n] = seg
|
126 |
+
|
127 |
+
x = self.encode(guide_list[0])
|
128 |
+
x = self.head_0(x, guide_list[1])
|
129 |
+
|
130 |
+
x = self.up(x)
|
131 |
+
x = self.g_middle_0(x, guide_list[2])
|
132 |
+
x = self.g_middle_1(x, guide_list[3])
|
133 |
+
|
134 |
+
for i in range(phase):
|
135 |
+
x = self.up(x)
|
136 |
+
x = self.ups[i](x, guide_list[4 + i])
|
137 |
+
|
138 |
+
x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1))
|
139 |
+
x = torch.tanh(x)
|
140 |
+
|
141 |
+
return x
|
142 |
+
|
143 |
+
|
144 |
+
@ARCH_REGISTRY.register()
|
145 |
+
class HiFaceGAN(SPADEGenerator):
|
146 |
+
"""
|
147 |
+
HiFaceGAN: SPADEGenerator with a learnable feature encoder
|
148 |
+
Current encoder design: LIPEncoder
|
149 |
+
"""
|
150 |
+
|
151 |
+
def __init__(self,
|
152 |
+
num_in_ch=3,
|
153 |
+
num_feat=64,
|
154 |
+
use_vae=False,
|
155 |
+
z_dim=256,
|
156 |
+
crop_size=512,
|
157 |
+
norm_g='spectralspadesyncbatch3x3',
|
158 |
+
is_train=True,
|
159 |
+
init_train_phase=3):
|
160 |
+
super().__init__(num_in_ch, num_feat, use_vae, z_dim, crop_size, norm_g, is_train, init_train_phase)
|
161 |
+
self.lip_encoder = LIPEncoder(num_in_ch, num_feat, self.sw, self.sh, self.scale_ratio)
|
162 |
+
|
163 |
+
def encode(self, input_tensor):
|
164 |
+
return self.lip_encoder(input_tensor)
|
165 |
+
|
166 |
+
|
167 |
+
@ARCH_REGISTRY.register()
|
168 |
+
class HiFaceGANDiscriminator(BaseNetwork):
|
169 |
+
"""
|
170 |
+
Inspired by pix2pixHD multiscale discriminator.
|
171 |
+
Args:
|
172 |
+
num_in_ch (int): Channel number of inputs. Default: 3.
|
173 |
+
num_out_ch (int): Channel number of outputs. Default: 3.
|
174 |
+
conditional_d (bool): Whether to use a conditional discriminator.
|
175 |
+
Default: True.
|
176 |
+
num_d (int): Number of multiscale discriminators. Default: 2.
|
177 |
+
n_layers_d (int): Number of downsample layers in each D. Default: 4.
|
178 |
+
num_feat (int): Channel number of base intermediate features.
|
179 |
+
Default: 64.
|
180 |
+
norm_d (str): String to determine normalization layers in D.
|
181 |
+
Choices: [spectral][instance/batch/syncbatch]
|
182 |
+
Default: 'spectralinstance'.
|
183 |
+
keep_features (bool): Keep intermediate features for matching loss, etc.
|
184 |
+
Default: True.
|
185 |
+
"""
|
186 |
+
|
187 |
+
def __init__(self,
|
188 |
+
num_in_ch=3,
|
189 |
+
num_out_ch=3,
|
190 |
+
conditional_d=True,
|
191 |
+
num_d=2,
|
192 |
+
n_layers_d=4,
|
193 |
+
num_feat=64,
|
194 |
+
norm_d='spectralinstance',
|
195 |
+
keep_features=True):
|
196 |
+
super().__init__()
|
197 |
+
self.num_d = num_d
|
198 |
+
|
199 |
+
input_nc = num_in_ch
|
200 |
+
if conditional_d:
|
201 |
+
input_nc += num_out_ch
|
202 |
+
|
203 |
+
for i in range(num_d):
|
204 |
+
subnet_d = NLayerDiscriminator(input_nc, n_layers_d, num_feat, norm_d, keep_features)
|
205 |
+
self.add_module(f'discriminator_{i}', subnet_d)
|
206 |
+
|
207 |
+
def downsample(self, x):
|
208 |
+
return F.avg_pool2d(x, kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)
|
209 |
+
|
210 |
+
# Returns list of lists of discriminator outputs.
|
211 |
+
# The final result is of size opt.num_d x opt.n_layers_D
|
212 |
+
def forward(self, x):
|
213 |
+
result = []
|
214 |
+
for _, _net_d in self.named_children():
|
215 |
+
out = _net_d(x)
|
216 |
+
result.append(out)
|
217 |
+
x = self.downsample(x)
|
218 |
+
|
219 |
+
return result
|
220 |
+
|
221 |
+
|
222 |
+
class NLayerDiscriminator(BaseNetwork):
|
223 |
+
"""Defines the PatchGAN discriminator with the specified arguments."""
|
224 |
+
|
225 |
+
def __init__(self, input_nc, n_layers_d, num_feat, norm_d, keep_features):
|
226 |
+
super().__init__()
|
227 |
+
kw = 4
|
228 |
+
padw = int(np.ceil((kw - 1.0) / 2))
|
229 |
+
nf = num_feat
|
230 |
+
self.keep_features = keep_features
|
231 |
+
|
232 |
+
norm_layer = get_nonspade_norm_layer(norm_d)
|
233 |
+
sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, False)]]
|
234 |
+
|
235 |
+
for n in range(1, n_layers_d):
|
236 |
+
nf_prev = nf
|
237 |
+
nf = min(nf * 2, 512)
|
238 |
+
stride = 1 if n == n_layers_d - 1 else 2
|
239 |
+
sequence += [[
|
240 |
+
norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=stride, padding=padw)),
|
241 |
+
nn.LeakyReLU(0.2, False)
|
242 |
+
]]
|
243 |
+
|
244 |
+
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
|
245 |
+
|
246 |
+
# We divide the layers into groups to extract intermediate layer outputs
|
247 |
+
for n in range(len(sequence)):
|
248 |
+
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
|
249 |
+
|
250 |
+
def forward(self, x):
|
251 |
+
results = [x]
|
252 |
+
for submodel in self.children():
|
253 |
+
intermediate_output = submodel(results[-1])
|
254 |
+
results.append(intermediate_output)
|
255 |
+
|
256 |
+
if self.keep_features:
|
257 |
+
return results[1:]
|
258 |
+
else:
|
259 |
+
return results[-1]
|
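A usage sketch for the HiFaceGAN generator (not part of the commit), assuming basicsr is installed and the input spatial size matches crop_size:
import torch
from basicsr.archs.hifacegan_arch import HiFaceGAN

# Hypothetical example: restore a dummy 512x512 face crop with the default settings.
net = HiFaceGAN(num_in_ch=3, num_feat=64, crop_size=512, is_train=False).eval()
x = torch.rand(1, 3, 512, 512)
with torch.no_grad():
    out = net(x)  # tanh output in [-1, 1]
print(out.shape)  # expected: torch.Size([1, 3, 512, 512])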
basicsr/archs/hifacegan_util.py
ADDED
@@ -0,0 +1,255 @@
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
# Warning: spectral norm could be buggy
# under eval mode and multi-GPU inference
# A workaround is sticking to single-GPU inference and train mode
from torch.nn.utils import spectral_norm


class SPADE(nn.Module):

    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()

        assert config_text.startswith('spade')
        parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc)
        elif param_free_norm_type == 'syncbatch':
            print('SyncBatchNorm is currently not supported under single-GPU mode, switch to "instance" instead')
            self.param_free_norm = nn.InstanceNorm2d(norm_nc)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError(f'{param_free_norm_type} is not a recognized param-free norm type in SPADE')

        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128 if norm_nc > 128 else norm_nc

        pw = ks // 2
        self.mlp_shared = nn.Sequential(nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw), nn.ReLU())
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, bias=False)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, bias=False)

    def forward(self, x, segmap):

        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)

        # Part 2. produce scaling and bias conditioned on semantic map
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)

        # apply scale and bias
        out = normalized * gamma + beta

        return out


class SPADEResnetBlock(nn.Module):
    """
    ResNet block that uses SPADE. It differs from the ResNet block of pix2pixHD in that
    it takes in the segmentation map as input, learns the skip connection if necessary,
    and applies normalization first and then convolution.
    This architecture seemed like a standard architecture for unconditional or
    class-conditional GAN architecture using residual block.
    The code was inspired from https://github.com/LMescheder/GAN_stability.
    """

    def __init__(self, fin, fout, norm_g='spectralspadesyncbatch3x3', semantic_nc=3):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in norm_g:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = norm_g.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fmiddle, semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, semantic_nc)

    # note the resnet block with SPADE also takes in |seg|,
    # the semantic segmentation map as input
    def forward(self, x, seg):
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.act(self.norm_0(x, seg)))
        dx = self.conv_1(self.act(self.norm_1(dx, seg)))
        out = x_s + dx
        return out

    def shortcut(self, x, seg):
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s

    def act(self, x):
        return F.leaky_relu(x, 2e-1)


class BaseNetwork(nn.Module):
    """ A basis for hifacegan archs with custom initialization """

    def init_weights(self, init_type='normal', gain=0.02):

        def init_func(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm2d') != -1:
                if hasattr(m, 'weight') and m.weight is not None:
                    init.normal_(m.weight.data, 1.0, gain)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if init_type == 'normal':
                    init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier':
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
                    init.xavier_uniform_(m.weight.data, gain=1.0)
                elif init_type == 'kaiming':
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'none':  # uses pytorch's default init method
                    m.reset_parameters()
                else:
                    raise NotImplementedError(f'initialization method [{init_type}] is not implemented')
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)

        self.apply(init_func)

        # propagate to children
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)

    def forward(self, x):
        pass


def lip2d(x, logit, kernel=3, stride=2, padding=1):
    weight = logit.exp()
    return F.avg_pool2d(x * weight, kernel, stride, padding) / F.avg_pool2d(weight, kernel, stride, padding)


class SoftGate(nn.Module):
    COEFF = 12.0

    def forward(self, x):
        return torch.sigmoid(x).mul(self.COEFF)


class SimplifiedLIP(nn.Module):

    def __init__(self, channels):
        super(SimplifiedLIP, self).__init__()
        self.logit = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1, bias=False), nn.InstanceNorm2d(channels, affine=True),
            SoftGate())

    def init_layer(self):
        self.logit[0].weight.data.fill_(0.0)

    def forward(self, x):
        frac = lip2d(x, self.logit(x))
        return frac


class LIPEncoder(BaseNetwork):
    """Local Importance-based Pooling (Ziteng Gao et al., ICCV 2019)"""

    def __init__(self, input_nc, ngf, sw, sh, n_2xdown, norm_layer=nn.InstanceNorm2d):
        super().__init__()
        self.sw = sw
        self.sh = sh
        self.max_ratio = 16
        # 20200310: Several Convolution (stride 1) + LIP blocks, 4 fold
        kw = 3
        pw = (kw - 1) // 2

        model = [
            nn.Conv2d(input_nc, ngf, kw, stride=1, padding=pw, bias=False),
            norm_layer(ngf),
            nn.ReLU(),
        ]
        cur_ratio = 1
        for i in range(n_2xdown):
            next_ratio = min(cur_ratio * 2, self.max_ratio)
            model += [
                SimplifiedLIP(ngf * cur_ratio),
                nn.Conv2d(ngf * cur_ratio, ngf * next_ratio, kw, stride=1, padding=pw),
                norm_layer(ngf * next_ratio),
            ]
            cur_ratio = next_ratio
            if i < n_2xdown - 1:
                model += [nn.ReLU(inplace=True)]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x)


def get_nonspade_norm_layer(norm_type='instance'):
    # helper function to get # output channels of the previous layer
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # this function will be returned
    def add_norm_layer(layer):
        nonlocal norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            print('SyncBatchNorm is currently not supported under single-GPU mode, switch to "instance" instead')
            # norm_layer = SynchronizedBatchNorm2d(
            #     get_out_channel(layer), affine=True)
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError(f'normalization layer {subnorm_type} is not recognized')

        return nn.Sequential(layer, norm_layer)

    print('This is a legacy from nvlabs/SPADE, and will be removed in future versions.')
    return add_norm_layer
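For orientation, a minimal usage sketch of the SPADE residual block defined above; the tensor sizes and the 'spectralspadeinstance3x3' config string are illustrative assumptions, not values fixed by this file.

# Hedged example: drive a SPADEResnetBlock with a feature map plus a 3-channel
# guidance map (semantic_nc=3 is the default above); all shapes are arbitrary.
import torch
from basicsr.archs.hifacegan_util import SPADEResnetBlock

block = SPADEResnetBlock(fin=64, fout=32, norm_g='spectralspadeinstance3x3', semantic_nc=3)
feat = torch.randn(2, 64, 32, 32)   # input features
seg = torch.randn(2, 3, 128, 128)   # guidance map; SPADE resizes it to match feat internally
out = block(feat, seg)              # -> (2, 32, 32, 32)
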
basicsr/archs/inception.py
ADDED
@@ -0,0 +1,307 @@
# Modified from https://github.com/mseitzer/pytorch-fid/blob/master/pytorch_fid/inception.py # noqa: E501
# For FID metric

import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.model_zoo import load_url
from torchvision import models

# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'  # noqa: E501
LOCAL_FID_WEIGHTS = 'experiments/pretrained_models/pt_inception-2015-12-05-6726825d.pth'  # noqa: E501


class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps"""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,  # First max pooling features
        192: 1,  # Second max pooling features
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 use_fid_inception=True):
        """Build pretrained InceptionV3.

        Args:
            output_blocks (list[int]): Indices of blocks to return features of.
                Possible values are:
                    - 0: corresponds to output of first max pooling
                    - 1: corresponds to output of second max pooling
                    - 2: corresponds to output which is fed to aux classifier
                    - 3: corresponds to output of final average pooling
            resize_input (bool): If true, bilinearly resizes input to width and
                height 299 before feeding input to model. As the network
                without fully connected layers is fully convolutional, it
                should be able to handle inputs of arbitrary size, so resizing
                might not be strictly needed. Default: True.
            normalize_input (bool): If true, scales the input from range (0, 1)
                to the range the pretrained Inception network expects,
                namely (-1, 1). Default: True.
            requires_grad (bool): If true, parameters of the model require
                gradients. Possibly useful for finetuning the network.
                Default: False.
            use_fid_inception (bool): If true, uses the pretrained Inception
                model used in Tensorflow's FID implementation.
                If false, uses the pretrained Inception model available in
                torchvision. The FID Inception model has different weights
                and a slightly different structure from torchvision's
                Inception model. If you want to compute FID scores, you are
                strongly advised to set this parameter to true to get
                comparable results. Default: True.
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, ('Last possible output block index is 3')

        self.blocks = nn.ModuleList()

        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            try:
                inception = models.inception_v3(pretrained=True, init_weights=False)
            except TypeError:
                # pytorch < 1.5 does not have init_weights for inception_v3
                inception = models.inception_v3(pretrained=True)

        # Block 0: input to maxpool1
        block0 = [
            inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                inception.Mixed_5b,
                inception.Mixed_5c,
                inception.Mixed_5d,
                inception.Mixed_6a,
                inception.Mixed_6b,
                inception.Mixed_6c,
                inception.Mixed_6d,
                inception.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, x):
        """Get Inception feature maps.

        Args:
            x (Tensor): Input tensor of shape (b, 3, h, w).
                Values are expected to be in range (-1, 1). You can also input
                (0, 1) with setting normalize_input = True.

        Returns:
            list[Tensor]: Corresponding to the selected output block, sorted
            ascending by index.
        """
        output = []

        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)

        if self.normalize_input:
            x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                output.append(x)

            if idx == self.last_needed_block:
                break

        return output


def fid_inception_v3():
    """Build pretrained Inception model for FID computation.

    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.

    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    try:
        inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False, init_weights=False)
    except TypeError:
        # pytorch < 1.5 does not have init_weights for inception_v3
        inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False)

    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)

    if os.path.exists(LOCAL_FID_WEIGHTS):
        state_dict = torch.load(LOCAL_FID_WEIGHTS, map_location=lambda storage, loc: storage)
    else:
        state_dict = load_url(FID_WEIGHTS_URL, progress=True)

    inception.load_state_dict(state_dict)
    return inception


class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation"""

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation"""

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: Tensorflow's average pool does not use the padded zeros in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
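As a pointer for how this module is consumed when computing FID, a sketch under the docstring's assumptions (not code from this commit): select the 2048-dim final-pooling block and extract features from a batch of images in the (0, 1) range.

# Hedged example: extract the 2048-dim pool features used for FID statistics.
import torch
from basicsr.archs.inception import InceptionV3

block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]        # -> 3
model = InceptionV3(output_blocks=[block_idx]).eval()   # fetches the FID weights on first use
imgs = torch.rand(4, 3, 299, 299)                       # batch in (0, 1); normalize_input rescales to (-1, 1)
with torch.no_grad():
    feat = model(imgs)[0]                               # (4, 2048, 1, 1)
feat = feat.squeeze(-1).squeeze(-1)                     # (4, 2048), ready for mean/covariance statistics
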
basicsr/archs/rcan_arch.py
ADDED
@@ -0,0 +1,135 @@
import torch
from torch import nn as nn

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import Upsample, make_layer


class ChannelAttention(nn.Module):
    """Channel attention used in RCAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
    """

    def __init__(self, num_feat, squeeze_factor=16):
        super(ChannelAttention, self).__init__()
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0),
            nn.ReLU(inplace=True), nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), nn.Sigmoid())

    def forward(self, x):
        y = self.attention(x)
        return x * y


class RCAB(nn.Module):
    """Residual Channel Attention Block (RCAB) used in RCAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """

    def __init__(self, num_feat, squeeze_factor=16, res_scale=1):
        super(RCAB, self).__init__()
        self.res_scale = res_scale

        self.rcab = nn.Sequential(
            nn.Conv2d(num_feat, num_feat, 3, 1, 1), nn.ReLU(True), nn.Conv2d(num_feat, num_feat, 3, 1, 1),
            ChannelAttention(num_feat, squeeze_factor))

    def forward(self, x):
        res = self.rcab(x) * self.res_scale
        return res + x


class ResidualGroup(nn.Module):
    """Residual Group of RCAB.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_block (int): Block number in the body network.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """

    def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1):
        super(ResidualGroup, self).__init__()

        self.residual_group = make_layer(
            RCAB, num_block, num_feat=num_feat, squeeze_factor=squeeze_factor, res_scale=res_scale)
        self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1)

    def forward(self, x):
        res = self.conv(self.residual_group(x))
        return res + x


@ARCH_REGISTRY.register()
class RCAN(nn.Module):
    """Residual Channel Attention Networks.

    Paper: Image Super-Resolution Using Very Deep Residual Channel Attention
        Networks
    Ref git repo: https://github.com/yulunzhang/RCAN.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        num_feat (int): Channel number of intermediate features.
            Default: 64.
        num_group (int): Number of ResidualGroup. Default: 10.
        num_block (int): Number of RCAB in ResidualGroup. Default: 16.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        upscale (int): Upsampling factor. Support 2^n and 3.
            Default: 4.
        res_scale (float): Used to scale the residual in residual block.
            Default: 1.
        img_range (float): Image range. Default: 255.
        rgb_mean (tuple[float]): Image mean in RGB orders.
            Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
    """

    def __init__(self,
                 num_in_ch,
                 num_out_ch,
                 num_feat=64,
                 num_group=10,
                 num_block=16,
                 squeeze_factor=16,
                 upscale=4,
                 res_scale=1,
                 img_range=255.,
                 rgb_mean=(0.4488, 0.4371, 0.4040)):
        super(RCAN, self).__init__()

        self.img_range = img_range
        self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(
            ResidualGroup,
            num_group,
            num_feat=num_feat,
            num_block=num_block,
            squeeze_factor=squeeze_factor,
            res_scale=res_scale)
        self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.upsample = Upsample(upscale, num_feat)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

    def forward(self, x):
        self.mean = self.mean.type_as(x)

        x = (x - self.mean) * self.img_range
        x = self.conv_first(x)
        res = self.conv_after_body(self.body(x))
        res += x

        x = self.conv_last(self.upsample(res))
        x = x / self.img_range + self.mean

        return x
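A minimal instantiation sketch for the registered class above; the input size is arbitrary and a small num_group/num_block is used only to keep the example light.

# Hedged example: build a small RCAN and run one x4 forward pass.
import torch
from basicsr.archs.rcan_arch import RCAN

net = RCAN(num_in_ch=3, num_out_ch=3, num_feat=64, num_group=2, num_block=2, upscale=4)
lr = torch.rand(1, 3, 32, 32)      # low-resolution RGB in (0, 1)
with torch.no_grad():
    sr = net(lr)                   # -> (1, 3, 128, 128)
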
basicsr/archs/ridnet_arch.py
ADDED
@@ -0,0 +1,184 @@
import torch
import torch.nn as nn

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import ResidualBlockNoBN, make_layer


class MeanShift(nn.Conv2d):
    """ Data normalization with mean and std.

    Args:
        rgb_range (int): Maximum value of RGB.
        rgb_mean (list[float]): Mean for RGB channels.
        rgb_std (list[float]): Std for RGB channels.
        sign (int): For subtraction, sign is -1, for addition, sign is 1.
            Default: -1.
        requires_grad (bool): Whether to update the self.weight and self.bias.
            Default: True.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1, requires_grad=True):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        self.requires_grad = requires_grad


class EResidualBlockNoBN(nn.Module):
    """Enhanced Residual block without BN.

    There are three convolution layers in residual branch.

    It has a style of:
        ---Conv-ReLU-Conv-ReLU-Conv-+-ReLU-
         |__________________________|
    """

    def __init__(self, in_channels, out_channels):
        super(EResidualBlockNoBN, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.body(x)
        out = self.relu(out + x)
        return out


class MergeRun(nn.Module):
    """ Merge-and-run unit.

    This unit contains two branches with different dilated convolutions,
    followed by a convolution to process the concatenated features.

    Paper: Real Image Denoising with Feature Attention
    Ref git repo: https://github.com/saeed-anwar/RIDNet
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(MergeRun, self).__init__()

        self.dilation1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size, stride, 2, 2), nn.ReLU(inplace=True))
        self.dilation2 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, 3, 3), nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size, stride, 4, 4), nn.ReLU(inplace=True))

        self.aggregation = nn.Sequential(
            nn.Conv2d(out_channels * 2, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True))

    def forward(self, x):
        dilation1 = self.dilation1(x)
        dilation2 = self.dilation2(x)
        out = torch.cat([dilation1, dilation2], dim=1)
        out = self.aggregation(out)
        out = out + x
        return out


class ChannelAttention(nn.Module):
    """Channel attention.

    Args:
        mid_channels (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
    """

    def __init__(self, mid_channels, squeeze_factor=16):
        super(ChannelAttention, self).__init__()
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Conv2d(mid_channels, mid_channels // squeeze_factor, 1, padding=0),
            nn.ReLU(inplace=True), nn.Conv2d(mid_channels // squeeze_factor, mid_channels, 1, padding=0), nn.Sigmoid())

    def forward(self, x):
        y = self.attention(x)
        return x * y


class EAM(nn.Module):
    """Enhancement attention modules (EAM) in RIDNet.

    This module contains a merge-and-run unit, a residual block,
    an enhanced residual block and a feature attention unit.

    Attributes:
        merge: The merge-and-run unit.
        block1: The residual block.
        block2: The enhanced residual block.
        ca: The feature/channel attention unit.
    """

    def __init__(self, in_channels, mid_channels, out_channels):
        super(EAM, self).__init__()

        self.merge = MergeRun(in_channels, mid_channels)
        self.block1 = ResidualBlockNoBN(mid_channels)
        self.block2 = EResidualBlockNoBN(mid_channels, out_channels)
        self.ca = ChannelAttention(out_channels)
        # The residual block in the paper contains a relu after addition.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.merge(x)
        out = self.relu(self.block1(out))
        out = self.block2(out)
        out = self.ca(out)
        return out


@ARCH_REGISTRY.register()
class RIDNet(nn.Module):
    """RIDNet: Real Image Denoising with Feature Attention.

    Ref git repo: https://github.com/saeed-anwar/RIDNet

    Args:
        in_channels (int): Channel number of inputs.
        mid_channels (int): Channel number of EAM modules.
            Default: 64.
        out_channels (int): Channel number of outputs.
        num_block (int): Number of EAM. Default: 4.
        img_range (float): Image range. Default: 255.
        rgb_mean (tuple[float]): Image mean in RGB orders.
            Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
    """

    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 num_block=4,
                 img_range=255.,
                 rgb_mean=(0.4488, 0.4371, 0.4040),
                 rgb_std=(1.0, 1.0, 1.0)):
        super(RIDNet, self).__init__()

        self.sub_mean = MeanShift(img_range, rgb_mean, rgb_std)
        self.add_mean = MeanShift(img_range, rgb_mean, rgb_std, 1)

        self.head = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
        self.body = make_layer(
            EAM, num_block, in_channels=mid_channels, mid_channels=mid_channels, out_channels=mid_channels)
        self.tail = nn.Conv2d(mid_channels, out_channels, 3, 1, 1)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        res = self.sub_mean(x)
        res = self.tail(self.body(self.relu(self.head(res))))
        res = self.add_mean(res)

        out = x + res
        return out
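A minimal denoising sketch for the class above; the sizes are illustrative, and an untrained network only demonstrates the residual in/out contract.

# Hedged example: RIDNet predicts a residual that is added back to the noisy input.
import torch
from basicsr.archs.ridnet_arch import RIDNet

net = RIDNet(in_channels=3, mid_channels=64, out_channels=3, num_block=4)
noisy = torch.rand(1, 3, 64, 64)   # noisy RGB image in (0, 1)
with torch.no_grad():
    denoised = net(noisy)          # same shape as the input
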
basicsr/archs/rrdbnet_arch.py
ADDED
@@ -0,0 +1,119 @@
import torch
from torch import nn as nn
from torch.nn import functional as F

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import default_init_weights, make_layer, pixel_unshuffle


class ResidualDenseBlock(nn.Module):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat=64, num_grow_ch=32):
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        # initialization
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # Empirically, we use 0.2 to scale the residual for better performance
        return x5 * 0.2 + x


class RRDB(nn.Module):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat, num_grow_ch=32):
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x):
        out = self.rdb1(x)
        out = self.rdb2(out)
        out = self.rdb3(out)
        # Empirically, we use 0.2 to scale the residual for better performance
        return out * 0.2 + x


@ARCH_REGISTRY.register()
class RRDBNet(nn.Module):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.

    We extend ESRGAN for scale x2 and scale x1.
    Note: This is one option for scale 1, scale 2 in RRDBNet.
    We first employ the pixel-unshuffle (an inverse operation of pixelshuffle) to reduce the spatial size
    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        scale (int): Upsampling factor. Support 1, 2 and 4. Default: 4.
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Default: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """

    def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
        super(RRDBNet, self).__init__()
        self.scale = scale
        if scale == 2:
            num_in_ch = num_in_ch * 4
        elif scale == 1:
            num_in_ch = num_in_ch * 16
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        if self.scale == 2:
            feat = pixel_unshuffle(x, scale=2)
        elif self.scale == 1:
            feat = pixel_unshuffle(x, scale=4)
        else:
            feat = x
        feat = self.conv_first(feat)
        body_feat = self.conv_body(self.body(feat))
        feat = feat + body_feat
        # upsample
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
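A minimal sketch of the x4 ESRGAN-style usage described in the docstring above; the configuration mirrors the defaults and the input size is arbitrary.

# Hedged example: RRDBNet as an ESRGAN-style x4 generator.
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet

net = RRDBNet(num_in_ch=3, num_out_ch=3, scale=4, num_feat=64, num_block=23, num_grow_ch=32)
lr = torch.rand(1, 3, 48, 48)      # low-resolution RGB in (0, 1)
with torch.no_grad():
    sr = net(lr)                   # -> (1, 3, 192, 192)
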
basicsr/archs/spynet_arch.py
ADDED
@@ -0,0 +1,96 @@
import math
import torch
from torch import nn as nn
from torch.nn import functional as F

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import flow_warp


class BasicModule(nn.Module):
    """Basic Module for SpyNet.
    """

    def __init__(self):
        super(BasicModule, self).__init__()

        self.basic_module = nn.Sequential(
            nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3))

    def forward(self, tensor_input):
        return self.basic_module(tensor_input)


@ARCH_REGISTRY.register()
class SpyNet(nn.Module):
    """SpyNet architecture.

    Args:
        load_path (str): path for pretrained SpyNet. Default: None.
    """

    def __init__(self, load_path=None):
        super(SpyNet, self).__init__()
        self.basic_module = nn.ModuleList([BasicModule() for _ in range(6)])
        if load_path:
            self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])

        self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def preprocess(self, tensor_input):
        tensor_output = (tensor_input - self.mean) / self.std
        return tensor_output

    def process(self, ref, supp):
        flow = []

        ref = [self.preprocess(ref)]
        supp = [self.preprocess(supp)]

        for level in range(5):
            ref.insert(0, F.avg_pool2d(input=ref[0], kernel_size=2, stride=2, count_include_pad=False))
            supp.insert(0, F.avg_pool2d(input=supp[0], kernel_size=2, stride=2, count_include_pad=False))

        flow = ref[0].new_zeros(
            [ref[0].size(0), 2,
             int(math.floor(ref[0].size(2) / 2.0)),
             int(math.floor(ref[0].size(3) / 2.0))])

        for level in range(len(ref)):
            upsampled_flow = F.interpolate(input=flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0

            if upsampled_flow.size(2) != ref[level].size(2):
                upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 0, 0, 1], mode='replicate')
            if upsampled_flow.size(3) != ref[level].size(3):
                upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 1, 0, 0], mode='replicate')

            flow = self.basic_module[level](torch.cat([
                ref[level],
                flow_warp(
                    supp[level], upsampled_flow.permute(0, 2, 3, 1), interp_mode='bilinear', padding_mode='border'),
                upsampled_flow
            ], 1)) + upsampled_flow

        return flow

    def forward(self, ref, supp):
        assert ref.size() == supp.size()

        h, w = ref.size(2), ref.size(3)
        w_floor = math.floor(math.ceil(w / 32.0) * 32.0)
        h_floor = math.floor(math.ceil(h / 32.0) * 32.0)

        ref = F.interpolate(input=ref, size=(h_floor, w_floor), mode='bilinear', align_corners=False)
        supp = F.interpolate(input=supp, size=(h_floor, w_floor), mode='bilinear', align_corners=False)

        flow = F.interpolate(input=self.process(ref, supp), size=(h, w), mode='bilinear', align_corners=False)

        flow[:, 0, :, :] *= float(w) / float(w_floor)
        flow[:, 1, :, :] *= float(h) / float(h_floor)

        return flow
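A minimal flow-estimation sketch for the class above; with load_path=None the weights are random, so the values only become meaningful after loading a pretrained checkpoint, and the frame size here is arbitrary.

# Hedged example: estimate a dense flow field between two frames.
import torch
from basicsr.archs.spynet_arch import SpyNet

net = SpyNet(load_path=None)       # pass a pretrained .pth path for meaningful flow
ref = torch.rand(1, 3, 64, 64)     # reference frame
supp = torch.rand(1, 3, 64, 64)    # supporting frame
with torch.no_grad():
    flow = net(ref, supp)          # -> (1, 2, 64, 64), per-pixel displacement field
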
basicsr/archs/srresnet_arch.py
ADDED
@@ -0,0 +1,65 @@
from torch import nn as nn
from torch.nn import functional as F

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import ResidualBlockNoBN, default_init_weights, make_layer


@ARCH_REGISTRY.register()
class MSRResNet(nn.Module):
    """Modified SRResNet.

    A compacted version modified from SRResNet in
    "Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network"
    It uses residual blocks without BN, similar to EDSR.
    Currently, it supports x2, x3 and x4 upsampling scale factor.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 64.
        num_block (int): Block number in the body network. Default: 16.
        upscale (int): Upsampling factor. Support x2, x3 and x4. Default: 4.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4):
        super(MSRResNet, self).__init__()
        self.upscale = upscale

        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat)

        # upsampling
        if self.upscale in [2, 3]:
            self.upconv1 = nn.Conv2d(num_feat, num_feat * self.upscale * self.upscale, 3, 1, 1)
            self.pixel_shuffle = nn.PixelShuffle(self.upscale)
        elif self.upscale == 4:
            self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
            self.pixel_shuffle = nn.PixelShuffle(2)

        self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # initialization
        default_init_weights([self.conv_first, self.upconv1, self.conv_hr, self.conv_last], 0.1)
        if self.upscale == 4:
            default_init_weights(self.upconv2, 0.1)

    def forward(self, x):
        feat = self.lrelu(self.conv_first(x))
        out = self.body(feat)

        if self.upscale == 4:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
        elif self.upscale in [2, 3]:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))

        out = self.conv_last(self.lrelu(self.conv_hr(out)))
        base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
        out += base
        return out
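A minimal sketch for the modified SRResNet above; sizes are arbitrary, and the forward pass adds a bilinear upsample of the input as a global skip.

# Hedged example: x4 super-resolution with MSRResNet.
import torch
from basicsr.archs.srresnet_arch import MSRResNet

net = MSRResNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4)
lr = torch.rand(1, 3, 40, 40)      # low-resolution RGB in (0, 1)
with torch.no_grad():
    sr = net(lr)                   # -> (1, 3, 160, 160)
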
basicsr/archs/stylegan2_arch.py
ADDED
@@ -0,0 +1,799 @@
1 |
+
import math
|
2 |
+
import random
|
3 |
+
import torch
|
4 |
+
from torch import nn
|
5 |
+
from torch.nn import functional as F
|
6 |
+
|
7 |
+
from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu
|
8 |
+
from basicsr.ops.upfirdn2d import upfirdn2d
|
9 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
10 |
+
|
11 |
+
|
12 |
+
class NormStyleCode(nn.Module):
|
13 |
+
|
14 |
+
def forward(self, x):
|
15 |
+
"""Normalize the style codes.
|
16 |
+
|
17 |
+
Args:
|
18 |
+
x (Tensor): Style codes with shape (b, c).
|
19 |
+
|
20 |
+
Returns:
|
21 |
+
Tensor: Normalized tensor.
|
22 |
+
"""
|
23 |
+
return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
|
24 |
+
|
25 |
+
|
26 |
+
def make_resample_kernel(k):
|
27 |
+
"""Make resampling kernel for UpFirDn.
|
28 |
+
|
29 |
+
Args:
|
30 |
+
k (list[int]): A list indicating the 1D resample kernel magnitude.
|
31 |
+
|
32 |
+
Returns:
|
33 |
+
Tensor: 2D resampled kernel.
|
34 |
+
"""
|
35 |
+
k = torch.tensor(k, dtype=torch.float32)
|
36 |
+
if k.ndim == 1:
|
37 |
+
k = k[None, :] * k[:, None] # to 2D kernel, outer product
|
38 |
+
# normalize
|
39 |
+
k /= k.sum()
|
40 |
+
return k
|
41 |
+
|
42 |
+
|
43 |
+
class UpFirDnUpsample(nn.Module):
|
44 |
+
"""Upsample, FIR filter, and downsample (upsampole version).
|
45 |
+
|
46 |
+
References:
|
47 |
+
1. https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.upfirdn.html # noqa: E501
|
48 |
+
2. http://www.ece.northwestern.edu/local-apps/matlabhelp/toolbox/signal/upfirdn.html # noqa: E501
|
49 |
+
|
50 |
+
Args:
|
51 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
52 |
+
magnitude.
|
53 |
+
factor (int): Upsampling scale factor. Default: 2.
|
54 |
+
"""
|
55 |
+
|
56 |
+
def __init__(self, resample_kernel, factor=2):
|
57 |
+
super(UpFirDnUpsample, self).__init__()
|
58 |
+
self.kernel = make_resample_kernel(resample_kernel) * (factor**2)
|
59 |
+
self.factor = factor
|
60 |
+
|
61 |
+
pad = self.kernel.shape[0] - factor
|
62 |
+
self.pad = ((pad + 1) // 2 + factor - 1, pad // 2)
|
63 |
+
|
64 |
+
def forward(self, x):
|
65 |
+
out = upfirdn2d(x, self.kernel.type_as(x), up=self.factor, down=1, pad=self.pad)
|
66 |
+
return out
|
67 |
+
|
68 |
+
def __repr__(self):
|
69 |
+
return (f'{self.__class__.__name__}(factor={self.factor})')
|
70 |
+
|
71 |
+
|
72 |
+
class UpFirDnDownsample(nn.Module):
|
73 |
+
"""Upsample, FIR filter, and downsample (downsampole version).
|
74 |
+
|
75 |
+
Args:
|
76 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
77 |
+
magnitude.
|
78 |
+
factor (int): Downsampling scale factor. Default: 2.
|
79 |
+
"""
|
80 |
+
|
81 |
+
def __init__(self, resample_kernel, factor=2):
|
82 |
+
super(UpFirDnDownsample, self).__init__()
|
83 |
+
self.kernel = make_resample_kernel(resample_kernel)
|
84 |
+
self.factor = factor
|
85 |
+
|
86 |
+
pad = self.kernel.shape[0] - factor
|
87 |
+
self.pad = ((pad + 1) // 2, pad // 2)
|
88 |
+
|
89 |
+
def forward(self, x):
|
90 |
+
out = upfirdn2d(x, self.kernel.type_as(x), up=1, down=self.factor, pad=self.pad)
|
91 |
+
return out
|
92 |
+
|
93 |
+
def __repr__(self):
|
94 |
+
return (f'{self.__class__.__name__}(factor={self.factor})')
|
95 |
+
|
96 |
+
|
97 |
+
class UpFirDnSmooth(nn.Module):
|
98 |
+
"""Upsample, FIR filter, and downsample (smooth version).
|
99 |
+
|
100 |
+
Args:
|
101 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
102 |
+
magnitude.
|
103 |
+
upsample_factor (int): Upsampling scale factor. Default: 1.
|
104 |
+
downsample_factor (int): Downsampling scale factor. Default: 1.
|
105 |
+
kernel_size (int): Kernel size: Default: 1.
|
106 |
+
"""
|
107 |
+
|
108 |
+
def __init__(self, resample_kernel, upsample_factor=1, downsample_factor=1, kernel_size=1):
|
109 |
+
super(UpFirDnSmooth, self).__init__()
|
110 |
+
self.upsample_factor = upsample_factor
|
111 |
+
self.downsample_factor = downsample_factor
|
112 |
+
self.kernel = make_resample_kernel(resample_kernel)
|
113 |
+
if upsample_factor > 1:
|
114 |
+
self.kernel = self.kernel * (upsample_factor**2)
|
115 |
+
|
116 |
+
if upsample_factor > 1:
|
117 |
+
pad = (self.kernel.shape[0] - upsample_factor) - (kernel_size - 1)
|
118 |
+
self.pad = ((pad + 1) // 2 + upsample_factor - 1, pad // 2 + 1)
|
119 |
+
elif downsample_factor > 1:
|
120 |
+
pad = (self.kernel.shape[0] - downsample_factor) + (kernel_size - 1)
|
121 |
+
self.pad = ((pad + 1) // 2, pad // 2)
|
122 |
+
else:
|
123 |
+
raise NotImplementedError
|
124 |
+
|
125 |
+
def forward(self, x):
|
126 |
+
out = upfirdn2d(x, self.kernel.type_as(x), up=1, down=1, pad=self.pad)
|
127 |
+
return out
|
128 |
+
|
129 |
+
def __repr__(self):
|
130 |
+
return (f'{self.__class__.__name__}(upsample_factor={self.upsample_factor}'
|
131 |
+
f', downsample_factor={self.downsample_factor})')
|
132 |
+
|
133 |
+
|
134 |
+
class EqualLinear(nn.Module):
|
135 |
+
"""Equalized Linear as StyleGAN2.
|
136 |
+
|
137 |
+
Args:
|
138 |
+
in_channels (int): Size of each sample.
|
139 |
+
out_channels (int): Size of each output sample.
|
140 |
+
bias (bool): If set to ``False``, the layer will not learn an additive
|
141 |
+
bias. Default: ``True``.
|
142 |
+
bias_init_val (float): Bias initialized value. Default: 0.
|
143 |
+
lr_mul (float): Learning rate multiplier. Default: 1.
|
144 |
+
activation (None | str): The activation after ``linear`` operation.
|
145 |
+
Supported: 'fused_lrelu', None. Default: None.
|
146 |
+
"""
|
147 |
+
|
148 |
+
def __init__(self, in_channels, out_channels, bias=True, bias_init_val=0, lr_mul=1, activation=None):
|
149 |
+
super(EqualLinear, self).__init__()
|
150 |
+
self.in_channels = in_channels
|
151 |
+
self.out_channels = out_channels
|
152 |
+
self.lr_mul = lr_mul
|
153 |
+
self.activation = activation
|
154 |
+
if self.activation not in ['fused_lrelu', None]:
|
155 |
+
raise ValueError(f'Wrong activation value in EqualLinear: {activation}'
|
156 |
+
"Supported ones are: ['fused_lrelu', None].")
|
157 |
+
self.scale = (1 / math.sqrt(in_channels)) * lr_mul
|
158 |
+
|
159 |
+
self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul))
|
160 |
+
if bias:
|
161 |
+
self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
|
162 |
+
else:
|
163 |
+
self.register_parameter('bias', None)
|
164 |
+
|
165 |
+
def forward(self, x):
|
166 |
+
if self.bias is None:
|
167 |
+
bias = None
|
168 |
+
else:
|
169 |
+
bias = self.bias * self.lr_mul
|
170 |
+
if self.activation == 'fused_lrelu':
|
171 |
+
out = F.linear(x, self.weight * self.scale)
|
172 |
+
out = fused_leaky_relu(out, bias)
|
173 |
+
else:
|
174 |
+
out = F.linear(x, self.weight * self.scale, bias=bias)
|
175 |
+
return out
|
176 |
+
|
177 |
+
def __repr__(self):
|
178 |
+
return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
|
179 |
+
f'out_channels={self.out_channels}, bias={self.bias is not None})')
|
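The equalized learning-rate trick is what the stored-weight division by lr_mul plus the runtime scale implements; a small self-contained sketch of the same arithmetic (illustrative sizes, plain torch only):

import torch
import torch.nn.functional as F

# Sketch of the equalized learning-rate trick used by EqualLinear (illustrative sizes).
in_ch, out_ch, lr_mul = 512, 512, 0.01
weight = torch.randn(out_ch, in_ch) / lr_mul   # weights are stored divided by lr_mul ...
scale = (1 / in_ch**0.5) * lr_mul              # ... and rescaled on every forward pass
x = torch.randn(4, in_ch)
y = F.linear(x, weight * scale)
print(y.std().item())                          # ~1.0: activations keep roughly unit variance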
180 |
+
|
181 |
+
|
182 |
+
class ModulatedConv2d(nn.Module):
|
183 |
+
"""Modulated Conv2d used in StyleGAN2.
|
184 |
+
|
185 |
+
There is no bias in ModulatedConv2d.
|
186 |
+
|
187 |
+
Args:
|
188 |
+
in_channels (int): Channel number of the input.
|
189 |
+
out_channels (int): Channel number of the output.
|
190 |
+
kernel_size (int): Size of the convolving kernel.
|
191 |
+
num_style_feat (int): Channel number of style features.
|
192 |
+
demodulate (bool): Whether to demodulate in the conv layer.
|
193 |
+
Default: True.
|
194 |
+
sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
|
195 |
+
Default: None.
|
196 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
197 |
+
magnitude. Default: (1, 3, 3, 1).
|
198 |
+
eps (float): A value added to the denominator for numerical stability.
|
199 |
+
Default: 1e-8.
|
200 |
+
"""
|
201 |
+
|
202 |
+
def __init__(self,
|
203 |
+
in_channels,
|
204 |
+
out_channels,
|
205 |
+
kernel_size,
|
206 |
+
num_style_feat,
|
207 |
+
demodulate=True,
|
208 |
+
sample_mode=None,
|
209 |
+
resample_kernel=(1, 3, 3, 1),
|
210 |
+
eps=1e-8):
|
211 |
+
super(ModulatedConv2d, self).__init__()
|
212 |
+
self.in_channels = in_channels
|
213 |
+
self.out_channels = out_channels
|
214 |
+
self.kernel_size = kernel_size
|
215 |
+
self.demodulate = demodulate
|
216 |
+
self.sample_mode = sample_mode
|
217 |
+
self.eps = eps
|
218 |
+
|
219 |
+
if self.sample_mode == 'upsample':
|
220 |
+
self.smooth = UpFirDnSmooth(
|
221 |
+
resample_kernel, upsample_factor=2, downsample_factor=1, kernel_size=kernel_size)
|
222 |
+
elif self.sample_mode == 'downsample':
|
223 |
+
self.smooth = UpFirDnSmooth(
|
224 |
+
resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size)
|
225 |
+
elif self.sample_mode is None:
|
226 |
+
pass
|
227 |
+
else:
|
228 |
+
raise ValueError(f'Wrong sample mode {self.sample_mode}, '
|
229 |
+
"supported ones are ['upsample', 'downsample', None].")
|
230 |
+
|
231 |
+
self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
|
232 |
+
# modulation inside each modulated conv
|
233 |
+
self.modulation = EqualLinear(
|
234 |
+
num_style_feat, in_channels, bias=True, bias_init_val=1, lr_mul=1, activation=None)
|
235 |
+
|
236 |
+
self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size))
|
237 |
+
self.padding = kernel_size // 2
|
238 |
+
|
239 |
+
def forward(self, x, style):
|
240 |
+
"""Forward function.
|
241 |
+
|
242 |
+
Args:
|
243 |
+
x (Tensor): Tensor with shape (b, c, h, w).
|
244 |
+
style (Tensor): Tensor with shape (b, num_style_feat).
|
245 |
+
|
246 |
+
Returns:
|
247 |
+
Tensor: Modulated tensor after convolution.
|
248 |
+
"""
|
249 |
+
b, c, h, w = x.shape # c = c_in
|
250 |
+
# weight modulation
|
251 |
+
style = self.modulation(style).view(b, 1, c, 1, 1)
|
252 |
+
# self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
|
253 |
+
weight = self.scale * self.weight * style # (b, c_out, c_in, k, k)
|
254 |
+
|
255 |
+
if self.demodulate:
|
256 |
+
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
|
257 |
+
weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
|
258 |
+
|
259 |
+
weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
|
260 |
+
|
261 |
+
if self.sample_mode == 'upsample':
|
262 |
+
x = x.view(1, b * c, h, w)
|
263 |
+
weight = weight.view(b, self.out_channels, c, self.kernel_size, self.kernel_size)
|
264 |
+
weight = weight.transpose(1, 2).reshape(b * c, self.out_channels, self.kernel_size, self.kernel_size)
|
265 |
+
out = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=b)
|
266 |
+
out = out.view(b, self.out_channels, *out.shape[2:4])
|
267 |
+
out = self.smooth(out)
|
268 |
+
elif self.sample_mode == 'downsample':
|
269 |
+
x = self.smooth(x)
|
270 |
+
x = x.view(1, b * c, *x.shape[2:4])
|
271 |
+
out = F.conv2d(x, weight, padding=0, stride=2, groups=b)
|
272 |
+
out = out.view(b, self.out_channels, *out.shape[2:4])
|
273 |
+
else:
|
274 |
+
x = x.view(1, b * c, h, w)
|
275 |
+
# weight: (b*c_out, c_in, k, k), groups=b
|
276 |
+
out = F.conv2d(x, weight, padding=self.padding, groups=b)
|
277 |
+
out = out.view(b, self.out_channels, *out.shape[2:4])
|
278 |
+
|
279 |
+
return out
|
280 |
+
|
281 |
+
def __repr__(self):
|
282 |
+
return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
|
283 |
+
f'out_channels={self.out_channels}, '
|
284 |
+
f'kernel_size={self.kernel_size}, '
|
285 |
+
f'demodulate={self.demodulate}, sample_mode={self.sample_mode})')
|
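The demodulation factor above rescales every modulated filter to (approximately) unit L2 norm per sample, which is what keeps feature magnitudes stable across layers; a quick self-contained check of that property (random weights and styles, the extra 1/sqrt(fan_in) scale omitted for brevity):

import torch

# Sketch: after demodulation, each (sample, out_channel) filter has ~unit L2 norm.
b, c_in, c_out, k, eps = 2, 8, 16, 3, 1e-8
weight = torch.randn(1, c_out, c_in, k, k)
style = torch.randn(b, 1, c_in, 1, 1)              # stand-in for the modulation output
w = weight * style                                 # modulated weights: (b, c_out, c_in, k, k)
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + eps)
w = w * demod.view(b, c_out, 1, 1, 1)
print(w.pow(2).sum([2, 3, 4]))                     # all entries ~1.0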
286 |
+
|
287 |
+
|
288 |
+
class StyleConv(nn.Module):
|
289 |
+
"""Style conv.
|
290 |
+
|
291 |
+
Args:
|
292 |
+
in_channels (int): Channel number of the input.
|
293 |
+
out_channels (int): Channel number of the output.
|
294 |
+
kernel_size (int): Size of the convolving kernel.
|
295 |
+
num_style_feat (int): Channel number of style features.
|
296 |
+
demodulate (bool): Whether to demodulate in the conv layer. Default: True.
|
297 |
+
sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
|
298 |
+
Default: None.
|
299 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
300 |
+
magnitude. Default: (1, 3, 3, 1).
|
301 |
+
"""
|
302 |
+
|
303 |
+
def __init__(self,
|
304 |
+
in_channels,
|
305 |
+
out_channels,
|
306 |
+
kernel_size,
|
307 |
+
num_style_feat,
|
308 |
+
demodulate=True,
|
309 |
+
sample_mode=None,
|
310 |
+
resample_kernel=(1, 3, 3, 1)):
|
311 |
+
super(StyleConv, self).__init__()
|
312 |
+
self.modulated_conv = ModulatedConv2d(
|
313 |
+
in_channels,
|
314 |
+
out_channels,
|
315 |
+
kernel_size,
|
316 |
+
num_style_feat,
|
317 |
+
demodulate=demodulate,
|
318 |
+
sample_mode=sample_mode,
|
319 |
+
resample_kernel=resample_kernel)
|
320 |
+
self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
|
321 |
+
self.activate = FusedLeakyReLU(out_channels)
|
322 |
+
|
323 |
+
def forward(self, x, style, noise=None):
|
324 |
+
# modulate
|
325 |
+
out = self.modulated_conv(x, style)
|
326 |
+
# noise injection
|
327 |
+
if noise is None:
|
328 |
+
b, _, h, w = out.shape
|
329 |
+
noise = out.new_empty(b, 1, h, w).normal_()
|
330 |
+
out = out + self.weight * noise
|
331 |
+
# activation (with bias)
|
332 |
+
out = self.activate(out)
|
333 |
+
return out
|
334 |
+
|
335 |
+
|
336 |
+
class ToRGB(nn.Module):
|
337 |
+
"""To RGB from features.
|
338 |
+
|
339 |
+
Args:
|
340 |
+
in_channels (int): Channel number of input.
|
341 |
+
num_style_feat (int): Channel number of style features.
|
342 |
+
upsample (bool): Whether to upsample. Default: True.
|
343 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
344 |
+
magnitude. Default: (1, 3, 3, 1).
|
345 |
+
"""
|
346 |
+
|
347 |
+
def __init__(self, in_channels, num_style_feat, upsample=True, resample_kernel=(1, 3, 3, 1)):
|
348 |
+
super(ToRGB, self).__init__()
|
349 |
+
if upsample:
|
350 |
+
self.upsample = UpFirDnUpsample(resample_kernel, factor=2)
|
351 |
+
else:
|
352 |
+
self.upsample = None
|
353 |
+
self.modulated_conv = ModulatedConv2d(
|
354 |
+
in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None)
|
355 |
+
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
|
356 |
+
|
357 |
+
def forward(self, x, style, skip=None):
|
358 |
+
"""Forward function.
|
359 |
+
|
360 |
+
Args:
|
361 |
+
x (Tensor): Feature tensor with shape (b, c, h, w).
|
362 |
+
style (Tensor): Tensor with shape (b, num_style_feat).
|
363 |
+
skip (Tensor): Base/skip tensor. Default: None.
|
364 |
+
|
365 |
+
Returns:
|
366 |
+
Tensor: RGB images.
|
367 |
+
"""
|
368 |
+
out = self.modulated_conv(x, style)
|
369 |
+
out = out + self.bias
|
370 |
+
if skip is not None:
|
371 |
+
if self.upsample:
|
372 |
+
skip = self.upsample(skip)
|
373 |
+
out = out + skip
|
374 |
+
return out
|
375 |
+
|
376 |
+
|
377 |
+
class ConstantInput(nn.Module):
|
378 |
+
"""Constant input.
|
379 |
+
|
380 |
+
Args:
|
381 |
+
num_channel (int): Channel number of constant input.
|
382 |
+
size (int): Spatial size of constant input.
|
383 |
+
"""
|
384 |
+
|
385 |
+
def __init__(self, num_channel, size):
|
386 |
+
super(ConstantInput, self).__init__()
|
387 |
+
self.weight = nn.Parameter(torch.randn(1, num_channel, size, size))
|
388 |
+
|
389 |
+
def forward(self, batch):
|
390 |
+
out = self.weight.repeat(batch, 1, 1, 1)
|
391 |
+
return out
|
392 |
+
|
393 |
+
|
394 |
+
@ARCH_REGISTRY.register()
|
395 |
+
class StyleGAN2Generator(nn.Module):
|
396 |
+
"""StyleGAN2 Generator.
|
397 |
+
|
398 |
+
Args:
|
399 |
+
out_size (int): The spatial size of outputs.
|
400 |
+
num_style_feat (int): Channel number of style features. Default: 512.
|
401 |
+
num_mlp (int): Layer number of MLP style layers. Default: 8.
|
402 |
+
channel_multiplier (int): Channel multiplier for large networks of
|
403 |
+
StyleGAN2. Default: 2.
|
404 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
405 |
+
magnitude. An outer product will be applied to extend the 1D resample
|
406 |
+
kernel to a 2D resample kernel. Default: (1, 3, 3, 1).
|
407 |
+
lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
|
408 |
+
narrow (float): Narrow ratio for channels. Default: 1.0.
|
409 |
+
"""
|
410 |
+
|
411 |
+
def __init__(self,
|
412 |
+
out_size,
|
413 |
+
num_style_feat=512,
|
414 |
+
num_mlp=8,
|
415 |
+
channel_multiplier=2,
|
416 |
+
resample_kernel=(1, 3, 3, 1),
|
417 |
+
lr_mlp=0.01,
|
418 |
+
narrow=1):
|
419 |
+
super(StyleGAN2Generator, self).__init__()
|
420 |
+
# Style MLP layers
|
421 |
+
self.num_style_feat = num_style_feat
|
422 |
+
style_mlp_layers = [NormStyleCode()]
|
423 |
+
for i in range(num_mlp):
|
424 |
+
style_mlp_layers.append(
|
425 |
+
EqualLinear(
|
426 |
+
num_style_feat, num_style_feat, bias=True, bias_init_val=0, lr_mul=lr_mlp,
|
427 |
+
activation='fused_lrelu'))
|
428 |
+
self.style_mlp = nn.Sequential(*style_mlp_layers)
|
429 |
+
|
430 |
+
channels = {
|
431 |
+
'4': int(512 * narrow),
|
432 |
+
'8': int(512 * narrow),
|
433 |
+
'16': int(512 * narrow),
|
434 |
+
'32': int(512 * narrow),
|
435 |
+
'64': int(256 * channel_multiplier * narrow),
|
436 |
+
'128': int(128 * channel_multiplier * narrow),
|
437 |
+
'256': int(64 * channel_multiplier * narrow),
|
438 |
+
'512': int(32 * channel_multiplier * narrow),
|
439 |
+
'1024': int(16 * channel_multiplier * narrow)
|
440 |
+
}
|
441 |
+
self.channels = channels
|
442 |
+
|
443 |
+
self.constant_input = ConstantInput(channels['4'], size=4)
|
444 |
+
self.style_conv1 = StyleConv(
|
445 |
+
channels['4'],
|
446 |
+
channels['4'],
|
447 |
+
kernel_size=3,
|
448 |
+
num_style_feat=num_style_feat,
|
449 |
+
demodulate=True,
|
450 |
+
sample_mode=None,
|
451 |
+
resample_kernel=resample_kernel)
|
452 |
+
self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False, resample_kernel=resample_kernel)
|
453 |
+
|
454 |
+
self.log_size = int(math.log(out_size, 2))
|
455 |
+
self.num_layers = (self.log_size - 2) * 2 + 1
|
456 |
+
self.num_latent = self.log_size * 2 - 2
|
457 |
+
|
458 |
+
self.style_convs = nn.ModuleList()
|
459 |
+
self.to_rgbs = nn.ModuleList()
|
460 |
+
self.noises = nn.Module()
|
461 |
+
|
462 |
+
in_channels = channels['4']
|
463 |
+
# noise
|
464 |
+
for layer_idx in range(self.num_layers):
|
465 |
+
resolution = 2**((layer_idx + 5) // 2)
|
466 |
+
shape = [1, 1, resolution, resolution]
|
467 |
+
self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape))
|
468 |
+
# style convs and to_rgbs
|
469 |
+
for i in range(3, self.log_size + 1):
|
470 |
+
out_channels = channels[f'{2**i}']
|
471 |
+
self.style_convs.append(
|
472 |
+
StyleConv(
|
473 |
+
in_channels,
|
474 |
+
out_channels,
|
475 |
+
kernel_size=3,
|
476 |
+
num_style_feat=num_style_feat,
|
477 |
+
demodulate=True,
|
478 |
+
sample_mode='upsample',
|
479 |
+
resample_kernel=resample_kernel,
|
480 |
+
))
|
481 |
+
self.style_convs.append(
|
482 |
+
StyleConv(
|
483 |
+
out_channels,
|
484 |
+
out_channels,
|
485 |
+
kernel_size=3,
|
486 |
+
num_style_feat=num_style_feat,
|
487 |
+
demodulate=True,
|
488 |
+
sample_mode=None,
|
489 |
+
resample_kernel=resample_kernel))
|
490 |
+
self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True, resample_kernel=resample_kernel))
|
491 |
+
in_channels = out_channels
|
492 |
+
|
493 |
+
def make_noise(self):
|
494 |
+
"""Make noise for noise injection."""
|
495 |
+
device = self.constant_input.weight.device
|
496 |
+
noises = [torch.randn(1, 1, 4, 4, device=device)]
|
497 |
+
|
498 |
+
for i in range(3, self.log_size + 1):
|
499 |
+
for _ in range(2):
|
500 |
+
noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))
|
501 |
+
|
502 |
+
return noises
|
503 |
+
|
504 |
+
def get_latent(self, x):
|
505 |
+
return self.style_mlp(x)
|
506 |
+
|
507 |
+
def mean_latent(self, num_latent):
|
508 |
+
latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device)
|
509 |
+
latent = self.style_mlp(latent_in).mean(0, keepdim=True)
|
510 |
+
return latent
|
511 |
+
|
512 |
+
def forward(self,
|
513 |
+
styles,
|
514 |
+
input_is_latent=False,
|
515 |
+
noise=None,
|
516 |
+
randomize_noise=True,
|
517 |
+
truncation=1,
|
518 |
+
truncation_latent=None,
|
519 |
+
inject_index=None,
|
520 |
+
return_latents=False):
|
521 |
+
"""Forward function for StyleGAN2Generator.
|
522 |
+
|
523 |
+
Args:
|
524 |
+
styles (list[Tensor]): Sample codes of styles.
|
525 |
+
input_is_latent (bool): Whether input is latent style.
|
526 |
+
Default: False.
|
527 |
+
noise (Tensor | None): Input noise or None. Default: None.
|
528 |
+
randomize_noise (bool): Randomize noise, used when 'noise' is
|
529 |
+
None. Default: True.
|
530 |
+
truncation (float): The truncation ratio for the style truncation trick. Default: 1.
|
531 |
+
truncation_latent (Tensor | None): The mean latent used for truncation. Default: None.
|
532 |
+
inject_index (int | None): The injection index for mixing noise.
|
533 |
+
Default: None.
|
534 |
+
return_latents (bool): Whether to return style latents.
|
535 |
+
Default: False.
|
536 |
+
"""
|
537 |
+
# style codes -> latents with Style MLP layer
|
538 |
+
if not input_is_latent:
|
539 |
+
styles = [self.style_mlp(s) for s in styles]
|
540 |
+
# noises
|
541 |
+
if noise is None:
|
542 |
+
if randomize_noise:
|
543 |
+
noise = [None] * self.num_layers # for each style conv layer
|
544 |
+
else: # use the stored noise
|
545 |
+
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
|
546 |
+
# style truncation
|
547 |
+
if truncation < 1:
|
548 |
+
style_truncation = []
|
549 |
+
for style in styles:
|
550 |
+
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
|
551 |
+
styles = style_truncation
|
552 |
+
# get style latent with injection
|
553 |
+
if len(styles) == 1:
|
554 |
+
inject_index = self.num_latent
|
555 |
+
|
556 |
+
if styles[0].ndim < 3:
|
557 |
+
# repeat latent code for all the layers
|
558 |
+
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
|
559 |
+
else: # used for encoder with different latent code for each layer
|
560 |
+
latent = styles[0]
|
561 |
+
elif len(styles) == 2: # mixing noises
|
562 |
+
if inject_index is None:
|
563 |
+
inject_index = random.randint(1, self.num_latent - 1)
|
564 |
+
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
|
565 |
+
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
|
566 |
+
latent = torch.cat([latent1, latent2], 1)
|
567 |
+
|
568 |
+
# main generation
|
569 |
+
out = self.constant_input(latent.shape[0])
|
570 |
+
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
|
571 |
+
skip = self.to_rgb1(out, latent[:, 1])
|
572 |
+
|
573 |
+
i = 1
|
574 |
+
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
|
575 |
+
noise[2::2], self.to_rgbs):
|
576 |
+
out = conv1(out, latent[:, i], noise=noise1)
|
577 |
+
out = conv2(out, latent[:, i + 1], noise=noise2)
|
578 |
+
skip = to_rgb(out, latent[:, i + 2], skip)
|
579 |
+
i += 2
|
580 |
+
|
581 |
+
image = skip
|
582 |
+
|
583 |
+
if return_latents:
|
584 |
+
return image, latent
|
585 |
+
else:
|
586 |
+
return image, None
|
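A minimal usage sketch for the generator (sizes are illustrative; it assumes the compiled fused_act/upfirdn2d ops that this file imports are available):

import torch
from basicsr.archs.stylegan2_arch import StyleGAN2Generator

# Sketch: sample a batch of 256x256 images from randomly initialized weights.
net = StyleGAN2Generator(out_size=256, num_style_feat=512, num_mlp=8)
z = torch.randn(4, 512)                          # style codes
img, _ = net([z], input_is_latent=False, randomize_noise=True)
print(img.shape)                                 # torch.Size([4, 3, 256, 256])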
587 |
+
|
588 |
+
|
589 |
+
class ScaledLeakyReLU(nn.Module):
|
590 |
+
"""Scaled LeakyReLU.
|
591 |
+
|
592 |
+
Args:
|
593 |
+
negative_slope (float): Negative slope. Default: 0.2.
|
594 |
+
"""
|
595 |
+
|
596 |
+
def __init__(self, negative_slope=0.2):
|
597 |
+
super(ScaledLeakyReLU, self).__init__()
|
598 |
+
self.negative_slope = negative_slope
|
599 |
+
|
600 |
+
def forward(self, x):
|
601 |
+
out = F.leaky_relu(x, negative_slope=self.negative_slope)
|
602 |
+
return out * math.sqrt(2)
|
603 |
+
|
604 |
+
|
605 |
+
class EqualConv2d(nn.Module):
|
606 |
+
"""Equalized Linear as StyleGAN2.
|
607 |
+
|
608 |
+
Args:
|
609 |
+
in_channels (int): Channel number of the input.
|
610 |
+
out_channels (int): Channel number of the output.
|
611 |
+
kernel_size (int): Size of the convolving kernel.
|
612 |
+
stride (int): Stride of the convolution. Default: 1
|
613 |
+
padding (int): Zero-padding added to both sides of the input.
|
614 |
+
Default: 0.
|
615 |
+
bias (bool): If ``True``, adds a learnable bias to the output.
|
616 |
+
Default: ``True``.
|
617 |
+
bias_init_val (float): Bias initialized value. Default: 0.
|
618 |
+
"""
|
619 |
+
|
620 |
+
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0):
|
621 |
+
super(EqualConv2d, self).__init__()
|
622 |
+
self.in_channels = in_channels
|
623 |
+
self.out_channels = out_channels
|
624 |
+
self.kernel_size = kernel_size
|
625 |
+
self.stride = stride
|
626 |
+
self.padding = padding
|
627 |
+
self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
|
628 |
+
|
629 |
+
self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
|
630 |
+
if bias:
|
631 |
+
self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
|
632 |
+
else:
|
633 |
+
self.register_parameter('bias', None)
|
634 |
+
|
635 |
+
def forward(self, x):
|
636 |
+
out = F.conv2d(
|
637 |
+
x,
|
638 |
+
self.weight * self.scale,
|
639 |
+
bias=self.bias,
|
640 |
+
stride=self.stride,
|
641 |
+
padding=self.padding,
|
642 |
+
)
|
643 |
+
|
644 |
+
return out
|
645 |
+
|
646 |
+
def __repr__(self):
|
647 |
+
return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
|
648 |
+
f'out_channels={self.out_channels}, '
|
649 |
+
f'kernel_size={self.kernel_size},'
|
650 |
+
f' stride={self.stride}, padding={self.padding}, '
|
651 |
+
f'bias={self.bias is not None})')
|
652 |
+
|
653 |
+
|
654 |
+
class ConvLayer(nn.Sequential):
|
655 |
+
"""Conv Layer used in StyleGAN2 Discriminator.
|
656 |
+
|
657 |
+
Args:
|
658 |
+
in_channels (int): Channel number of the input.
|
659 |
+
out_channels (int): Channel number of the output.
|
660 |
+
kernel_size (int): Kernel size.
|
661 |
+
downsample (bool): Whether downsample by a factor of 2.
|
662 |
+
Default: False.
|
663 |
+
resample_kernel (list[int]): A list indicating the 1D resample
|
664 |
+
kernel magnitude. An outer product will be applied to
|
665 |
+
extend the 1D resample kernel to a 2D resample kernel.
|
666 |
+
Default: (1, 3, 3, 1).
|
667 |
+
bias (bool): Whether to use bias. Default: True.
|
668 |
+
activate (bool): Whether to use activation. Default: True.
|
669 |
+
"""
|
670 |
+
|
671 |
+
def __init__(self,
|
672 |
+
in_channels,
|
673 |
+
out_channels,
|
674 |
+
kernel_size,
|
675 |
+
downsample=False,
|
676 |
+
resample_kernel=(1, 3, 3, 1),
|
677 |
+
bias=True,
|
678 |
+
activate=True):
|
679 |
+
layers = []
|
680 |
+
# downsample
|
681 |
+
if downsample:
|
682 |
+
layers.append(
|
683 |
+
UpFirDnSmooth(resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size))
|
684 |
+
stride = 2
|
685 |
+
self.padding = 0
|
686 |
+
else:
|
687 |
+
stride = 1
|
688 |
+
self.padding = kernel_size // 2
|
689 |
+
# conv
|
690 |
+
layers.append(
|
691 |
+
EqualConv2d(
|
692 |
+
in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias
|
693 |
+
and not activate))
|
694 |
+
# activation
|
695 |
+
if activate:
|
696 |
+
if bias:
|
697 |
+
layers.append(FusedLeakyReLU(out_channels))
|
698 |
+
else:
|
699 |
+
layers.append(ScaledLeakyReLU(0.2))
|
700 |
+
|
701 |
+
super(ConvLayer, self).__init__(*layers)
|
702 |
+
|
703 |
+
|
704 |
+
class ResBlock(nn.Module):
|
705 |
+
"""Residual block used in StyleGAN2 Discriminator.
|
706 |
+
|
707 |
+
Args:
|
708 |
+
in_channels (int): Channel number of the input.
|
709 |
+
out_channels (int): Channel number of the output.
|
710 |
+
resample_kernel (list[int]): A list indicating the 1D resample
|
711 |
+
kernel magnitude. An outer product will be applied to
|
712 |
+
extend the 1D resample kernel to a 2D resample kernel.
|
713 |
+
Default: (1, 3, 3, 1).
|
714 |
+
"""
|
715 |
+
|
716 |
+
def __init__(self, in_channels, out_channels, resample_kernel=(1, 3, 3, 1)):
|
717 |
+
super(ResBlock, self).__init__()
|
718 |
+
|
719 |
+
self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)
|
720 |
+
self.conv2 = ConvLayer(
|
721 |
+
in_channels, out_channels, 3, downsample=True, resample_kernel=resample_kernel, bias=True, activate=True)
|
722 |
+
self.skip = ConvLayer(
|
723 |
+
in_channels, out_channels, 1, downsample=True, resample_kernel=resample_kernel, bias=False, activate=False)
|
724 |
+
|
725 |
+
def forward(self, x):
|
726 |
+
out = self.conv1(x)
|
727 |
+
out = self.conv2(out)
|
728 |
+
skip = self.skip(x)
|
729 |
+
out = (out + skip) / math.sqrt(2)
|
730 |
+
return out
|
731 |
+
|
732 |
+
|
733 |
+
@ARCH_REGISTRY.register()
|
734 |
+
class StyleGAN2Discriminator(nn.Module):
|
735 |
+
"""StyleGAN2 Discriminator.
|
736 |
+
|
737 |
+
Args:
|
738 |
+
out_size (int): The spatial size of outputs.
|
739 |
+
channel_multiplier (int): Channel multiplier for large networks of
|
740 |
+
StyleGAN2. Default: 2.
|
741 |
+
resample_kernel (list[int]): A list indicating the 1D resample kernel
|
742 |
+
magnitude. An outer product will be applied to extend the 1D resample
|
743 |
+
kernel to a 2D resample kernel. Default: (1, 3, 3, 1).
|
744 |
+
stddev_group (int): For group stddev statistics. Default: 4.
|
745 |
+
narrow (float): Narrow ratio for channels. Default: 1.0.
|
746 |
+
"""
|
747 |
+
|
748 |
+
def __init__(self, out_size, channel_multiplier=2, resample_kernel=(1, 3, 3, 1), stddev_group=4, narrow=1):
|
749 |
+
super(StyleGAN2Discriminator, self).__init__()
|
750 |
+
|
751 |
+
channels = {
|
752 |
+
'4': int(512 * narrow),
|
753 |
+
'8': int(512 * narrow),
|
754 |
+
'16': int(512 * narrow),
|
755 |
+
'32': int(512 * narrow),
|
756 |
+
'64': int(256 * channel_multiplier * narrow),
|
757 |
+
'128': int(128 * channel_multiplier * narrow),
|
758 |
+
'256': int(64 * channel_multiplier * narrow),
|
759 |
+
'512': int(32 * channel_multiplier * narrow),
|
760 |
+
'1024': int(16 * channel_multiplier * narrow)
|
761 |
+
}
|
762 |
+
|
763 |
+
log_size = int(math.log(out_size, 2))
|
764 |
+
|
765 |
+
conv_body = [ConvLayer(3, channels[f'{out_size}'], 1, bias=True, activate=True)]
|
766 |
+
|
767 |
+
in_channels = channels[f'{out_size}']
|
768 |
+
for i in range(log_size, 2, -1):
|
769 |
+
out_channels = channels[f'{2**(i - 1)}']
|
770 |
+
conv_body.append(ResBlock(in_channels, out_channels, resample_kernel))
|
771 |
+
in_channels = out_channels
|
772 |
+
self.conv_body = nn.Sequential(*conv_body)
|
773 |
+
|
774 |
+
self.final_conv = ConvLayer(in_channels + 1, channels['4'], 3, bias=True, activate=True)
|
775 |
+
self.final_linear = nn.Sequential(
|
776 |
+
EqualLinear(
|
777 |
+
channels['4'] * 4 * 4, channels['4'], bias=True, bias_init_val=0, lr_mul=1, activation='fused_lrelu'),
|
778 |
+
EqualLinear(channels['4'], 1, bias=True, bias_init_val=0, lr_mul=1, activation=None),
|
779 |
+
)
|
780 |
+
self.stddev_group = stddev_group
|
781 |
+
self.stddev_feat = 1
|
782 |
+
|
783 |
+
def forward(self, x):
|
784 |
+
out = self.conv_body(x)
|
785 |
+
|
786 |
+
b, c, h, w = out.shape
|
787 |
+
# concatenate a group stddev statistics to out
|
788 |
+
group = min(b, self.stddev_group) # Minibatch must be divisible by (or smaller than) group_size
|
789 |
+
stddev = out.view(group, -1, self.stddev_feat, c // self.stddev_feat, h, w)
|
790 |
+
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
|
791 |
+
stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
|
792 |
+
stddev = stddev.repeat(group, 1, h, w)
|
793 |
+
out = torch.cat([out, stddev], 1)
|
794 |
+
|
795 |
+
out = self.final_conv(out)
|
796 |
+
out = out.view(b, -1)
|
797 |
+
out = self.final_linear(out)
|
798 |
+
|
799 |
+
return out
|
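The discriminator mirrors the generator's resolution schedule, so the two can be paired directly; a minimal sketch (illustrative sizes, same assumption about the compiled ops):

import torch
from basicsr.archs.stylegan2_arch import StyleGAN2Discriminator, StyleGAN2Generator

# Sketch: feed generator output straight into the matching discriminator.
g = StyleGAN2Generator(out_size=256)
d = StyleGAN2Discriminator(out_size=256)
fake, _ = g([torch.randn(4, 512)])
score = d(fake)
print(score.shape)                               # torch.Size([4, 1])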
basicsr/archs/swinir_arch.py
ADDED
@@ -0,0 +1,956 @@
1 |
+
# Modified from https://github.com/JingyunLiang/SwinIR
|
2 |
+
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
|
3 |
+
# Originally Written by Ze Liu, Modified by Jingyun Liang.
|
4 |
+
|
5 |
+
import math
|
6 |
+
import torch
|
7 |
+
import torch.nn as nn
|
8 |
+
import torch.utils.checkpoint as checkpoint
|
9 |
+
|
10 |
+
from basicsr.utils.registry import ARCH_REGISTRY
|
11 |
+
from .arch_util import to_2tuple, trunc_normal_
|
12 |
+
|
13 |
+
|
14 |
+
def drop_path(x, drop_prob: float = 0., training: bool = False):
|
15 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
16 |
+
|
17 |
+
From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
|
18 |
+
"""
|
19 |
+
if drop_prob == 0. or not training:
|
20 |
+
return x
|
21 |
+
keep_prob = 1 - drop_prob
|
22 |
+
shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
23 |
+
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
|
24 |
+
random_tensor.floor_() # binarize
|
25 |
+
output = x.div(keep_prob) * random_tensor
|
26 |
+
return output
|
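The division by keep_prob is what keeps the expected activation unchanged while whole samples are zeroed; a quick check of that property, assuming the drop_path defined above is in scope (drop_prob chosen arbitrarily):

import torch

# Sketch: stochastic depth preserves E[output] ~= E[input] in training mode.
x = torch.ones(10000, 8)
out = drop_path(x, drop_prob=0.5, training=True)
print(out.mean().item())                         # ~1.0 (rows are either 0.0 or 2.0)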
27 |
+
|
28 |
+
|
29 |
+
class DropPath(nn.Module):
|
30 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
31 |
+
|
32 |
+
From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
|
33 |
+
"""
|
34 |
+
|
35 |
+
def __init__(self, drop_prob=None):
|
36 |
+
super(DropPath, self).__init__()
|
37 |
+
self.drop_prob = drop_prob
|
38 |
+
|
39 |
+
def forward(self, x):
|
40 |
+
return drop_path(x, self.drop_prob, self.training)
|
41 |
+
|
42 |
+
|
43 |
+
class Mlp(nn.Module):
|
44 |
+
|
45 |
+
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
46 |
+
super().__init__()
|
47 |
+
out_features = out_features or in_features
|
48 |
+
hidden_features = hidden_features or in_features
|
49 |
+
self.fc1 = nn.Linear(in_features, hidden_features)
|
50 |
+
self.act = act_layer()
|
51 |
+
self.fc2 = nn.Linear(hidden_features, out_features)
|
52 |
+
self.drop = nn.Dropout(drop)
|
53 |
+
|
54 |
+
def forward(self, x):
|
55 |
+
x = self.fc1(x)
|
56 |
+
x = self.act(x)
|
57 |
+
x = self.drop(x)
|
58 |
+
x = self.fc2(x)
|
59 |
+
x = self.drop(x)
|
60 |
+
return x
|
61 |
+
|
62 |
+
|
63 |
+
def window_partition(x, window_size):
|
64 |
+
"""
|
65 |
+
Args:
|
66 |
+
x: (b, h, w, c)
|
67 |
+
window_size (int): window size
|
68 |
+
|
69 |
+
Returns:
|
70 |
+
windows: (num_windows*b, window_size, window_size, c)
|
71 |
+
"""
|
72 |
+
b, h, w, c = x.shape
|
73 |
+
x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
|
74 |
+
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)
|
75 |
+
return windows
|
76 |
+
|
77 |
+
|
78 |
+
def window_reverse(windows, window_size, h, w):
|
79 |
+
"""
|
80 |
+
Args:
|
81 |
+
windows: (num_windows*b, window_size, window_size, c)
|
82 |
+
window_size (int): Window size
|
83 |
+
h (int): Height of image
|
84 |
+
w (int): Width of image
|
85 |
+
|
86 |
+
Returns:
|
87 |
+
x: (b, h, w, c)
|
88 |
+
"""
|
89 |
+
b = int(windows.shape[0] / (h * w / window_size / window_size))
|
90 |
+
x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
|
91 |
+
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
|
92 |
+
return x
|
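window_partition and window_reverse are exact inverses whenever h and w are multiples of the window size; a quick round-trip check (illustrative shapes, assuming the two helpers above are in scope):

import torch

# Sketch: round trip through window_partition / window_reverse.
x = torch.randn(2, 8, 8, 32)                     # (b, h, w, c)
wins = window_partition(x, window_size=4)        # (2 * 2 * 2, 4, 4, 32)
y = window_reverse(wins, window_size=4, h=8, w=8)
print(torch.equal(x, y))                         # True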
93 |
+
|
94 |
+
|
95 |
+
class WindowAttention(nn.Module):
|
96 |
+
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
|
97 |
+
It supports both shifted and non-shifted windows.
|
98 |
+
|
99 |
+
Args:
|
100 |
+
dim (int): Number of input channels.
|
101 |
+
window_size (tuple[int]): The height and width of the window.
|
102 |
+
num_heads (int): Number of attention heads.
|
103 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
104 |
+
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
|
105 |
+
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
|
106 |
+
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
|
107 |
+
"""
|
108 |
+
|
109 |
+
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
|
110 |
+
|
111 |
+
super().__init__()
|
112 |
+
self.dim = dim
|
113 |
+
self.window_size = window_size # Wh, Ww
|
114 |
+
self.num_heads = num_heads
|
115 |
+
head_dim = dim // num_heads
|
116 |
+
self.scale = qk_scale or head_dim**-0.5
|
117 |
+
|
118 |
+
# define a parameter table of relative position bias
|
119 |
+
self.relative_position_bias_table = nn.Parameter(
|
120 |
+
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
121 |
+
|
122 |
+
# get pair-wise relative position index for each token inside the window
|
123 |
+
coords_h = torch.arange(self.window_size[0])
|
124 |
+
coords_w = torch.arange(self.window_size[1])
|
125 |
+
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
126 |
+
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
127 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
128 |
+
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
129 |
+
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
|
130 |
+
relative_coords[:, :, 1] += self.window_size[1] - 1
|
131 |
+
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
|
132 |
+
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
133 |
+
self.register_buffer('relative_position_index', relative_position_index)
|
134 |
+
|
135 |
+
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
136 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
137 |
+
self.proj = nn.Linear(dim, dim)
|
138 |
+
|
139 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
140 |
+
|
141 |
+
trunc_normal_(self.relative_position_bias_table, std=.02)
|
142 |
+
self.softmax = nn.Softmax(dim=-1)
|
143 |
+
|
144 |
+
def forward(self, x, mask=None):
|
145 |
+
"""
|
146 |
+
Args:
|
147 |
+
x: input features with shape of (num_windows*b, n, c)
|
148 |
+
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
149 |
+
"""
|
150 |
+
b_, n, c = x.shape
|
151 |
+
qkv = self.qkv(x).reshape(b_, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
|
152 |
+
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
153 |
+
|
154 |
+
q = q * self.scale
|
155 |
+
attn = (q @ k.transpose(-2, -1))
|
156 |
+
|
157 |
+
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
158 |
+
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
|
159 |
+
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
160 |
+
attn = attn + relative_position_bias.unsqueeze(0)
|
161 |
+
|
162 |
+
if mask is not None:
|
163 |
+
nw = mask.shape[0]
|
164 |
+
attn = attn.view(b_ // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)
|
165 |
+
attn = attn.view(-1, self.num_heads, n, n)
|
166 |
+
attn = self.softmax(attn)
|
167 |
+
else:
|
168 |
+
attn = self.softmax(attn)
|
169 |
+
|
170 |
+
attn = self.attn_drop(attn)
|
171 |
+
|
172 |
+
x = (attn @ v).transpose(1, 2).reshape(b_, n, c)
|
173 |
+
x = self.proj(x)
|
174 |
+
x = self.proj_drop(x)
|
175 |
+
return x
|
176 |
+
|
177 |
+
def extra_repr(self) -> str:
|
178 |
+
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
|
179 |
+
|
180 |
+
def flops(self, n):
|
181 |
+
# calculate flops for 1 window with token length of n
|
182 |
+
flops = 0
|
183 |
+
# qkv = self.qkv(x)
|
184 |
+
flops += n * self.dim * 3 * self.dim
|
185 |
+
# attn = (q @ k.transpose(-2, -1))
|
186 |
+
flops += self.num_heads * n * (self.dim // self.num_heads) * n
|
187 |
+
# x = (attn @ v)
|
188 |
+
flops += self.num_heads * n * n * (self.dim // self.num_heads)
|
189 |
+
# x = self.proj(x)
|
190 |
+
flops += n * self.dim * self.dim
|
191 |
+
return flops
|
192 |
+
|
193 |
+
|
194 |
+
class SwinTransformerBlock(nn.Module):
|
195 |
+
r""" Swin Transformer Block.
|
196 |
+
|
197 |
+
Args:
|
198 |
+
dim (int): Number of input channels.
|
199 |
+
input_resolution (tuple[int]): Input resolution.
|
200 |
+
num_heads (int): Number of attention heads.
|
201 |
+
window_size (int): Window size.
|
202 |
+
shift_size (int): Shift size for SW-MSA.
|
203 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
204 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
205 |
+
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
206 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
207 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
208 |
+
drop_path (float, optional): Stochastic depth rate. Default: 0.0
|
209 |
+
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
|
210 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
211 |
+
"""
|
212 |
+
|
213 |
+
def __init__(self,
|
214 |
+
dim,
|
215 |
+
input_resolution,
|
216 |
+
num_heads,
|
217 |
+
window_size=7,
|
218 |
+
shift_size=0,
|
219 |
+
mlp_ratio=4.,
|
220 |
+
qkv_bias=True,
|
221 |
+
qk_scale=None,
|
222 |
+
drop=0.,
|
223 |
+
attn_drop=0.,
|
224 |
+
drop_path=0.,
|
225 |
+
act_layer=nn.GELU,
|
226 |
+
norm_layer=nn.LayerNorm):
|
227 |
+
super().__init__()
|
228 |
+
self.dim = dim
|
229 |
+
self.input_resolution = input_resolution
|
230 |
+
self.num_heads = num_heads
|
231 |
+
self.window_size = window_size
|
232 |
+
self.shift_size = shift_size
|
233 |
+
self.mlp_ratio = mlp_ratio
|
234 |
+
if min(self.input_resolution) <= self.window_size:
|
235 |
+
# if window size is larger than input resolution, we don't partition windows
|
236 |
+
self.shift_size = 0
|
237 |
+
self.window_size = min(self.input_resolution)
|
238 |
+
assert 0 <= self.shift_size < self.window_size, 'shift_size must be in the range [0, window_size)'
|
239 |
+
|
240 |
+
self.norm1 = norm_layer(dim)
|
241 |
+
self.attn = WindowAttention(
|
242 |
+
dim,
|
243 |
+
window_size=to_2tuple(self.window_size),
|
244 |
+
num_heads=num_heads,
|
245 |
+
qkv_bias=qkv_bias,
|
246 |
+
qk_scale=qk_scale,
|
247 |
+
attn_drop=attn_drop,
|
248 |
+
proj_drop=drop)
|
249 |
+
|
250 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
251 |
+
self.norm2 = norm_layer(dim)
|
252 |
+
mlp_hidden_dim = int(dim * mlp_ratio)
|
253 |
+
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
254 |
+
|
255 |
+
if self.shift_size > 0:
|
256 |
+
attn_mask = self.calculate_mask(self.input_resolution)
|
257 |
+
else:
|
258 |
+
attn_mask = None
|
259 |
+
|
260 |
+
self.register_buffer('attn_mask', attn_mask)
|
261 |
+
|
262 |
+
def calculate_mask(self, x_size):
|
263 |
+
# calculate attention mask for SW-MSA
|
264 |
+
h, w = x_size
|
265 |
+
img_mask = torch.zeros((1, h, w, 1)) # 1 h w 1
|
266 |
+
h_slices = (slice(0, -self.window_size), slice(-self.window_size,
|
267 |
+
-self.shift_size), slice(-self.shift_size, None))
|
268 |
+
w_slices = (slice(0, -self.window_size), slice(-self.window_size,
|
269 |
+
-self.shift_size), slice(-self.shift_size, None))
|
270 |
+
cnt = 0
|
271 |
+
for h in h_slices:
|
272 |
+
for w in w_slices:
|
273 |
+
img_mask[:, h, w, :] = cnt
|
274 |
+
cnt += 1
|
275 |
+
|
276 |
+
mask_windows = window_partition(img_mask, self.window_size) # nw, window_size, window_size, 1
|
277 |
+
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
278 |
+
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
279 |
+
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
|
280 |
+
|
281 |
+
return attn_mask
|
282 |
+
|
283 |
+
def forward(self, x, x_size):
|
284 |
+
h, w = x_size
|
285 |
+
b, _, c = x.shape
|
286 |
+
# assert seq_len == h * w, "input feature has wrong size"
|
287 |
+
|
288 |
+
shortcut = x
|
289 |
+
x = self.norm1(x)
|
290 |
+
x = x.view(b, h, w, c)
|
291 |
+
|
292 |
+
# cyclic shift
|
293 |
+
if self.shift_size > 0:
|
294 |
+
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
|
295 |
+
else:
|
296 |
+
shifted_x = x
|
297 |
+
|
298 |
+
# partition windows
|
299 |
+
x_windows = window_partition(shifted_x, self.window_size) # nw*b, window_size, window_size, c
|
300 |
+
x_windows = x_windows.view(-1, self.window_size * self.window_size, c) # nw*b, window_size*window_size, c
|
301 |
+
|
302 |
+
# W-MSA/SW-MSA (to be compatible with testing on images whose shapes are a multiple of the window size)
|
303 |
+
if self.input_resolution == x_size:
|
304 |
+
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nw*b, window_size*window_size, c
|
305 |
+
else:
|
306 |
+
attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
|
307 |
+
|
308 |
+
# merge windows
|
309 |
+
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c)
|
310 |
+
shifted_x = window_reverse(attn_windows, self.window_size, h, w) # b h' w' c
|
311 |
+
|
312 |
+
# reverse cyclic shift
|
313 |
+
if self.shift_size > 0:
|
314 |
+
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
|
315 |
+
else:
|
316 |
+
x = shifted_x
|
317 |
+
x = x.view(b, h * w, c)
|
318 |
+
|
319 |
+
# FFN
|
320 |
+
x = shortcut + self.drop_path(x)
|
321 |
+
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
322 |
+
|
323 |
+
return x
|
324 |
+
|
325 |
+
def extra_repr(self) -> str:
|
326 |
+
return (f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, '
|
327 |
+
f'window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}')
|
328 |
+
|
329 |
+
def flops(self):
|
330 |
+
flops = 0
|
331 |
+
h, w = self.input_resolution
|
332 |
+
# norm1
|
333 |
+
flops += self.dim * h * w
|
334 |
+
# W-MSA/SW-MSA
|
335 |
+
nw = h * w / self.window_size / self.window_size
|
336 |
+
flops += nw * self.attn.flops(self.window_size * self.window_size)
|
337 |
+
# mlp
|
338 |
+
flops += 2 * h * w * self.dim * self.dim * self.mlp_ratio
|
339 |
+
# norm2
|
340 |
+
flops += self.dim * h * w
|
341 |
+
return flops
|
342 |
+
|
343 |
+
|
344 |
+
class PatchMerging(nn.Module):
|
345 |
+
r""" Patch Merging Layer.
|
346 |
+
|
347 |
+
Args:
|
348 |
+
input_resolution (tuple[int]): Resolution of input feature.
|
349 |
+
dim (int): Number of input channels.
|
350 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
351 |
+
"""
|
352 |
+
|
353 |
+
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
354 |
+
super().__init__()
|
355 |
+
self.input_resolution = input_resolution
|
356 |
+
self.dim = dim
|
357 |
+
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
358 |
+
self.norm = norm_layer(4 * dim)
|
359 |
+
|
360 |
+
def forward(self, x):
|
361 |
+
"""
|
362 |
+
x: b, h*w, c
|
363 |
+
"""
|
364 |
+
h, w = self.input_resolution
|
365 |
+
b, seq_len, c = x.shape
|
366 |
+
assert seq_len == h * w, 'input feature has wrong size'
|
367 |
+
assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) is not even.'
|
368 |
+
|
369 |
+
x = x.view(b, h, w, c)
|
370 |
+
|
371 |
+
x0 = x[:, 0::2, 0::2, :] # b h/2 w/2 c
|
372 |
+
x1 = x[:, 1::2, 0::2, :] # b h/2 w/2 c
|
373 |
+
x2 = x[:, 0::2, 1::2, :] # b h/2 w/2 c
|
374 |
+
x3 = x[:, 1::2, 1::2, :] # b h/2 w/2 c
|
375 |
+
x = torch.cat([x0, x1, x2, x3], -1) # b h/2 w/2 4*c
|
376 |
+
x = x.view(b, -1, 4 * c) # b h/2*w/2 4*c
|
377 |
+
|
378 |
+
x = self.norm(x)
|
379 |
+
x = self.reduction(x)
|
380 |
+
|
381 |
+
return x
|
382 |
+
|
383 |
+
def extra_repr(self) -> str:
|
384 |
+
return f'input_resolution={self.input_resolution}, dim={self.dim}'
|
385 |
+
|
386 |
+
def flops(self):
|
387 |
+
h, w = self.input_resolution
|
388 |
+
flops = h * w * self.dim
|
389 |
+
flops += (h // 2) * (w // 2) * 4 * self.dim * 2 * self.dim
|
390 |
+
return flops
|
391 |
+
|
392 |
+
|
393 |
+
class BasicLayer(nn.Module):
|
394 |
+
""" A basic Swin Transformer layer for one stage.
|
395 |
+
|
396 |
+
Args:
|
397 |
+
dim (int): Number of input channels.
|
398 |
+
input_resolution (tuple[int]): Input resolution.
|
399 |
+
depth (int): Number of blocks.
|
400 |
+
num_heads (int): Number of attention heads.
|
401 |
+
window_size (int): Local window size.
|
402 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
403 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
404 |
+
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
405 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
406 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
407 |
+
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
408 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
409 |
+
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
410 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
411 |
+
"""
|
412 |
+
|
413 |
+
def __init__(self,
|
414 |
+
dim,
|
415 |
+
input_resolution,
|
416 |
+
depth,
|
417 |
+
num_heads,
|
418 |
+
window_size,
|
419 |
+
mlp_ratio=4.,
|
420 |
+
qkv_bias=True,
|
421 |
+
qk_scale=None,
|
422 |
+
drop=0.,
|
423 |
+
attn_drop=0.,
|
424 |
+
drop_path=0.,
|
425 |
+
norm_layer=nn.LayerNorm,
|
426 |
+
downsample=None,
|
427 |
+
use_checkpoint=False):
|
428 |
+
|
429 |
+
super().__init__()
|
430 |
+
self.dim = dim
|
431 |
+
self.input_resolution = input_resolution
|
432 |
+
self.depth = depth
|
433 |
+
self.use_checkpoint = use_checkpoint
|
434 |
+
|
435 |
+
# build blocks
|
436 |
+
self.blocks = nn.ModuleList([
|
437 |
+
SwinTransformerBlock(
|
438 |
+
dim=dim,
|
439 |
+
input_resolution=input_resolution,
|
440 |
+
num_heads=num_heads,
|
441 |
+
window_size=window_size,
|
442 |
+
shift_size=0 if (i % 2 == 0) else window_size // 2,
|
443 |
+
mlp_ratio=mlp_ratio,
|
444 |
+
qkv_bias=qkv_bias,
|
445 |
+
qk_scale=qk_scale,
|
446 |
+
drop=drop,
|
447 |
+
attn_drop=attn_drop,
|
448 |
+
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
|
449 |
+
norm_layer=norm_layer) for i in range(depth)
|
450 |
+
])
|
451 |
+
|
452 |
+
# patch merging layer
|
453 |
+
if downsample is not None:
|
454 |
+
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
|
455 |
+
else:
|
456 |
+
self.downsample = None
|
457 |
+
|
458 |
+
def forward(self, x, x_size):
|
459 |
+
for blk in self.blocks:
|
460 |
+
if self.use_checkpoint:
|
461 |
+
x = checkpoint.checkpoint(blk, x, x_size)
|
462 |
+
else:
|
463 |
+
x = blk(x, x_size)
|
464 |
+
if self.downsample is not None:
|
465 |
+
x = self.downsample(x)
|
466 |
+
return x
|
467 |
+
|
468 |
+
def extra_repr(self) -> str:
|
469 |
+
return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
|
470 |
+
|
471 |
+
def flops(self):
|
472 |
+
flops = 0
|
473 |
+
for blk in self.blocks:
|
474 |
+
flops += blk.flops()
|
475 |
+
if self.downsample is not None:
|
476 |
+
flops += self.downsample.flops()
|
477 |
+
return flops
|
478 |
+
|
479 |
+
|
480 |
+
class RSTB(nn.Module):
|
481 |
+
"""Residual Swin Transformer Block (RSTB).
|
482 |
+
|
483 |
+
Args:
|
484 |
+
dim (int): Number of input channels.
|
485 |
+
input_resolution (tuple[int]): Input resolution.
|
486 |
+
depth (int): Number of blocks.
|
487 |
+
num_heads (int): Number of attention heads.
|
488 |
+
window_size (int): Local window size.
|
489 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
490 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
491 |
+
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
492 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
493 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
494 |
+
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
495 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
496 |
+
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
497 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
498 |
+
img_size: Input image size.
|
499 |
+
patch_size: Patch size.
|
500 |
+
resi_connection: The convolutional block before residual connection.
|
501 |
+
"""
|
502 |
+
|
503 |
+
def __init__(self,
|
504 |
+
dim,
|
505 |
+
input_resolution,
|
506 |
+
depth,
|
507 |
+
num_heads,
|
508 |
+
window_size,
|
509 |
+
mlp_ratio=4.,
|
510 |
+
qkv_bias=True,
|
511 |
+
qk_scale=None,
|
512 |
+
drop=0.,
|
513 |
+
attn_drop=0.,
|
514 |
+
drop_path=0.,
|
515 |
+
norm_layer=nn.LayerNorm,
|
516 |
+
downsample=None,
|
517 |
+
use_checkpoint=False,
|
518 |
+
img_size=224,
|
519 |
+
patch_size=4,
|
520 |
+
resi_connection='1conv'):
|
521 |
+
super(RSTB, self).__init__()
|
522 |
+
|
523 |
+
self.dim = dim
|
524 |
+
self.input_resolution = input_resolution
|
525 |
+
|
526 |
+
self.residual_group = BasicLayer(
|
527 |
+
dim=dim,
|
528 |
+
input_resolution=input_resolution,
|
529 |
+
depth=depth,
|
530 |
+
num_heads=num_heads,
|
531 |
+
window_size=window_size,
|
532 |
+
mlp_ratio=mlp_ratio,
|
533 |
+
qkv_bias=qkv_bias,
|
534 |
+
qk_scale=qk_scale,
|
535 |
+
drop=drop,
|
536 |
+
attn_drop=attn_drop,
|
537 |
+
drop_path=drop_path,
|
538 |
+
norm_layer=norm_layer,
|
539 |
+
downsample=downsample,
|
540 |
+
use_checkpoint=use_checkpoint)
|
541 |
+
|
542 |
+
if resi_connection == '1conv':
|
543 |
+
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
|
544 |
+
elif resi_connection == '3conv':
|
545 |
+
# to save parameters and memory
|
546 |
+
self.conv = nn.Sequential(
|
547 |
+
                nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim, 3, 1, 1))

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)

        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)

    def forward(self, x, x_size):
        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x

    def flops(self):
        flops = 0
        flops += self.residual_group.flops()
        h, w = self.input_resolution
        flops += h * w * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()

        return flops


class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)  # b Ph*Pw c
        if self.norm is not None:
            x = self.norm(x)
        return x

    def flops(self):
        flops = 0
        h, w = self.img_size
        if self.norm is not None:
            flops += h * w * self.embed_dim
        return flops


class PatchUnEmbed(nn.Module):
    r""" Image to Patch Unembedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        x = x.transpose(1, 2).view(x.shape[0], self.embed_dim, x_size[0], x_size[1])  # b Ph*Pw c
        return x

    def flops(self):
        flops = 0
        return flops


class Upsample(nn.Sequential):
    """Upsample module.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                m.append(nn.PixelShuffle(2))
        elif scale == 3:
            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            m.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)


class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
       Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.

    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        self.num_feat = num_feat
        self.input_resolution = input_resolution
        m = []
        m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
        m.append(nn.PixelShuffle(scale))
        super(UpsampleOneStep, self).__init__(*m)

    def flops(self):
        h, w = self.input_resolution
        flops = h * w * self.num_feat * 3 * 9
        return flops


@ARCH_REGISTRY.register()
class SwinIR(nn.Module):
    r""" SwinIR
        A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.

    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
        img_range: Image range. 1. or 255.
        upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
    """

    def __init__(self,
                 img_size=64,
                 patch_size=1,
                 in_chans=3,
                 embed_dim=96,
                 depths=(6, 6, 6, 6),
                 num_heads=(6, 6, 6, 6),
                 window_size=7,
                 mlp_ratio=4.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm,
                 ape=False,
                 patch_norm=True,
                 use_checkpoint=False,
                 upscale=2,
                 img_range=1.,
                 upsampler='',
                 resi_connection='1conv',
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler

        # ------------------------- 1, shallow feature extraction ------------------------- #
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)

        # ------------------------- 2, deep feature extraction ------------------------- #
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(
                dim=embed_dim,
                input_resolution=(patches_resolution[0], patches_resolution[1]),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=self.mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                norm_layer=norm_layer,
                downsample=None,
                use_checkpoint=use_checkpoint,
                img_size=img_size,
                patch_size=patch_size,
                resi_connection=resi_connection)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)

        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(
                nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))

        # ------------------------- 3, high quality image reconstruction ------------------------- #
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def forward_features(self, x):
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x, x_size)

        x = self.norm(x)  # b seq_len c
        x = self.patch_unembed(x, x_size)

        return x

    def forward(self, x):
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range

        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)

        x = x / self.img_range + self.mean

        return x

    def flops(self):
        flops = 0
        h, w = self.patches_resolution
        flops += h * w * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for layer in self.layers:
            flops += layer.flops()
        flops += h * w * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()
        return flops


if __name__ == '__main__':
    upscale = 4
    window_size = 8
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    model = SwinIR(
        upscale=2,
        img_size=(height, width),
        window_size=window_size,
        img_range=1.,
        depths=[6, 6, 6, 6],
        embed_dim=60,
        num_heads=[6, 6, 6, 6],
        mlp_ratio=2,
        upsampler='pixelshuffledirect')
    print(model)
    print(height, width, model.flops() / 1e9)

    x = torch.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
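A minimal usage sketch for the SwinIR class added above (not part of the diff itself); the hyper-parameters simply mirror the lightweight configuration from the __main__ block, switched to x4, and the input size is assumed to be a multiple of window_size:

import torch
from basicsr.archs.swinir_arch import SwinIR

# lightweight x4 SR configuration ('pixelshuffledirect' upsampler)
model = SwinIR(upscale=4, img_size=64, window_size=8, depths=[6, 6, 6, 6],
               embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2,
               upsampler='pixelshuffledirect').eval()
with torch.no_grad():
    sr = model(torch.rand(1, 3, 64, 64))  # -> (1, 3, 256, 256)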
basicsr/archs/tof_arch.py
ADDED
@@ -0,0 +1,172 @@
import torch
from torch import nn as nn
from torch.nn import functional as F

from basicsr.utils.registry import ARCH_REGISTRY
from .arch_util import flow_warp


class BasicModule(nn.Module):
    """Basic module of SPyNet.

    Note that unlike the architecture in spynet_arch.py, the basic module
    here contains batch normalization.
    """

    def __init__(self):
        super(BasicModule, self).__init__()
        self.basic_module = nn.Sequential(
            nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(32), nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(32), nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm2d(16), nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3))

    def forward(self, tensor_input):
        """
        Args:
            tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
                8 channels contain:
                [reference image (3), neighbor image (3), initial flow (2)].

        Returns:
            Tensor: Estimated flow with shape (b, 2, h, w)
        """
        return self.basic_module(tensor_input)


class SPyNetTOF(nn.Module):
    """SPyNet architecture for TOF.

    Note that this implementation is specifically for TOFlow. Please use
    spynet_arch.py for general use. They differ in the following aspects:
        1. The basic modules here contain BatchNorm.
        2. Normalization and denormalization are not done here, as
            they are done in TOFlow.
    Paper:
        Optical Flow Estimation using a Spatial Pyramid Network
    Code reference:
        https://github.com/Coldog2333/pytoflow

    Args:
        load_path (str): Path for pretrained SPyNet. Default: None.
    """

    def __init__(self, load_path=None):
        super(SPyNetTOF, self).__init__()

        self.basic_module = nn.ModuleList([BasicModule() for _ in range(4)])
        if load_path:
            self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])

    def forward(self, ref, supp):
        """
        Args:
            ref (Tensor): Reference image with shape of (b, 3, h, w).
            supp: The supporting image to be warped: (b, 3, h, w).

        Returns:
            Tensor: Estimated optical flow: (b, 2, h, w).
        """
        num_batches, _, h, w = ref.size()
        ref = [ref]
        supp = [supp]

        # generate downsampled frames
        for _ in range(3):
            ref.insert(0, F.avg_pool2d(input=ref[0], kernel_size=2, stride=2, count_include_pad=False))
            supp.insert(0, F.avg_pool2d(input=supp[0], kernel_size=2, stride=2, count_include_pad=False))

        # flow computation
        flow = ref[0].new_zeros(num_batches, 2, h // 16, w // 16)
        for i in range(4):
            flow_up = F.interpolate(input=flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
            flow = flow_up + self.basic_module[i](
                torch.cat([ref[i], flow_warp(supp[i], flow_up.permute(0, 2, 3, 1)), flow_up], 1))
        return flow


@ARCH_REGISTRY.register()
class TOFlow(nn.Module):
    """PyTorch implementation of TOFlow.

    In TOFlow, the LR frames are pre-upsampled and have the same size with
    the GT frames.
    Paper:
        Xue et al., Video Enhancement with Task-Oriented Flow, IJCV 2018
    Code reference:
        1. https://github.com/anchen1011/toflow
        2. https://github.com/Coldog2333/pytoflow

    Args:
        adapt_official_weights (bool): Whether to adapt the weights translated
            from the official implementation. Set to false if you want to
            train from scratch. Default: False
    """

    def __init__(self, adapt_official_weights=False):
        super(TOFlow, self).__init__()
        self.adapt_official_weights = adapt_official_weights
        self.ref_idx = 0 if adapt_official_weights else 3

        self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        # flow estimation module
        self.spynet = SPyNetTOF()

        # reconstruction module
        self.conv_1 = nn.Conv2d(3 * 7, 64, 9, 1, 4)
        self.conv_2 = nn.Conv2d(64, 64, 9, 1, 4)
        self.conv_3 = nn.Conv2d(64, 64, 1)
        self.conv_4 = nn.Conv2d(64, 3, 1)

        # activation function
        self.relu = nn.ReLU(inplace=True)

    def normalize(self, img):
        return (img - self.mean) / self.std

    def denormalize(self, img):
        return img * self.std + self.mean

    def forward(self, lrs):
        """
        Args:
            lrs: Input lr frames: (b, 7, 3, h, w).

        Returns:
            Tensor: SR frame: (b, 3, h, w).
        """
        # In the official implementation, the 0-th frame is the reference frame
        if self.adapt_official_weights:
            lrs = lrs[:, [3, 0, 1, 2, 4, 5, 6], :, :, :]

        num_batches, num_lrs, _, h, w = lrs.size()

        lrs = self.normalize(lrs.view(-1, 3, h, w))
        lrs = lrs.view(num_batches, num_lrs, 3, h, w)

        lr_ref = lrs[:, self.ref_idx, :, :, :]
        lr_aligned = []
        for i in range(7):  # 7 frames
            if i == self.ref_idx:
                lr_aligned.append(lr_ref)
            else:
                lr_supp = lrs[:, i, :, :, :]
                flow = self.spynet(lr_ref, lr_supp)
                lr_aligned.append(flow_warp(lr_supp, flow.permute(0, 2, 3, 1)))

        # reconstruction
        hr = torch.stack(lr_aligned, dim=1)
        hr = hr.view(num_batches, -1, h, w)
        hr = self.relu(self.conv_1(hr))
        hr = self.relu(self.conv_2(hr))
        hr = self.relu(self.conv_3(hr))
        hr = self.conv_4(hr) + lr_ref

        return self.denormalize(hr)
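A hedged sketch (not part of the diff) of how the registered TOFlow architecture is typically called: it expects seven pre-upsampled LR frames whose height and width are divisible by 16 (because of the 4-level SPyNet pyramid) and returns the restored center frame.

import torch
from basicsr.archs.tof_arch import TOFlow

model = TOFlow(adapt_official_weights=False).eval()
lrs = torch.rand(1, 7, 3, 64, 64)  # 7 bicubically pre-upsampled frames
with torch.no_grad():
    out = model(lrs)  # -> (1, 3, 64, 64)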
basicsr/archs/vgg_arch.py
ADDED
@@ -0,0 +1,161 @@
import os
import torch
from collections import OrderedDict
from torch import nn as nn
from torchvision.models import vgg as vgg

from basicsr.utils.registry import ARCH_REGISTRY

VGG_PRETRAIN_PATH = 'experiments/pretrained_models/vgg19-dcbb9e9d.pth'
NAMES = {
    'vgg11': [
        'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2',
        'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2',
        'pool5'
    ],
    'vgg13': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5'
    ],
    'vgg16': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2',
        'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
        'pool5'
    ],
    'vgg19': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
        'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
        'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
    ]
}


def insert_bn(names):
    """Insert bn layer after each conv.

    Args:
        names (list): The list of layer names.

    Returns:
        list: The list of layer names with bn layers.
    """
    names_bn = []
    for name in names:
        names_bn.append(name)
        if 'conv' in name:
            position = name.replace('conv', '')
            names_bn.append('bn' + position)
    return names_bn


@ARCH_REGISTRY.register()
class VGGFeatureExtractor(nn.Module):
    """VGG network for feature extraction.

    In this implementation, we allow users to choose whether use normalization
    in the input feature and the type of vgg network. Note that the pretrained
    path must fit the vgg type.

    Args:
        layer_name_list (list[str]): Forward function returns the corresponding
            features according to the layer_name_list.
            Example: {'relu1_1', 'relu2_1', 'relu3_1'}.
        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image. Importantly,
            the input feature must in the range [0, 1]. Default: True.
        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
            Default: False.
        requires_grad (bool): If true, the parameters of VGG network will be
            optimized. Default: False.
        remove_pooling (bool): If true, the max pooling operations in VGG net
            will be removed. Default: False.
        pooling_stride (int): The stride of max pooling operation. Default: 2.
    """

    def __init__(self,
                 layer_name_list,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 range_norm=False,
                 requires_grad=False,
                 remove_pooling=False,
                 pooling_stride=2):
        super(VGGFeatureExtractor, self).__init__()

        self.layer_name_list = layer_name_list
        self.use_input_norm = use_input_norm
        self.range_norm = range_norm

        self.names = NAMES[vgg_type.replace('_bn', '')]
        if 'bn' in vgg_type:
            self.names = insert_bn(self.names)

        # only borrow layers that will be used to avoid unused params
        max_idx = 0
        for v in layer_name_list:
            idx = self.names.index(v)
            if idx > max_idx:
                max_idx = idx

        if os.path.exists(VGG_PRETRAIN_PATH):
            vgg_net = getattr(vgg, vgg_type)(pretrained=False)
            state_dict = torch.load(VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage)
            vgg_net.load_state_dict(state_dict)
        else:
            vgg_net = getattr(vgg, vgg_type)(pretrained=True)

        features = vgg_net.features[:max_idx + 1]

        modified_net = OrderedDict()
        for k, v in zip(self.names, features):
            if 'pool' in k:
                # if remove_pooling is true, pooling operation will be removed
                if remove_pooling:
                    continue
                else:
                    # in some cases, we may want to change the default stride
                    modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)
            else:
                modified_net[k] = v

        self.vgg_net = nn.Sequential(modified_net)

        if not requires_grad:
            self.vgg_net.eval()
            for param in self.parameters():
                param.requires_grad = False
        else:
            self.vgg_net.train()
            for param in self.parameters():
                param.requires_grad = True

        if self.use_input_norm:
            # the mean is for image with range [0, 1]
            self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
            self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.range_norm:
            x = (x + 1) / 2
        if self.use_input_norm:
            x = (x - self.mean) / self.std

        output = {}
        for key, layer in self.vgg_net._modules.items():
            x = layer(x)
            if key in self.layer_name_list:
                output[key] = x.clone()

        return output
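A short, hedged example (not part of the diff) of extracting perceptual features with VGGFeatureExtractor, as the perceptual loss does; torchvision's VGG19 weights are downloaded unless a file already exists at VGG_PRETRAIN_PATH.

import torch
from basicsr.archs.vgg_arch import VGGFeatureExtractor

extractor = VGGFeatureExtractor(layer_name_list=['relu1_1', 'relu2_1', 'relu3_1'], vgg_type='vgg19')
with torch.no_grad():
    feats = extractor(torch.rand(1, 3, 128, 128))  # dict: layer name -> feature map
print({k: v.shape for k, v in feats.items()})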
basicsr/data/__init__.py
ADDED
@@ -0,0 +1,101 @@
import importlib
import numpy as np
import random
import torch
import torch.utils.data
from copy import deepcopy
from functools import partial
from os import path as osp

from basicsr.data.prefetch_dataloader import PrefetchDataLoader
from basicsr.utils import get_root_logger, scandir
from basicsr.utils.dist_util import get_dist_info
from basicsr.utils.registry import DATASET_REGISTRY

__all__ = ['build_dataset', 'build_dataloader']

# automatically scan and import dataset modules for registry
# scan all the files under the data folder with '_dataset' in file names
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
# import all the dataset modules
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]


def build_dataset(dataset_opt):
    """Build dataset from options.

    Args:
        dataset_opt (dict): Configuration for dataset. It must contain:
            name (str): Dataset name.
            type (str): Dataset type.
    """
    dataset_opt = deepcopy(dataset_opt)
    dataset = DATASET_REGISTRY.get(dataset_opt['type'])(dataset_opt)
    logger = get_root_logger()
    logger.info(f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} is built.')
    return dataset


def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
    """Build dataloader.

    Args:
        dataset (torch.utils.data.Dataset): Dataset.
        dataset_opt (dict): Dataset options. It contains the following keys:
            phase (str): 'train' or 'val'.
            num_worker_per_gpu (int): Number of workers for each GPU.
            batch_size_per_gpu (int): Training batch size for each GPU.
        num_gpu (int): Number of GPUs. Used only in the train phase.
            Default: 1.
        dist (bool): Whether in distributed training. Used only in the train
            phase. Default: False.
        sampler (torch.utils.data.sampler): Data sampler. Default: None.
        seed (int | None): Seed. Default: None
    """
    phase = dataset_opt['phase']
    rank, _ = get_dist_info()
    if phase == 'train':
        if dist:  # distributed training
            batch_size = dataset_opt['batch_size_per_gpu']
            num_workers = dataset_opt['num_worker_per_gpu']
        else:  # non-distributed training
            multiplier = 1 if num_gpu == 0 else num_gpu
            batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
            num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
        dataloader_args = dict(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            sampler=sampler,
            drop_last=True)
        if sampler is None:
            dataloader_args['shuffle'] = True
        dataloader_args['worker_init_fn'] = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None
    elif phase in ['val', 'test']:  # validation
        dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        raise ValueError(f"Wrong dataset phase: {phase}. Supported ones are 'train', 'val' and 'test'.")

    dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
    dataloader_args['persistent_workers'] = dataset_opt.get('persistent_workers', False)

    prefetch_mode = dataset_opt.get('prefetch_mode')
    if prefetch_mode == 'cpu':  # CPUPrefetcher
        num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
        logger = get_root_logger()
        logger.info(f'Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}')
        return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
    else:
        # prefetch_mode=None: Normal dataloader
        # prefetch_mode='cuda': dataloader for CUDAPrefetcher
        return torch.utils.data.DataLoader(**dataloader_args)


def worker_init_fn(worker_id, num_workers, rank, seed):
    # Set the worker seed to num_workers * rank + worker_id + seed
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
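A hedged sketch (not part of the diff) of wiring build_dataset and build_dataloader together. The option keys follow the docstrings above; the PairedImageDataset type and its dataroot/gt_size values are illustrative assumptions, not taken from this commit.

from basicsr.data import build_dataset, build_dataloader

dataset_opt = {
    'name': 'demo', 'type': 'PairedImageDataset', 'phase': 'train',
    'dataroot_gt': 'datasets/demo/gt', 'dataroot_lq': 'datasets/demo/lq',  # hypothetical paths
    'io_backend': {'type': 'disk'}, 'scale': 4, 'gt_size': 128,
    'use_hflip': True, 'use_rot': True, 'filename_tmpl': '{}',
    'batch_size_per_gpu': 4, 'num_worker_per_gpu': 2,
}
train_set = build_dataset(dataset_opt)
train_loader = build_dataloader(train_set, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=0)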
basicsr/data/data_sampler.py
ADDED
@@ -0,0 +1,48 @@
import math
import torch
from torch.utils.data.sampler import Sampler


class EnlargedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    Modified from torch.utils.data.distributed.DistributedSampler
    Support enlarging the dataset for iteration-based training, for saving
    time when restart the dataloader after each epoch

    Args:
        dataset (torch.utils.data.Dataset): Dataset used for sampling.
        num_replicas (int | None): Number of processes participating in
            the training. It is usually the world_size.
        rank (int | None): Rank of the current process within num_replicas.
        ratio (int): Enlarging ratio. Default: 1.
    """

    def __init__(self, dataset, num_replicas, rank, ratio=1):
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(self.total_size, generator=g).tolist()

        dataset_size = len(self.dataset)
        indices = [v % dataset_size for v in indices]

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
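A small, hedged illustration (not part of the diff) of what EnlargedSampler produces; a plain Python list stands in for a dataset, since only len() is needed here.

from basicsr.data.data_sampler import EnlargedSampler

dataset = list(range(10))            # stand-in dataset of length 10
sampler = EnlargedSampler(dataset, num_replicas=2, rank=0, ratio=3)
sampler.set_epoch(0)                 # reseeds the deterministic shuffle
print(len(sampler))                  # 15 = ceil(10 * 3 / 2) indices for this replica
print(list(iter(sampler)))           # each index is taken modulo len(dataset)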
basicsr/data/data_util.py
ADDED
@@ -0,0 +1,313 @@
import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F

from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir


def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False):
    """Read a sequence of images from a given folder path.

    Args:
        path (list[str] | str): List of image paths or image folder path.
        require_mod_crop (bool): Require mod crop for each image.
            Default: False.
        scale (int): Scale factor for mod_crop. Default: 1.
        return_imgname(bool): Whether return image names. Default False.

    Returns:
        Tensor: size (t, c, h, w), RGB, [0, 1].
        list[str]: Returned image name list.
    """
    if isinstance(path, list):
        img_paths = path
    else:
        img_paths = sorted(list(scandir(path, full_path=True)))
    imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths]

    if require_mod_crop:
        imgs = [mod_crop(img, scale) for img in imgs]
    imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
    imgs = torch.stack(imgs, dim=0)

    if return_imgname:
        imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths]
        return imgs, imgnames
    else:
        return imgs


def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection'):
    """Generate an index list for reading `num_frames` frames from a sequence
    of images.

    Args:
        crt_idx (int): Current center index.
        max_frame_num (int): Max number of the sequence of images (from 1).
        num_frames (int): Reading num_frames frames.
        padding (str): Padding mode, one of
            'replicate' | 'reflection' | 'reflection_circle' | 'circle'
            Examples: current_idx = 0, num_frames = 5
            The generated frame indices under different padding mode:
            replicate: [0, 0, 0, 1, 2]
            reflection: [2, 1, 0, 1, 2]
            reflection_circle: [4, 3, 0, 1, 2]
            circle: [3, 4, 0, 1, 2]

    Returns:
        list[int]: A list of indices.
    """
    assert num_frames % 2 == 1, 'num_frames should be an odd number.'
    assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.'

    max_frame_num = max_frame_num - 1  # start from 0
    num_pad = num_frames // 2

    indices = []
    for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
        if i < 0:
            if padding == 'replicate':
                pad_idx = 0
            elif padding == 'reflection':
                pad_idx = -i
            elif padding == 'reflection_circle':
                pad_idx = crt_idx + num_pad - i
            else:
                pad_idx = num_frames + i
        elif i > max_frame_num:
            if padding == 'replicate':
                pad_idx = max_frame_num
            elif padding == 'reflection':
                pad_idx = max_frame_num * 2 - i
            elif padding == 'reflection_circle':
                pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
            else:
                pad_idx = i - num_frames
        else:
            pad_idx = i
        indices.append(pad_idx)
    return indices


def paired_paths_from_lmdb(folders, keys):
    """Generate paired paths from lmdb files.

    Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:

    lq.lmdb
    ├── data.mdb
    ├── lock.mdb
    ├── meta_info.txt

    The data.mdb and lock.mdb are standard lmdb files and you can refer to
    https://lmdb.readthedocs.io/en/release/ for more details.

    The meta_info.txt is a specified txt file to record the meta information
    of our datasets. It will be automatically created when preparing
    datasets by our provided dataset tools.
    Each line in the txt file records
    1)image name (with extension),
    2)image shape,
    3)compression level, separated by a white space.
    Example: `baboon.png (120,125,3) 1`

    We use the image name without extension as the lmdb key.
    Note that we use the same key for the corresponding lq and gt images.

    Args:
        folders (list[str]): A list of folder path. The order of list should
            be [input_folder, gt_folder].
        keys (list[str]): A list of keys identifying folders. The order should
            be in consistent with folders, e.g., ['lq', 'gt'].
            Note that this key is different from lmdb keys.

    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys

    if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')):
        raise ValueError(f'{input_key} folder and {gt_key} folder should both in lmdb '
                         f'formats. But received {input_key}: {input_folder}; '
                         f'{gt_key}: {gt_folder}')
    # ensure that the two meta_info files are the same
    with open(osp.join(input_folder, 'meta_info.txt')) as fin:
        input_lmdb_keys = [line.split('.')[0] for line in fin]
    with open(osp.join(gt_folder, 'meta_info.txt')) as fin:
        gt_lmdb_keys = [line.split('.')[0] for line in fin]
    if set(input_lmdb_keys) != set(gt_lmdb_keys):
        raise ValueError(f'Keys in {input_key}_folder and {gt_key}_folder are different.')
    else:
        paths = []
        for lmdb_key in sorted(input_lmdb_keys):
            paths.append(dict([(f'{input_key}_path', lmdb_key), (f'{gt_key}_path', lmdb_key)]))
        return paths


def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl):
    """Generate paired paths from an meta information file.

    Each line in the meta information file contains the image names and
    image shape (usually for gt), separated by a white space.

    Example of an meta information file:
    ```
    0001_s001.png (480,480,3)
    0001_s002.png (480,480,3)
    ```

    Args:
        folders (list[str]): A list of folder path. The order of list should
            be [input_folder, gt_folder].
        keys (list[str]): A list of keys identifying folders. The order should
            be in consistent with folders, e.g., ['lq', 'gt'].
        meta_info_file (str): Path to the meta information file.
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Usually the filename_tmpl is
            for files in the input folder.

    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys

    with open(meta_info_file, 'r') as fin:
        gt_names = [line.strip().split(' ')[0] for line in fin]

    paths = []
    for gt_name in gt_names:
        basename, ext = osp.splitext(osp.basename(gt_name))
        input_name = f'{filename_tmpl.format(basename)}{ext}'
        input_path = osp.join(input_folder, input_name)
        gt_path = osp.join(gt_folder, gt_name)
        paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)]))
    return paths


def paired_paths_from_folder(folders, keys, filename_tmpl):
    """Generate paired paths from folders.

    Args:
        folders (list[str]): A list of folder path. The order of list should
            be [input_folder, gt_folder].
        keys (list[str]): A list of keys identifying folders. The order should
            be in consistent with folders, e.g., ['lq', 'gt'].
        filename_tmpl (str): Template for each filename. Note that the
            template excludes the file extension. Usually the filename_tmpl is
            for files in the input folder.

    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys

    input_paths = list(scandir(input_folder))
    gt_paths = list(scandir(gt_folder))
    assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: '
                                               f'{len(input_paths)}, {len(gt_paths)}.')
    paths = []
    for gt_path in gt_paths:
        basename, ext = osp.splitext(osp.basename(gt_path))
        input_name = f'{filename_tmpl.format(basename)}{ext}'
        input_path = osp.join(input_folder, input_name)
        assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.'
        gt_path = osp.join(gt_folder, gt_path)
        paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)]))
    return paths


def paths_from_folder(folder):
    """Generate paths from folder.

    Args:
        folder (str): Folder path.

    Returns:
        list[str]: Returned path list.
    """

    paths = list(scandir(folder))
    paths = [osp.join(folder, path) for path in paths]
    return paths


def paths_from_lmdb(folder):
    """Generate paths from lmdb.

    Args:
        folder (str): Folder path.

    Returns:
        list[str]: Returned path list.
    """
    if not folder.endswith('.lmdb'):
        raise ValueError(f'Folder {folder}folder should in lmdb format.')
    with open(osp.join(folder, 'meta_info.txt')) as fin:
        paths = [line.split('.')[0] for line in fin]
    return paths


def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
    """Generate Gaussian kernel used in `duf_downsample`.

    Args:
        kernel_size (int): Kernel size. Default: 13.
        sigma (float): Sigma of the Gaussian kernel. Default: 1.6.

    Returns:
        np.array: The Gaussian kernel.
    """
    from scipy.ndimage import filters as filters
    kernel = np.zeros((kernel_size, kernel_size))
    # set element at the middle to one, a dirac delta
    kernel[kernel_size // 2, kernel_size // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter
    return filters.gaussian_filter(kernel, sigma)


def duf_downsample(x, kernel_size=13, scale=4):
    """Downsamping with Gaussian kernel used in the DUF official code.

    Args:
        x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
        kernel_size (int): Kernel size. Default: 13.
        scale (int): Downsampling factor. Supported scale: (2, 3, 4).
            Default: 4.

    Returns:
        Tensor: DUF downsampled frames.
    """
    assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.'

    squeeze_flag = False
    if x.ndim == 4:
        squeeze_flag = True
        x = x.unsqueeze(0)
    b, t, c, h, w = x.size()
    x = x.view(-1, 1, h, w)
    pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
    x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')

    gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
    gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0)
    x = F.conv2d(x, gaussian_filter, stride=scale)
    x = x[:, :, 2:-2, 2:-2]
    x = x.view(b, t, c, x.size(2), x.size(3))
    if squeeze_flag:
        x = x.squeeze(0)
    return x
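A hedged example (not part of the diff) of the frame-index helper above, matching the padding examples given in its docstring:

from basicsr.data.data_util import generate_frame_indices

print(generate_frame_indices(0, max_frame_num=100, num_frames=5, padding='reflection'))  # [2, 1, 0, 1, 2]
print(generate_frame_indices(99, max_frame_num=100, num_frames=5, padding='replicate'))  # [97, 98, 99, 99, 99]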
basicsr/data/degradations.py
ADDED
@@ -0,0 +1,765 @@
1 |
+
import cv2
|
2 |
+
import math
|
3 |
+
import numpy as np
|
4 |
+
import random
|
5 |
+
import torch
|
6 |
+
from scipy import special
|
7 |
+
from scipy.stats import multivariate_normal
|
8 |
+
from torchvision.transforms.functional_tensor import rgb_to_grayscale
|
9 |
+
|
10 |
+
# -------------------------------------------------------------------- #
|
11 |
+
# --------------------------- blur kernels --------------------------- #
|
12 |
+
# -------------------------------------------------------------------- #
|
13 |
+
|
14 |
+
|
15 |
+
# --------------------------- util functions --------------------------- #
|
16 |
+
def sigma_matrix2(sig_x, sig_y, theta):
|
17 |
+
"""Calculate the rotated sigma matrix (two dimensional matrix).
|
18 |
+
|
19 |
+
Args:
|
20 |
+
sig_x (float):
|
21 |
+
sig_y (float):
|
22 |
+
theta (float): Radian measurement.
|
23 |
+
|
24 |
+
Returns:
|
25 |
+
ndarray: Rotated sigma matrix.
|
26 |
+
"""
|
27 |
+
d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
|
28 |
+
u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
|
29 |
+
return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
|
30 |
+
|
31 |
+
|
32 |
+
def mesh_grid(kernel_size):
|
33 |
+
"""Generate the mesh grid, centering at zero.
|
34 |
+
|
35 |
+
Args:
|
36 |
+
kernel_size (int):
|
37 |
+
|
38 |
+
Returns:
|
39 |
+
xy (ndarray): with the shape (kernel_size, kernel_size, 2)
|
40 |
+
xx (ndarray): with the shape (kernel_size, kernel_size)
|
41 |
+
yy (ndarray): with the shape (kernel_size, kernel_size)
|
42 |
+
"""
|
43 |
+
ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
|
44 |
+
xx, yy = np.meshgrid(ax, ax)
|
45 |
+
xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
|
46 |
+
1))).reshape(kernel_size, kernel_size, 2)
|
47 |
+
return xy, xx, yy
|
48 |
+
|
49 |
+
|
50 |
+
def pdf2(sigma_matrix, grid):
|
51 |
+
"""Calculate PDF of the bivariate Gaussian distribution.
|
52 |
+
|
53 |
+
Args:
|
54 |
+
sigma_matrix (ndarray): with the shape (2, 2)
|
55 |
+
grid (ndarray): generated by :func:`mesh_grid`,
|
56 |
+
with the shape (K, K, 2), K is the kernel size.
|
57 |
+
|
58 |
+
Returns:
|
59 |
+
kernel (ndarrray): un-normalized kernel.
|
60 |
+
"""
|
61 |
+
inverse_sigma = np.linalg.inv(sigma_matrix)
|
62 |
+
kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
|
63 |
+
return kernel
|
64 |
+
|
65 |
+
|
66 |
+
def cdf2(d_matrix, grid):
|
67 |
+
"""Calculate the CDF of the standard bivariate Gaussian distribution.
|
68 |
+
Used in skewed Gaussian distribution.
|
69 |
+
|
70 |
+
Args:
|
71 |
+
d_matrix (ndarrasy): skew matrix.
|
72 |
+
grid (ndarray): generated by :func:`mesh_grid`,
|
73 |
+
with the shape (K, K, 2), K is the kernel size.
|
74 |
+
|
75 |
+
Returns:
|
76 |
+
cdf (ndarray): skewed cdf.
|
77 |
+
"""
|
78 |
+
rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
|
79 |
+
grid = np.dot(grid, d_matrix)
|
80 |
+
cdf = rv.cdf(grid)
|
81 |
+
return cdf
|
82 |
+
|
83 |
+
|
84 |
+
def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
|
85 |
+
"""Generate a bivariate isotropic or anisotropic Gaussian kernel.
|
86 |
+
|
87 |
+
In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.
|
88 |
+
|
89 |
+
Args:
|
90 |
+
kernel_size (int):
|
91 |
+
sig_x (float):
|
92 |
+
sig_y (float):
|
93 |
+
theta (float): Radian measurement.
|
94 |
+
grid (ndarray, optional): generated by :func:`mesh_grid`,
|
95 |
+
with the shape (K, K, 2), K is the kernel size. Default: None
|
96 |
+
isotropic (bool):
|
97 |
+
|
98 |
+
Returns:
|
99 |
+
kernel (ndarray): normalized kernel.
|
100 |
+
"""
|
101 |
+
if grid is None:
|
102 |
+
grid, _, _ = mesh_grid(kernel_size)
|
103 |
+
if isotropic:
|
104 |
+
sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
|
105 |
+
else:
|
106 |
+
sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
|
107 |
+
kernel = pdf2(sigma_matrix, grid)
|
108 |
+
kernel = kernel / np.sum(kernel)
|
109 |
+
return kernel
|
110 |
+
|
111 |
+
|
112 |
+
def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
    """Generate a bivariate generalized Gaussian kernel.
    Described in `Parameter Estimation For Multivariate Generalized
    Gaussian Distributions`_
    by Pascal et al. (2013).

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        beta (float): shape parameter, beta = 1 is the normal distribution.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None

    Returns:
        kernel (ndarray): normalized kernel.

    .. _Parameter Estimation For Multivariate Generalized Gaussian
        Distributions: https://arxiv.org/abs/1302.6498
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    if isotropic:
        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
    kernel = kernel / np.sum(kernel)
    return kernel


def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
    """Generate a plateau-like anisotropic kernel.
    1 / (1 + x^(beta))

    Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        beta (float): shape parameter, beta = 1 is the normal distribution.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None

    Returns:
        kernel (ndarray): normalized kernel.
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    if isotropic:
        sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_Gaussian(kernel_size,
                              sigma_x_range,
                              sigma_y_range,
                              rotation_range,
                              noise_range=None,
                              isotropic=True):
    """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
        noise_range (tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_generalized_Gaussian(kernel_size,
                                          sigma_x_range,
                                          sigma_y_range,
                                          rotation_range,
                                          beta_range,
                                          noise_range=None,
                                          isotropic=True):
    """Randomly generate bivariate generalized Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
        beta_range (tuple): [0.5, 8]
        noise_range (tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # assume beta_range[0] < 1 < beta_range[1]
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_plateau(kernel_size,
                             sigma_x_range,
                             sigma_y_range,
                             rotation_range,
                             beta_range,
                             noise_range=None,
                             isotropic=True):
    """Randomly generate bivariate plateau kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi/2, math.pi/2]
        beta_range (tuple): [1, 4]
        noise_range (tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # TODO: this may be not proper
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)

    return kernel


def random_mixed_kernels(kernel_list,
                         kernel_prob,
                         kernel_size=21,
                         sigma_x_range=(0.6, 5),
                         sigma_y_range=(0.6, 5),
                         rotation_range=(-math.pi, math.pi),
                         betag_range=(0.5, 8),
                         betap_range=(0.5, 8),
                         noise_range=None):
    """Randomly generate mixed kernels.

    Args:
        kernel_list (tuple): a list of kernel type names,
            support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',
            'plateau_aniso']
        kernel_prob (tuple): corresponding kernel probability for each
            kernel type
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation_range (tuple): [-math.pi, math.pi]
        betag_range (tuple): [0.5, 8], for generalized Gaussian kernels
        betap_range (tuple): [0.5, 8], for plateau kernels
        noise_range (tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    kernel_type = random.choices(kernel_list, kernel_prob)[0]
    if kernel_type == 'iso':
        kernel = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
    elif kernel_type == 'aniso':
        kernel = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
    elif kernel_type == 'generalized_iso':
        kernel = random_bivariate_generalized_Gaussian(
            kernel_size,
            sigma_x_range,
            sigma_y_range,
            rotation_range,
            betag_range,
            noise_range=noise_range,
            isotropic=True)
    elif kernel_type == 'generalized_aniso':
        kernel = random_bivariate_generalized_Gaussian(
            kernel_size,
            sigma_x_range,
            sigma_y_range,
            rotation_range,
            betag_range,
            noise_range=noise_range,
            isotropic=False)
    elif kernel_type == 'plateau_iso':
        kernel = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
    elif kernel_type == 'plateau_aniso':
        kernel = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
    return kernel


np.seterr(divide='ignore', invalid='ignore')

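An illustrative call to the dispatcher above (not part of the original file). The kernel types come from the docstring; the probabilities and ranges below are made-up values for the sketch, not taken from any particular training config:

# Illustrative: draw one random blur kernel per call, mixing Gaussian and plateau shapes.
kernel = random_mixed_kernels(
    kernel_list=['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    kernel_prob=[0.45, 0.25, 0.12, 0.03, 0.12, 0.03],   # hypothetical probabilities
    kernel_size=21,
    sigma_x_range=(0.2, 3),
    sigma_y_range=(0.2, 3),
    rotation_range=(-math.pi, math.pi),
    betag_range=(0.5, 4),
    betap_range=(1, 2),
    noise_range=None)
print(kernel.shape)  # (21, 21)
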
def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
    """2D sinc filter, ref: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter

    Args:
        cutoff (float): cutoff frequency in radians (pi is max)
        kernel_size (int): horizontal and vertical size, must be odd.
        pad_to (int): pad kernel size to desired size, must be odd or zero.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    kernel = np.fromfunction(
        lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
            (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
                (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
    kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
    kernel = kernel / np.sum(kernel)
    if pad_to > kernel_size:
        pad_size = (pad_to - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
    return kernel

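An illustrative way to apply the sinc kernel (not part of the original file). It assumes the module-level `cv2` and `numpy as np` imports; the image path is hypothetical, and `cv2.filter2D` is used here as a standard 2D convolution, not as the file's own API:

# Illustrative: build a circular low-pass (sinc) kernel and convolve an image with it.
sinc_kernel = circular_lowpass_kernel(cutoff=np.pi / 3, kernel_size=21, pad_to=0)
img = cv2.imread('example.png').astype(np.float32) / 255.   # hypothetical input image
ringing = cv2.filter2D(img, -1, sinc_kernel)                # produces the typical ringing/overshoot look
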

# ------------------------------------------------------------- #
# --------------------------- noise --------------------------- #
# ------------------------------------------------------------- #

# ----------------------- Gaussian Noise ----------------------- #


def generate_gaussian_noise(img, sigma=10, gray_noise=False):
    """Generate Gaussian noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise scale (measured in range 255). Default: 10.
        gray_noise (bool): Whether to generate grayscale noise. Default: False.

    Returns:
        (Numpy array): Generated noise, shape (h, w, c), float32.
    """
    if gray_noise:
        noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
        noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
    else:
        noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
    return noise


def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
    """Add Gaussian noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise scale (measured in range 255). Default: 10.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range [0, 1],
            float32.
    """
    noise = generate_gaussian_noise(img, sigma, gray_noise)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out

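A minimal usage sketch (not part of the original file). The input array is synthetic so the snippet stands on its own apart from the module-level `numpy as np` import:

# Illustrative: add clipped Gaussian noise (sigma on the 0-255 scale) to a [0, 1] float image.
img = np.random.rand(64, 64, 3).astype(np.float32)           # stand-in for a real image
noisy = add_gaussian_noise(img, sigma=25, clip=True, rounds=False, gray_noise=False)
print(noisy.min() >= 0, noisy.max() <= 1)                     # True True, because clip=True
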
def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
    """Generate Gaussian noise (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
        sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.

    Returns:
        (Tensor): Generated noise, shape (b, c, h, w), float32.
    """
    b, _, h, w = img.size()
    if not isinstance(sigma, (float, int)):
        sigma = sigma.view(img.size(0), 1, 1, 1)
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0

    if cal_gray_noise:
        noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
        noise_gray = noise_gray.view(b, 1, h, w)

    # always calculate color noise
    noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.

    if cal_gray_noise:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    return noise


def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
    """Add Gaussian noise (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
        sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range [0, 1],
            float32.
    """
    noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ----------------------- Random Gaussian Noise ----------------------- #
def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
    sigma = np.random.uniform(sigma_range[0], sigma_range[1])
    if np.random.uniform() < gray_prob:
        gray_noise = True
    else:
        gray_noise = False
    return generate_gaussian_noise(img, sigma, gray_noise)


def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
    sigma = torch.rand(
        img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
    gray_noise = (gray_noise < gray_prob).float()
    return generate_gaussian_noise_pt(img, sigma, gray_noise)


def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out

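A batched usage sketch (not part of the original file): each sample in the batch gets its own random sigma and an independent gray/color decision, which is how on-the-fly degradation pipelines typically use these helpers. Tensor contents are synthetic; the module-level `torch` import is assumed:

# Illustrative: per-sample random Gaussian noise on a (b, c, h, w) batch in [0, 1].
batch = torch.rand(4, 3, 32, 32)
noisy_batch = random_add_gaussian_noise_pt(batch, sigma_range=(1, 30), gray_prob=0.4, clip=True, rounds=False)
print(noisy_batch.shape)  # torch.Size([4, 3, 32, 32])
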

# ----------------------- Poisson (Shot) Noise ----------------------- #


def generate_poisson_noise(img, scale=1.0, gray_noise=False):
    """Generate Poisson noise.

    Ref: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Whether to generate grayscale noise. Default: False.

    Returns:
        (Numpy array): Generated noise, shape (h, w, c), float32.
    """
    if gray_noise:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # round and clip image for counting vals correctly
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = len(np.unique(img))
    vals = 2**np.ceil(np.log2(vals))
    out = np.float32(np.random.poisson(img * vals) / float(vals))
    noise = out - img
    if gray_noise:
        noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
    return noise * scale


def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
    """Add Poisson noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Whether to generate grayscale noise. Default: False.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range [0, 1],
            float32.
    """
    noise = generate_poisson_noise(img, scale, gray_noise)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
    """Generate a batch of Poisson noise (PyTorch version).

    Args:
        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
            0 for False, 1 for True. Default: 0.

    Returns:
        (Tensor): Generated noise, shape (b, c, h, w), float32.
    """
    b, _, h, w = img.size()
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0
    if cal_gray_noise:
        img_gray = rgb_to_grayscale(img, num_output_channels=1)
        # round and clip image for counting vals correctly
        img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
        # use for-loop to get the unique values for each sample
        vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
        vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
        vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
        out = torch.poisson(img_gray * vals) / vals
        noise_gray = out - img_gray
        noise_gray = noise_gray.expand(b, 3, h, w)

    # always calculate color noise
    # round and clip image for counting vals correctly
    img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
    # use for-loop to get the unique values for each sample
    vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
    vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
    vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
    out = torch.poisson(img * vals) / vals
    noise = out - img
    if cal_gray_noise:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    if not isinstance(scale, (float, int)):
        scale = scale.view(b, 1, 1, 1)
    return noise * scale


def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
    """Add Poisson noise to a batch of images (PyTorch version).

    Args:
        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
            0 for False, 1 for True. Default: 0.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range [0, 1],
            float32.
    """
    noise = generate_poisson_noise_pt(img, scale, gray_noise)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out

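A short sketch of the batched shot-noise path (not part of the original file). The batch is synthetic and `scale=1.0` is just the default; only the module-level `torch` import is assumed:

# Illustrative: Poisson (shot) noise whose strength follows image intensity, per batch sample.
batch = torch.rand(2, 3, 32, 32)
noisy_batch = add_poisson_noise_pt(batch, scale=1.0, clip=True, rounds=False, gray_noise=0)
print(noisy_batch.shape)  # torch.Size([2, 3, 32, 32])
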

# ----------------------- Random Poisson (Shot) Noise ----------------------- #


def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
    scale = np.random.uniform(scale_range[0], scale_range[1])
    if np.random.uniform() < gray_prob:
        gray_noise = True
    else:
        gray_noise = False
    return generate_poisson_noise(img, scale, gray_noise)


def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_poisson_noise(img, scale_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
    scale = torch.rand(
        img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
    gray_noise = (gray_noise < gray_prob).float()
    return generate_poisson_noise_pt(img, scale, gray_noise)


def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ------------------------------------------------------------------------ #
# --------------------------- JPEG compression --------------------------- #
# ------------------------------------------------------------------------ #


def add_jpg_compression(img, quality=90):
    """Add JPG compression artifacts.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        quality (float): JPG compression quality. 0 for lowest quality, 100 for
            best quality. Default: 90.

    Returns:
        (Numpy array): Returned image after JPG, shape (h, w, c), range [0, 1],
            float32.
    """
    img = np.clip(img, 0, 1)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
    img = np.float32(cv2.imdecode(encimg, 1)) / 255.
    return img


def random_add_jpg_compression(img, quality_range=(90, 100)):
    """Randomly add JPG compression artifacts.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        quality_range (tuple[float] | list[float]): JPG compression quality
            range. 0 for lowest quality, 100 for best quality.
            Default: (90, 100).

    Returns:
        (Numpy array): Returned image after JPG, shape (h, w, c), range [0, 1],
            float32.
    """
    quality = np.random.uniform(quality_range[0], quality_range[1])
    return add_jpg_compression(img, quality)
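A minimal end-to-end sketch chaining the pieces of this file (not part of the original file): blur with a random kernel, add random noise, then JPEG-compress. The "clean" image is synthetic, the parameter values are illustrative, and `cv2.filter2D` is used as a plain 2D convolution:

# Illustrative blur -> noise -> JPEG degradation chain built from the functions above.
img = np.random.rand(128, 128, 3).astype(np.float32)                     # stand-in for a clean HR image
kernel = random_mixed_kernels(['iso', 'aniso'], [0.7, 0.3], kernel_size=21,
                              sigma_x_range=(0.2, 3), sigma_y_range=(0.2, 3))
img_lq = cv2.filter2D(img, -1, kernel)                                   # blur
img_lq = random_add_gaussian_noise(img_lq, sigma_range=(1, 30), gray_prob=0.4)   # noise
img_lq = random_add_jpg_compression(img_lq, quality_range=(30, 95))              # JPEG artifacts
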
basicsr/data/ffhq_dataset.py
ADDED
@@ -0,0 +1,80 @@
import random
import time
from os import path as osp
from torch.utils import data as data
from torchvision.transforms.functional import normalize

from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY


@DATASET_REGISTRY.register()
class FFHQDataset(data.Dataset):
    """FFHQ dataset for StyleGAN.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            io_backend (dict): IO backend type and other kwarg.
            mean (list | tuple): Image mean.
            std (list | tuple): Image std.
            use_hflip (bool): Whether to horizontally flip.
    """

    def __init__(self, opt):
        super(FFHQDataset, self).__init__()
        self.opt = opt
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']

        self.gt_folder = opt['dataroot_gt']
        self.mean = opt['mean']
        self.std = opt['std']

        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = self.gt_folder
            if not self.gt_folder.endswith('.lmdb'):
                raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
            with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
                self.paths = [line.split('.')[0] for line in fin]
        else:
            # FFHQ has 70000 images in total
            self.paths = [osp.join(self.gt_folder, f'{v:08d}.png') for v in range(70000)]

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # load gt image
        gt_path = self.paths[index]
        # avoid errors caused by high latency in reading files
        retry = 3
        while retry > 0:
            try:
                img_bytes = self.file_client.get(gt_path)
            except Exception as e:
                logger = get_root_logger()
                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
                # change another file to read (randint is inclusive, so subtract 1 to stay in range)
                index = random.randint(0, self.__len__() - 1)
                gt_path = self.paths[index]
                time.sleep(1)  # sleep 1s for occasional server congestion
            else:
                break
            finally:
                retry -= 1
        img_gt = imfrombytes(img_bytes, float32=True)

        # random horizontal flip
        img_gt = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False)
        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt = img2tensor(img_gt, bgr2rgb=True, float32=True)
        # normalize
        normalize(img_gt, self.mean, self.std, inplace=True)
        return {'gt': img_gt, 'gt_path': gt_path}

    def __len__(self):
        return len(self.paths)
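A minimal usage sketch for the dataset above (not part of the original file). The dataroot path is hypothetical, the disk backend is assumed, and the printed shape holds for full-resolution 1024x1024 FFHQ images:

# Illustrative: build the dataset from an option dict and wrap it in a DataLoader.
from torch.utils.data import DataLoader

opt = {
    'dataroot_gt': 'datasets/ffhq/images',   # hypothetical folder with 00000000.png ... 00069999.png
    'io_backend': {'type': 'disk'},
    'mean': [0.5, 0.5, 0.5],
    'std': [0.5, 0.5, 0.5],
    'use_hflip': True,
}
dataset = FFHQDataset(opt)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
sample = next(iter(loader))
print(sample['gt'].shape)  # torch.Size([4, 3, 1024, 1024]) for full-resolution FFHQ
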
basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt
ADDED
The diff for this file is too large to render.
See raw diff
basicsr/data/meta_info/meta_info_REDS4_test_GT.txt
ADDED
@@ -0,0 +1,4 @@
000 100 (720,1280,3)
011 100 (720,1280,3)
015 100 (720,1280,3)
020 100 (720,1280,3)
basicsr/data/meta_info/meta_info_REDS_GT.txt
ADDED
@@ -0,0 +1,270 @@
1 |
+
000 100 (720,1280,3)
|
2 |
+
001 100 (720,1280,3)
|
3 |
+
002 100 (720,1280,3)
|
4 |
+
003 100 (720,1280,3)
|
5 |
+
004 100 (720,1280,3)
|
6 |
+
005 100 (720,1280,3)
|
7 |
+
006 100 (720,1280,3)
|
8 |
+
007 100 (720,1280,3)
|
9 |
+
008 100 (720,1280,3)
|
10 |
+
009 100 (720,1280,3)
|
11 |
+
010 100 (720,1280,3)
|
12 |
+
011 100 (720,1280,3)
|
13 |
+
012 100 (720,1280,3)
|
14 |
+
013 100 (720,1280,3)
|
15 |
+
014 100 (720,1280,3)
|
16 |
+
015 100 (720,1280,3)
|
17 |
+
016 100 (720,1280,3)
|
18 |
+
017 100 (720,1280,3)
|
19 |
+
018 100 (720,1280,3)
|
20 |
+
019 100 (720,1280,3)
|
21 |
+
020 100 (720,1280,3)
|
22 |
+
021 100 (720,1280,3)
|
23 |
+
022 100 (720,1280,3)
|
24 |
+
023 100 (720,1280,3)
|
25 |
+
024 100 (720,1280,3)
|
26 |
+
025 100 (720,1280,3)
|
27 |
+
026 100 (720,1280,3)
|
28 |
+
027 100 (720,1280,3)
|
29 |
+
028 100 (720,1280,3)
|
30 |
+
029 100 (720,1280,3)
|
31 |
+
030 100 (720,1280,3)
|
32 |
+
031 100 (720,1280,3)
|
33 |
+
032 100 (720,1280,3)
|
34 |
+
033 100 (720,1280,3)
|
35 |
+
034 100 (720,1280,3)
|
36 |
+
035 100 (720,1280,3)
|
37 |
+
036 100 (720,1280,3)
|
38 |
+
037 100 (720,1280,3)
|
39 |
+
038 100 (720,1280,3)
|
40 |
+
039 100 (720,1280,3)
|
41 |
+
040 100 (720,1280,3)
|
42 |
+
041 100 (720,1280,3)
|
43 |
+
042 100 (720,1280,3)
|
44 |
+
043 100 (720,1280,3)
|
45 |
+
044 100 (720,1280,3)
|
46 |
+
045 100 (720,1280,3)
|
47 |
+
046 100 (720,1280,3)
|
48 |
+
047 100 (720,1280,3)
|
49 |
+
048 100 (720,1280,3)
|
50 |
+
049 100 (720,1280,3)
|
51 |
+
050 100 (720,1280,3)
|
52 |
+
051 100 (720,1280,3)
|
53 |
+
052 100 (720,1280,3)
|
54 |
+
053 100 (720,1280,3)
|
55 |
+
054 100 (720,1280,3)
|
56 |
+
055 100 (720,1280,3)
|
57 |
+
056 100 (720,1280,3)
|
58 |
+
057 100 (720,1280,3)
|
59 |
+
058 100 (720,1280,3)
|
60 |
+
059 100 (720,1280,3)
|
61 |
+
060 100 (720,1280,3)
|
62 |
+
061 100 (720,1280,3)
|
63 |
+
062 100 (720,1280,3)
|
64 |
+
063 100 (720,1280,3)
|
65 |
+
064 100 (720,1280,3)
|
66 |
+
065 100 (720,1280,3)
|
67 |
+
066 100 (720,1280,3)
|
68 |
+
067 100 (720,1280,3)
|
69 |
+
068 100 (720,1280,3)
|
70 |
+
069 100 (720,1280,3)
|
71 |
+
070 100 (720,1280,3)
|
72 |
+
071 100 (720,1280,3)
|
73 |
+
072 100 (720,1280,3)
|
74 |
+
073 100 (720,1280,3)
|
75 |
+
074 100 (720,1280,3)
|
76 |
+
075 100 (720,1280,3)
|
77 |
+
076 100 (720,1280,3)
|
78 |
+
077 100 (720,1280,3)
|
79 |
+
078 100 (720,1280,3)
|
80 |
+
079 100 (720,1280,3)
|
81 |
+
080 100 (720,1280,3)
|
82 |
+
081 100 (720,1280,3)
|
83 |
+
082 100 (720,1280,3)
|
84 |
+
083 100 (720,1280,3)
|
85 |
+
084 100 (720,1280,3)
|
86 |
+
085 100 (720,1280,3)
|
87 |
+
086 100 (720,1280,3)
|
88 |
+
087 100 (720,1280,3)
|
89 |
+
088 100 (720,1280,3)
|
90 |
+
089 100 (720,1280,3)
|
91 |
+
090 100 (720,1280,3)
|
92 |
+
091 100 (720,1280,3)
|
93 |
+
092 100 (720,1280,3)
|
94 |
+
093 100 (720,1280,3)
|
95 |
+
094 100 (720,1280,3)
|
96 |
+
095 100 (720,1280,3)
|
97 |
+
096 100 (720,1280,3)
|
98 |
+
097 100 (720,1280,3)
|
99 |
+
098 100 (720,1280,3)
|
100 |
+
099 100 (720,1280,3)
|
101 |
+
100 100 (720,1280,3)
|
102 |
+
101 100 (720,1280,3)
|
103 |
+
102 100 (720,1280,3)
|
104 |
+
103 100 (720,1280,3)
|
105 |
+
104 100 (720,1280,3)
|
106 |
+
105 100 (720,1280,3)
|
107 |
+
106 100 (720,1280,3)
|
108 |
+
107 100 (720,1280,3)
|
109 |
+
108 100 (720,1280,3)
|
110 |
+
109 100 (720,1280,3)
|
111 |
+
110 100 (720,1280,3)
|
112 |
+
111 100 (720,1280,3)
|
113 |
+
112 100 (720,1280,3)
|
114 |
+
113 100 (720,1280,3)
|
115 |
+
114 100 (720,1280,3)
|
116 |
+
115 100 (720,1280,3)
|
117 |
+
116 100 (720,1280,3)
|
118 |
+
117 100 (720,1280,3)
|
119 |
+
118 100 (720,1280,3)
|
120 |
+
119 100 (720,1280,3)
|
121 |
+
120 100 (720,1280,3)
|
122 |
+
121 100 (720,1280,3)
|
123 |
+
122 100 (720,1280,3)
|
124 |
+
123 100 (720,1280,3)
|
125 |
+
124 100 (720,1280,3)
|
126 |
+
125 100 (720,1280,3)
|
127 |
+
126 100 (720,1280,3)
|
128 |
+
127 100 (720,1280,3)
|
129 |
+
128 100 (720,1280,3)
|
130 |
+
129 100 (720,1280,3)
|
131 |
+
130 100 (720,1280,3)
|
132 |
+
131 100 (720,1280,3)
|
133 |
+
132 100 (720,1280,3)
|
134 |
+
133 100 (720,1280,3)
|
135 |
+
134 100 (720,1280,3)
|
136 |
+
135 100 (720,1280,3)
|
137 |
+
136 100 (720,1280,3)
|
138 |
+
137 100 (720,1280,3)
|
139 |
+
138 100 (720,1280,3)
|
140 |
+
139 100 (720,1280,3)
|
141 |
+
140 100 (720,1280,3)
|
142 |
+
141 100 (720,1280,3)
|
143 |
+
142 100 (720,1280,3)
|
144 |
+
143 100 (720,1280,3)
|
145 |
+
144 100 (720,1280,3)
|
146 |
+
145 100 (720,1280,3)
|
147 |
+
146 100 (720,1280,3)
|
148 |
+
147 100 (720,1280,3)
|
149 |
+
148 100 (720,1280,3)
|
150 |
+
149 100 (720,1280,3)
|
151 |
+
150 100 (720,1280,3)
|
152 |
+
151 100 (720,1280,3)
|
153 |
+
152 100 (720,1280,3)
|
154 |
+
153 100 (720,1280,3)
|
155 |
+
154 100 (720,1280,3)
|
156 |
+
155 100 (720,1280,3)
|
157 |
+
156 100 (720,1280,3)
|
158 |
+
157 100 (720,1280,3)
|
159 |
+
158 100 (720,1280,3)
|
160 |
+
159 100 (720,1280,3)
|
161 |
+
160 100 (720,1280,3)
|
162 |
+
161 100 (720,1280,3)
|
163 |
+
162 100 (720,1280,3)
|
164 |
+
163 100 (720,1280,3)
|
165 |
+
164 100 (720,1280,3)
|
166 |
+
165 100 (720,1280,3)
|
167 |
+
166 100 (720,1280,3)
|
168 |
+
167 100 (720,1280,3)
|
169 |
+
168 100 (720,1280,3)
|
170 |
+
169 100 (720,1280,3)
|
171 |
+
170 100 (720,1280,3)
|
172 |
+
171 100 (720,1280,3)
|
173 |
+
172 100 (720,1280,3)
|
174 |
+
173 100 (720,1280,3)
|
175 |
+
174 100 (720,1280,3)
|
176 |
+
175 100 (720,1280,3)
|
177 |
+
176 100 (720,1280,3)
|
178 |
+
177 100 (720,1280,3)
|
179 |
+
178 100 (720,1280,3)
|
180 |
+
179 100 (720,1280,3)
|
181 |
+
180 100 (720,1280,3)
|
182 |
+
181 100 (720,1280,3)
|
183 |
+
182 100 (720,1280,3)
|
184 |
+
183 100 (720,1280,3)
|
185 |
+
184 100 (720,1280,3)
|
186 |
+
185 100 (720,1280,3)
|
187 |
+
186 100 (720,1280,3)
|
188 |
+
187 100 (720,1280,3)
|
189 |
+
188 100 (720,1280,3)
|
190 |
+
189 100 (720,1280,3)
|
191 |
+
190 100 (720,1280,3)
|
192 |
+
191 100 (720,1280,3)
|
193 |
+
192 100 (720,1280,3)
|
194 |
+
193 100 (720,1280,3)
|
195 |
+
194 100 (720,1280,3)
|
196 |
+
195 100 (720,1280,3)
|
197 |
+
196 100 (720,1280,3)
|
198 |
+
197 100 (720,1280,3)
|
199 |
+
198 100 (720,1280,3)
|
200 |
+
199 100 (720,1280,3)
|
201 |
+
200 100 (720,1280,3)
|
202 |
+
201 100 (720,1280,3)
|
203 |
+
202 100 (720,1280,3)
|
204 |
+
203 100 (720,1280,3)
|
205 |
+
204 100 (720,1280,3)
|
206 |
+
205 100 (720,1280,3)
|
207 |
+
206 100 (720,1280,3)
|
208 |
+
207 100 (720,1280,3)
|
209 |
+
208 100 (720,1280,3)
|
210 |
+
209 100 (720,1280,3)
|
211 |
+
210 100 (720,1280,3)
|
212 |
+
211 100 (720,1280,3)
|
213 |
+
212 100 (720,1280,3)
|
214 |
+
213 100 (720,1280,3)
|
215 |
+
214 100 (720,1280,3)
|
216 |
+
215 100 (720,1280,3)
|
217 |
+
216 100 (720,1280,3)
|
218 |
+
217 100 (720,1280,3)
|
219 |
+
218 100 (720,1280,3)
|
220 |
+
219 100 (720,1280,3)
|
221 |
+
220 100 (720,1280,3)
|
222 |
+
221 100 (720,1280,3)
|
223 |
+
222 100 (720,1280,3)
|
224 |
+
223 100 (720,1280,3)
|
225 |
+
224 100 (720,1280,3)
|
226 |
+
225 100 (720,1280,3)
|
227 |
+
226 100 (720,1280,3)
|
228 |
+
227 100 (720,1280,3)
|
229 |
+
228 100 (720,1280,3)
|
230 |
+
229 100 (720,1280,3)
|
231 |
+
230 100 (720,1280,3)
|
232 |
+
231 100 (720,1280,3)
|
233 |
+
232 100 (720,1280,3)
|
234 |
+
233 100 (720,1280,3)
|
235 |
+
234 100 (720,1280,3)
|
236 |
+
235 100 (720,1280,3)
|
237 |
+
236 100 (720,1280,3)
|
238 |
+
237 100 (720,1280,3)
|
239 |
+
238 100 (720,1280,3)
|
240 |
+
239 100 (720,1280,3)
|
241 |
+
240 100 (720,1280,3)
|
242 |
+
241 100 (720,1280,3)
|
243 |
+
242 100 (720,1280,3)
|
244 |
+
243 100 (720,1280,3)
|
245 |
+
244 100 (720,1280,3)
|
246 |
+
245 100 (720,1280,3)
|
247 |
+
246 100 (720,1280,3)
|
248 |
+
247 100 (720,1280,3)
|
249 |
+
248 100 (720,1280,3)
|
250 |
+
249 100 (720,1280,3)
|
251 |
+
250 100 (720,1280,3)
|
252 |
+
251 100 (720,1280,3)
|
253 |
+
252 100 (720,1280,3)
|
254 |
+
253 100 (720,1280,3)
|
255 |
+
254 100 (720,1280,3)
|
256 |
+
255 100 (720,1280,3)
|
257 |
+
256 100 (720,1280,3)
|
258 |
+
257 100 (720,1280,3)
|
259 |
+
258 100 (720,1280,3)
|
260 |
+
259 100 (720,1280,3)
|
261 |
+
260 100 (720,1280,3)
|
262 |
+
261 100 (720,1280,3)
|
263 |
+
262 100 (720,1280,3)
|
264 |
+
263 100 (720,1280,3)
|
265 |
+
264 100 (720,1280,3)
|
266 |
+
265 100 (720,1280,3)
|
267 |
+
266 100 (720,1280,3)
|
268 |
+
267 100 (720,1280,3)
|
269 |
+
268 100 (720,1280,3)
|
270 |
+
269 100 (720,1280,3)
|
basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt
ADDED
@@ -0,0 +1,4 @@
240 100 (720,1280,3)
241 100 (720,1280,3)
246 100 (720,1280,3)
257 100 (720,1280,3)
basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt
ADDED
@@ -0,0 +1,30 @@
240 100 (720,1280,3)
241 100 (720,1280,3)
242 100 (720,1280,3)
243 100 (720,1280,3)
244 100 (720,1280,3)
245 100 (720,1280,3)
246 100 (720,1280,3)
247 100 (720,1280,3)
248 100 (720,1280,3)
249 100 (720,1280,3)
250 100 (720,1280,3)
251 100 (720,1280,3)
252 100 (720,1280,3)
253 100 (720,1280,3)
254 100 (720,1280,3)
255 100 (720,1280,3)
256 100 (720,1280,3)
257 100 (720,1280,3)
258 100 (720,1280,3)
259 100 (720,1280,3)
260 100 (720,1280,3)
261 100 (720,1280,3)
262 100 (720,1280,3)
263 100 (720,1280,3)
264 100 (720,1280,3)
265 100 (720,1280,3)
266 100 (720,1280,3)
267 100 (720,1280,3)
268 100 (720,1280,3)
269 100 (720,1280,3)
basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt
ADDED
The diff for this file is too large to render.
See raw diff
basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt
ADDED
@@ -0,0 +1,1225 @@
1 |
+
00001/0625 7 (256,448,3)
|
2 |
+
00001/0632 7 (256,448,3)
|
3 |
+
00001/0807 7 (256,448,3)
|
4 |
+
00001/0832 7 (256,448,3)
|
5 |
+
00001/0834 7 (256,448,3)
|
6 |
+
00001/0836 7 (256,448,3)
|
7 |
+
00002/0004 7 (256,448,3)
|
8 |
+
00002/0112 7 (256,448,3)
|
9 |
+
00002/0116 7 (256,448,3)
|
10 |
+
00002/0123 7 (256,448,3)
|
11 |
+
00002/0455 7 (256,448,3)
|
12 |
+
00002/0602 7 (256,448,3)
|
13 |
+
00002/0976 7 (256,448,3)
|
14 |
+
00002/0980 7 (256,448,3)
|
15 |
+
00002/0983 7 (256,448,3)
|
16 |
+
00002/1000 7 (256,448,3)
|
17 |
+
00003/0022 7 (256,448,3)
|
18 |
+
00003/0031 7 (256,448,3)
|
19 |
+
00003/0035 7 (256,448,3)
|
20 |
+
00003/0041 7 (256,448,3)
|
21 |
+
00003/0073 7 (256,448,3)
|
22 |
+
00003/0107 7 (256,448,3)
|
23 |
+
00003/0111 7 (256,448,3)
|
24 |
+
00003/0114 7 (256,448,3)
|
25 |
+
00003/0117 7 (256,448,3)
|
26 |
+
00003/0121 7 (256,448,3)
|
27 |
+
00003/0499 7 (256,448,3)
|
28 |
+
00003/0501 7 (256,448,3)
|
29 |
+
00003/0507 7 (256,448,3)
|
30 |
+
00003/0510 7 (256,448,3)
|
31 |
+
00003/0517 7 (256,448,3)
|
32 |
+
00003/0522 7 (256,448,3)
|
33 |
+
00003/0531 7 (256,448,3)
|
34 |
+
00003/0533 7 (256,448,3)
|
35 |
+
00003/0534 7 (256,448,3)
|
36 |
+
00003/0682 7 (256,448,3)
|
37 |
+
00003/0687 7 (256,448,3)
|
38 |
+
00003/0715 7 (256,448,3)
|
39 |
+
00003/0742 7 (256,448,3)
|
40 |
+
00003/0751 7 (256,448,3)
|
41 |
+
00003/0984 7 (256,448,3)
|
42 |
+
00004/0042 7 (256,448,3)
|
43 |
+
00004/0165 7 (256,448,3)
|
44 |
+
00004/0321 7 (256,448,3)
|
45 |
+
00004/0569 7 (256,448,3)
|
46 |
+
00004/0572 7 (256,448,3)
|
47 |
+
00004/0619 7 (256,448,3)
|
48 |
+
00004/0776 7 (256,448,3)
|
49 |
+
00004/0780 7 (256,448,3)
|
50 |
+
00004/0825 7 (256,448,3)
|
51 |
+
00004/0832 7 (256,448,3)
|
52 |
+
00004/0853 7 (256,448,3)
|
53 |
+
00004/0876 7 (256,448,3)
|
54 |
+
00004/0888 7 (256,448,3)
|
55 |
+
00005/0015 7 (256,448,3)
|
56 |
+
00005/0021 7 (256,448,3)
|
57 |
+
00005/0022 7 (256,448,3)
|
58 |
+
00005/0024 7 (256,448,3)
|
59 |
+
00005/0026 7 (256,448,3)
|
60 |
+
00005/0394 7 (256,448,3)
|
61 |
+
00005/0403 7 (256,448,3)
|
62 |
+
00005/0531 7 (256,448,3)
|
63 |
+
00005/0546 7 (256,448,3)
|
64 |
+
00005/0554 7 (256,448,3)
|
65 |
+
00005/0694 7 (256,448,3)
|
66 |
+
00005/0700 7 (256,448,3)
|
67 |
+
00005/0740 7 (256,448,3)
|
68 |
+
00005/0826 7 (256,448,3)
|
69 |
+
00005/0832 7 (256,448,3)
|
70 |
+
00005/0834 7 (256,448,3)
|
71 |
+
00005/0943 7 (256,448,3)
|
72 |
+
00006/0184 7 (256,448,3)
|
73 |
+
00006/0205 7 (256,448,3)
|
74 |
+
00006/0206 7 (256,448,3)
|
75 |
+
00006/0211 7 (256,448,3)
|
76 |
+
00006/0271 7 (256,448,3)
|
77 |
+
00006/0273 7 (256,448,3)
|
78 |
+
00006/0277 7 (256,448,3)
|
79 |
+
00006/0283 7 (256,448,3)
|
80 |
+
00006/0287 7 (256,448,3)
|
81 |
+
00006/0298 7 (256,448,3)
|
82 |
+
00006/0310 7 (256,448,3)
|
83 |
+
00006/0356 7 (256,448,3)
|
84 |
+
00006/0357 7 (256,448,3)
|
85 |
+
00006/0544 7 (256,448,3)
|
86 |
+
00006/0565 7 (256,448,3)
|
87 |
+
00006/0569 7 (256,448,3)
|
88 |
+
00006/0573 7 (256,448,3)
|
89 |
+
00006/0592 7 (256,448,3)
|
90 |
+
00006/0613 7 (256,448,3)
|
91 |
+
00006/0633 7 (256,448,3)
|
92 |
+
00006/0637 7 (256,448,3)
|
93 |
+
00006/0646 7 (256,448,3)
|
94 |
+
00006/0649 7 (256,448,3)
|
95 |
+
00006/0655 7 (256,448,3)
|
96 |
+
00006/0658 7 (256,448,3)
|
97 |
+
00006/0662 7 (256,448,3)
|
98 |
+
00006/0666 7 (256,448,3)
|
99 |
+
00006/0673 7 (256,448,3)
|
100 |
+
00007/0248 7 (256,448,3)
|
101 |
+
00007/0253 7 (256,448,3)
|
102 |
+
00007/0430 7 (256,448,3)
|
103 |
+
00007/0434 7 (256,448,3)
|
104 |
+
00007/0436 7 (256,448,3)
|
105 |
+
00007/0452 7 (256,448,3)
|
106 |
+
00007/0464 7 (256,448,3)
|
107 |
+
00007/0470 7 (256,448,3)
|
108 |
+
00007/0472 7 (256,448,3)
|
109 |
+
00007/0483 7 (256,448,3)
|
110 |
+
00007/0484 7 (256,448,3)
|
111 |
+
00007/0493 7 (256,448,3)
|
112 |
+
00007/0508 7 (256,448,3)
|
113 |
+
00007/0514 7 (256,448,3)
|
114 |
+
00007/0697 7 (256,448,3)
|
115 |
+
00007/0698 7 (256,448,3)
|
116 |
+
00007/0744 7 (256,448,3)
|
117 |
+
00007/0775 7 (256,448,3)
|
118 |
+
00007/0786 7 (256,448,3)
|
119 |
+
00007/0790 7 (256,448,3)
|
120 |
+
00007/0800 7 (256,448,3)
|
121 |
+
00007/0833 7 (256,448,3)
|
122 |
+
00007/0867 7 (256,448,3)
|
123 |
+
00007/0879 7 (256,448,3)
|
124 |
+
00007/0899 7 (256,448,3)
|
125 |
+
00008/0251 7 (256,448,3)
|
126 |
+
00008/0322 7 (256,448,3)
|
127 |
+
00008/0971 7 (256,448,3)
|
128 |
+
00008/0976 7 (256,448,3)
|
129 |
+
00009/0016 7 (256,448,3)
|
130 |
+
00009/0036 7 (256,448,3)
|
131 |
+
00009/0037 7 (256,448,3)
|
132 |
+
00009/0609 7 (256,448,3)
|
133 |
+
00009/0812 7 (256,448,3)
|
134 |
+
00009/0821 7 (256,448,3)
|
135 |
+
00009/0947 7 (256,448,3)
|
136 |
+
00009/0952 7 (256,448,3)
|
137 |
+
00009/0955 7 (256,448,3)
|
138 |
+
00009/0970 7 (256,448,3)
|
139 |
+
00010/0072 7 (256,448,3)
|
140 |
+
00010/0074 7 (256,448,3)
|
141 |
+
00010/0079 7 (256,448,3)
|
142 |
+
00010/0085 7 (256,448,3)
|
143 |
+
00010/0139 7 (256,448,3)
|
144 |
+
00010/0140 7 (256,448,3)
|
145 |
+
00010/0183 7 (256,448,3)
|
146 |
+
00010/0200 7 (256,448,3)
|
147 |
+
00010/0223 7 (256,448,3)
|
148 |
+
00010/0305 7 (256,448,3)
|
149 |
+
00010/0323 7 (256,448,3)
|
150 |
+
00010/0338 7 (256,448,3)
|
151 |
+
00010/0342 7 (256,448,3)
|
152 |
+
00010/0350 7 (256,448,3)
|
153 |
+
00010/0356 7 (256,448,3)
|
154 |
+
00010/0362 7 (256,448,3)
|
155 |
+
00010/0366 7 (256,448,3)
|
156 |
+
00010/0375 7 (256,448,3)
|
157 |
+
00010/0404 7 (256,448,3)
|
158 |
+
00010/0407 7 (256,448,3)
|
159 |
+
00010/0414 7 (256,448,3)
|
160 |
+
00010/0418 7 (256,448,3)
|
161 |
+
00010/0429 7 (256,448,3)
|
162 |
+
00010/0557 7 (256,448,3)
|
163 |
+
00010/0564 7 (256,448,3)
|
164 |
+
00010/0733 7 (256,448,3)
|
165 |
+
00010/0935 7 (256,448,3)
|
166 |
+
00010/0939 7 (256,448,3)
|
167 |
+
00010/0943 7 (256,448,3)
|
168 |
+
00011/0242 7 (256,448,3)
|
169 |
+
00011/0259 7 (256,448,3)
|
170 |
+
00011/0263 7 (256,448,3)
|
171 |
+
00011/0266 7 (256,448,3)
|
172 |
+
00011/0278 7 (256,448,3)
|
173 |
+
00011/0890 7 (256,448,3)
|
174 |
+
00011/0894 7 (256,448,3)
|
175 |
+
00011/0903 7 (256,448,3)
|
176 |
+
00011/0906 7 (256,448,3)
|
177 |
+
00011/0913 7 (256,448,3)
|
178 |
+
00012/0011 7 (256,448,3)
|
179 |
+
00012/0014 7 (256,448,3)
|
180 |
+
00012/0126 7 (256,448,3)
|
181 |
+
00012/0127 7 (256,448,3)
|
182 |
+
00012/0526 7 (256,448,3)
|
183 |
+
00012/0551 7 (256,448,3)
|
184 |
+
00012/0896 7 (256,448,3)
|
185 |
+
00012/0910 7 (256,448,3)
|
186 |
+
00012/0915 7 (256,448,3)
|
187 |
+
00013/0167 7 (256,448,3)
|
188 |
+
00013/0794 7 (256,448,3)
|
189 |
+
00013/0807 7 (256,448,3)
|
190 |
+
00013/0846 7 (256,448,3)
|
191 |
+
00013/0882 7 (256,448,3)
|
192 |
+
00013/0889 7 (256,448,3)
|
193 |
+
00013/0910 7 (256,448,3)
|
194 |
+
00013/0913 7 (256,448,3)
|
195 |
+
00013/0924 7 (256,448,3)
|
196 |
+
00013/0931 7 (256,448,3)
|
197 |
+
00013/0944 7 (256,448,3)
|
198 |
+
00013/0955 7 (256,448,3)
|
199 |
+
00013/0962 7 (256,448,3)
|
200 |
+
00013/0969 7 (256,448,3)
|
201 |
+
00014/0012 7 (256,448,3)
|
202 |
+
00014/0025 7 (256,448,3)
|
203 |
+
00014/0473 7 (256,448,3)
|
204 |
+
00014/0499 7 (256,448,3)
|
205 |
+
00014/0524 7 (256,448,3)
|
206 |
+
00014/0739 7 (256,448,3)
|
207 |
+
00014/0753 7 (256,448,3)
|
208 |
+
00014/0771 7 (256,448,3)
|
209 |
+
00014/0832 7 (256,448,3)
|
210 |
+
00014/0836 7 (256,448,3)
|
211 |
+
00014/0838 7 (256,448,3)
|
212 |
+
00014/0839 7 (256,448,3)
|
213 |
+
00014/0843 7 (256,448,3)
|
214 |
+
00014/0846 7 (256,448,3)
|
215 |
+
00014/0849 7 (256,448,3)
|
216 |
+
00014/0859 7 (256,448,3)
|
217 |
+
00014/0880 7 (256,448,3)
|
218 |
+
00014/0906 7 (256,448,3)
|
219 |
+
00015/0030 7 (256,448,3)
|
220 |
+
00015/0067 7 (256,448,3)
|
221 |
+
00015/0084 7 (256,448,3)
|
222 |
+
00015/0190 7 (256,448,3)
|
223 |
+
00015/0575 7 (256,448,3)
|
224 |
+
00015/0784 7 (256,448,3)
|
225 |
+
00015/0855 7 (256,448,3)
|
226 |
+
00015/0904 7 (256,448,3)
|
227 |
+
00015/0914 7 (256,448,3)
|
228 |
+
00015/0936 7 (256,448,3)
|
229 |
+
00015/0939 7 (256,448,3)
|
230 |
+
00015/0943 7 (256,448,3)
|
231 |
+
00015/0957 7 (256,448,3)
|
232 |
+
00016/0131 7 (256,448,3)
|
233 |
+
00016/0173 7 (256,448,3)
|
234 |
+
00016/0320 7 (256,448,3)
|
235 |
+
00016/0328 7 (256,448,3)
|
236 |
+
00016/0334 7 (256,448,3)
|
237 |
+
00016/0338 7 (256,448,3)
|
238 |
+
00016/0339 7 (256,448,3)
|
239 |
+
00016/0345 7 (256,448,3)
|
240 |
+
00016/0365 7 (256,448,3)
|
241 |
+
00016/0584 7 (256,448,3)
|
242 |
+
00016/0634 7 (256,448,3)
|
243 |
+
00017/0342 7 (256,448,3)
|
244 |
+
00017/0346 7 (256,448,3)
|
245 |
+
00017/0350 7 (256,448,3)
|
246 |
+
00017/0766 7 (256,448,3)
|
247 |
+
00017/0786 7 (256,448,3)
|
248 |
+
00017/0911 7 (256,448,3)
|
249 |
+
00017/0914 7 (256,448,3)
|
250 |
+
00018/0217 7 (256,448,3)
|
251 |
+
00018/0258 7 (256,448,3)
|
252 |
+
00018/0307 7 (256,448,3)
|
253 |
+
00018/0480 7 (256,448,3)
|
254 |
+
00018/0491 7 (256,448,3)
|
255 |
+
00018/0994 7 (256,448,3)
|
256 |
+
00018/0995 7 (256,448,3)
|
257 |
+
00018/0997 7 (256,448,3)
|
258 |
+
00018/1000 7 (256,448,3)
|
259 |
+
00019/0007 7 (256,448,3)
|
260 |
+
00019/0016 7 (256,448,3)
|
261 |
+
00019/0026 7 (256,448,3)
|
262 |
+
00019/0030 7 (256,448,3)
|
263 |
+
00019/0086 7 (256,448,3)
|
264 |
+
00019/0089 7 (256,448,3)
|
265 |
+
00019/0111 7 (256,448,3)
|
266 |
+
00019/0285 7 (256,448,3)
|
267 |
+
00019/0415 7 (256,448,3)
|
268 |
+
00019/0434 7 (256,448,3)
|
269 |
+
00019/0437 7 (256,448,3)
|
270 |
+
00019/0568 7 (256,448,3)
|
271 |
+
00019/0570 7 (256,448,3)
|
272 |
+
00019/0591 7 (256,448,3)
|
273 |
+
00019/0596 7 (256,448,3)
|
274 |
+
00019/0603 7 (256,448,3)
|
275 |
+
00019/0607 7 (256,448,3)
|
276 |
+
00019/0637 7 (256,448,3)
|
277 |
+
00019/0644 7 (256,448,3)
|
278 |
+
00019/0647 7 (256,448,3)
|
279 |
+
00019/0787 7 (256,448,3)
|
280 |
+
00019/0993 7 (256,448,3)
|
281 |
+
00019/0998 7 (256,448,3)
|
282 |
+
00021/0232 7 (256,448,3)
|
283 |
+
00021/0255 7 (256,448,3)
|
284 |
+
00021/0646 7 (256,448,3)
|
285 |
+
00021/0653 7 (256,448,3)
|
286 |
+
00021/0657 7 (256,448,3)
|
287 |
+
00021/0668 7 (256,448,3)
|
288 |
+
00021/0672 7 (256,448,3)
|
289 |
+
00021/0725 7 (256,448,3)
|
290 |
+
00021/0750 7 (256,448,3)
|
291 |
+
00021/0764 7 (256,448,3)
|
292 |
+
00021/0821 7 (256,448,3)
|
293 |
+
00022/0192 7 (256,448,3)
|
294 |
+
00022/0391 7 (256,448,3)
|
295 |
+
00022/0514 7 (256,448,3)
|
296 |
+
00022/0567 7 (256,448,3)
|
297 |
+
00022/0674 7 (256,448,3)
|
298 |
+
00022/0686 7 (256,448,3)
|
299 |
+
00022/0700 7 (256,448,3)
|
300 |
+
00023/0020 7 (256,448,3)
|
301 |
+
00023/0024 7 (256,448,3)
|
302 |
+
00023/0025 7 (256,448,3)
|
303 |
+
00023/0042 7 (256,448,3)
|
304 |
+
00023/0050 7 (256,448,3)
|
305 |
+
00023/0094 7 (256,448,3)
|
306 |
+
00023/0107 7 (256,448,3)
|
307 |
+
00023/0635 7 (256,448,3)
|
308 |
+
00023/0698 7 (256,448,3)
|
309 |
+
00023/0774 7 (256,448,3)
|
310 |
+
00023/0795 7 (256,448,3)
|
311 |
+
00023/0821 7 (256,448,3)
|
312 |
+
00023/0839 7 (256,448,3)
|
313 |
+
00023/0846 7 (256,448,3)
|
314 |
+
00023/0869 7 (256,448,3)
|
315 |
+
00023/0879 7 (256,448,3)
|
316 |
+
00023/0887 7 (256,448,3)
|
317 |
+
00023/0899 7 (256,448,3)
|
318 |
+
00023/0910 7 (256,448,3)
|
319 |
+
00023/0920 7 (256,448,3)
|
320 |
+
00023/0929 7 (256,448,3)
|
321 |
+
00023/0941 7 (256,448,3)
|
322 |
+
00023/0942 7 (256,448,3)
|
323 |
+
00023/0952 7 (256,448,3)
|
324 |
+
00024/0066 7 (256,448,3)
|
325 |
+
00024/0072 7 (256,448,3)
|
326 |
+
00024/0080 7 (256,448,3)
|
327 |
+
00024/0093 7 (256,448,3)
|
328 |
+
00024/0107 7 (256,448,3)
|
329 |
+
00024/0262 7 (256,448,3)
|
330 |
+
00024/0283 7 (256,448,3)
|
331 |
+
00024/0294 7 (256,448,3)
|
332 |
+
00024/0296 7 (256,448,3)
|
333 |
+
00024/0304 7 (256,448,3)
|
334 |
+
00024/0315 7 (256,448,3)
|
335 |
+
00024/0322 7 (256,448,3)
|
336 |
+
00024/0648 7 (256,448,3)
|
337 |
+
00024/0738 7 (256,448,3)
|
338 |
+
00024/0743 7 (256,448,3)
|
339 |
+
00025/0542 7 (256,448,3)
|
340 |
+
00025/0769 7 (256,448,3)
|
341 |
+
00025/0984 7 (256,448,3)
|
342 |
+
00025/0985 7 (256,448,3)
|
343 |
+
00025/0989 7 (256,448,3)
|
344 |
+
00025/0991 7 (256,448,3)
|
345 |
+
00026/0009 7 (256,448,3)
|
346 |
+
00026/0013 7 (256,448,3)
|
347 |
+
00026/0020 7 (256,448,3)
|
348 |
+
00026/0021 7 (256,448,3)
|
349 |
+
00026/0025 7 (256,448,3)
|
350 |
+
00026/0135 7 (256,448,3)
|
351 |
+
00026/0200 7 (256,448,3)
|
352 |
+
00026/0297 7 (256,448,3)
|
353 |
+
00026/0306 7 (256,448,3)
|
354 |
+
00026/0444 7 (256,448,3)
|
355 |
+
00026/0450 7 (256,448,3)
|
356 |
+
00026/0453 7 (256,448,3)
|
357 |
+
00026/0464 7 (256,448,3)
|
358 |
+
00026/0486 7 (256,448,3)
|
359 |
+
00026/0773 7 (256,448,3)
|
360 |
+
00026/0785 7 (256,448,3)
|
361 |
+
00026/0836 7 (256,448,3)
|
362 |
+
00026/0838 7 (256,448,3)
|
363 |
+
00026/0848 7 (256,448,3)
|
364 |
+
00026/0885 7 (256,448,3)
|
365 |
+
00026/0893 7 (256,448,3)
|
366 |
+
00026/0939 7 (256,448,3)
|
367 |
+
00026/0942 7 (256,448,3)
|
368 |
+
00027/0092 7 (256,448,3)
|
369 |
+
00027/0112 7 (256,448,3)
|
370 |
+
00027/0115 7 (256,448,3)
|
371 |
+
00027/0143 7 (256,448,3)
|
372 |
+
00027/0175 7 (256,448,3)
|
373 |
+
00027/0179 7 (256,448,3)
|
374 |
+
00027/0183 7 (256,448,3)
|
375 |
+
00027/0197 7 (256,448,3)
|
376 |
+
00027/0199 7 (256,448,3)
|
377 |
+
00027/0300 7 (256,448,3)
|
378 |
+
00028/0015 7 (256,448,3)
|
379 |
+
00028/0032 7 (256,448,3)
|
380 |
+
00028/0048 7 (256,448,3)
|
381 |
+
00028/0068 7 (256,448,3)
|
382 |
+
00028/0219 7 (256,448,3)
|
383 |
+
00028/0606 7 (256,448,3)
|
384 |
+
00028/0626 7 (256,448,3)
|
385 |
+
00028/0748 7 (256,448,3)
|
386 |
+
00028/0764 7 (256,448,3)
|
387 |
+
00028/0772 7 (256,448,3)
|
388 |
+
00028/0780 7 (256,448,3)
|
389 |
+
00028/0926 7 (256,448,3)
|
390 |
+
00028/0947 7 (256,448,3)
|
391 |
+
00028/0962 7 (256,448,3)
|
392 |
+
00029/0085 7 (256,448,3)
|
393 |
+
00029/0281 7 (256,448,3)
|
394 |
+
00029/0284 7 (256,448,3)
|
395 |
+
00029/0288 7 (256,448,3)
|
396 |
+
00029/0294 7 (256,448,3)
|
397 |
+
00029/0364 7 (256,448,3)
|
398 |
+
00029/0369 7 (256,448,3)
|
399 |
+
00029/0421 7 (256,448,3)
|
400 |
+
00029/0425 7 (256,448,3)
|
401 |
+
00029/0550 7 (256,448,3)
|
402 |
+
00030/0014 7 (256,448,3)
|
403 |
+
00030/0101 7 (256,448,3)
|
404 |
+
00030/0143 7 (256,448,3)
|
405 |
+
00030/0351 7 (256,448,3)
|
406 |
+
00030/0356 7 (256,448,3)
|
407 |
+
00030/0371 7 (256,448,3)
|
408 |
+
00030/0484 7 (256,448,3)
|
409 |
+
00030/0492 7 (256,448,3)
|
410 |
+
00030/0503 7 (256,448,3)
|
411 |
+
00030/0682 7 (256,448,3)
|
412 |
+
00030/0696 7 (256,448,3)
|
413 |
+
00030/0735 7 (256,448,3)
|
414 |
+
00030/0737 7 (256,448,3)
|
415 |
+
00030/0868 7 (256,448,3)
|
416 |
+
00031/0161 7 (256,448,3)
|
417 |
+
00031/0180 7 (256,448,3)
|
418 |
+
00031/0194 7 (256,448,3)
|
419 |
+
00031/0253 7 (256,448,3)
|
420 |
+
00031/0293 7 (256,448,3)
|
421 |
+
00031/0466 7 (256,448,3)
|
422 |
+
00031/0477 7 (256,448,3)
|
423 |
+
00031/0549 7 (256,448,3)
|
424 |
+
00031/0600 7 (256,448,3)
|
425 |
+
00031/0617 7 (256,448,3)
|
426 |
+
00031/0649 7 (256,448,3)
|
427 |
+
00032/0015 7 (256,448,3)
|
428 |
+
00032/0020 7 (256,448,3)
|
429 |
+
00032/0023 7 (256,448,3)
|
430 |
+
00032/0048 7 (256,448,3)
|
431 |
+
00032/0056 7 (256,448,3)
|
432 |
+
00032/0872 7 (256,448,3)
|
433 |
+
00033/0069 7 (256,448,3)
|
434 |
+
00033/0073 7 (256,448,3)
|
435 |
+
00033/0078 7 (256,448,3)
|
436 |
+
00033/0079 7 (256,448,3)
|
437 |
+
00033/0086 7 (256,448,3)
|
438 |
+
00033/0088 7 (256,448,3)
|
439 |
+
00033/0091 7 (256,448,3)
|
440 |
+
00033/0096 7 (256,448,3)
|
441 |
+
00033/0607 7 (256,448,3)
|
442 |
+
00033/0613 7 (256,448,3)
|
443 |
+
00033/0616 7 (256,448,3)
|
444 |
+
00033/0619 7 (256,448,3)
|
445 |
+
00033/0626 7 (256,448,3)
|
446 |
+
00033/0628 7 (256,448,3)
|
447 |
+
00033/0637 7 (256,448,3)
|
448 |
+
00033/0686 7 (256,448,3)
|
449 |
+
00033/0842 7 (256,448,3)
|
450 |
+
00034/0261 7 (256,448,3)
|
451 |
+
00034/0265 7 (256,448,3)
|
452 |
+
00034/0269 7 (256,448,3)
|
453 |
+
00034/0275 7 (256,448,3)
|
454 |
+
00034/0286 7 (256,448,3)
|
455 |
+
00034/0294 7 (256,448,3)
|
456 |
+
00034/0431 7 (256,448,3)
|
457 |
+
00034/0577 7 (256,448,3)
|
458 |
+
00034/0685 7 (256,448,3)
|
459 |
+
00034/0687 7 (256,448,3)
|
460 |
+
00034/0703 7 (256,448,3)
|
461 |
+
00034/0715 7 (256,448,3)
|
462 |
+
00034/0935 7 (256,448,3)
|
463 |
+
00034/0943 7 (256,448,3)
|
464 |
+
00034/0963 7 (256,448,3)
|
465 |
+
00034/0979 7 (256,448,3)
|
466 |
+
00034/0990 7 (256,448,3)
|
467 |
+
00035/0129 7 (256,448,3)
|
468 |
+
00035/0153 7 (256,448,3)
|
469 |
+
00035/0156 7 (256,448,3)
|
470 |
+
00035/0474 7 (256,448,3)
|
471 |
+
00035/0507 7 (256,448,3)
|
472 |
+
00035/0532 7 (256,448,3)
|
473 |
+
00035/0560 7 (256,448,3)
|
474 |
+
00035/0572 7 (256,448,3)
|
475 |
+
00035/0587 7 (256,448,3)
|
476 |
+
00035/0588 7 (256,448,3)
|
477 |
+
00035/0640 7 (256,448,3)
|
478 |
+
00035/0654 7 (256,448,3)
|
479 |
+
00035/0655 7 (256,448,3)
|
480 |
+
00035/0737 7 (256,448,3)
|
481 |
+
00035/0843 7 (256,448,3)
|
482 |
+
00035/0932 7 (256,448,3)
|
483 |
+
00035/0957 7 (256,448,3)
|
484 |
+
00036/0029 7 (256,448,3)
|
485 |
+
00036/0266 7 (256,448,3)
|
486 |
+
00036/0276 7 (256,448,3)
|
487 |
+
00036/0310 7 (256,448,3)
|
488 |
+
00036/0314 7 (256,448,3)
|
489 |
+
00036/0320 7 (256,448,3)
|
490 |
+
00036/0333 7 (256,448,3)
|
491 |
+
00036/0348 7 (256,448,3)
|
492 |
+
00036/0357 7 (256,448,3)
|
493 |
+
00036/0360 7 (256,448,3)
|
494 |
+
00036/0368 7 (256,448,3)
|
495 |
+
00036/0371 7 (256,448,3)
|
496 |
+
00036/0378 7 (256,448,3)
|
497 |
+
00036/0391 7 (256,448,3)
|
498 |
+
00036/0440 7 (256,448,3)
|
499 |
+
00036/0731 7 (256,448,3)
|
500 |
+
00036/0733 7 (256,448,3)
|
501 |
+
00036/0741 7 (256,448,3)
|
502 |
+
00036/0743 7 (256,448,3)
|
503 |
+
00036/0927 7 (256,448,3)
|
504 |
+
00036/0931 7 (256,448,3)
|
505 |
+
00036/0933 7 (256,448,3)
|
506 |
+
00036/0938 7 (256,448,3)
|
507 |
+
00036/0944 7 (256,448,3)
|
508 |
+
00036/0946 7 (256,448,3)
|
509 |
+
00036/0951 7 (256,448,3)
|
510 |
+
00036/0953 7 (256,448,3)
|
511 |
+
00036/0963 7 (256,448,3)
|
512 |
+
00036/0964 7 (256,448,3)
|
513 |
+
00036/0981 7 (256,448,3)
|
514 |
+
00036/0991 7 (256,448,3)
|
515 |
+
00037/0072 7 (256,448,3)
|
516 |
+
00037/0079 7 (256,448,3)
|
517 |
+
00037/0132 7 (256,448,3)
|
518 |
+
00037/0135 7 (256,448,3)
|
519 |
+
00037/0137 7 (256,448,3)
|
520 |
+
00037/0141 7 (256,448,3)
|
521 |
+
00037/0229 7 (256,448,3)
|
522 |
+
00037/0234 7 (256,448,3)
|
523 |
+
00037/0239 7 (256,448,3)
|
524 |
+
00037/0242 7 (256,448,3)
|
525 |
+
00037/0254 7 (256,448,3)
|
526 |
+
00037/0269 7 (256,448,3)
|
527 |
+
00037/0276 7 (256,448,3)
|
528 |
+
00037/0279 7 (256,448,3)
|
529 |
+
00037/0286 7 (256,448,3)
|
530 |
+
00037/0345 7 (256,448,3)
|
531 |
+
00037/0449 7 (256,448,3)
|
532 |
+
00037/0450 7 (256,448,3)
|
533 |
+
00037/0820 7 (256,448,3)
|
534 |
+
00037/0824 7 (256,448,3)
|
535 |
+
00037/0859 7 (256,448,3)
|
536 |
+
00037/0899 7 (256,448,3)
|
537 |
+
00037/0906 7 (256,448,3)
|
538 |
+
00038/0535 7 (256,448,3)
|
539 |
+
00038/0572 7 (256,448,3)
|
540 |
+
00038/0675 7 (256,448,3)
|
541 |
+
00038/0731 7 (256,448,3)
|
542 |
+
00038/0732 7 (256,448,3)
|
543 |
+
00038/0744 7 (256,448,3)
|
544 |
+
00038/0755 7 (256,448,3)
|
545 |
+
00039/0002 7 (256,448,3)
|
546 |
+
00039/0013 7 (256,448,3)
|
547 |
+
00039/0247 7 (256,448,3)
|
548 |
+
00039/0489 7 (256,448,3)
|
549 |
+
00039/0504 7 (256,448,3)
|
550 |
+
00039/0558 7 (256,448,3)
|
551 |
+
00039/0686 7 (256,448,3)
|
552 |
+
00039/0727 7 (256,448,3)
|
553 |
+
00039/0769 7 (256,448,3)
|
554 |
+
00040/0081 7 (256,448,3)
|
555 |
+
00040/0082 7 (256,448,3)
|
556 |
+
00040/0402 7 (256,448,3)
|
557 |
+
00040/0407 7 (256,448,3)
|
558 |
+
00040/0408 7 (256,448,3)
|
559 |
+
00040/0410 7 (256,448,3)
|
560 |
+
00040/0411 7 (256,448,3)
|
561 |
+
00040/0412 7 (256,448,3)
|
562 |
+
00040/0413 7 (256,448,3)
|
563 |
+
00040/0415 7 (256,448,3)
|
564 |
+
00040/0421 7 (256,448,3)
|
565 |
+
00040/0422 7 (256,448,3)
|
566 |
+
00040/0426 7 (256,448,3)
|
567 |
+
00040/0438 7 (256,448,3)
|
568 |
+
00040/0439 7 (256,448,3)
|
569 |
+
00040/0440 7 (256,448,3)
|
570 |
+
00040/0443 7 (256,448,3)
|
571 |
+
00040/0457 7 (256,448,3)
|
572 |
+
00040/0459 7 (256,448,3)
|
573 |
+
00040/0725 7 (256,448,3)
|
574 |
+
00040/0727 7 (256,448,3)
|
575 |
+
00040/0936 7 (256,448,3)
|
576 |
+
00040/0959 7 (256,448,3)
|
577 |
+
00040/0964 7 (256,448,3)
|
578 |
+
00040/0968 7 (256,448,3)
|
579 |
+
00040/0974 7 (256,448,3)
|
580 |
+
00040/0978 7 (256,448,3)
|
581 |
+
00040/0979 7 (256,448,3)
|
582 |
+
00040/0989 7 (256,448,3)
|
583 |
+
00040/0993 7 (256,448,3)
|
584 |
+
00040/0994 7 (256,448,3)
|
585 |
+
00040/0997 7 (256,448,3)
|
586 |
+
00041/0001 7 (256,448,3)
|
587 |
+
00041/0007 7 (256,448,3)
|
588 |
+
00041/0019 7 (256,448,3)
|
589 |
+
00041/0040 7 (256,448,3)
|
590 |
+
00041/0350 7 (256,448,3)
|
591 |
+
00041/0357 7 (256,448,3)
|
592 |
+
00041/0393 7 (256,448,3)
|
593 |
+
00041/0890 7 (256,448,3)
|
594 |
+
00041/0909 7 (256,448,3)
|
595 |
+
00041/0915 7 (256,448,3)
|
596 |
+
00041/0933 7 (256,448,3)
|
597 |
+
00042/0017 7 (256,448,3)
|
598 |
+
00042/0332 7 (256,448,3)
|
599 |
+
00042/0346 7 (256,448,3)
|
600 |
+
00042/0350 7 (256,448,3)
|
601 |
+
00042/0356 7 (256,448,3)
|
602 |
+
00042/0382 7 (256,448,3)
|
603 |
+
00042/0389 7 (256,448,3)
|
604 |
+
00042/0539 7 (256,448,3)
|
605 |
+
00042/0546 7 (256,448,3)
|
606 |
+
00042/0550 7 (256,448,3)
|
607 |
+
00042/0553 7 (256,448,3)
|
608 |
+
00042/0555 7 (256,448,3)
|
609 |
+
00042/0560 7 (256,448,3)
|
610 |
+
00042/0570 7 (256,448,3)
|
611 |
+
00043/0119 7 (256,448,3)
|
612 |
+
00043/0122 7 (256,448,3)
|
613 |
+
00043/0168 7 (256,448,3)
|
614 |
+
00043/0274 7 (256,448,3)
|
615 |
+
00043/0304 7 (256,448,3)
|
616 |
+
00043/0731 7 (256,448,3)
|
617 |
+
00043/0735 7 (256,448,3)
|
618 |
+
00043/0739 7 (256,448,3)
|
619 |
+
00043/0740 7 (256,448,3)
|
620 |
+
00044/0212 7 (256,448,3)
|
621 |
+
00044/0432 7 (256,448,3)
|
622 |
+
00044/0934 7 (256,448,3)
|
623 |
+
00044/0940 7 (256,448,3)
|
624 |
+
00044/0987 7 (256,448,3)
|
625 |
+
00045/0004 7 (256,448,3)
|
626 |
+
00045/0009 7 (256,448,3)
|
627 |
+
00045/0011 7 (256,448,3)
|
628 |
+
00045/0019 7 (256,448,3)
|
629 |
+
00045/0023 7 (256,448,3)
|
630 |
+
00045/0289 7 (256,448,3)
|
631 |
+
00045/0760 7 (256,448,3)
|
632 |
+
00045/0779 7 (256,448,3)
|
633 |
+
00045/0816 7 (256,448,3)
|
634 |
+
00045/0820 7 (256,448,3)
|
635 |
+
00046/0132 7 (256,448,3)
|
636 |
+
00046/0350 7 (256,448,3)
|
637 |
+
00046/0356 7 (256,448,3)
|
638 |
+
00046/0357 7 (256,448,3)
|
639 |
+
00046/0379 7 (256,448,3)
|
640 |
+
00046/0410 7 (256,448,3)
|
641 |
+
00046/0412 7 (256,448,3)
|
642 |
+
00046/0481 7 (256,448,3)
|
643 |
+
00046/0497 7 (256,448,3)
|
644 |
+
00046/0510 7 (256,448,3)
|
645 |
+
00046/0515 7 (256,448,3)
|
646 |
+
00046/0529 7 (256,448,3)
|
647 |
+
00046/0544 7 (256,448,3)
|
648 |
+
00046/0545 7 (256,448,3)
|
649 |
+
00046/0552 7 (256,448,3)
|
650 |
+
00046/0559 7 (256,448,3)
|
651 |
+
00046/0589 7 (256,448,3)
|
652 |
+
00046/0642 7 (256,448,3)
|
653 |
+
00046/0724 7 (256,448,3)
|
654 |
+
00046/0758 7 (256,448,3)
|
655 |
+
00046/0930 7 (256,448,3)
|
656 |
+
00046/0953 7 (256,448,3)
|
657 |
+
00047/0013 7 (256,448,3)
|
658 |
+
00047/0014 7 (256,448,3)
|
659 |
+
00047/0017 7 (256,448,3)
|
660 |
+
00047/0076 7 (256,448,3)
|
661 |
+
00047/0151 7 (256,448,3)
|
662 |
+
00047/0797 7 (256,448,3)
|
663 |
+
00048/0014 7 (256,448,3)
|
664 |
+
00048/0021 7 (256,448,3)
|
665 |
+
00048/0026 7 (256,448,3)
|
666 |
+
00048/0030 7 (256,448,3)
|
667 |
+
00048/0039 7 (256,448,3)
|
668 |
+
00048/0045 7 (256,448,3)
|
669 |
+
00048/0049 7 (256,448,3)
|
670 |
+
00048/0145 7 (256,448,3)
|
671 |
+
00048/0188 7 (256,448,3)
|
672 |
+
00048/0302 7 (256,448,3)
|
673 |
+
00048/0361 7 (256,448,3)
|
674 |
+
00048/0664 7 (256,448,3)
|
675 |
+
00048/0672 7 (256,448,3)
|
676 |
+
00048/0681 7 (256,448,3)
|
677 |
+
00048/0689 7 (256,448,3)
|
678 |
+
00048/0690 7 (256,448,3)
|
679 |
+
00048/0691 7 (256,448,3)
|
680 |
+
00048/0711 7 (256,448,3)
|
681 |
+
00049/0085 7 (256,448,3)
|
682 |
+
00049/0810 7 (256,448,3)
|
683 |
+
00049/0858 7 (256,448,3)
|
684 |
+
00049/0865 7 (256,448,3)
|
685 |
+
00049/0871 7 (256,448,3)
|
686 |
+
00049/0903 7 (256,448,3)
|
687 |
+
00049/0928 7 (256,448,3)
|
688 |
+
00050/0092 7 (256,448,3)
|
689 |
+
00050/0101 7 (256,448,3)
|
690 |
+
00050/0108 7 (256,448,3)
|
691 |
+
00050/0112 7 (256,448,3)
|
692 |
+
00050/0120 7 (256,448,3)
|
693 |
+
00050/0128 7 (256,448,3)
|
694 |
+
00050/0383 7 (256,448,3)
|
695 |
+
00050/0395 7 (256,448,3)
|
696 |
+
00050/0405 7 (256,448,3)
|
697 |
+
00050/0632 7 (256,448,3)
|
698 |
+
00050/0648 7 (256,448,3)
|
699 |
+
00050/0649 7 (256,448,3)
|
700 |
+
00050/0659 7 (256,448,3)
|
701 |
+
00050/0699 7 (256,448,3)
|
702 |
+
00050/0708 7 (256,448,3)
|
703 |
+
00050/0716 7 (256,448,3)
|
704 |
+
00050/0758 7 (256,448,3)
|
705 |
+
00050/0761 7 (256,448,3)
|
706 |
+
00051/0572 7 (256,448,3)
|
707 |
+
00052/0163 7 (256,448,3)
|
708 |
+
00052/0242 7 (256,448,3)
|
709 |
+
00052/0260 7 (256,448,3)
|
710 |
+
00052/0322 7 (256,448,3)
|
711 |
+
00052/0333 7 (256,448,3)
|
712 |
+
00052/0806 7 (256,448,3)
|
713 |
+
00052/0813 7 (256,448,3)
|
714 |
+
00052/0821 7 (256,448,3)
|
715 |
+
00052/0830 7 (256,448,3)
|
716 |
+
00052/0914 7 (256,448,3)
|
717 |
+
00052/0923 7 (256,448,3)
|
718 |
+
00052/0959 7 (256,448,3)
|
719 |
+
00053/0288 7 (256,448,3)
|
720 |
+
00053/0290 7 (256,448,3)
|
721 |
+
00053/0323 7 (256,448,3)
|
722 |
+
00053/0337 7 (256,448,3)
|
723 |
+
00053/0340 7 (256,448,3)
|
724 |
+
00053/0437 7 (256,448,3)
|
725 |
+
00053/0595 7 (256,448,3)
|
726 |
+
00053/0739 7 (256,448,3)
|
727 |
+
00053/0761 7 (256,448,3)
|
728 |
+
00054/0014 7 (256,448,3)
|
729 |
+
00054/0017 7 (256,448,3)
|
730 |
+
00054/0178 7 (256,448,3)
|
731 |
+
00054/0183 7 (256,448,3)
|
732 |
+
00054/0196 7 (256,448,3)
|
733 |
+
00054/0205 7 (256,448,3)
|
734 |
+
00054/0214 7 (256,448,3)
|
735 |
+
00054/0289 7 (256,448,3)
|
736 |
+
00054/0453 7 (256,448,3)
|
737 |
+
00054/0498 7 (256,448,3)
|
738 |
+
00054/0502 7 (256,448,3)
|
739 |
+
00054/0514 7 (256,448,3)
|
740 |
+
00054/0773 7 (256,448,3)
|
741 |
+
00055/0001 7 (256,448,3)
|
742 |
+
00055/0115 7 (256,448,3)
|
743 |
+
00055/0118 7 (256,448,3)
|
744 |
+
00055/0171 7 (256,448,3)
|
745 |
+
00055/0214 7 (256,448,3)
|
746 |
+
00055/0354 7 (256,448,3)
|
747 |
+
00055/0449 7 (256,448,3)
|
748 |
+
00055/0473 7 (256,448,3)
|
749 |
+
00055/0649 7 (256,448,3)
|
750 |
+
00055/0800 7 (256,448,3)
|
751 |
+
00055/0803 7 (256,448,3)
|
752 |
+
00055/0990 7 (256,448,3)
|
753 |
+
00056/0041 7 (256,448,3)
|
754 |
+
00056/0120 7 (256,448,3)
|
755 |
+
00056/0293 7 (256,448,3)
|
756 |
+
00056/0357 7 (256,448,3)
|
757 |
+
00056/0506 7 (256,448,3)
|
758 |
+
00056/0561 7 (256,448,3)
|
759 |
+
00056/0567 7 (256,448,3)
|
760 |
+
00056/0575 7 (256,448,3)
|
761 |
+
00057/0175 7 (256,448,3)
|
762 |
+
00057/0495 7 (256,448,3)
|
763 |
+
00057/0498 7 (256,448,3)
|
764 |
+
00057/0506 7 (256,448,3)
|
765 |
+
00057/0612 7 (256,448,3)
|
766 |
+
00057/0620 7 (256,448,3)
|
767 |
+
00057/0623 7 (256,448,3)
|
768 |
+
00057/0635 7 (256,448,3)
|
769 |
+
00057/0773 7 (256,448,3)
|
770 |
+
00057/0778 7 (256,448,3)
|
771 |
+
00057/0867 7 (256,448,3)
|
772 |
+
00057/0976 7 (256,448,3)
|
773 |
+
00057/0980 7 (256,448,3)
|
774 |
+
00057/0985 7 (256,448,3)
|
775 |
+
00057/0992 7 (256,448,3)
|
776 |
+
00058/0009 7 (256,448,3)
|
777 |
+
00058/0076 7 (256,448,3)
|
778 |
+
00058/0078 7 (256,448,3)
|
779 |
+
00058/0279 7 (256,448,3)
|
780 |
+
00058/0283 7 (256,448,3)
|
781 |
+
00058/0286 7 (256,448,3)
|
782 |
+
00058/0350 7 (256,448,3)
|
783 |
+
00058/0380 7 (256,448,3)
|
784 |
+
00061/0132 7 (256,448,3)
|
785 |
+
00061/0141 7 (256,448,3)
|
786 |
+
00061/0156 7 (256,448,3)
|
787 |
+
00061/0159 7 (256,448,3)
|
788 |
+
00061/0168 7 (256,448,3)
|
789 |
+
00061/0170 7 (256,448,3)
|
790 |
+
00061/0186 7 (256,448,3)
|
791 |
+
00061/0219 7 (256,448,3)
|
792 |
+
00061/0227 7 (256,448,3)
|
793 |
+
00061/0238 7 (256,448,3)
|
794 |
+
00061/0256 7 (256,448,3)
|
795 |
+
00061/0303 7 (256,448,3)
|
796 |
+
00061/0312 7 (256,448,3)
|
797 |
+
00061/0313 7 (256,448,3)
|
798 |
+
00061/0325 7 (256,448,3)
|
799 |
+
00061/0367 7 (256,448,3)
|
800 |
+
00061/0369 7 (256,448,3)
|
801 |
+
00061/0387 7 (256,448,3)
|
802 |
+
00061/0396 7 (256,448,3)
|
803 |
+
00061/0486 7 (256,448,3)
|
804 |
+
00061/0895 7 (256,448,3)
|
805 |
+
00061/0897 7 (256,448,3)
|
806 |
+
00062/0846 7 (256,448,3)
|
807 |
+
00063/0156 7 (256,448,3)
|
808 |
+
00063/0184 7 (256,448,3)
|
809 |
+
00063/0191 7 (256,448,3)
|
810 |
+
00063/0334 7 (256,448,3)
|
811 |
+
00063/0350 7 (256,448,3)
|
812 |
+
00063/0499 7 (256,448,3)
|
813 |
+
00063/0878 7 (256,448,3)
|
814 |
+
00064/0004 7 (256,448,3)
|
815 |
+
00064/0264 7 (256,448,3)
|
816 |
+
00064/0735 7 (256,448,3)
|
817 |
+
00064/0738 7 (256,448,3)
|
818 |
+
00065/0105 7 (256,448,3)
|
819 |
+
00065/0169 7 (256,448,3)
|
820 |
+
00065/0305 7 (256,448,3)
|
821 |
+
00065/0324 7 (256,448,3)
|
822 |
+
00065/0353 7 (256,448,3)
|
823 |
+
00065/0520 7 (256,448,3)
|
824 |
+
00065/0533 7 (256,448,3)
|
825 |
+
00065/0545 7 (256,448,3)
|
826 |
+
00065/0551 7 (256,448,3)
|
827 |
+
00065/0568 7 (256,448,3)
|
828 |
+
00065/0603 7 (256,448,3)
|
829 |
+
00065/0884 7 (256,448,3)
|
830 |
+
00065/0988 7 (256,448,3)
|
831 |
+
00066/0002 7 (256,448,3)
|
832 |
+
00066/0011 7 (256,448,3)
|
833 |
+
00066/0031 7 (256,448,3)
|
834 |
+
00066/0037 7 (256,448,3)
|
835 |
+
00066/0136 7 (256,448,3)
|
836 |
+
00066/0137 7 (256,448,3)
|
837 |
+
00066/0150 7 (256,448,3)
|
838 |
+
00066/0166 7 (256,448,3)
|
839 |
+
00066/0178 7 (256,448,3)
|
840 |
+
00066/0357 7 (256,448,3)
|
841 |
+
00066/0428 7 (256,448,3)
|
842 |
+
00066/0483 7 (256,448,3)
|
843 |
+
00066/0600 7 (256,448,3)
|
844 |
+
00066/0863 7 (256,448,3)
|
845 |
+
00066/0873 7 (256,448,3)
|
846 |
+
00066/0875 7 (256,448,3)
|
847 |
+
00066/0899 7 (256,448,3)
|
848 |
+
00067/0020 7 (256,448,3)
|
849 |
+
00067/0025 7 (256,448,3)
|
850 |
+
00067/0132 7 (256,448,3)
|
851 |
+
00067/0492 7 (256,448,3)
|
852 |
+
00067/0726 7 (256,448,3)
|
853 |
+
00067/0734 7 (256,448,3)
|
854 |
+
00067/0744 7 (256,448,3)
|
855 |
+
00067/0754 7 (256,448,3)
|
856 |
+
00067/0779 7 (256,448,3)
|
857 |
+
00068/0078 7 (256,448,3)
|
858 |
+
00068/0083 7 (256,448,3)
|
859 |
+
00068/0113 7 (256,448,3)
|
860 |
+
00068/0117 7 (256,448,3)
|
861 |
+
00068/0121 7 (256,448,3)
|
862 |
+
00068/0206 7 (256,448,3)
|
863 |
+
00068/0261 7 (256,448,3)
|
864 |
+
00068/0321 7 (256,448,3)
|
865 |
+
00068/0354 7 (256,448,3)
|
866 |
+
00068/0380 7 (256,448,3)
|
867 |
+
00068/0419 7 (256,448,3)
|
868 |
+
00068/0547 7 (256,448,3)
|
869 |
+
00068/0561 7 (256,448,3)
|
870 |
+
00068/0565 7 (256,448,3)
|
871 |
+
00068/0583 7 (256,448,3)
|
872 |
+
00068/0599 7 (256,448,3)
|
873 |
+
00068/0739 7 (256,448,3)
|
874 |
+
00068/0743 7 (256,448,3)
|
875 |
+
00068/0754 7 (256,448,3)
|
876 |
+
00068/0812 7 (256,448,3)
|
877 |
+
00069/0178 7 (256,448,3)
|
878 |
+
00070/0025 7 (256,448,3)
|
879 |
+
00070/0030 7 (256,448,3)
|
880 |
+
00070/0036 7 (256,448,3)
|
881 |
+
00070/0042 7 (256,448,3)
|
882 |
+
00070/0078 7 (256,448,3)
|
883 |
+
00070/0079 7 (256,448,3)
|
884 |
+
00070/0362 7 (256,448,3)
|
885 |
+
00071/0195 7 (256,448,3)
|
886 |
+
00071/0210 7 (256,448,3)
|
887 |
+
00071/0211 7 (256,448,3)
|
888 |
+
00071/0221 7 (256,448,3)
|
889 |
+
00071/0352 7 (256,448,3)
|
890 |
+
00071/0354 7 (256,448,3)
|
891 |
+
00071/0366 7 (256,448,3)
|
892 |
+
00071/0454 7 (256,448,3)
|
893 |
+
00071/0464 7 (256,448,3)
|
894 |
+
00071/0487 7 (256,448,3)
|
895 |
+
00071/0502 7 (256,448,3)
|
896 |
+
00071/0561 7 (256,448,3)
|
897 |
+
00071/0676 7 (256,448,3)
|
898 |
+
00071/0808 7 (256,448,3)
|
899 |
+
00071/0813 7 (256,448,3)
|
900 |
+
00071/0836 7 (256,448,3)
|
901 |
+
00072/0286 7 (256,448,3)
|
902 |
+
00072/0290 7 (256,448,3)
|
903 |
+
00072/0298 7 (256,448,3)
|
904 |
+
00072/0302 7 (256,448,3)
|
905 |
+
00072/0333 7 (256,448,3)
|
906 |
+
00072/0590 7 (256,448,3)
|
907 |
+
00072/0793 7 (256,448,3)
|
908 |
+
00072/0803 7 (256,448,3)
|
909 |
+
00072/0833 7 (256,448,3)
|
910 |
+
00073/0049 7 (256,448,3)
|
911 |
+
00073/0050 7 (256,448,3)
|
912 |
+
00073/0388 7 (256,448,3)
|
913 |
+
00073/0480 7 (256,448,3)
|
914 |
+
00073/0485 7 (256,448,3)
|
915 |
+
00073/0611 7 (256,448,3)
|
916 |
+
00073/0616 7 (256,448,3)
|
917 |
+
00073/0714 7 (256,448,3)
|
918 |
+
00073/0724 7 (256,448,3)
|
919 |
+
00073/0730 7 (256,448,3)
|
920 |
+
00074/0034 7 (256,448,3)
|
921 |
+
00074/0228 7 (256,448,3)
|
922 |
+
00074/0239 7 (256,448,3)
|
923 |
+
00074/0275 7 (256,448,3)
|
924 |
+
00074/0527 7 (256,448,3)
|
925 |
+
00074/0620 7 (256,448,3)
|
926 |
+
00074/0764 7 (256,448,3)
|
927 |
+
00074/0849 7 (256,448,3)
|
928 |
+
00074/0893 7 (256,448,3)
|
929 |
+
00075/0333 7 (256,448,3)
|
930 |
+
00075/0339 7 (256,448,3)
|
931 |
+
00075/0347 7 (256,448,3)
|
932 |
+
00075/0399 7 (256,448,3)
|
933 |
+
00075/0478 7 (256,448,3)
|
934 |
+
00075/0494 7 (256,448,3)
|
935 |
+
00075/0678 7 (256,448,3)
|
936 |
+
00075/0688 7 (256,448,3)
|
937 |
+
00075/0706 7 (256,448,3)
|
938 |
+
00075/0709 7 (256,448,3)
|
939 |
+
00075/0748 7 (256,448,3)
|
940 |
+
00075/0769 7 (256,448,3)
|
941 |
+
00075/0777 7 (256,448,3)
|
942 |
+
00075/0781 7 (256,448,3)
|
943 |
+
00076/0151 7 (256,448,3)
|
944 |
+
00076/0159 7 (256,448,3)
|
945 |
+
00076/0164 7 (256,448,3)
|
946 |
+
00076/0265 7 (256,448,3)
|
947 |
+
00076/0269 7 (256,448,3)
|
948 |
+
00076/0433 7 (256,448,3)
|
949 |
+
00076/0813 7 (256,448,3)
|
950 |
+
00076/0817 7 (256,448,3)
|
951 |
+
00076/0818 7 (256,448,3)
|
952 |
+
00076/0827 7 (256,448,3)
|
953 |
+
00076/0874 7 (256,448,3)
|
954 |
+
00076/0880 7 (256,448,3)
|
955 |
+
00076/0891 7 (256,448,3)
|
956 |
+
00076/0894 7 (256,448,3)
|
957 |
+
00076/0909 7 (256,448,3)
|
958 |
+
00076/0913 7 (256,448,3)
|
959 |
+
00076/0926 7 (256,448,3)
|
960 |
+
00076/0962 7 (256,448,3)
|
961 |
+
00076/0973 7 (256,448,3)
|
962 |
+
00076/0986 7 (256,448,3)
|
963 |
+
00077/0617 7 (256,448,3)
|
964 |
+
00077/0623 7 (256,448,3)
|
965 |
+
00077/0628 7 (256,448,3)
|
966 |
+
00077/0629 7 (256,448,3)
|
967 |
+
00077/0631 7 (256,448,3)
|
968 |
+
00077/0639 7 (256,448,3)
|
969 |
+
00077/0982 7 (256,448,3)
|
970 |
+
00077/0984 7 (256,448,3)
|
971 |
+
00077/0995 7 (256,448,3)
|
972 |
+
00077/0998 7 (256,448,3)
|
973 |
+
00078/0001 7 (256,448,3)
|
974 |
+
00078/0015 7 (256,448,3)
|
975 |
+
00078/0157 7 (256,448,3)
|
976 |
+
00078/0161 7 (256,448,3)
|
977 |
+
00078/0175 7 (256,448,3)
|
978 |
+
00078/0178 7 (256,448,3)
|
979 |
+
00078/0189 7 (256,448,3)
|
980 |
+
00078/0192 7 (256,448,3)
|
981 |
+
00078/0229 7 (256,448,3)
|
982 |
+
00078/0237 7 (256,448,3)
|
983 |
+
00078/0241 7 (256,448,3)
|
984 |
+
00078/0249 7 (256,448,3)
|
985 |
+
00078/0251 7 (256,448,3)
|
986 |
+
00078/0254 7 (256,448,3)
|
987 |
+
00078/0258 7 (256,448,3)
|
988 |
+
00078/0311 7 (256,448,3)
|
989 |
+
00078/0603 7 (256,448,3)
|
990 |
+
00078/0607 7 (256,448,3)
|
991 |
+
00078/0824 7 (256,448,3)
|
992 |
+
00079/0045 7 (256,448,3)
|
993 |
+
00079/0048 7 (256,448,3)
|
994 |
+
00079/0054 7 (256,448,3)
|
995 |
+
00080/0050 7 (256,448,3)
|
996 |
+
00080/0488 7 (256,448,3)
|
997 |
+
00080/0494 7 (256,448,3)
|
998 |
+
00080/0496 7 (256,448,3)
|
999 |
+
00080/0499 7 (256,448,3)
|
1000 |
+
00080/0502 7 (256,448,3)
|
1001 |
+
00080/0510 7 (256,448,3)
|
1002 |
+
00080/0534 7 (256,448,3)
|
1003 |
+
00080/0558 7 (256,448,3)
|
1004 |
+
00080/0571 7 (256,448,3)
|
1005 |
+
00080/0709 7 (256,448,3)
|
1006 |
+
00080/0882 7 (256,448,3)
|
1007 |
+
00081/0322 7 (256,448,3)
|
1008 |
+
00081/0428 7 (256,448,3)
|
1009 |
+
00081/0700 7 (256,448,3)
|
1010 |
+
00081/0706 7 (256,448,3)
|
1011 |
+
00081/0707 7 (256,448,3)
|
1012 |
+
00081/0937 7 (256,448,3)
|
1013 |
+
00082/0021 7 (256,448,3)
|
1014 |
+
00082/0424 7 (256,448,3)
|
1015 |
+
00082/0794 7 (256,448,3)
|
1016 |
+
00082/0807 7 (256,448,3)
|
1017 |
+
00082/0810 7 (256,448,3)
|
1018 |
+
00082/0824 7 (256,448,3)
|
1019 |
+
00083/0129 7 (256,448,3)
|
1020 |
+
00083/0131 7 (256,448,3)
|
1021 |
+
00083/0249 7 (256,448,3)
|
1022 |
+
00083/0250 7 (256,448,3)
|
1023 |
+
00083/0656 7 (256,448,3)
|
1024 |
+
00083/0812 7 (256,448,3)
|
1025 |
+
00083/0819 7 (256,448,3)
|
1026 |
+
00083/0824 7 (256,448,3)
|
1027 |
+
00083/0827 7 (256,448,3)
|
1028 |
+
00083/0841 7 (256,448,3)
|
1029 |
+
00083/0963 7 (256,448,3)
|
1030 |
+
00084/0047 7 (256,448,3)
|
1031 |
+
00084/0319 7 (256,448,3)
|
1032 |
+
00084/0334 7 (256,448,3)
|
1033 |
+
00084/0363 7 (256,448,3)
|
1034 |
+
00084/0493 7 (256,448,3)
|
1035 |
+
00084/0655 7 (256,448,3)
|
1036 |
+
00084/0752 7 (256,448,3)
|
1037 |
+
00084/0813 7 (256,448,3)
|
1038 |
+
00084/0886 7 (256,448,3)
|
1039 |
+
00084/0948 7 (256,448,3)
|
1040 |
+
00084/0976 7 (256,448,3)
|
1041 |
+
00085/0512 7 (256,448,3)
|
1042 |
+
00085/0641 7 (256,448,3)
|
1043 |
+
00085/0653 7 (256,448,3)
|
1044 |
+
00085/0655 7 (256,448,3)
|
1045 |
+
00085/0697 7 (256,448,3)
|
1046 |
+
00085/0698 7 (256,448,3)
|
1047 |
+
00085/0700 7 (256,448,3)
|
1048 |
+
00085/0703 7 (256,448,3)
|
1049 |
+
00085/0705 7 (256,448,3)
|
1050 |
+
00085/0709 7 (256,448,3)
|
1051 |
+
00085/0713 7 (256,448,3)
|
1052 |
+
00085/0739 7 (256,448,3)
|
1053 |
+
00085/0750 7 (256,448,3)
|
1054 |
+
00085/0763 7 (256,448,3)
|
1055 |
+
00085/0765 7 (256,448,3)
|
1056 |
+
00085/0769 7 (256,448,3)
|
1057 |
+
00085/0863 7 (256,448,3)
|
1058 |
+
00085/0868 7 (256,448,3)
|
1059 |
+
00085/0927 7 (256,448,3)
|
1060 |
+
00085/0936 7 (256,448,3)
|
1061 |
+
00085/0965 7 (256,448,3)
|
1062 |
+
00085/0969 7 (256,448,3)
|
1063 |
+
00085/0974 7 (256,448,3)
|
1064 |
+
00085/0981 7 (256,448,3)
|
1065 |
+
00085/0982 7 (256,448,3)
|
1066 |
+
00085/1000 7 (256,448,3)
|
1067 |
+
00086/0003 7 (256,448,3)
|
1068 |
+
00086/0009 7 (256,448,3)
|
1069 |
+
00086/0011 7 (256,448,3)
|
1070 |
+
00086/0028 7 (256,448,3)
|
1071 |
+
00086/0032 7 (256,448,3)
|
1072 |
+
00086/0034 7 (256,448,3)
|
1073 |
+
00086/0035 7 (256,448,3)
|
1074 |
+
00086/0042 7 (256,448,3)
|
1075 |
+
00086/0064 7 (256,448,3)
|
1076 |
+
00086/0066 7 (256,448,3)
|
1077 |
+
00086/0095 7 (256,448,3)
|
1078 |
+
00086/0099 7 (256,448,3)
|
1079 |
+
00086/0101 7 (256,448,3)
|
1080 |
+
00086/0104 7 (256,448,3)
|
1081 |
+
00086/0115 7 (256,448,3)
|
1082 |
+
00086/0116 7 (256,448,3)
|
1083 |
+
00086/0284 7 (256,448,3)
|
1084 |
+
00086/0291 7 (256,448,3)
|
1085 |
+
00086/0295 7 (256,448,3)
|
1086 |
+
00086/0302 7 (256,448,3)
|
1087 |
+
00086/0318 7 (256,448,3)
|
1088 |
+
00086/0666 7 (256,448,3)
|
1089 |
+
00086/0797 7 (256,448,3)
|
1090 |
+
00086/0851 7 (256,448,3)
|
1091 |
+
00086/0855 7 (256,448,3)
|
1092 |
+
00086/0874 7 (256,448,3)
|
1093 |
+
00086/0878 7 (256,448,3)
|
1094 |
+
00086/0881 7 (256,448,3)
|
1095 |
+
00086/0883 7 (256,448,3)
|
1096 |
+
00086/0896 7 (256,448,3)
|
1097 |
+
00086/0899 7 (256,448,3)
|
1098 |
+
00086/0903 7 (256,448,3)
|
1099 |
+
00086/0989 7 (256,448,3)
|
1100 |
+
00087/0008 7 (256,448,3)
|
1101 |
+
00087/0429 7 (256,448,3)
|
1102 |
+
00087/0511 7 (256,448,3)
|
1103 |
+
00088/0241 7 (256,448,3)
|
1104 |
+
00088/0319 7 (256,448,3)
|
1105 |
+
00088/0323 7 (256,448,3)
|
1106 |
+
00088/0411 7 (256,448,3)
|
1107 |
+
00088/0427 7 (256,448,3)
|
1108 |
+
00088/0452 7 (256,448,3)
|
1109 |
+
00088/0463 7 (256,448,3)
|
1110 |
+
00088/0476 7 (256,448,3)
|
1111 |
+
00088/0496 7 (256,448,3)
|
1112 |
+
00088/0559 7 (256,448,3)
|
1113 |
+
00089/0058 7 (256,448,3)
|
1114 |
+
00089/0061 7 (256,448,3)
|
1115 |
+
00089/0069 7 (256,448,3)
|
1116 |
+
00089/0077 7 (256,448,3)
|
1117 |
+
00089/0096 7 (256,448,3)
|
1118 |
+
00089/0099 7 (256,448,3)
|
1119 |
+
00089/0100 7 (256,448,3)
|
1120 |
+
00089/0211 7 (256,448,3)
|
1121 |
+
00089/0380 7 (256,448,3)
|
1122 |
+
00089/0381 7 (256,448,3)
|
1123 |
+
00089/0384 7 (256,448,3)
|
1124 |
+
00089/0390 7 (256,448,3)
|
1125 |
+
00089/0393 7 (256,448,3)
|
1126 |
+
00089/0394 7 (256,448,3)
|
1127 |
+
00089/0395 7 (256,448,3)
|
1128 |
+
00089/0406 7 (256,448,3)
|
1129 |
+
00089/0410 7 (256,448,3)
|
1130 |
+
00089/0412 7 (256,448,3)
|
1131 |
+
00089/0703 7 (256,448,3)
|
1132 |
+
00089/0729 7 (256,448,3)
|
1133 |
+
00089/0930 7 (256,448,3)
|
1134 |
+
00089/0952 7 (256,448,3)
|
1135 |
+
00090/0062 7 (256,448,3)
|
1136 |
+
00090/0101 7 (256,448,3)
|
1137 |
+
00090/0213 7 (256,448,3)
|
1138 |
+
00090/0216 7 (256,448,3)
|
1139 |
+
00090/0268 7 (256,448,3)
|
1140 |
+
00090/0406 7 (256,448,3)
|
1141 |
+
00090/0411 7 (256,448,3)
|
1142 |
+
00090/0442 7 (256,448,3)
|
1143 |
+
00090/0535 7 (256,448,3)
|
1144 |
+
00090/0542 7 (256,448,3)
|
1145 |
+
00090/0571 7 (256,448,3)
|
1146 |
+
00090/0934 7 (256,448,3)
|
1147 |
+
00090/0938 7 (256,448,3)
|
1148 |
+
00090/0947 7 (256,448,3)
|
1149 |
+
00091/0066 7 (256,448,3)
|
1150 |
+
00091/0448 7 (256,448,3)
|
1151 |
+
00091/0451 7 (256,448,3)
|
1152 |
+
00091/0454 7 (256,448,3)
|
1153 |
+
00091/0457 7 (256,448,3)
|
1154 |
+
00091/0467 7 (256,448,3)
|
1155 |
+
00091/0470 7 (256,448,3)
|
1156 |
+
00091/0477 7 (256,448,3)
|
1157 |
+
00091/0583 7 (256,448,3)
|
1158 |
+
00091/0981 7 (256,448,3)
|
1159 |
+
00091/0994 7 (256,448,3)
|
1160 |
+
00092/0112 7 (256,448,3)
|
1161 |
+
00092/0119 7 (256,448,3)
|
1162 |
+
00092/0129 7 (256,448,3)
|
1163 |
+
00092/0146 7 (256,448,3)
|
1164 |
+
00092/0149 7 (256,448,3)
|
1165 |
+
00092/0608 7 (256,448,3)
|
1166 |
+
00092/0643 7 (256,448,3)
|
1167 |
+
00092/0646 7 (256,448,3)
|
1168 |
+
00092/0766 7 (256,448,3)
|
1169 |
+
00092/0768 7 (256,448,3)
|
1170 |
+
00092/0779 7 (256,448,3)
|
1171 |
+
00093/0081 7 (256,448,3)
|
1172 |
+
00093/0085 7 (256,448,3)
|
1173 |
+
00093/0135 7 (256,448,3)
|
1174 |
+
00093/0241 7 (256,448,3)
|
1175 |
+
00093/0277 7 (256,448,3)
|
1176 |
+
00093/0283 7 (256,448,3)
|
1177 |
+
00093/0320 7 (256,448,3)
|
1178 |
+
00093/0598 7 (256,448,3)
|
1179 |
+
00094/0159 7 (256,448,3)
|
1180 |
+
00094/0253 7 (256,448,3)
|
1181 |
+
00094/0265 7 (256,448,3)
|
1182 |
+
00094/0267 7 (256,448,3)
|
1183 |
+
00094/0269 7 (256,448,3)
|
1184 |
+
00094/0281 7 (256,448,3)
|
1185 |
+
00094/0293 7 (256,448,3)
|
1186 |
+
00094/0404 7 (256,448,3)
|
1187 |
+
00094/0593 7 (256,448,3)
|
1188 |
+
00094/0612 7 (256,448,3)
|
1189 |
+
00094/0638 7 (256,448,3)
|
1190 |
+
00094/0656 7 (256,448,3)
|
1191 |
+
00094/0668 7 (256,448,3)
|
1192 |
+
00094/0786 7 (256,448,3)
|
1193 |
+
00094/0870 7 (256,448,3)
|
1194 |
+
00094/0897 7 (256,448,3)
|
1195 |
+
00094/0900 7 (256,448,3)
|
1196 |
+
00094/0944 7 (256,448,3)
|
1197 |
+
00094/0946 7 (256,448,3)
|
1198 |
+
00094/0952 7 (256,448,3)
|
1199 |
+
00094/0969 7 (256,448,3)
|
1200 |
+
00094/0973 7 (256,448,3)
|
1201 |
+
00094/0981 7 (256,448,3)
|
1202 |
+
00095/0088 7 (256,448,3)
|
1203 |
+
00095/0125 7 (256,448,3)
|
1204 |
+
00095/0130 7 (256,448,3)
|
1205 |
+
00095/0142 7 (256,448,3)
|
1206 |
+
00095/0151 7 (256,448,3)
|
1207 |
+
00095/0180 7 (256,448,3)
|
1208 |
+
00095/0192 7 (256,448,3)
|
1209 |
+
00095/0194 7 (256,448,3)
|
1210 |
+
00095/0195 7 (256,448,3)
|
1211 |
+
00095/0204 7 (256,448,3)
|
1212 |
+
00095/0245 7 (256,448,3)
|
1213 |
+
00095/0315 7 (256,448,3)
|
1214 |
+
00095/0321 7 (256,448,3)
|
1215 |
+
00095/0324 7 (256,448,3)
|
1216 |
+
00095/0327 7 (256,448,3)
|
1217 |
+
00095/0730 7 (256,448,3)
|
1218 |
+
00095/0731 7 (256,448,3)
|
1219 |
+
00095/0741 7 (256,448,3)
|
1220 |
+
00095/0948 7 (256,448,3)
|
1221 |
+
00096/0407 7 (256,448,3)
|
1222 |
+
00096/0420 7 (256,448,3)
|
1223 |
+
00096/0435 7 (256,448,3)
|
1224 |
+
00096/0682 7 (256,448,3)
|
1225 |
+
00096/0865 7 (256,448,3)
|
basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt
ADDED
The diff for this file is too large to render.
See raw diff
basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt
ADDED
@@ -0,0 +1,1613 @@
1 |
+
00001/0266 7 (256,448,3)
|
2 |
+
00001/0268 7 (256,448,3)
|
3 |
+
00001/0275 7 (256,448,3)
|
4 |
+
00001/0278 7 (256,448,3)
|
5 |
+
00001/0287 7 (256,448,3)
|
6 |
+
00001/0291 7 (256,448,3)
|
7 |
+
00001/0627 7 (256,448,3)
|
8 |
+
00001/0636 7 (256,448,3)
|
9 |
+
00001/0804 7 (256,448,3)
|
10 |
+
00001/0837 7 (256,448,3)
|
11 |
+
00001/0849 7 (256,448,3)
|
12 |
+
00001/0851 7 (256,448,3)
|
13 |
+
00001/0852 7 (256,448,3)
|
14 |
+
00001/0986 7 (256,448,3)
|
15 |
+
00001/0991 7 (256,448,3)
|
16 |
+
00002/0007 7 (256,448,3)
|
17 |
+
00002/0008 7 (256,448,3)
|
18 |
+
00002/0016 7 (256,448,3)
|
19 |
+
00002/0036 7 (256,448,3)
|
20 |
+
00002/0091 7 (256,448,3)
|
21 |
+
00002/0093 7 (256,448,3)
|
22 |
+
00002/0209 7 (256,448,3)
|
23 |
+
00002/0235 7 (256,448,3)
|
24 |
+
00002/0236 7 (256,448,3)
|
25 |
+
00002/0241 7 (256,448,3)
|
26 |
+
00002/0466 7 (256,448,3)
|
27 |
+
00002/0504 7 (256,448,3)
|
28 |
+
00002/0960 7 (256,448,3)
|
29 |
+
00002/0961 7 (256,448,3)
|
30 |
+
00002/0964 7 (256,448,3)
|
31 |
+
00003/0007 7 (256,448,3)
|
32 |
+
00003/0069 7 (256,448,3)
|
33 |
+
00003/0345 7 (256,448,3)
|
34 |
+
00003/0347 7 (256,448,3)
|
35 |
+
00003/0372 7 (256,448,3)
|
36 |
+
00003/0525 7 (256,448,3)
|
37 |
+
00003/0652 7 (256,448,3)
|
38 |
+
00003/0667 7 (256,448,3)
|
39 |
+
00003/0669 7 (256,448,3)
|
40 |
+
00003/0706 7 (256,448,3)
|
41 |
+
00003/0713 7 (256,448,3)
|
42 |
+
00003/0721 7 (256,448,3)
|
43 |
+
00003/0747 7 (256,448,3)
|
44 |
+
00003/0829 7 (256,448,3)
|
45 |
+
00003/0916 7 (256,448,3)
|
46 |
+
00003/0918 7 (256,448,3)
|
47 |
+
00003/0924 7 (256,448,3)
|
48 |
+
00003/0926 7 (256,448,3)
|
49 |
+
00003/0927 7 (256,448,3)
|
50 |
+
00004/0288 7 (256,448,3)
|
51 |
+
00004/0303 7 (256,448,3)
|
52 |
+
00004/0307 7 (256,448,3)
|
53 |
+
00004/0628 7 (256,448,3)
|
54 |
+
00004/0713 7 (256,448,3)
|
55 |
+
00004/0715 7 (256,448,3)
|
56 |
+
00004/0719 7 (256,448,3)
|
57 |
+
00004/0727 7 (256,448,3)
|
58 |
+
00004/0821 7 (256,448,3)
|
59 |
+
00005/0006 7 (256,448,3)
|
60 |
+
00005/0007 7 (256,448,3)
|
61 |
+
00005/0012 7 (256,448,3)
|
62 |
+
00005/0013 7 (256,448,3)
|
63 |
+
00005/0040 7 (256,448,3)
|
64 |
+
00005/0055 7 (256,448,3)
|
65 |
+
00005/0119 7 (256,448,3)
|
66 |
+
00005/0130 7 (256,448,3)
|
67 |
+
00005/0185 7 (256,448,3)
|
68 |
+
00005/0198 7 (256,448,3)
|
69 |
+
00005/0270 7 (256,448,3)
|
70 |
+
00005/0541 7 (256,448,3)
|
71 |
+
00005/0560 7 (256,448,3)
|
72 |
+
00005/0660 7 (256,448,3)
|
73 |
+
00005/0682 7 (256,448,3)
|
74 |
+
00005/0683 7 (256,448,3)
|
75 |
+
00005/0688 7 (256,448,3)
|
76 |
+
00005/0706 7 (256,448,3)
|
77 |
+
00005/0728 7 (256,448,3)
|
78 |
+
00005/0732 7 (256,448,3)
|
79 |
+
00005/0739 7 (256,448,3)
|
80 |
+
00005/0804 7 (256,448,3)
|
81 |
+
00005/0805 7 (256,448,3)
|
82 |
+
00005/0827 7 (256,448,3)
|
83 |
+
00005/0828 7 (256,448,3)
|
84 |
+
00005/0857 7 (256,448,3)
|
85 |
+
00005/0861 7 (256,448,3)
|
86 |
+
00005/0862 7 (256,448,3)
|
87 |
+
00005/0868 7 (256,448,3)
|
88 |
+
00005/0872 7 (256,448,3)
|
89 |
+
00005/0933 7 (256,448,3)
|
90 |
+
00005/0958 7 (256,448,3)
|
91 |
+
00005/0960 7 (256,448,3)
|
92 |
+
00006/0087 7 (256,448,3)
|
93 |
+
00006/0090 7 (256,448,3)
|
94 |
+
00006/0351 7 (256,448,3)
|
95 |
+
00006/0353 7 (256,448,3)
|
96 |
+
00006/0558 7 (256,448,3)
|
97 |
+
00006/0588 7 (256,448,3)
|
98 |
+
00006/0619 7 (256,448,3)
|
99 |
+
00006/0621 7 (256,448,3)
|
100 |
+
00006/0748 7 (256,448,3)
|
101 |
+
00006/0796 7 (256,448,3)
|
102 |
+
00006/0805 7 (256,448,3)
|
103 |
+
00006/0807 7 (256,448,3)
|
104 |
+
00007/0236 7 (256,448,3)
|
105 |
+
00007/0240 7 (256,448,3)
|
106 |
+
00007/0243 7 (256,448,3)
|
107 |
+
00007/0246 7 (256,448,3)
|
108 |
+
00007/0247 7 (256,448,3)
|
109 |
+
00007/0252 7 (256,448,3)
|
110 |
+
00007/0322 7 (256,448,3)
|
111 |
+
00007/0458 7 (256,448,3)
|
112 |
+
00007/0492 7 (256,448,3)
|
113 |
+
00007/0658 7 (256,448,3)
|
114 |
+
00007/0717 7 (256,448,3)
|
115 |
+
00007/0722 7 (256,448,3)
|
116 |
+
00007/0725 7 (256,448,3)
|
117 |
+
00007/0740 7 (256,448,3)
|
118 |
+
00007/0748 7 (256,448,3)
|
119 |
+
00007/0749 7 (256,448,3)
|
120 |
+
00007/0759 7 (256,448,3)
|
121 |
+
00007/0772 7 (256,448,3)
|
122 |
+
00007/0783 7 (256,448,3)
|
123 |
+
00007/0787 7 (256,448,3)
|
124 |
+
00007/0883 7 (256,448,3)
|
125 |
+
00008/0033 7 (256,448,3)
|
126 |
+
00008/0035 7 (256,448,3)
|
127 |
+
00008/0091 7 (256,448,3)
|
128 |
+
00008/0154 7 (256,448,3)
|
129 |
+
00008/0966 7 (256,448,3)
|
130 |
+
00008/0987 7 (256,448,3)
|
131 |
+
00009/0108 7 (256,448,3)
|
132 |
+
00009/0607 7 (256,448,3)
|
133 |
+
00009/0668 7 (256,448,3)
|
134 |
+
00009/0683 7 (256,448,3)
|
135 |
+
00009/0941 7 (256,448,3)
|
136 |
+
00009/0949 7 (256,448,3)
|
137 |
+
00009/0962 7 (256,448,3)
|
138 |
+
00009/0972 7 (256,448,3)
|
139 |
+
00009/0974 7 (256,448,3)
|
140 |
+
00010/0014 7 (256,448,3)
|
141 |
+
00010/0018 7 (256,448,3)
|
142 |
+
00010/0043 7 (256,448,3)
|
143 |
+
00010/0099 7 (256,448,3)
|
144 |
+
00010/0252 7 (256,448,3)
|
145 |
+
00010/0296 7 (256,448,3)
|
146 |
+
00010/0413 7 (256,448,3)
|
147 |
+
00010/0422 7 (256,448,3)
|
148 |
+
00010/0516 7 (256,448,3)
|
149 |
+
00010/0525 7 (256,448,3)
|
150 |
+
00010/0556 7 (256,448,3)
|
151 |
+
00010/0701 7 (256,448,3)
|
152 |
+
00010/0740 7 (256,448,3)
|
153 |
+
00010/0772 7 (256,448,3)
|
154 |
+
00010/0831 7 (256,448,3)
|
155 |
+
00010/0925 7 (256,448,3)
|
156 |
+
00011/0013 7 (256,448,3)
|
157 |
+
00011/0016 7 (256,448,3)
|
158 |
+
00011/0017 7 (256,448,3)
|
159 |
+
00011/0249 7 (256,448,3)
|
160 |
+
00011/0826 7 (256,448,3)
|
161 |
+
00011/0827 7 (256,448,3)
|
162 |
+
00011/0831 7 (256,448,3)
|
163 |
+
00011/0833 7 (256,448,3)
|
164 |
+
00011/0835 7 (256,448,3)
|
165 |
+
00011/0998 7 (256,448,3)
|
166 |
+
00012/0023 7 (256,448,3)
|
167 |
+
00012/0024 7 (256,448,3)
|
168 |
+
00012/0027 7 (256,448,3)
|
169 |
+
00012/0037 7 (256,448,3)
|
170 |
+
00012/0444 7 (256,448,3)
|
171 |
+
00012/0445 7 (256,448,3)
|
172 |
+
00012/0451 7 (256,448,3)
|
173 |
+
00012/0461 7 (256,448,3)
|
174 |
+
00012/0521 7 (256,448,3)
|
175 |
+
00012/0758 7 (256,448,3)
|
176 |
+
00012/0760 7 (256,448,3)
|
177 |
+
00012/0771 7 (256,448,3)
|
178 |
+
00012/0903 7 (256,448,3)
|
179 |
+
00012/0909 7 (256,448,3)
|
180 |
+
00013/0581 7 (256,448,3)
|
181 |
+
00013/0786 7 (256,448,3)
|
182 |
+
00013/0789 7 (256,448,3)
|
183 |
+
00013/0791 7 (256,448,3)
|
184 |
+
00013/0798 7 (256,448,3)
|
185 |
+
00013/0802 7 (256,448,3)
|
186 |
+
00013/0820 7 (256,448,3)
|
187 |
+
00013/0850 7 (256,448,3)
|
188 |
+
00013/0854 7 (256,448,3)
|
189 |
+
00013/0894 7 (256,448,3)
|
190 |
+
00013/0919 7 (256,448,3)
|
191 |
+
00013/0999 7 (256,448,3)
|
192 |
+
00014/0001 7 (256,448,3)
|
193 |
+
00014/0014 7 (256,448,3)
|
194 |
+
00014/0018 7 (256,448,3)
|
195 |
+
00014/0244 7 (256,448,3)
|
196 |
+
00014/0475 7 (256,448,3)
|
197 |
+
00014/0483 7 (256,448,3)
|
198 |
+
00014/0680 7 (256,448,3)
|
199 |
+
00014/0700 7 (256,448,3)
|
200 |
+
00014/0701 7 (256,448,3)
|
201 |
+
00014/0706 7 (256,448,3)
|
202 |
+
00014/0712 7 (256,448,3)
|
203 |
+
00014/0713 7 (256,448,3)
|
204 |
+
00014/0717 7 (256,448,3)
|
205 |
+
00014/0719 7 (256,448,3)
|
206 |
+
00014/0728 7 (256,448,3)
|
207 |
+
00014/0734 7 (256,448,3)
|
208 |
+
00014/0736 7 (256,448,3)
|
209 |
+
00014/0738 7 (256,448,3)
|
210 |
+
00014/0742 7 (256,448,3)
|
211 |
+
00014/0745 7 (256,448,3)
|
212 |
+
00014/0746 7 (256,448,3)
|
213 |
+
00014/0750 7 (256,448,3)
|
214 |
+
00014/0769 7 (256,448,3)
|
215 |
+
00014/0774 7 (256,448,3)
|
216 |
+
00014/0781 7 (256,448,3)
|
217 |
+
00014/0782 7 (256,448,3)
|
218 |
+
00014/0852 7 (256,448,3)
|
219 |
+
00014/0853 7 (256,448,3)
|
220 |
+
00014/0855 7 (256,448,3)
|
221 |
+
00014/0867 7 (256,448,3)
|
222 |
+
00014/0876 7 (256,448,3)
|
223 |
+
00014/0881 7 (256,448,3)
|
224 |
+
00014/0890 7 (256,448,3)
|
225 |
+
00014/0914 7 (256,448,3)
|
226 |
+
00015/0033 7 (256,448,3)
|
227 |
+
00015/0113 7 (256,448,3)
|
228 |
+
00015/0125 7 (256,448,3)
|
229 |
+
00015/0185 7 (256,448,3)
|
230 |
+
00015/0194 7 (256,448,3)
|
231 |
+
00015/0202 7 (256,448,3)
|
232 |
+
00015/0312 7 (256,448,3)
|
233 |
+
00015/0688 7 (256,448,3)
|
234 |
+
00015/0698 7 (256,448,3)
|
235 |
+
00015/0788 7 (256,448,3)
|
236 |
+
00015/0854 7 (256,448,3)
|
237 |
+
00015/0863 7 (256,448,3)
|
238 |
+
00015/0864 7 (256,448,3)
|
239 |
+
00015/0918 7 (256,448,3)
|
240 |
+
00015/0931 7 (256,448,3)
|
241 |
+
00016/0276 7 (256,448,3)
|
242 |
+
00016/0301 7 (256,448,3)
|
243 |
+
00016/0306 7 (256,448,3)
|
244 |
+
00016/0324 7 (256,448,3)
|
245 |
+
00016/0362 7 (256,448,3)
|
246 |
+
00016/0364 7 (256,448,3)
|
247 |
+
00016/0370 7 (256,448,3)
|
248 |
+
00016/0378 7 (256,448,3)
|
249 |
+
00016/0379 7 (256,448,3)
|
250 |
+
00016/0402 7 (256,448,3)
|
251 |
+
00016/0405 7 (256,448,3)
|
252 |
+
00016/0418 7 (256,448,3)
|
253 |
+
00016/0419 7 (256,448,3)
|
254 |
+
00016/0435 7 (256,448,3)
|
255 |
+
00016/0501 7 (256,448,3)
|
256 |
+
00016/0561 7 (256,448,3)
|
257 |
+
00016/0562 7 (256,448,3)
|
258 |
+
00016/0569 7 (256,448,3)
|
259 |
+
00016/0591 7 (256,448,3)
|
260 |
+
00016/0599 7 (256,448,3)
|
261 |
+
00016/0711 7 (256,448,3)
|
262 |
+
00016/0713 7 (256,448,3)
|
263 |
+
00016/0813 7 (256,448,3)
|
264 |
+
00016/0953 7 (256,448,3)
|
265 |
+
00016/0960 7 (256,448,3)
|
266 |
+
00016/0961 7 (256,448,3)
|
267 |
+
00017/0519 7 (256,448,3)
|
268 |
+
00017/0523 7 (256,448,3)
|
269 |
+
00017/0588 7 (256,448,3)
|
270 |
+
00017/0608 7 (256,448,3)
|
271 |
+
00017/0609 7 (256,448,3)
|
272 |
+
00017/0719 7 (256,448,3)
|
273 |
+
00017/0721 7 (256,448,3)
|
274 |
+
00017/0727 7 (256,448,3)
|
275 |
+
00017/0728 7 (256,448,3)
|
276 |
+
00017/0769 7 (256,448,3)
|
277 |
+
00017/0775 7 (256,448,3)
|
278 |
+
00017/0787 7 (256,448,3)
|
279 |
+
00017/0797 7 (256,448,3)
|
280 |
+
00018/0043 7 (256,448,3)
|
281 |
+
00018/0206 7 (256,448,3)
|
282 |
+
00018/0209 7 (256,448,3)
|
283 |
+
00018/0211 7 (256,448,3)
|
284 |
+
00018/0216 7 (256,448,3)
|
285 |
+
00018/0220 7 (256,448,3)
|
286 |
+
00018/0221 7 (256,448,3)
|
287 |
+
00018/0252 7 (256,448,3)
|
288 |
+
00018/0260 7 (256,448,3)
|
289 |
+
00018/0331 7 (256,448,3)
|
290 |
+
00018/0333 7 (256,448,3)
|
291 |
+
00018/0447 7 (256,448,3)
|
292 |
+
00018/0523 7 (256,448,3)
|
293 |
+
00019/0014 7 (256,448,3)
|
294 |
+
00019/0015 7 (256,448,3)
|
295 |
+
00019/0019 7 (256,448,3)
|
296 |
+
00019/0049 7 (256,448,3)
|
297 |
+
00019/0109 7 (256,448,3)
|
298 |
+
00019/0114 7 (256,448,3)
|
299 |
+
00019/0125 7 (256,448,3)
|
300 |
+
00019/0137 7 (256,448,3)
|
301 |
+
00019/0140 7 (256,448,3)
|
302 |
+
00019/0148 7 (256,448,3)
|
303 |
+
00019/0153 7 (256,448,3)
|
304 |
+
00019/0155 7 (256,448,3)
|
305 |
+
00019/0158 7 (256,448,3)
|
306 |
+
00019/0159 7 (256,448,3)
|
307 |
+
00019/0160 7 (256,448,3)
|
308 |
+
00019/0162 7 (256,448,3)
|
309 |
+
00019/0279 7 (256,448,3)
|
310 |
+
00019/0282 7 (256,448,3)
|
311 |
+
00019/0409 7 (256,448,3)
|
312 |
+
00019/0427 7 (256,448,3)
|
313 |
+
00019/0430 7 (256,448,3)
|
314 |
+
00019/0545 7 (256,448,3)
|
315 |
+
00019/0555 7 (256,448,3)
|
316 |
+
00019/0558 7 (256,448,3)
|
317 |
+
00019/0650 7 (256,448,3)
|
318 |
+
00019/0681 7 (256,448,3)
|
319 |
+
00019/0747 7 (256,448,3)
|
320 |
+
00019/0748 7 (256,448,3)
|
321 |
+
00019/0749 7 (256,448,3)
|
322 |
+
00019/0752 7 (256,448,3)
|
323 |
+
00019/0768 7 (256,448,3)
|
324 |
+
00019/0772 7 (256,448,3)
|
325 |
+
00019/0773 7 (256,448,3)
|
326 |
+
00019/0777 7 (256,448,3)
|
327 |
+
00019/0795 7 (256,448,3)
|
328 |
+
00019/0806 7 (256,448,3)
|
329 |
+
00019/0815 7 (256,448,3)
|
330 |
+
00019/0840 7 (256,448,3)
|
331 |
+
00019/0844 7 (256,448,3)
|
332 |
+
00019/0848 7 (256,448,3)
|
333 |
+
00019/0853 7 (256,448,3)
|
334 |
+
00019/0863 7 (256,448,3)
|
335 |
+
00019/0888 7 (256,448,3)
|
336 |
+
00019/0894 7 (256,448,3)
|
337 |
+
00019/0901 7 (256,448,3)
|
338 |
+
00019/0995 7 (256,448,3)
|
339 |
+
00021/0030 7 (256,448,3)
|
340 |
+
00021/0035 7 (256,448,3)
|
341 |
+
00021/0039 7 (256,448,3)
|
342 |
+
00021/0041 7 (256,448,3)
|
343 |
+
00021/0044 7 (256,448,3)
|
344 |
+
00021/0045 7 (256,448,3)
|
345 |
+
00021/0264 7 (256,448,3)
|
346 |
+
00021/0330 7 (256,448,3)
|
347 |
+
00021/0332 7 (256,448,3)
|
348 |
+
00021/0333 7 (256,448,3)
|
349 |
+
00021/0336 7 (256,448,3)
|
350 |
+
00021/0337 7 (256,448,3)
|
351 |
+
00021/0338 7 (256,448,3)
|
352 |
+
00021/0343 7 (256,448,3)
|
353 |
+
00021/0472 7 (256,448,3)
|
354 |
+
00021/0667 7 (256,448,3)
|
355 |
+
00021/0731 7 (256,448,3)
|
356 |
+
00021/0779 7 (256,448,3)
|
357 |
+
00021/0805 7 (256,448,3)
|
358 |
+
00021/0814 7 (256,448,3)
|
359 |
+
00021/0818 7 (256,448,3)
|
360 |
+
00021/0874 7 (256,448,3)
|
361 |
+
00022/0008 7 (256,448,3)
|
362 |
+
00022/0010 7 (256,448,3)
|
363 |
+
00022/0231 7 (256,448,3)
|
364 |
+
00022/0323 7 (256,448,3)
|
365 |
+
00022/0337 7 (256,448,3)
|
366 |
+
00022/0359 7 (256,448,3)
|
367 |
+
00022/0377 7 (256,448,3)
|
368 |
+
00022/0378 7 (256,448,3)
|
369 |
+
00022/0385 7 (256,448,3)
|
370 |
+
00022/0393 7 (256,448,3)
|
371 |
+
00022/0424 7 (256,448,3)
|
372 |
+
00022/0582 7 (256,448,3)
|
373 |
+
00022/0583 7 (256,448,3)
|
374 |
+
00022/0605 7 (256,448,3)
|
375 |
+
00022/0632 7 (256,448,3)
|
376 |
+
00022/0633 7 (256,448,3)
|
377 |
+
00022/0666 7 (256,448,3)
|
378 |
+
00022/0671 7 (256,448,3)
|
379 |
+
00022/0673 7 (256,448,3)
|
380 |
+
00022/0702 7 (256,448,3)
|
381 |
+
00022/0852 7 (256,448,3)
|
382 |
+
00022/0853 7 (256,448,3)
|
383 |
+
00022/0971 7 (256,448,3)
|
384 |
+
00023/0037 7 (256,448,3)
|
385 |
+
00023/0224 7 (256,448,3)
|
386 |
+
00023/0308 7 (256,448,3)
|
387 |
+
00023/0393 7 (256,448,3)
|
388 |
+
00023/0633 7 (256,448,3)
|
389 |
+
00023/0637 7 (256,448,3)
|
390 |
+
00023/0638 7 (256,448,3)
|
391 |
+
00023/0770 7 (256,448,3)
|
392 |
+
00023/0786 7 (256,448,3)
|
393 |
+
00023/0898 7 (256,448,3)
|
394 |
+
00024/0247 7 (256,448,3)
|
395 |
+
00024/0251 7 (256,448,3)
|
396 |
+
00024/0267 7 (256,448,3)
... (entries 397-1613 continue in the same "clip/sequence 7 (256,448,3)" format) ...
00096/0823 7 (256,448,3)
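Each entry above is a Vimeo90K septuplet key, its frame count, and its frame shape, separated by single spaces. A minimal parsing sketch follows, assuming only this three-field layout; the helper name parse_meta_info_line is hypothetical and not part of BasicSR, which parses such lines inline in its dataset classes (see REDSDataset further below):

# Hypothetical helper: parse one meta-info line such as "00024/0267 7 (256,448,3)".
def parse_meta_info_line(line):
    key, num_frames, shape_str = line.strip().split(' ')
    shape = tuple(int(v) for v in shape_str.strip('()').split(','))  # "(256,448,3)" -> (256, 448, 3)
    return key, int(num_frames), shape


print(parse_meta_info_line('00024/0267 7 (256,448,3)'))
# ('00024/0267', 7, (256, 448, 3))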
basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
ADDED
The diff for this file is too large to render.
See raw diff
basicsr/data/paired_image_dataset.py
ADDED
@@ -0,0 +1,109 @@
from torch.utils import data as data
from torchvision.transforms.functional import normalize

from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb, paired_paths_from_meta_info_file
from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, imfrombytes, img2tensor
from basicsr.utils.matlab_functions import rgb2ycbcr
from basicsr.utils.registry import DATASET_REGISTRY


@DATASET_REGISTRY.register()
class PairedImageDataset(data.Dataset):
    """Paired image dataset for image restoration.

    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs.

    There are three modes:
    1. 'lmdb': Use lmdb files. If opt['io_backend'] == lmdb.
    2. 'meta_info_file': Use a meta information file to generate paths.
       If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
    3. 'folder': Scan folders to generate paths. The rest.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwargs.
            filename_tmpl (str): Template for each filename. Note that the template excludes the file extension.
                Default: '{}'.
            gt_size (int): Cropped patch size for gt patches.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).

            scale (bool): Scale, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, opt):
        super(PairedImageDataset, self).__init__()
        self.opt = opt
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
        if 'filename_tmpl' in opt:
            self.filename_tmpl = opt['filename_tmpl']
        else:
            self.filename_tmpl = '{}'

        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
            self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt'])
        elif 'meta_info_file' in self.opt and self.opt['meta_info_file'] is not None:
            self.paths = paired_paths_from_meta_info_file([self.lq_folder, self.gt_folder], ['lq', 'gt'],
                                                          self.opt['meta_info_file'], self.filename_tmpl)
        else:
            self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        scale = self.opt['scale']

        # Load gt and lq images. Dimension order: HWC; channel order: BGR;
        # image range: [0, 1], float32.
        gt_path = self.paths[index]['gt_path']
        img_bytes = self.file_client.get(gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        lq_path = self.paths[index]['lq_path']
        img_bytes = self.file_client.get(lq_path, 'lq')
        img_lq = imfrombytes(img_bytes, float32=True)

        # augmentation for training
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            # random crop
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
            # flip, rotation
            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot'])

        # color space transform
        if 'color' in self.opt and self.opt['color'] == 'y':
            img_gt = rgb2ycbcr(img_gt, y_only=True)[..., None]
            img_lq = rgb2ycbcr(img_lq, y_only=True)[..., None]

        # crop the unmatched GT images during validation or testing, especially for SR benchmark datasets
        # TODO: It is better to update the datasets, rather than force to crop
        if self.opt['phase'] != 'train':
            img_gt = img_gt[0:img_lq.shape[0] * scale, 0:img_lq.shape[1] * scale, :]

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path}

    def __len__(self):
        return len(self.paths)
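A rough usage sketch for the 'folder' mode described above, assuming LQ and GT images already sit in two matching directories; the paths and option values below are placeholders, since in BasicSR these options normally come from a YAML config and 'scale'/'phase' are filled in by the training pipeline:

# Hypothetical usage sketch for PairedImageDataset; paths and values are placeholders.
from torch.utils.data import DataLoader

from basicsr.data.paired_image_dataset import PairedImageDataset

opt = {
    'dataroot_gt': 'datasets/DIV2K/DIV2K_train_HR_sub',             # placeholder path
    'dataroot_lq': 'datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub',  # placeholder path
    'io_backend': {'type': 'disk'},
    'phase': 'train',
    'scale': 4,        # normally added automatically by the framework
    'gt_size': 128,
    'use_hflip': True,
    'use_rot': True,
}

dataset = PairedImageDataset(opt)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
batch = next(iter(loader))
print(batch['lq'].shape, batch['gt'].shape)  # e.g. (4, 3, 32, 32) and (4, 3, 128, 128)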
basicsr/data/prefetch_dataloader.py
ADDED
@@ -0,0 +1,125 @@
import queue as Queue
import threading
import torch
from torch.utils.data import DataLoader


class PrefetchGenerator(threading.Thread):
    """A general prefetch generator.

    Ref:
    https://stackoverflow.com/questions/7323664/python-generator-pre-fetch

    Args:
        generator: Python generator.
        num_prefetch_queue (int): Number of prefetch queue.
    """

    def __init__(self, generator, num_prefetch_queue):
        threading.Thread.__init__(self)
        self.queue = Queue.Queue(num_prefetch_queue)
        self.generator = generator
        self.daemon = True
        self.start()

    def run(self):
        for item in self.generator:
            self.queue.put(item)
        self.queue.put(None)

    def __next__(self):
        next_item = self.queue.get()
        if next_item is None:
            raise StopIteration
        return next_item

    def __iter__(self):
        return self


class PrefetchDataLoader(DataLoader):
    """Prefetch version of dataloader.

    Ref:
    https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#

    TODO:
    Need to test on single gpu and ddp (multi-gpu). There is a known issue in ddp.

    Args:
        num_prefetch_queue (int): Number of prefetch queue.
        kwargs (dict): Other arguments for dataloader.
    """

    def __init__(self, num_prefetch_queue, **kwargs):
        self.num_prefetch_queue = num_prefetch_queue
        super(PrefetchDataLoader, self).__init__(**kwargs)

    def __iter__(self):
        return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)


class CPUPrefetcher():
    """CPU prefetcher.

    Args:
        loader: Dataloader.
    """

    def __init__(self, loader):
        self.ori_loader = loader
        self.loader = iter(loader)

    def next(self):
        try:
            return next(self.loader)
        except StopIteration:
            return None

    def reset(self):
        self.loader = iter(self.ori_loader)


class CUDAPrefetcher():
    """CUDA prefetcher.

    Ref:
    https://github.com/NVIDIA/apex/issues/304#

    It may consume more GPU memory.

    Args:
        loader: Dataloader.
        opt (dict): Options.
    """

    def __init__(self, loader, opt):
        self.ori_loader = loader
        self.loader = iter(loader)
        self.opt = opt
        self.stream = torch.cuda.Stream()
        self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
        self.preload()

    def preload(self):
        try:
            self.batch = next(self.loader)  # self.batch is a dict
        except StopIteration:
            self.batch = None
            return None
        # put tensors to gpu
        with torch.cuda.stream(self.stream):
            for k, v in self.batch.items():
                if torch.is_tensor(v):
                    self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)

    def next(self):
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        self.preload()
        return batch

    def reset(self):
        self.loader = iter(self.ori_loader)
        self.preload()
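A rough sketch of how these prefetchers are typically driven in a training loop; the 'prefetch_mode' key is assumed to follow BasicSR's option of that name, and the per-batch model update is stubbed out as step_fn:

# Hypothetical training-loop sketch using CPUPrefetcher / CUDAPrefetcher.
# `train_loader` is any DataLoader yielding dicts of tensors (e.g. {'lq': ..., 'gt': ...}).
import torch

from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher


def train_one_epoch(train_loader, opt, step_fn):
    if opt.get('prefetch_mode') == 'cuda' and torch.cuda.is_available():
        prefetcher = CUDAPrefetcher(train_loader, opt)  # requires opt['num_gpu']
    else:
        prefetcher = CPUPrefetcher(train_loader)

    batch = prefetcher.next()
    while batch is not None:
        step_fn(batch)            # e.g. forward/backward/optimizer step on batch['lq'], batch['gt']
        batch = prefetcher.next()


# Example call with a dummy step function:
# train_one_epoch(loader, {'prefetch_mode': 'cuda', 'num_gpu': 1}, lambda b: None)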
basicsr/data/reds_dataset.py
ADDED
@@ -0,0 +1,360 @@
import numpy as np
import random
import torch
from pathlib import Path
from torch.utils import data as data

from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.flow_util import dequantize_flow
from basicsr.utils.registry import DATASET_REGISTRY


@DATASET_REGISTRY.register()
class REDSDataset(data.Dataset):
    """REDS dataset for training.

    The keys are generated from a meta info txt file:
    basicsr/data/meta_info/meta_info_REDS_GT.txt

    Each line contains: 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space.
    Examples:
    000 100 (720,1280,3)
    001 100 (720,1280,3)
    ...

    Key examples: "000/00000000"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.

    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            dataroot_flow (str, optional): Data root path for flow.
            meta_info_file (str): Path for meta information file.
            val_partition (str): Validation partition types. 'REDS4' or 'official'.
            io_backend (dict): IO backend type and other kwargs.

            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patch size for gt patches.
            interval_list (list): Interval list for temporal augmentation.
            random_reverse (bool): Random reverse input frames.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).

            scale (bool): Scale, which will be added automatically.
    """

    def __init__(self, opt):
        super(REDSDataset, self).__init__()
        self.opt = opt
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
        self.flow_root = Path(opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
        assert opt['num_frame'] % 2 == 1, (f'num_frame should be odd number, but got {opt["num_frame"]}')
        self.num_frame = opt['num_frame']
        self.num_half_frames = opt['num_frame'] // 2

        self.keys = []
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                folder, frame_num, _ = line.split(' ')
                self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])

        # remove the video clips used in validation
        if opt['val_partition'] == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif opt['val_partition'] == 'official':
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            raise ValueError(f'Wrong validation partition {opt["val_partition"]}.'
                             f"Supported ones are ['official', 'REDS4'].")
        self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]

        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            if self.flow_root is not None:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
            else:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt']

        # temporal augmentation configs
        self.interval_list = opt['interval_list']
        self.random_reverse = opt['random_reverse']
        interval_str = ','.join(str(x) for x in opt['interval_list'])
        logger = get_root_logger()
        logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
                    f'random reverse is {self.random_reverse}.')

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        clip_name, frame_name = key.split('/')  # key example: 000/00000000
        center_frame_idx = int(frame_name)

        # determine the neighboring frames
        interval = random.choice(self.interval_list)

        # ensure not exceeding the borders
        start_frame_idx = center_frame_idx - self.num_half_frames * interval
        end_frame_idx = center_frame_idx + self.num_half_frames * interval
        # each clip has 100 frames starting from 0 to 99
        while (start_frame_idx < 0) or (end_frame_idx > 99):
            center_frame_idx = random.randint(0, 99)
            start_frame_idx = (center_frame_idx - self.num_half_frames * interval)
            end_frame_idx = center_frame_idx + self.num_half_frames * interval
        frame_name = f'{center_frame_idx:08d}'
        neighbor_list = list(range(start_frame_idx, end_frame_idx + 1, interval))
        # random reverse
        if self.random_reverse and random.random() < 0.5:
            neighbor_list.reverse()

        assert len(neighbor_list) == self.num_frame, (f'Wrong length of neighbor list: {len(neighbor_list)}')

        # get the GT frame (as the center frame)
        if self.is_lmdb:
            img_gt_path = f'{clip_name}/{frame_name}'
        else:
            img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
        img_bytes = self.file_client.get(img_gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)

        # get the neighboring LQ frames
        img_lqs = []
        for neighbor in neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip_name}/{neighbor:08d}'
            else:
                img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)

        # get flows
        if self.flow_root is not None:
            img_flows = []
            # read previous flows
            for i in range(self.num_half_frames, 0, -1):
                if self.is_lmdb:
                    flow_path = f'{clip_name}/{frame_name}_p{i}'
                else:
                    flow_path = (self.flow_root / clip_name / f'{frame_name}_p{i}.png')
                img_bytes = self.file_client.get(flow_path, 'flow')
                cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False)  # uint8, [0, 255]
                dx, dy = np.split(cat_flow, 2, axis=0)
                flow = dequantize_flow(dx, dy, max_val=20, denorm=False)  # we use max_val 20 here.
                img_flows.append(flow)
            # read next flows
            for i in range(1, self.num_half_frames + 1):
                if self.is_lmdb:
                    flow_path = f'{clip_name}/{frame_name}_n{i}'
                else:
                    flow_path = (self.flow_root / clip_name / f'{frame_name}_n{i}.png')
                img_bytes = self.file_client.get(flow_path, 'flow')
                cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False)  # uint8, [0, 255]
                dx, dy = np.split(cat_flow, 2, axis=0)
                flow = dequantize_flow(dx, dy, max_val=20, denorm=False)  # we use max_val 20 here.
                img_flows.append(flow)

            # for random crop, here, img_flows and img_lqs have the same spatial size
            img_lqs.extend(img_flows)

        # randomly crop
        img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path)
        if self.flow_root is not None:
            img_lqs, img_flows = img_lqs[:self.num_frame], img_lqs[self.num_frame:]

        # augmentation - flip, rotate
        img_lqs.append(img_gt)
        if self.flow_root is not None:
            img_results, img_flows = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'], img_flows)
        else:
            img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])

        img_results = img2tensor(img_results)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]

        if self.flow_root is not None:
            img_flows = img2tensor(img_flows)
            # add the zero center flow
            img_flows.insert(self.num_half_frames, torch.zeros_like(img_flows[0]))
            img_flows = torch.stack(img_flows, dim=0)

        # img_lqs: (t, c, h, w)
        # img_flows: (t, 2, h, w)
        # img_gt: (c, h, w)
        # key: str
        if self.flow_root is not None:
            return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
|
205 |
+
else:
|
206 |
+
return {'lq': img_lqs, 'gt': img_gt, 'key': key}
|
207 |
+
|
208 |
+
def __len__(self):
|
209 |
+
return len(self.keys)
|
210 |
+
|
211 |
+
|
212 |
+
@DATASET_REGISTRY.register()
|
213 |
+
class REDSRecurrentDataset(data.Dataset):
|
214 |
+
"""REDS dataset for training recurrent networks.
|
215 |
+
|
216 |
+
The keys are generated from a meta info txt file.
|
217 |
+
basicsr/data/meta_info/meta_info_REDS_GT.txt
|
218 |
+
|
219 |
+
Each line contains:
|
220 |
+
1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
|
221 |
+
a white space.
|
222 |
+
Examples:
|
223 |
+
000 100 (720,1280,3)
|
224 |
+
001 100 (720,1280,3)
|
225 |
+
...
|
226 |
+
|
227 |
+
Key examples: "000/00000000"
|
228 |
+
GT (gt): Ground-Truth;
|
229 |
+
LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
|
230 |
+
|
231 |
+
Args:
|
232 |
+
opt (dict): Config for train dataset. It contains the following keys:
|
233 |
+
dataroot_gt (str): Data root path for gt.
|
234 |
+
dataroot_lq (str): Data root path for lq.
|
235 |
+
dataroot_flow (str, optional): Data root path for flow.
|
236 |
+
meta_info_file (str): Path for meta information file.
|
237 |
+
val_partition (str): Validation partition types. 'REDS4' or
|
238 |
+
'official'.
|
239 |
+
io_backend (dict): IO backend type and other kwarg.
|
240 |
+
|
241 |
+
num_frame (int): Window size for input frames.
|
242 |
+
gt_size (int): Cropped patch size for gt patches.
|
243 |
+
interval_list (list): Interval list for temporal augmentation.
|
244 |
+
random_reverse (bool): Random reverse input frames.
|
245 |
+
use_hflip (bool): Use horizontal flips.
|
246 |
+
use_rot (bool): Use rotation (use vertical flip and transposing h
|
247 |
+
and w for implementation).
|
248 |
+
|
249 |
+
scale (int): Scale, which will be added automatically.
|
250 |
+
"""
|
251 |
+
|
252 |
+
def __init__(self, opt):
|
253 |
+
super(REDSRecurrentDataset, self).__init__()
|
254 |
+
self.opt = opt
|
255 |
+
self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
|
256 |
+
self.num_frame = opt['num_frame']
|
257 |
+
|
258 |
+
self.keys = []
|
259 |
+
with open(opt['meta_info_file'], 'r') as fin:
|
260 |
+
for line in fin:
|
261 |
+
folder, frame_num, _ = line.split(' ')
|
262 |
+
self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])
|
263 |
+
|
264 |
+
# remove the video clips used in validation
|
265 |
+
if opt['val_partition'] == 'REDS4':
|
266 |
+
val_partition = ['000', '011', '015', '020']
|
267 |
+
elif opt['val_partition'] == 'official':
|
268 |
+
val_partition = [f'{v:03d}' for v in range(240, 270)]
|
269 |
+
else:
|
270 |
+
raise ValueError(f'Wrong validation partition {opt["val_partition"]}. '
|
271 |
+
f"Supported ones are ['official', 'REDS4'].")
|
272 |
+
if opt['test_mode']:
|
273 |
+
self.keys = [v for v in self.keys if v.split('/')[0] in val_partition]
|
274 |
+
else:
|
275 |
+
self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]
|
276 |
+
|
277 |
+
# file client (io backend)
|
278 |
+
self.file_client = None
|
279 |
+
self.io_backend_opt = opt['io_backend']
|
280 |
+
self.is_lmdb = False
|
281 |
+
if self.io_backend_opt['type'] == 'lmdb':
|
282 |
+
self.is_lmdb = True
|
283 |
+
if hasattr(self, 'flow_root') and self.flow_root is not None:
|
284 |
+
self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
|
285 |
+
self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
|
286 |
+
else:
|
287 |
+
self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
|
288 |
+
self.io_backend_opt['client_keys'] = ['lq', 'gt']
|
289 |
+
|
290 |
+
# temporal augmentation configs
|
291 |
+
self.interval_list = opt.get('interval_list', [1])
|
292 |
+
self.random_reverse = opt.get('random_reverse', False)
|
293 |
+
interval_str = ','.join(str(x) for x in self.interval_list)
|
294 |
+
logger = get_root_logger()
|
295 |
+
logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
|
296 |
+
f'random reverse is {self.random_reverse}.')
|
297 |
+
|
298 |
+
def __getitem__(self, index):
|
299 |
+
if self.file_client is None:
|
300 |
+
self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
|
301 |
+
|
302 |
+
scale = self.opt['scale']
|
303 |
+
gt_size = self.opt['gt_size']
|
304 |
+
key = self.keys[index]
|
305 |
+
clip_name, frame_name = key.split('/') # key example: 000/00000000
|
306 |
+
|
307 |
+
# determine the neighboring frames
|
308 |
+
interval = random.choice(self.interval_list)
|
309 |
+
|
310 |
+
# ensure not exceeding the borders
|
311 |
+
start_frame_idx = int(frame_name)
|
312 |
+
if start_frame_idx > 100 - self.num_frame * interval:
|
313 |
+
start_frame_idx = random.randint(0, 100 - self.num_frame * interval)
|
314 |
+
end_frame_idx = start_frame_idx + self.num_frame * interval
|
315 |
+
|
316 |
+
neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
|
317 |
+
|
318 |
+
# random reverse
|
319 |
+
if self.random_reverse and random.random() < 0.5:
|
320 |
+
neighbor_list.reverse()
|
321 |
+
|
322 |
+
# get the neighboring LQ and GT frames
|
323 |
+
img_lqs = []
|
324 |
+
img_gts = []
|
325 |
+
for neighbor in neighbor_list:
|
326 |
+
if self.is_lmdb:
|
327 |
+
img_lq_path = f'{clip_name}/{neighbor:08d}'
|
328 |
+
img_gt_path = f'{clip_name}/{neighbor:08d}'
|
329 |
+
else:
|
330 |
+
img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
|
331 |
+
img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png'
|
332 |
+
|
333 |
+
# get LQ
|
334 |
+
img_bytes = self.file_client.get(img_lq_path, 'lq')
|
335 |
+
img_lq = imfrombytes(img_bytes, float32=True)
|
336 |
+
img_lqs.append(img_lq)
|
337 |
+
|
338 |
+
# get GT
|
339 |
+
img_bytes = self.file_client.get(img_gt_path, 'gt')
|
340 |
+
img_gt = imfrombytes(img_bytes, float32=True)
|
341 |
+
img_gts.append(img_gt)
|
342 |
+
|
343 |
+
# randomly crop
|
344 |
+
img_gts, img_lqs = paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path)
|
345 |
+
|
346 |
+
# augmentation - flip, rotate
|
347 |
+
img_lqs.extend(img_gts)
|
348 |
+
img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
|
349 |
+
|
350 |
+
img_results = img2tensor(img_results)
|
351 |
+
img_gts = torch.stack(img_results[len(img_lqs) // 2:], dim=0)
|
352 |
+
img_lqs = torch.stack(img_results[:len(img_lqs) // 2], dim=0)
|
353 |
+
|
354 |
+
# img_lqs: (t, c, h, w)
|
355 |
+
# img_gts: (t, c, h, w)
|
356 |
+
# key: str
|
357 |
+
return {'lq': img_lqs, 'gt': img_gts, 'key': key}
|
358 |
+
|
359 |
+
def __len__(self):
|
360 |
+
return len(self.keys)
|
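For orientation, here is a minimal sketch of an option dict satisfying the keys read by REDSRecurrentDataset above; the dataroot paths are placeholders, not values shipped in this commit.

# Hypothetical usage sketch for REDSRecurrentDataset (paths are placeholders).
from basicsr.data.reds_dataset import REDSRecurrentDataset

opt = {
    'dataroot_gt': 'datasets/REDS/train_sharp',             # placeholder path
    'dataroot_lq': 'datasets/REDS/train_sharp_bicubic/X4',  # placeholder path
    'meta_info_file': 'basicsr/data/meta_info/meta_info_REDS_GT.txt',
    'val_partition': 'REDS4',
    'test_mode': False,
    'io_backend': {'type': 'disk'},
    'num_frame': 15,
    'gt_size': 256,
    'interval_list': [1],
    'random_reverse': False,
    'use_hflip': True,
    'use_rot': True,
    'scale': 4,
}
dataset = REDSRecurrentDataset(opt)
sample = dataset[0]  # {'lq': (t, c, h, w), 'gt': (t, c, h, w), 'key': e.g. '000/00000000'}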
basicsr/data/single_image_dataset.py
ADDED
@@ -0,0 +1,69 @@
1 |
+
from os import path as osp
|
2 |
+
from torch.utils import data as data
|
3 |
+
from torchvision.transforms.functional import normalize
|
4 |
+
|
5 |
+
from basicsr.data.data_util import paths_from_lmdb
|
6 |
+
from basicsr.utils import FileClient, imfrombytes, img2tensor, scandir
|
7 |
+
from basicsr.utils.matlab_functions import rgb2ycbcr
|
8 |
+
from basicsr.utils.registry import DATASET_REGISTRY
|
9 |
+
|
10 |
+
|
11 |
+
@DATASET_REGISTRY.register()
|
12 |
+
class SingleImageDataset(data.Dataset):
|
13 |
+
"""Read only lq images in the test phase.
|
14 |
+
|
15 |
+
Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc).
|
16 |
+
|
17 |
+
There are two modes:
|
18 |
+
1. 'meta_info_file': Use meta information file to generate paths.
|
19 |
+
2. 'folder': Scan folders to generate paths.
|
20 |
+
|
21 |
+
Args:
|
22 |
+
opt (dict): Config for the dataset. It contains the following keys:
|
23 |
+
dataroot_lq (str): Data root path for lq.
|
24 |
+
meta_info_file (str): Path for meta information file.
|
25 |
+
io_backend (dict): IO backend type and other kwarg.
|
26 |
+
"""
|
27 |
+
|
28 |
+
def __init__(self, opt):
|
29 |
+
super(SingleImageDataset, self).__init__()
|
30 |
+
self.opt = opt
|
31 |
+
# file client (io backend)
|
32 |
+
self.file_client = None
|
33 |
+
self.io_backend_opt = opt['io_backend']
|
34 |
+
self.mean = opt['mean'] if 'mean' in opt else None
|
35 |
+
self.std = opt['std'] if 'std' in opt else None
|
36 |
+
self.lq_folder = opt['dataroot_lq']
|
37 |
+
|
38 |
+
if self.io_backend_opt['type'] == 'lmdb':
|
39 |
+
self.io_backend_opt['db_paths'] = [self.lq_folder]
|
40 |
+
self.io_backend_opt['client_keys'] = ['lq']
|
41 |
+
self.paths = paths_from_lmdb(self.lq_folder)
|
42 |
+
elif 'meta_info_file' in self.opt:
|
43 |
+
with open(self.opt['meta_info_file'], 'r') as fin:
|
44 |
+
self.paths = [osp.join(self.lq_folder, line.rstrip().split(' ')[0]) for line in fin]
|
45 |
+
else:
|
46 |
+
self.paths = sorted(list(scandir(self.lq_folder, full_path=True)))
|
47 |
+
|
48 |
+
def __getitem__(self, index):
|
49 |
+
if self.file_client is None:
|
50 |
+
self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
|
51 |
+
|
52 |
+
# load lq image
|
53 |
+
lq_path = self.paths[index]
|
54 |
+
img_bytes = self.file_client.get(lq_path, 'lq')
|
55 |
+
img_lq = imfrombytes(img_bytes, float32=True)
|
56 |
+
|
57 |
+
# color space transform
|
58 |
+
if 'color' in self.opt and self.opt['color'] == 'y':
|
59 |
+
img_lq = rgb2ycbcr(img_lq, y_only=True)[..., None]
|
60 |
+
|
61 |
+
# BGR to RGB, HWC to CHW, numpy to tensor
|
62 |
+
img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
|
63 |
+
# normalize
|
64 |
+
if self.mean is not None or self.std is not None:
|
65 |
+
normalize(img_lq, self.mean, self.std, inplace=True)
|
66 |
+
return {'lq': img_lq, 'lq_path': lq_path}
|
67 |
+
|
68 |
+
def __len__(self):
|
69 |
+
return len(self.paths)
|
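A quick sketch of the folder mode of SingleImageDataset; the directory is a placeholder.

# Hypothetical usage sketch for SingleImageDataset (folder mode; path is a placeholder).
from basicsr.data.single_image_dataset import SingleImageDataset

opt = {
    'dataroot_lq': 'datasets/my_lq_images',  # placeholder folder of LQ images
    'io_backend': {'type': 'disk'},
}
dataset = SingleImageDataset(opt)
item = dataset[0]  # {'lq': tensor of shape (c, h, w), 'lq_path': path of that image}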
basicsr/data/transforms.py
ADDED
@@ -0,0 +1,179 @@
1 |
+
import cv2
|
2 |
+
import random
|
3 |
+
import torch
|
4 |
+
|
5 |
+
|
6 |
+
def mod_crop(img, scale):
|
7 |
+
"""Mod crop images, used during testing.
|
8 |
+
|
9 |
+
Args:
|
10 |
+
img (ndarray): Input image.
|
11 |
+
scale (int): Scale factor.
|
12 |
+
|
13 |
+
Returns:
|
14 |
+
ndarray: Result image.
|
15 |
+
"""
|
16 |
+
img = img.copy()
|
17 |
+
if img.ndim in (2, 3):
|
18 |
+
h, w = img.shape[0], img.shape[1]
|
19 |
+
h_remainder, w_remainder = h % scale, w % scale
|
20 |
+
img = img[:h - h_remainder, :w - w_remainder, ...]
|
21 |
+
else:
|
22 |
+
raise ValueError(f'Wrong img ndim: {img.ndim}.')
|
23 |
+
return img
|
24 |
+
|
25 |
+
|
26 |
+
def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):
|
27 |
+
"""Paired random crop. Support Numpy array and Tensor inputs.
|
28 |
+
|
29 |
+
It crops lists of lq and gt images with corresponding locations.
|
30 |
+
|
31 |
+
Args:
|
32 |
+
img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images
|
33 |
+
should have the same shape. If the input is an ndarray, it will
|
34 |
+
be transformed to a list containing itself.
|
35 |
+
img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
|
36 |
+
should have the same shape. If the input is an ndarray, it will
|
37 |
+
be transformed to a list containing itself.
|
38 |
+
gt_patch_size (int): GT patch size.
|
39 |
+
scale (int): Scale factor.
|
40 |
+
gt_path (str): Path to ground-truth. Default: None.
|
41 |
+
|
42 |
+
Returns:
|
43 |
+
list[ndarray] | ndarray: GT images and LQ images. If returned results
|
44 |
+
only have one element, just return ndarray.
|
45 |
+
"""
|
46 |
+
|
47 |
+
if not isinstance(img_gts, list):
|
48 |
+
img_gts = [img_gts]
|
49 |
+
if not isinstance(img_lqs, list):
|
50 |
+
img_lqs = [img_lqs]
|
51 |
+
|
52 |
+
# determine input type: Numpy array or Tensor
|
53 |
+
input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'
|
54 |
+
|
55 |
+
if input_type == 'Tensor':
|
56 |
+
h_lq, w_lq = img_lqs[0].size()[-2:]
|
57 |
+
h_gt, w_gt = img_gts[0].size()[-2:]
|
58 |
+
else:
|
59 |
+
h_lq, w_lq = img_lqs[0].shape[0:2]
|
60 |
+
h_gt, w_gt = img_gts[0].shape[0:2]
|
61 |
+
lq_patch_size = gt_patch_size // scale
|
62 |
+
|
63 |
+
if h_gt != h_lq * scale or w_gt != w_lq * scale:
|
64 |
+
raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
|
65 |
+
f'multiplication of LQ ({h_lq}, {w_lq}).')
|
66 |
+
if h_lq < lq_patch_size or w_lq < lq_patch_size:
|
67 |
+
raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
|
68 |
+
f'({lq_patch_size}, {lq_patch_size}). '
|
69 |
+
f'Please remove {gt_path}.')
|
70 |
+
|
71 |
+
# randomly choose top and left coordinates for lq patch
|
72 |
+
top = random.randint(0, h_lq - lq_patch_size)
|
73 |
+
left = random.randint(0, w_lq - lq_patch_size)
|
74 |
+
|
75 |
+
# crop lq patch
|
76 |
+
if input_type == 'Tensor':
|
77 |
+
img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]
|
78 |
+
else:
|
79 |
+
img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]
|
80 |
+
|
81 |
+
# crop corresponding gt patch
|
82 |
+
top_gt, left_gt = int(top * scale), int(left * scale)
|
83 |
+
if input_type == 'Tensor':
|
84 |
+
img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]
|
85 |
+
else:
|
86 |
+
img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]
|
87 |
+
if len(img_gts) == 1:
|
88 |
+
img_gts = img_gts[0]
|
89 |
+
if len(img_lqs) == 1:
|
90 |
+
img_lqs = img_lqs[0]
|
91 |
+
return img_gts, img_lqs
|
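A small numpy sketch of the cropping contract for paired_random_crop; the arrays are synthetic and only the shapes matter.

# Synthetic shape check for paired_random_crop.
import numpy as np
from basicsr.data.transforms import paired_random_crop

gt = np.zeros((720, 1280, 3), dtype=np.float32)  # synthetic GT frame
lq = np.zeros((180, 320, 3), dtype=np.float32)   # synthetic LQ frame at scale 4
gt_patch, lq_patch = paired_random_crop(gt, lq, gt_patch_size=256, scale=4)
# gt_patch: (256, 256, 3); lq_patch: (64, 64, 3); both cropped at corresponding locations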
92 |
+
|
93 |
+
|
94 |
+
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
|
95 |
+
"""Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
|
96 |
+
|
97 |
+
We use vertical flip and transpose for rotation implementation.
|
98 |
+
All the images in the list use the same augmentation.
|
99 |
+
|
100 |
+
Args:
|
101 |
+
imgs (list[ndarray] | ndarray): Images to be augmented. If the input
|
102 |
+
is an ndarray, it will be transformed to a list.
|
103 |
+
hflip (bool): Horizontal flip. Default: True.
|
104 |
+
rotation (bool): Rotation. Default: True.
|
105 |
+
flows (list[ndarray]): Flows to be augmented. If the input is an
|
106 |
+
ndarray, it will be transformed to a list.
|
107 |
+
Dimension is (h, w, 2). Default: None.
|
108 |
+
return_status (bool): Return the status of flip and rotation.
|
109 |
+
Default: False.
|
110 |
+
|
111 |
+
Returns:
|
112 |
+
list[ndarray] | ndarray: Augmented images and flows. If returned
|
113 |
+
results only have one element, just return ndarray.
|
114 |
+
|
115 |
+
"""
|
116 |
+
hflip = hflip and random.random() < 0.5
|
117 |
+
vflip = rotation and random.random() < 0.5
|
118 |
+
rot90 = rotation and random.random() < 0.5
|
119 |
+
|
120 |
+
def _augment(img):
|
121 |
+
if hflip: # horizontal
|
122 |
+
cv2.flip(img, 1, img)
|
123 |
+
if vflip: # vertical
|
124 |
+
cv2.flip(img, 0, img)
|
125 |
+
if rot90:
|
126 |
+
img = img.transpose(1, 0, 2)
|
127 |
+
return img
|
128 |
+
|
129 |
+
def _augment_flow(flow):
|
130 |
+
if hflip: # horizontal
|
131 |
+
cv2.flip(flow, 1, flow)
|
132 |
+
flow[:, :, 0] *= -1
|
133 |
+
if vflip: # vertical
|
134 |
+
cv2.flip(flow, 0, flow)
|
135 |
+
flow[:, :, 1] *= -1
|
136 |
+
if rot90:
|
137 |
+
flow = flow.transpose(1, 0, 2)
|
138 |
+
flow = flow[:, :, [1, 0]]
|
139 |
+
return flow
|
140 |
+
|
141 |
+
if not isinstance(imgs, list):
|
142 |
+
imgs = [imgs]
|
143 |
+
imgs = [_augment(img) for img in imgs]
|
144 |
+
if len(imgs) == 1:
|
145 |
+
imgs = imgs[0]
|
146 |
+
|
147 |
+
if flows is not None:
|
148 |
+
if not isinstance(flows, list):
|
149 |
+
flows = [flows]
|
150 |
+
flows = [_augment_flow(flow) for flow in flows]
|
151 |
+
if len(flows) == 1:
|
152 |
+
flows = flows[0]
|
153 |
+
return imgs, flows
|
154 |
+
else:
|
155 |
+
if return_status:
|
156 |
+
return imgs, (hflip, vflip, rot90)
|
157 |
+
else:
|
158 |
+
return imgs
|
159 |
+
|
160 |
+
|
161 |
+
def img_rotate(img, angle, center=None, scale=1.0):
|
162 |
+
"""Rotate image.
|
163 |
+
|
164 |
+
Args:
|
165 |
+
img (ndarray): Image to be rotated.
|
166 |
+
angle (float): Rotation angle in degrees. Positive values mean
|
167 |
+
counter-clockwise rotation.
|
168 |
+
center (tuple[int]): Rotation center. If the center is None,
|
169 |
+
initialize it as the center of the image. Default: None.
|
170 |
+
scale (float): Isotropic scale factor. Default: 1.0.
|
171 |
+
"""
|
172 |
+
(h, w) = img.shape[:2]
|
173 |
+
|
174 |
+
if center is None:
|
175 |
+
center = (w // 2, h // 2)
|
176 |
+
|
177 |
+
matrix = cv2.getRotationMatrix2D(center, angle, scale)
|
178 |
+
rotated_img = cv2.warpAffine(img, matrix, (w, h))
|
179 |
+
return rotated_img
|
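And a sketch of augment applied jointly to frames and flows, using synthetic arrays that follow the (h, w, 2) flow convention documented above.

# Synthetic example: the same flip/rotation decision is applied to frames and flows.
import numpy as np
from basicsr.data.transforms import augment

frames = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(5)]
flows = [np.random.rand(64, 64, 2).astype(np.float32) for _ in range(4)]
frames_aug, flows_aug = augment(frames, hflip=True, rotation=True, flows=flows)
# horizontal/vertical flips negate the corresponding flow component, and a 90-degree
# rotation (transpose) swaps the two flow channels, keeping flows consistent with frames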
basicsr/data/video_test_dataset.py
ADDED
@@ -0,0 +1,287 @@
1 |
+
import glob
|
2 |
+
import torch
|
3 |
+
from os import path as osp
|
4 |
+
from torch.utils import data as data
|
5 |
+
|
6 |
+
from basicsr.data.data_util import duf_downsample, generate_frame_indices, read_img_seq
|
7 |
+
from basicsr.utils import get_root_logger, scandir
|
8 |
+
from basicsr.utils.registry import DATASET_REGISTRY
|
9 |
+
|
10 |
+
|
11 |
+
@DATASET_REGISTRY.register()
|
12 |
+
class VideoTestDataset(data.Dataset):
|
13 |
+
"""Video test dataset.
|
14 |
+
|
15 |
+
Supported datasets: Vid4, REDS4, REDSofficial.
|
16 |
+
More generally, it supports testing dataset with following structures:
|
17 |
+
|
18 |
+
dataroot
|
19 |
+
├── subfolder1
|
20 |
+
├── frame000
|
21 |
+
├── frame001
|
22 |
+
├── ...
|
23 |
+
├── subfolder2
|
24 |
+
├── frame000
|
25 |
+
├── frame001
|
26 |
+
├── ...
|
27 |
+
├── ...
|
28 |
+
|
29 |
+
For testing datasets, there is no need to prepare LMDB files.
|
30 |
+
|
31 |
+
Args:
|
32 |
+
opt (dict): Config for train dataset. It contains the following keys:
|
33 |
+
dataroot_gt (str): Data root path for gt.
|
34 |
+
dataroot_lq (str): Data root path for lq.
|
35 |
+
io_backend (dict): IO backend type and other kwarg.
|
36 |
+
cache_data (bool): Whether to cache testing datasets.
|
37 |
+
name (str): Dataset name.
|
38 |
+
meta_info_file (str): The path to the file storing the list of test
|
39 |
+
folders. If not provided, all the folders in the dataroot will
|
40 |
+
be used.
|
41 |
+
num_frame (int): Window size for input frames.
|
42 |
+
padding (str): Padding mode.
|
43 |
+
"""
|
44 |
+
|
45 |
+
def __init__(self, opt):
|
46 |
+
super(VideoTestDataset, self).__init__()
|
47 |
+
self.opt = opt
|
48 |
+
self.cache_data = opt['cache_data']
|
49 |
+
self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
|
50 |
+
self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
|
51 |
+
# file client (io backend)
|
52 |
+
self.file_client = None
|
53 |
+
self.io_backend_opt = opt['io_backend']
|
54 |
+
assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.'
|
55 |
+
|
56 |
+
logger = get_root_logger()
|
57 |
+
logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
|
58 |
+
self.imgs_lq, self.imgs_gt = {}, {}
|
59 |
+
if 'meta_info_file' in opt:
|
60 |
+
with open(opt['meta_info_file'], 'r') as fin:
|
61 |
+
subfolders = [line.split(' ')[0] for line in fin]
|
62 |
+
subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
|
63 |
+
subfolders_gt = [osp.join(self.gt_root, key) for key in subfolders]
|
64 |
+
else:
|
65 |
+
subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
|
66 |
+
subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*')))
|
67 |
+
|
68 |
+
if opt['name'].lower() in ['vid4', 'reds4', 'redsofficial']:
|
69 |
+
for subfolder_lq, subfolder_gt in zip(subfolders_lq, subfolders_gt):
|
70 |
+
# get frame list for lq and gt
|
71 |
+
subfolder_name = osp.basename(subfolder_lq)
|
72 |
+
img_paths_lq = sorted(list(scandir(subfolder_lq, full_path=True)))
|
73 |
+
img_paths_gt = sorted(list(scandir(subfolder_gt, full_path=True)))
|
74 |
+
|
75 |
+
max_idx = len(img_paths_lq)
|
76 |
+
assert max_idx == len(img_paths_gt), (f'Different number of images in lq ({max_idx})'
|
77 |
+
f' and gt folders ({len(img_paths_gt)})')
|
78 |
+
|
79 |
+
self.data_info['lq_path'].extend(img_paths_lq)
|
80 |
+
self.data_info['gt_path'].extend(img_paths_gt)
|
81 |
+
self.data_info['folder'].extend([subfolder_name] * max_idx)
|
82 |
+
for i in range(max_idx):
|
83 |
+
self.data_info['idx'].append(f'{i}/{max_idx}')
|
84 |
+
border_l = [0] * max_idx
|
85 |
+
for i in range(self.opt['num_frame'] // 2):
|
86 |
+
border_l[i] = 1
|
87 |
+
border_l[max_idx - i - 1] = 1
|
88 |
+
self.data_info['border'].extend(border_l)
|
89 |
+
|
90 |
+
# cache data or save the frame list
|
91 |
+
if self.cache_data:
|
92 |
+
logger.info(f'Cache {subfolder_name} for VideoTestDataset...')
|
93 |
+
self.imgs_lq[subfolder_name] = read_img_seq(img_paths_lq)
|
94 |
+
self.imgs_gt[subfolder_name] = read_img_seq(img_paths_gt)
|
95 |
+
else:
|
96 |
+
self.imgs_lq[subfolder_name] = img_paths_lq
|
97 |
+
self.imgs_gt[subfolder_name] = img_paths_gt
|
98 |
+
else:
|
99 |
+
raise ValueError(f'Non-supported video test dataset: {type(opt["name"])}')
|
100 |
+
|
101 |
+
def __getitem__(self, index):
|
102 |
+
folder = self.data_info['folder'][index]
|
103 |
+
idx, max_idx = self.data_info['idx'][index].split('/')
|
104 |
+
idx, max_idx = int(idx), int(max_idx)
|
105 |
+
border = self.data_info['border'][index]
|
106 |
+
lq_path = self.data_info['lq_path'][index]
|
107 |
+
|
108 |
+
select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
|
109 |
+
|
110 |
+
if self.cache_data:
|
111 |
+
imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx))
|
112 |
+
img_gt = self.imgs_gt[folder][idx]
|
113 |
+
else:
|
114 |
+
img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
|
115 |
+
imgs_lq = read_img_seq(img_paths_lq)
|
116 |
+
img_gt = read_img_seq([self.imgs_gt[folder][idx]])
|
117 |
+
img_gt.squeeze_(0)
|
118 |
+
|
119 |
+
return {
|
120 |
+
'lq': imgs_lq, # (t, c, h, w)
|
121 |
+
'gt': img_gt, # (c, h, w)
|
122 |
+
'folder': folder, # folder name
|
123 |
+
'idx': self.data_info['idx'][index], # e.g., 0/99
|
124 |
+
'border': border, # 1 for border, 0 for non-border
|
125 |
+
'lq_path': lq_path # center frame
|
126 |
+
}
|
127 |
+
|
128 |
+
def __len__(self):
|
129 |
+
return len(self.data_info['gt_path'])
|
130 |
+
|
131 |
+
|
132 |
+
@DATASET_REGISTRY.register()
|
133 |
+
class VideoTestVimeo90KDataset(data.Dataset):
|
134 |
+
"""Video test dataset for Vimeo90k-Test dataset.
|
135 |
+
|
136 |
+
It only keeps the center frame for testing.
|
137 |
+
For testing datasets, there is no need to prepare LMDB files.
|
138 |
+
|
139 |
+
Args:
|
140 |
+
opt (dict): Config for the test dataset. It contains the following keys:
|
141 |
+
dataroot_gt (str): Data root path for gt.
|
142 |
+
dataroot_lq (str): Data root path for lq.
|
143 |
+
io_backend (dict): IO backend type and other kwarg.
|
144 |
+
cache_data (bool): Whether to cache testing datasets.
|
145 |
+
name (str): Dataset name.
|
146 |
+
meta_info_file (str): The path to the file storing the list of test
|
147 |
+
folders. If not provided, all the folders in the dataroot will
|
148 |
+
be used.
|
149 |
+
num_frame (int): Window size for input frames.
|
150 |
+
padding (str): Padding mode.
|
151 |
+
"""
|
152 |
+
|
153 |
+
def __init__(self, opt):
|
154 |
+
super(VideoTestVimeo90KDataset, self).__init__()
|
155 |
+
self.opt = opt
|
156 |
+
self.cache_data = opt['cache_data']
|
157 |
+
if self.cache_data:
|
158 |
+
raise NotImplementedError('cache_data in Vimeo90K-Test dataset is not implemented.')
|
159 |
+
self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
|
160 |
+
self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
|
161 |
+
neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
|
162 |
+
|
163 |
+
# file client (io backend)
|
164 |
+
self.file_client = None
|
165 |
+
self.io_backend_opt = opt['io_backend']
|
166 |
+
assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.'
|
167 |
+
|
168 |
+
logger = get_root_logger()
|
169 |
+
logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
|
170 |
+
with open(opt['meta_info_file'], 'r') as fin:
|
171 |
+
subfolders = [line.split(' ')[0] for line in fin]
|
172 |
+
for idx, subfolder in enumerate(subfolders):
|
173 |
+
gt_path = osp.join(self.gt_root, subfolder, 'im4.png')
|
174 |
+
self.data_info['gt_path'].append(gt_path)
|
175 |
+
lq_paths = [osp.join(self.lq_root, subfolder, f'im{i}.png') for i in neighbor_list]
|
176 |
+
self.data_info['lq_path'].append(lq_paths)
|
177 |
+
self.data_info['folder'].append('vimeo90k')
|
178 |
+
self.data_info['idx'].append(f'{idx}/{len(subfolders)}')
|
179 |
+
self.data_info['border'].append(0)
|
180 |
+
|
181 |
+
def __getitem__(self, index):
|
182 |
+
lq_path = self.data_info['lq_path'][index]
|
183 |
+
gt_path = self.data_info['gt_path'][index]
|
184 |
+
imgs_lq = read_img_seq(lq_path)
|
185 |
+
img_gt = read_img_seq([gt_path])
|
186 |
+
img_gt.squeeze_(0)
|
187 |
+
|
188 |
+
return {
|
189 |
+
'lq': imgs_lq, # (t, c, h, w)
|
190 |
+
'gt': img_gt, # (c, h, w)
|
191 |
+
'folder': self.data_info['folder'][index], # folder name
|
192 |
+
'idx': self.data_info['idx'][index], # e.g., 0/843
|
193 |
+
'border': self.data_info['border'][index], # 0 for non-border
|
194 |
+
'lq_path': lq_path[self.opt['num_frame'] // 2] # center frame
|
195 |
+
}
|
196 |
+
|
197 |
+
def __len__(self):
|
198 |
+
return len(self.data_info['gt_path'])
|
199 |
+
|
200 |
+
|
201 |
+
@DATASET_REGISTRY.register()
|
202 |
+
class VideoTestDUFDataset(VideoTestDataset):
|
203 |
+
""" Video test dataset for DUF dataset.
|
204 |
+
|
205 |
+
Args:
|
206 |
+
opt (dict): Config for train dataset.
|
207 |
+
Most of keys are the same as VideoTestDataset.
|
208 |
+
It has the following extra keys:
|
209 |
+
|
210 |
+
use_duf_downsampling (bool): Whether to use duf downsampling to
|
211 |
+
generate low-resolution frames.
|
212 |
+
scale (int): Scale, which will be added automatically.
|
213 |
+
"""
|
214 |
+
|
215 |
+
def __getitem__(self, index):
|
216 |
+
folder = self.data_info['folder'][index]
|
217 |
+
idx, max_idx = self.data_info['idx'][index].split('/')
|
218 |
+
idx, max_idx = int(idx), int(max_idx)
|
219 |
+
border = self.data_info['border'][index]
|
220 |
+
lq_path = self.data_info['lq_path'][index]
|
221 |
+
|
222 |
+
select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
|
223 |
+
|
224 |
+
if self.cache_data:
|
225 |
+
if self.opt['use_duf_downsampling']:
|
226 |
+
# read imgs_gt to generate low-resolution frames
|
227 |
+
imgs_lq = self.imgs_gt[folder].index_select(0, torch.LongTensor(select_idx))
|
228 |
+
imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
|
229 |
+
else:
|
230 |
+
imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx))
|
231 |
+
img_gt = self.imgs_gt[folder][idx]
|
232 |
+
else:
|
233 |
+
if self.opt['use_duf_downsampling']:
|
234 |
+
img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
|
235 |
+
# read imgs_gt to generate low-resolution frames
|
236 |
+
imgs_lq = read_img_seq(img_paths_lq, require_mod_crop=True, scale=self.opt['scale'])
|
237 |
+
imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
|
238 |
+
else:
|
239 |
+
img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
|
240 |
+
imgs_lq = read_img_seq(img_paths_lq)
|
241 |
+
img_gt = read_img_seq([self.imgs_gt[folder][idx]], require_mod_crop=True, scale=self.opt['scale'])
|
242 |
+
img_gt.squeeze_(0)
|
243 |
+
|
244 |
+
return {
|
245 |
+
'lq': imgs_lq, # (t, c, h, w)
|
246 |
+
'gt': img_gt, # (c, h, w)
|
247 |
+
'folder': folder, # folder name
|
248 |
+
'idx': self.data_info['idx'][index], # e.g., 0/99
|
249 |
+
'border': border, # 1 for border, 0 for non-border
|
250 |
+
'lq_path': lq_path # center frame
|
251 |
+
}
|
252 |
+
|
253 |
+
|
254 |
+
@DATASET_REGISTRY.register()
|
255 |
+
class VideoRecurrentTestDataset(VideoTestDataset):
|
256 |
+
"""Video test dataset for recurrent architectures, which takes LR video
|
257 |
+
frames as input and outputs corresponding HR video frames.
|
258 |
+
|
259 |
+
Args:
|
260 |
+
Same as VideoTestDataset.
|
261 |
+
Unused opt:
|
262 |
+
padding (str): Padding mode.
|
263 |
+
|
264 |
+
"""
|
265 |
+
|
266 |
+
def __init__(self, opt):
|
267 |
+
super(VideoRecurrentTestDataset, self).__init__(opt)
|
268 |
+
# Find unique folder strings
|
269 |
+
self.folders = sorted(list(set(self.data_info['folder'])))
|
270 |
+
|
271 |
+
def __getitem__(self, index):
|
272 |
+
folder = self.folders[index]
|
273 |
+
|
274 |
+
if self.cache_data:
|
275 |
+
imgs_lq = self.imgs_lq[folder]
|
276 |
+
imgs_gt = self.imgs_gt[folder]
|
277 |
+
else:
|
278 |
+
raise NotImplementedError('Without cache_data is not implemented.')
|
279 |
+
|
280 |
+
return {
|
281 |
+
'lq': imgs_lq,
|
282 |
+
'gt': imgs_gt,
|
283 |
+
'folder': folder,
|
284 |
+
}
|
285 |
+
|
286 |
+
def __len__(self):
|
287 |
+
return len(self.folders)
|
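A minimal test-time option sketch for VideoTestDataset; the paths are placeholders and the padding value is an assumption about the modes accepted by generate_frame_indices.

# Hypothetical option dict for VideoTestDataset (paths are placeholders).
from basicsr.data.video_test_dataset import VideoTestDataset

opt = {
    'name': 'REDS4',                                # one of: Vid4, REDS4, REDSofficial
    'dataroot_gt': 'datasets/REDS4/GT',             # placeholder path
    'dataroot_lq': 'datasets/REDS4/sharp_bicubic',  # placeholder path
    'io_backend': {'type': 'disk'},
    'cache_data': False,
    'num_frame': 5,
    'padding': 'reflection',                        # assumed padding mode
}
dataset = VideoTestDataset(opt)
sample = dataset[0]  # keys: lq (t, c, h, w), gt (c, h, w), folder, idx, border, lq_path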
basicsr/data/vimeo90k_dataset.py
ADDED
@@ -0,0 +1,192 @@
1 |
+
import random
|
2 |
+
import torch
|
3 |
+
from pathlib import Path
|
4 |
+
from torch.utils import data as data
|
5 |
+
|
6 |
+
from basicsr.data.transforms import augment, paired_random_crop
|
7 |
+
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
|
8 |
+
from basicsr.utils.registry import DATASET_REGISTRY
|
9 |
+
|
10 |
+
|
11 |
+
@DATASET_REGISTRY.register()
|
12 |
+
class Vimeo90KDataset(data.Dataset):
|
13 |
+
"""Vimeo90K dataset for training.
|
14 |
+
|
15 |
+
The keys are generated from a meta info txt file.
|
16 |
+
basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
|
17 |
+
|
18 |
+
Each line contains:
|
19 |
+
1. clip name; 2. frame number; 3. image shape, separated by a white space.
|
20 |
+
Examples:
|
21 |
+
00001/0001 7 (256,448,3)
|
22 |
+
00001/0002 7 (256,448,3)
|
23 |
+
|
24 |
+
Key examples: "00001/0001"
|
25 |
+
GT (gt): Ground-Truth;
|
26 |
+
LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
|
27 |
+
|
28 |
+
The neighboring frame list for different num_frame:
|
29 |
+
num_frame | frame list
|
30 |
+
1 | 4
|
31 |
+
3 | 3,4,5
|
32 |
+
5 | 2,3,4,5,6
|
33 |
+
7 | 1,2,3,4,5,6,7
|
34 |
+
|
35 |
+
Args:
|
36 |
+
opt (dict): Config for train dataset. It contains the following keys:
|
37 |
+
dataroot_gt (str): Data root path for gt.
|
38 |
+
dataroot_lq (str): Data root path for lq.
|
39 |
+
meta_info_file (str): Path for meta information file.
|
40 |
+
io_backend (dict): IO backend type and other kwarg.
|
41 |
+
|
42 |
+
num_frame (int): Window size for input frames.
|
43 |
+
gt_size (int): Cropped patch size for gt patches.
|
44 |
+
random_reverse (bool): Random reverse input frames.
|
45 |
+
use_hflip (bool): Use horizontal flips.
|
46 |
+
use_rot (bool): Use rotation (use vertical flip and transposing h
|
47 |
+
and w for implementation).
|
48 |
+
|
49 |
+
scale (int): Scale, which will be added automatically.
|
50 |
+
"""
|
51 |
+
|
52 |
+
def __init__(self, opt):
|
53 |
+
super(Vimeo90KDataset, self).__init__()
|
54 |
+
self.opt = opt
|
55 |
+
self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
|
56 |
+
|
57 |
+
with open(opt['meta_info_file'], 'r') as fin:
|
58 |
+
self.keys = [line.split(' ')[0] for line in fin]
|
59 |
+
|
60 |
+
# file client (io backend)
|
61 |
+
self.file_client = None
|
62 |
+
self.io_backend_opt = opt['io_backend']
|
63 |
+
self.is_lmdb = False
|
64 |
+
if self.io_backend_opt['type'] == 'lmdb':
|
65 |
+
self.is_lmdb = True
|
66 |
+
self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
|
67 |
+
self.io_backend_opt['client_keys'] = ['lq', 'gt']
|
68 |
+
|
69 |
+
# indices of input images
|
70 |
+
self.neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
|
71 |
+
|
72 |
+
# temporal augmentation configs
|
73 |
+
self.random_reverse = opt['random_reverse']
|
74 |
+
logger = get_root_logger()
|
75 |
+
logger.info(f'Random reverse is {self.random_reverse}.')
|
76 |
+
|
77 |
+
def __getitem__(self, index):
|
78 |
+
if self.file_client is None:
|
79 |
+
self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
|
80 |
+
|
81 |
+
# random reverse
|
82 |
+
if self.random_reverse and random.random() < 0.5:
|
83 |
+
self.neighbor_list.reverse()
|
84 |
+
|
85 |
+
scale = self.opt['scale']
|
86 |
+
gt_size = self.opt['gt_size']
|
87 |
+
key = self.keys[index]
|
88 |
+
clip, seq = key.split('/') # key example: 00001/0001
|
89 |
+
|
90 |
+
# get the GT frame (im4.png)
|
91 |
+
if self.is_lmdb:
|
92 |
+
img_gt_path = f'{key}/im4'
|
93 |
+
else:
|
94 |
+
img_gt_path = self.gt_root / clip / seq / 'im4.png'
|
95 |
+
img_bytes = self.file_client.get(img_gt_path, 'gt')
|
96 |
+
img_gt = imfrombytes(img_bytes, float32=True)
|
97 |
+
|
98 |
+
# get the neighboring LQ frames
|
99 |
+
img_lqs = []
|
100 |
+
for neighbor in self.neighbor_list:
|
101 |
+
if self.is_lmdb:
|
102 |
+
img_lq_path = f'{clip}/{seq}/im{neighbor}'
|
103 |
+
else:
|
104 |
+
img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
|
105 |
+
img_bytes = self.file_client.get(img_lq_path, 'lq')
|
106 |
+
img_lq = imfrombytes(img_bytes, float32=True)
|
107 |
+
img_lqs.append(img_lq)
|
108 |
+
|
109 |
+
# randomly crop
|
110 |
+
img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path)
|
111 |
+
|
112 |
+
# augmentation - flip, rotate
|
113 |
+
img_lqs.append(img_gt)
|
114 |
+
img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
|
115 |
+
|
116 |
+
img_results = img2tensor(img_results)
|
117 |
+
img_lqs = torch.stack(img_results[0:-1], dim=0)
|
118 |
+
img_gt = img_results[-1]
|
119 |
+
|
120 |
+
# img_lqs: (t, c, h, w)
|
121 |
+
# img_gt: (c, h, w)
|
122 |
+
# key: str
|
123 |
+
return {'lq': img_lqs, 'gt': img_gt, 'key': key}
|
124 |
+
|
125 |
+
def __len__(self):
|
126 |
+
return len(self.keys)
|
127 |
+
|
128 |
+
|
129 |
+
@DATASET_REGISTRY.register()
|
130 |
+
class Vimeo90KRecurrentDataset(Vimeo90KDataset):
|
131 |
+
|
132 |
+
def __init__(self, opt):
|
133 |
+
super(Vimeo90KRecurrentDataset, self).__init__(opt)
|
134 |
+
|
135 |
+
self.flip_sequence = opt['flip_sequence']
|
136 |
+
self.neighbor_list = [1, 2, 3, 4, 5, 6, 7]
|
137 |
+
|
138 |
+
def __getitem__(self, index):
|
139 |
+
if self.file_client is None:
|
140 |
+
self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
|
141 |
+
|
142 |
+
# random reverse
|
143 |
+
if self.random_reverse and random.random() < 0.5:
|
144 |
+
self.neighbor_list.reverse()
|
145 |
+
|
146 |
+
scale = self.opt['scale']
|
147 |
+
gt_size = self.opt['gt_size']
|
148 |
+
key = self.keys[index]
|
149 |
+
clip, seq = key.split('/') # key example: 00001/0001
|
150 |
+
|
151 |
+
# get the neighboring LQ and GT frames
|
152 |
+
img_lqs = []
|
153 |
+
img_gts = []
|
154 |
+
for neighbor in self.neighbor_list:
|
155 |
+
if self.is_lmdb:
|
156 |
+
img_lq_path = f'{clip}/{seq}/im{neighbor}'
|
157 |
+
img_gt_path = f'{clip}/{seq}/im{neighbor}'
|
158 |
+
else:
|
159 |
+
img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
|
160 |
+
img_gt_path = self.gt_root / clip / seq / f'im{neighbor}.png'
|
161 |
+
# LQ
|
162 |
+
img_bytes = self.file_client.get(img_lq_path, 'lq')
|
163 |
+
img_lq = imfrombytes(img_bytes, float32=True)
|
164 |
+
# GT
|
165 |
+
img_bytes = self.file_client.get(img_gt_path, 'gt')
|
166 |
+
img_gt = imfrombytes(img_bytes, float32=True)
|
167 |
+
|
168 |
+
img_lqs.append(img_lq)
|
169 |
+
img_gts.append(img_gt)
|
170 |
+
|
171 |
+
# randomly crop
|
172 |
+
img_gts, img_lqs = paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path)
|
173 |
+
|
174 |
+
# augmentation - flip, rotate
|
175 |
+
img_lqs.extend(img_gts)
|
176 |
+
img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
|
177 |
+
|
178 |
+
img_results = img2tensor(img_results)
|
179 |
+
img_lqs = torch.stack(img_results[:7], dim=0)
|
180 |
+
img_gts = torch.stack(img_results[7:], dim=0)
|
181 |
+
|
182 |
+
if self.flip_sequence: # flip the sequence: 7 frames to 14 frames
|
183 |
+
img_lqs = torch.cat([img_lqs, img_lqs.flip(0)], dim=0)
|
184 |
+
img_gts = torch.cat([img_gts, img_gts.flip(0)], dim=0)
|
185 |
+
|
186 |
+
# img_lqs: (t, c, h, w)
|
187 |
+
# img_gts: (t, c, h, w)
|
188 |
+
# key: str
|
189 |
+
return {'lq': img_lqs, 'gt': img_gts, 'key': key}
|
190 |
+
|
191 |
+
def __len__(self):
|
192 |
+
return len(self.keys)
|
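A minimal training option sketch for Vimeo90KDataset above; the paths are placeholders.

# Hypothetical option dict for Vimeo90KDataset (paths are placeholders).
from basicsr.data.vimeo90k_dataset import Vimeo90KDataset

opt = {
    'dataroot_gt': 'datasets/vimeo90k/GT',  # placeholder path
    'dataroot_lq': 'datasets/vimeo90k/LQ',  # placeholder path
    'meta_info_file': 'basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt',
    'io_backend': {'type': 'disk'},
    'num_frame': 7,
    'gt_size': 256,
    'random_reverse': False,
    'use_hflip': True,
    'use_rot': True,
    'scale': 4,
}
dataset = Vimeo90KDataset(opt)
sample = dataset[0]  # {'lq': (7, c, h, w), 'gt': (c, h, w), 'key': e.g. '00001/0001'}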
basicsr/losses/__init__.py
ADDED
@@ -0,0 +1,26 @@
1 |
+
from copy import deepcopy
|
2 |
+
|
3 |
+
from basicsr.utils import get_root_logger
|
4 |
+
from basicsr.utils.registry import LOSS_REGISTRY
|
5 |
+
from .losses import (CharbonnierLoss, GANLoss, L1Loss, MSELoss, PerceptualLoss, WeightedTVLoss, g_path_regularize,
|
6 |
+
gradient_penalty_loss, r1_penalty)
|
7 |
+
|
8 |
+
__all__ = [
|
9 |
+
'L1Loss', 'MSELoss', 'CharbonnierLoss', 'WeightedTVLoss', 'PerceptualLoss', 'GANLoss', 'gradient_penalty_loss',
|
10 |
+
'r1_penalty', 'g_path_regularize'
|
11 |
+
]
|
12 |
+
|
13 |
+
|
14 |
+
def build_loss(opt):
|
15 |
+
"""Build loss from options.
|
16 |
+
|
17 |
+
Args:
|
18 |
+
opt (dict): Configuration. It must contain:
|
19 |
+
type (str): Model type.
|
20 |
+
"""
|
21 |
+
opt = deepcopy(opt)
|
22 |
+
loss_type = opt.pop('type')
|
23 |
+
loss = LOSS_REGISTRY.get(loss_type)(**opt)
|
24 |
+
logger = get_root_logger()
|
25 |
+
logger.info(f'Loss [{loss.__class__.__name__}] is created.')
|
26 |
+
return loss
|
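build_loss pops the 'type' key and forwards everything else to the registered class; a short sketch using the L1Loss defined in losses.py below.

# Sketch: constructing a pixel loss through the registry.
from basicsr.losses import build_loss

cri_pix = build_loss({'type': 'L1Loss', 'loss_weight': 1.0, 'reduction': 'mean'})
# equivalent to LOSS_REGISTRY.get('L1Loss')(loss_weight=1.0, reduction='mean')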
basicsr/losses/loss_util.py
ADDED
@@ -0,0 +1,95 @@
1 |
+
import functools
|
2 |
+
from torch.nn import functional as F
|
3 |
+
|
4 |
+
|
5 |
+
def reduce_loss(loss, reduction):
|
6 |
+
"""Reduce loss as specified.
|
7 |
+
|
8 |
+
Args:
|
9 |
+
loss (Tensor): Elementwise loss tensor.
|
10 |
+
reduction (str): Options are 'none', 'mean' and 'sum'.
|
11 |
+
|
12 |
+
Returns:
|
13 |
+
Tensor: Reduced loss tensor.
|
14 |
+
"""
|
15 |
+
reduction_enum = F._Reduction.get_enum(reduction)
|
16 |
+
# none: 0, elementwise_mean:1, sum: 2
|
17 |
+
if reduction_enum == 0:
|
18 |
+
return loss
|
19 |
+
elif reduction_enum == 1:
|
20 |
+
return loss.mean()
|
21 |
+
else:
|
22 |
+
return loss.sum()
|
23 |
+
|
24 |
+
|
25 |
+
def weight_reduce_loss(loss, weight=None, reduction='mean'):
|
26 |
+
"""Apply element-wise weight and reduce loss.
|
27 |
+
|
28 |
+
Args:
|
29 |
+
loss (Tensor): Element-wise loss.
|
30 |
+
weight (Tensor): Element-wise weights. Default: None.
|
31 |
+
reduction (str): Same as built-in losses of PyTorch. Options are
|
32 |
+
'none', 'mean' and 'sum'. Default: 'mean'.
|
33 |
+
|
34 |
+
Returns:
|
35 |
+
Tensor: Loss values.
|
36 |
+
"""
|
37 |
+
# if weight is specified, apply element-wise weight
|
38 |
+
if weight is not None:
|
39 |
+
assert weight.dim() == loss.dim()
|
40 |
+
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
|
41 |
+
loss = loss * weight
|
42 |
+
|
43 |
+
# if weight is not specified or reduction is sum, just reduce the loss
|
44 |
+
if weight is None or reduction == 'sum':
|
45 |
+
loss = reduce_loss(loss, reduction)
|
46 |
+
# if reduction is mean, then compute mean over weight region
|
47 |
+
elif reduction == 'mean':
|
48 |
+
if weight.size(1) > 1:
|
49 |
+
weight = weight.sum()
|
50 |
+
else:
|
51 |
+
weight = weight.sum() * loss.size(1)
|
52 |
+
loss = loss.sum() / weight
|
53 |
+
|
54 |
+
return loss
|
55 |
+
|
56 |
+
|
57 |
+
def weighted_loss(loss_func):
|
58 |
+
"""Create a weighted version of a given loss function.
|
59 |
+
|
60 |
+
To use this decorator, the loss function must have the signature like
|
61 |
+
`loss_func(pred, target, **kwargs)`. The function only needs to compute
|
62 |
+
element-wise loss without any reduction. This decorator will add weight
|
63 |
+
and reduction arguments to the function. The decorated function will have
|
64 |
+
the signature like `loss_func(pred, target, weight=None, reduction='mean',
|
65 |
+
**kwargs)`.
|
66 |
+
|
67 |
+
:Example:
|
68 |
+
|
69 |
+
>>> import torch
|
70 |
+
>>> @weighted_loss
|
71 |
+
>>> def l1_loss(pred, target):
|
72 |
+
>>> return (pred - target).abs()
|
73 |
+
|
74 |
+
>>> pred = torch.Tensor([0, 2, 3])
|
75 |
+
>>> target = torch.Tensor([1, 1, 1])
|
76 |
+
>>> weight = torch.Tensor([1, 0, 1])
|
77 |
+
|
78 |
+
>>> l1_loss(pred, target)
|
79 |
+
tensor(1.3333)
|
80 |
+
>>> l1_loss(pred, target, weight)
|
81 |
+
tensor(1.5000)
|
82 |
+
>>> l1_loss(pred, target, reduction='none')
|
83 |
+
tensor([1., 1., 2.])
|
84 |
+
>>> l1_loss(pred, target, weight, reduction='sum')
|
85 |
+
tensor(3.)
|
86 |
+
"""
|
87 |
+
|
88 |
+
@functools.wraps(loss_func)
|
89 |
+
def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
|
90 |
+
# get element-wise loss
|
91 |
+
loss = loss_func(pred, target, **kwargs)
|
92 |
+
loss = weight_reduce_loss(loss, weight, reduction)
|
93 |
+
return loss
|
94 |
+
|
95 |
+
return wrapper
|
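A small numeric check of the weighted-mean branch of weight_reduce_loss: with a single-channel weight map the denominator is weight.sum() * loss.size(1).

# Numeric sketch for weight_reduce_loss with a single-channel weight map.
import torch
from basicsr.losses.loss_util import weight_reduce_loss

loss = torch.ones(1, 3, 2, 2)   # element-wise loss
weight = torch.zeros(1, 1, 2, 2)
weight[..., 0, 0] = 1.0         # only one spatial position contributes
out = weight_reduce_loss(loss, weight, reduction='mean')
# numerator: (loss * weight).sum() = 3; denominator: weight.sum() * loss.size(1) = 3 -> out = 1.0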
basicsr/losses/losses.py
ADDED
@@ -0,0 +1,492 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
from torch import autograd as autograd
|
4 |
+
from torch import nn as nn
|
5 |
+
from torch.nn import functional as F
|
6 |
+
|
7 |
+
from basicsr.archs.vgg_arch import VGGFeatureExtractor
|
8 |
+
from basicsr.utils.registry import LOSS_REGISTRY
|
9 |
+
from .loss_util import weighted_loss
|
10 |
+
|
11 |
+
_reduction_modes = ['none', 'mean', 'sum']
|
12 |
+
|
13 |
+
|
14 |
+
@weighted_loss
|
15 |
+
def l1_loss(pred, target):
|
16 |
+
return F.l1_loss(pred, target, reduction='none')
|
17 |
+
|
18 |
+
|
19 |
+
@weighted_loss
|
20 |
+
def mse_loss(pred, target):
|
21 |
+
return F.mse_loss(pred, target, reduction='none')
|
22 |
+
|
23 |
+
|
24 |
+
@weighted_loss
|
25 |
+
def charbonnier_loss(pred, target, eps=1e-12):
|
26 |
+
return torch.sqrt((pred - target)**2 + eps)
|
27 |
+
|
28 |
+
|
29 |
+
@LOSS_REGISTRY.register()
|
30 |
+
class L1Loss(nn.Module):
|
31 |
+
"""L1 (mean absolute error, MAE) loss.
|
32 |
+
|
33 |
+
Args:
|
34 |
+
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
|
35 |
+
reduction (str): Specifies the reduction to apply to the output.
|
36 |
+
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
|
37 |
+
"""
|
38 |
+
|
39 |
+
def __init__(self, loss_weight=1.0, reduction='mean'):
|
40 |
+
super(L1Loss, self).__init__()
|
41 |
+
if reduction not in ['none', 'mean', 'sum']:
|
42 |
+
raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}')
|
43 |
+
|
44 |
+
self.loss_weight = loss_weight
|
45 |
+
self.reduction = reduction
|
46 |
+
|
47 |
+
def forward(self, pred, target, weight=None, **kwargs):
|
48 |
+
"""
|
49 |
+
Args:
|
50 |
+
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
|
51 |
+
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
|
52 |
+
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
|
53 |
+
"""
|
54 |
+
return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction)
|
55 |
+
|
56 |
+
|
57 |
+
@LOSS_REGISTRY.register()
|
58 |
+
class MSELoss(nn.Module):
|
59 |
+
"""MSE (L2) loss.
|
60 |
+
|
61 |
+
Args:
|
62 |
+
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
|
63 |
+
reduction (str): Specifies the reduction to apply to the output.
|
64 |
+
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
|
65 |
+
"""
|
66 |
+
|
67 |
+
def __init__(self, loss_weight=1.0, reduction='mean'):
|
68 |
+
super(MSELoss, self).__init__()
|
69 |
+
if reduction not in ['none', 'mean', 'sum']:
|
70 |
+
raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}')
|
71 |
+
|
72 |
+
self.loss_weight = loss_weight
|
73 |
+
self.reduction = reduction
|
74 |
+
|
75 |
+
def forward(self, pred, target, weight=None, **kwargs):
|
76 |
+
"""
|
77 |
+
Args:
|
78 |
+
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
|
79 |
+
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
|
80 |
+
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
|
81 |
+
"""
|
82 |
+
return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction)
|
83 |
+
|
84 |
+
|
85 |
+
@LOSS_REGISTRY.register()
|
86 |
+
class CharbonnierLoss(nn.Module):
|
87 |
+
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
|
88 |
+
variant of L1Loss).
|
89 |
+
|
90 |
+
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
|
91 |
+
Super-Resolution".
|
92 |
+
|
93 |
+
Args:
|
94 |
+
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
|
95 |
+
reduction (str): Specifies the reduction to apply to the output.
|
96 |
+
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
|
97 |
+
eps (float): A value used to control the curvature near zero. Default: 1e-12.
|
98 |
+
"""
|
99 |
+
|
100 |
+
def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12):
|
101 |
+
super(CharbonnierLoss, self).__init__()
|
102 |
+
if reduction not in ['none', 'mean', 'sum']:
|
103 |
+
raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}')
|
104 |
+
|
105 |
+
self.loss_weight = loss_weight
|
106 |
+
self.reduction = reduction
|
107 |
+
self.eps = eps
|
108 |
+
|
109 |
+
def forward(self, pred, target, weight=None, **kwargs):
|
110 |
+
"""
|
111 |
+
Args:
|
112 |
+
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
|
113 |
+
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
|
114 |
+
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
|
115 |
+
"""
|
116 |
+
return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction)
|
117 |
+
|
118 |
+
|
119 |
+
@LOSS_REGISTRY.register()
|
120 |
+
class WeightedTVLoss(L1Loss):
|
121 |
+
"""Weighted TV loss.
|
122 |
+
|
123 |
+
Args:
|
124 |
+
loss_weight (float): Loss weight. Default: 1.0.
|
125 |
+
"""
|
126 |
+
|
127 |
+
def __init__(self, loss_weight=1.0, reduction='mean'):
|
128 |
+
if reduction not in ['mean', 'sum']:
|
129 |
+
raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: mean | sum')
|
130 |
+
super(WeightedTVLoss, self).__init__(loss_weight=loss_weight, reduction=reduction)
|
131 |
+
|
132 |
+
def forward(self, pred, weight=None):
|
133 |
+
if weight is None:
|
134 |
+
y_weight = None
|
135 |
+
x_weight = None
|
136 |
+
else:
|
137 |
+
y_weight = weight[:, :, :-1, :]
|
138 |
+
x_weight = weight[:, :, :, :-1]
|
139 |
+
|
140 |
+
y_diff = super().forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
|
141 |
+
x_diff = super().forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)
|
142 |
+
|
143 |
+
loss = x_diff + y_diff
|
144 |
+
|
145 |
+
return loss
|
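With weight=None and loss_weight=1.0, WeightedTVLoss reduces to the plain anisotropic total variation; a sketch of the equivalent computation:

# Unweighted equivalent of WeightedTVLoss (loss_weight=1.0, reduction='mean').
import torch

pred = torch.rand(1, 3, 8, 8)
y_diff = (pred[:, :, :-1, :] - pred[:, :, 1:, :]).abs().mean()
x_diff = (pred[:, :, :, :-1] - pred[:, :, :, 1:]).abs().mean()
tv = y_diff + x_diff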
146 |
+
|
147 |
+
|
148 |
+
@LOSS_REGISTRY.register()
|
149 |
+
class PerceptualLoss(nn.Module):
|
150 |
+
"""Perceptual loss with commonly used style loss.
|
151 |
+
|
152 |
+
Args:
|
153 |
+
layer_weights (dict): The weight for each layer of vgg feature.
|
154 |
+
Here is an example: {'conv5_4': 1.}, which means the conv5_4
|
155 |
+
feature layer (before relu5_4) will be extracted with weight
|
156 |
+
1.0 in calculating losses.
|
157 |
+
vgg_type (str): The type of vgg network used as feature extractor.
|
158 |
+
Default: 'vgg19'.
|
159 |
+
use_input_norm (bool): If True, normalize the input image in vgg.
|
160 |
+
Default: True.
|
161 |
+
range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
|
162 |
+
Default: False.
|
163 |
+
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
|
164 |
+
loss will be calculated and the loss will be multiplied by the
|
165 |
+
weight. Default: 1.0.
|
166 |
+
style_weight (float): If `style_weight > 0`, the style loss will be
|
167 |
+
calculated and the loss will be multiplied by the weight.
|
168 |
+
Default: 0.
|
169 |
+
criterion (str): Criterion used for perceptual loss. Default: 'l1'.
|
170 |
+
"""
|
171 |
+
|
172 |
+
def __init__(self,
|
173 |
+
layer_weights,
|
174 |
+
vgg_type='vgg19',
|
175 |
+
use_input_norm=True,
|
176 |
+
range_norm=False,
|
177 |
+
perceptual_weight=1.0,
|
178 |
+
style_weight=0.,
|
179 |
+
criterion='l1'):
|
180 |
+
super(PerceptualLoss, self).__init__()
|
181 |
+
self.perceptual_weight = perceptual_weight
|
182 |
+
self.style_weight = style_weight
|
183 |
+
self.layer_weights = layer_weights
|
184 |
+
self.vgg = VGGFeatureExtractor(
|
185 |
+
layer_name_list=list(layer_weights.keys()),
|
186 |
+
vgg_type=vgg_type,
|
187 |
+
use_input_norm=use_input_norm,
|
188 |
+
range_norm=range_norm)
|
189 |
+
|
190 |
+
self.criterion_type = criterion
|
191 |
+
if self.criterion_type == 'l1':
|
192 |
+
self.criterion = torch.nn.L1Loss()
|
193 |
+
elif self.criterion_type == 'l2':
|
194 |
+
self.criterion = torch.nn.L2loss()
|
195 |
+
elif self.criterion_type == 'fro':
|
196 |
+
self.criterion = None
|
197 |
+
else:
|
198 |
+
raise NotImplementedError(f'{criterion} criterion has not been supported.')
|
199 |
+
|
200 |
+
def forward(self, x, gt):
|
201 |
+
"""Forward function.
|
202 |
+
|
203 |
+
Args:
|
204 |
+
x (Tensor): Input tensor with shape (n, c, h, w).
|
205 |
+
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
|
206 |
+
|
207 |
+
Returns:
|
208 |
+
Tensor: Forward results.
|
209 |
+
"""
|
210 |
+
# extract vgg features
|
211 |
+
x_features = self.vgg(x)
|
212 |
+
gt_features = self.vgg(gt.detach())
|
213 |
+
|
214 |
+
# calculate perceptual loss
|
215 |
+
if self.perceptual_weight > 0:
|
216 |
+
percep_loss = 0
|
217 |
+
for k in x_features.keys():
|
218 |
+
if self.criterion_type == 'fro':
|
219 |
+
percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k]
|
220 |
+
else:
|
221 |
+
percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k]
|
222 |
+
percep_loss *= self.perceptual_weight
|
223 |
+
else:
|
224 |
+
percep_loss = None
|
225 |
+
|
226 |
+
# calculate style loss
|
227 |
+
if self.style_weight > 0:
|
228 |
+
style_loss = 0
|
229 |
+
for k in x_features.keys():
|
230 |
+
if self.criterion_type == 'fro':
|
231 |
+
style_loss += torch.norm(
|
232 |
+
self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k]
|
233 |
+
else:
|
234 |
+
style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat(
|
235 |
+
gt_features[k])) * self.layer_weights[k]
|
236 |
+
style_loss *= self.style_weight
|
237 |
+
else:
|
238 |
+
style_loss = None
|
239 |
+
|
240 |
+
return percep_loss, style_loss
|
241 |
+
|
242 |
+
def _gram_mat(self, x):
|
243 |
+
"""Calculate Gram matrix.
|
244 |
+
|
245 |
+
Args:
|
246 |
+
x (torch.Tensor): Tensor with shape of (n, c, h, w).
|
247 |
+
|
248 |
+
Returns:
|
249 |
+
torch.Tensor: Gram matrix.
|
250 |
+
"""
|
251 |
+
n, c, h, w = x.size()
|
252 |
+
features = x.view(n, c, w * h)
|
253 |
+
features_t = features.transpose(1, 2)
|
254 |
+
gram = features.bmm(features_t) / (c * h * w)
|
255 |
+
return gram
|
256 |
+
|
257 |
+
|
258 |
+
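A hedged usage sketch for PerceptualLoss follows. The layer weights reuse the {'conv5_4': 1.} example from the docstring; the image shapes and the surrounding training step are assumptions, and running it requires the pretrained VGG19 weights used by VGGFeatureExtractor.

import torch

# Hypothetical restored output and ground truth in [0, 1], NCHW layout.
sr = torch.rand(2, 3, 128, 128, requires_grad=True)
gt = torch.rand(2, 3, 128, 128)

percep = PerceptualLoss(layer_weights={'conv5_4': 1.0}, vgg_type='vgg19',
                        perceptual_weight=1.0, style_weight=0.)

percep_loss, style_loss = percep(sr, gt)
# style_loss is None here because style_weight == 0.
percep_loss.backward()
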
@LOSS_REGISTRY.register()
class GANLoss(nn.Module):
    """Define GAN loss.

    Args:
        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'wgan_softplus'
            and 'hinge'.
        real_label_val (float): The value for real label. Default: 1.0.
        fake_label_val (float): The value for fake label. Default: 0.0.
        loss_weight (float): Loss weight. Default: 1.0.
            Note that loss_weight is only for generators; it is always 1.0
            for discriminators.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type
        self.loss_weight = loss_weight
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val

        if self.gan_type == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = self._wgan_loss
        elif self.gan_type == 'wgan_softplus':
            self.loss = self._wgan_softplus_loss
        elif self.gan_type == 'hinge':
            self.loss = nn.ReLU()
        else:
            raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.')

    def _wgan_loss(self, input, target):
        """wgan loss.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label.

        Returns:
            Tensor: wgan loss.
        """
        return -input.mean() if target else input.mean()

    def _wgan_softplus_loss(self, input, target):
        """wgan loss with soft plus. softplus is a smooth approximation to the
        ReLU function.

        In StyleGAN2, it is called:
            Logistic loss for discriminator;
            Non-saturating loss for generator.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label.

        Returns:
            Tensor: wgan loss.
        """
        return F.softplus(-input).mean() if target else F.softplus(input).mean()

    def get_target_label(self, input, target_is_real):
        """Get target label.

        Args:
            input (Tensor): Input tensor.
            target_is_real (bool): Whether the target is real or fake.

        Returns:
            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,
                return Tensor.
        """

        if self.gan_type in ['wgan', 'wgan_softplus']:
            return target_is_real
        target_val = (self.real_label_val if target_is_real else self.fake_label_val)
        return input.new_ones(input.size()) * target_val

    def forward(self, input, target_is_real, is_disc=False):
        """
        Args:
            input (Tensor): The input for the loss module, i.e., the network
                prediction.
            target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): Whether the loss is computed for discriminators or not.
                Default: False.

        Returns:
            Tensor: GAN loss value.
        """
        target_label = self.get_target_label(input, target_is_real)
        if self.gan_type == 'hinge':
            if is_disc:  # for discriminators in hinge-gan
                input = -input if target_is_real else input
                loss = self.loss(1 + input).mean()
            else:  # for generators in hinge-gan
                loss = -input.mean()
        else:  # other gan types
            loss = self.loss(input, target_label)

        # loss_weight is always 1.0 for discriminators
        return loss if is_disc else loss * self.loss_weight

@LOSS_REGISTRY.register()
class MultiScaleGANLoss(GANLoss):
    """
    MultiScaleGANLoss accepts a list of predictions.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0):
        super(MultiScaleGANLoss, self).__init__(gan_type, real_label_val, fake_label_val, loss_weight)

    def forward(self, input, target_is_real, is_disc=False):
        """
        The input is a list of tensors, or a list of (a list of tensors).
        """
        if isinstance(input, list):
            loss = 0
            for pred_i in input:
                if isinstance(pred_i, list):
                    # Only compute GAN loss for the last layer
                    # in case of multiscale feature matching
                    pred_i = pred_i[-1]
                # Safe operation: 0-dim tensor calling self.mean() does nothing
                loss_tensor = super().forward(pred_i, target_is_real, is_disc).mean()
                loss += loss_tensor
            return loss / len(input)
        else:
            return super().forward(input, target_is_real, is_disc)

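As a minimal sketch of how GANLoss is typically split between the generator and discriminator updates (not part of this file; the discriminator logits and the 0.1 weight are assumptions):

import torch

cri_gan = GANLoss(gan_type='vanilla', loss_weight=0.1)

# Hypothetical raw discriminator logits for fake and real batches.
fake_pred = torch.randn(4, 1, requires_grad=True)
real_pred = torch.randn(4, 1, requires_grad=True)

# Generator step: fake samples should be judged real; loss_weight applies.
l_g_gan = cri_gan(fake_pred, target_is_real=True, is_disc=False)

# Discriminator step: real as real, fake as fake; loss_weight is ignored (always 1.0).
l_d = cri_gan(real_pred, True, is_disc=True) + cri_gan(fake_pred, False, is_disc=True)
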
def r1_penalty(real_pred, real_img):
    """R1 regularization for discriminator. The core idea is to
    penalize the gradient on real data alone: when the
    generator distribution produces the true data distribution
    and the discriminator is equal to 0 on the data manifold, the
    gradient penalty ensures that the discriminator cannot create
    a non-zero gradient orthogonal to the data manifold without
    suffering a loss in the GAN game.

    Ref:
    Eq. 9 in "Which Training Methods for GANs do actually Converge?"
    """
    grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0]
    grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
    return grad_penalty


def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3])
    grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0]
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))

    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)

    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_lengths.detach().mean(), path_mean.detach()


def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
    """Calculate gradient penalty for wgan-gp.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        weight (Tensor): Weight tensor. Default: None.

    Returns:
        Tensor: A tensor for gradient penalty.
    """

    batch_size = real_data.size(0)
    alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1))

    # interpolate between real_data and fake_data
    interpolates = alpha * real_data + (1. - alpha) * fake_data
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = discriminator(interpolates)
    gradients = autograd.grad(
        outputs=disc_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    if weight is not None:
        gradients = gradients * weight

    gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
    if weight is not None:
        gradients_penalty /= torch.mean(weight)

    return gradients_penalty

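A small illustrative sketch of r1_penalty follows; the `real_pred` expression below is only a differentiable stand-in for an actual discriminator output, and the requires_grad flag on the real images is what makes the gradient computation possible.

import torch

real_img = torch.rand(4, 3, 64, 64, requires_grad=True)
real_pred = (real_img * 2 - 1).mean(dim=(1, 2, 3))  # stand-in for discriminator(real_img)

l_d_r1 = r1_penalty(real_pred, real_img)
# In practice this term is added to the discriminator loss every few
# iterations, scaled by a regularization weight (e.g. r1_reg_weight / 2).
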
@LOSS_REGISTRY.register()
class GANFeatLoss(nn.Module):
    """Define feature matching loss for GANs.

    Args:
        criterion (str): Support 'l1', 'l2', 'charbonnier'.
        loss_weight (float): Loss weight. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
    """

    def __init__(self, criterion='l1', loss_weight=1.0, reduction='mean'):
        super(GANFeatLoss, self).__init__()
        if criterion == 'l1':
            self.loss_op = L1Loss(loss_weight, reduction)
        elif criterion == 'l2':
            self.loss_op = MSELoss(loss_weight, reduction)
        elif criterion == 'charbonnier':
            self.loss_op = CharbonnierLoss(loss_weight, reduction)
        else:
            raise ValueError(f'Unsupported loss mode: {criterion}. Supported ones are: l1|l2|charbonnier')

        self.loss_weight = loss_weight

    def forward(self, pred_fake, pred_real):
        num_d = len(pred_fake)
        loss = 0
        for i in range(num_d):  # for each discriminator
            # last output is the final prediction, exclude it
            num_intermediate_outputs = len(pred_fake[i]) - 1
            for j in range(num_intermediate_outputs):  # for each layer output
                unweighted_loss = self.loss_op(pred_fake[i][j], pred_real[i][j].detach())
                loss += unweighted_loss / num_d
        return loss * self.loss_weight
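The nested-list input format of GANFeatLoss is easy to misread, so here is a short illustrative sketch (not part of the file); the two-discriminator, three-output structure and the feature-map shapes are assumptions.

import torch

feat_match = GANFeatLoss(criterion='l1', loss_weight=10.0)

# One list per discriminator; each list holds the intermediate feature maps
# followed by the final prediction as the last entry (which is skipped).
pred_fake = [[torch.rand(1, 8, 16, 16), torch.rand(1, 16, 8, 8), torch.rand(1, 1, 4, 4)]
             for _ in range(2)]
pred_real = [[torch.rand(1, 8, 16, 16), torch.rand(1, 16, 8, 8), torch.rand(1, 1, 4, 4)]
             for _ in range(2)]

l_feat = feat_match(pred_fake, pred_real)
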
basicsr/metrics/__init__.py
ADDED
@@ -0,0 +1,20 @@
from copy import deepcopy

from basicsr.utils.registry import METRIC_REGISTRY
from .niqe import calculate_niqe
from .psnr_ssim import calculate_psnr, calculate_ssim

__all__ = ['calculate_psnr', 'calculate_ssim', 'calculate_niqe']


def calculate_metric(data, opt):
    """Calculate metric from data and options.

    Args:
        opt (dict): Configuration. It must contain:
            type (str): Metric type.
    """
    opt = deepcopy(opt)
    metric_type = opt.pop('type')
    metric = METRIC_REGISTRY.get(metric_type)(**data, **opt)
    return metric
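A hedged sketch of how calculate_metric dispatches through the registry; the image arrays, the crop_border value, and especially the dict keys are assumptions, since the keys must match the parameter names of the registered metric function in psnr_ssim.py.

import numpy as np

# Hypothetical 8-bit images to compare; keyword names img1/img2 are assumed here.
data = dict(
    img1=np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8),
    img2=np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8),
)
opt = dict(type='calculate_psnr', crop_border=4)

psnr = calculate_metric(data, opt)
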
basicsr/metrics/fid.py
ADDED
@@ -0,0 +1,93 @@
import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from tqdm import tqdm

from basicsr.archs.inception import InceptionV3


def load_patched_inception_v3(device='cuda', resize_input=True, normalize_input=False):
    # we may not resize the input, but in [rosinality/stylegan2-pytorch] it
    # does resize the input.
    inception = InceptionV3([3], resize_input=resize_input, normalize_input=normalize_input)
    inception = nn.DataParallel(inception).eval().to(device)
    return inception


@torch.no_grad()
def extract_inception_features(data_generator, inception, len_generator=None, device='cuda'):
    """Extract inception features.

    Args:
        data_generator (generator): A data generator.
        inception (nn.Module): Inception model.
        len_generator (int): Length of the data_generator to show the
            progressbar. Default: None.
        device (str): Device. Default: cuda.

    Returns:
        Tensor: Extracted features.
    """
    if len_generator is not None:
        pbar = tqdm(total=len_generator, unit='batch', desc='Extract')
    else:
        pbar = None
    features = []

    for data in data_generator:
        if pbar:
            pbar.update(1)
        data = data.to(device)
        feature = inception(data)[0].view(data.shape[0], -1)
        features.append(feature.to('cpu'))
    if pbar:
        pbar.close()
    features = torch.cat(features, 0)
    return features


def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Args:
        mu1 (np.array): The sample mean over activations.
        sigma1 (np.array): The covariance matrix over activations for
            generated samples.
        mu2 (np.array): The sample mean over activations, precalculated on a
            representative data set.
        sigma2 (np.array): The covariance matrix over activations,
            precalculated on a representative data set.

    Returns:
        float: The Frechet Distance.
    """
    assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, 'Two covariances have different dimensions'

    cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)

    # Product might be almost singular
    if not np.isfinite(cov_sqrt).all():
        print(f'Product of cov matrices is singular. Adding {eps} to diagonal of cov estimates')
        offset = np.eye(sigma1.shape[0]) * eps
        cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(cov_sqrt):
        if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
            m = np.max(np.abs(cov_sqrt.imag))
            raise ValueError(f'Imaginary component {m}')
        cov_sqrt = cov_sqrt.real

    mean_diff = mu1 - mu2
    mean_norm = mean_diff @ mean_diff
    trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)
    fid = mean_norm + trace

    return fid
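To close the loop, a hedged end-to-end sketch of how these three helpers might be combined. It assumes a CUDA device, the pretrained InceptionV3 weights, and uses tiny random batches purely for illustration; real FID estimates need tens of thousands of samples so that the 2048x2048 covariance matrices are well conditioned.

import numpy as np
import torch

inception = load_patched_inception_v3(device='cuda')

# Hypothetical generators yielding image batches as float tensors (N, 3, H, W).
real_batches = (torch.rand(8, 3, 299, 299) for _ in range(10))
fake_batches = (torch.rand(8, 3, 299, 299) for _ in range(10))

real_feat = extract_inception_features(real_batches, inception, len_generator=10).numpy()
fake_feat = extract_inception_features(fake_batches, inception, len_generator=10).numpy()

# Fit a Gaussian to each feature set and compare the two.
mu_real, sigma_real = real_feat.mean(0), np.cov(real_feat, rowvar=False)
mu_fake, sigma_fake = fake_feat.mean(0), np.cov(fake_feat, rowvar=False)
fid = calculate_fid(mu_fake, sigma_fake, mu_real, sigma_real)
print('FID:', fid)
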