tomaseo2022 committed on
Commit b4f8683
1 Parent(s): 7bd7f5c

Delete network_swinir.py

Files changed (1)
  1. network_swinir.py +0 -854
network_swinir.py DELETED
@@ -1,854 +0,0 @@
# -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally written by Ze Liu, modified by Jingyun Liang.
# -----------------------------------------------------------------------------------

import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
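

def _demo_window_roundtrip():
    # Illustrative sketch, not part of the original file: window_partition and
    # window_reverse are exact inverses whenever H and W are multiples of
    # window_size. Run manually to sanity-check shapes.
    x = torch.randn(2, 16, 16, 3)                # B, H, W, C
    windows = window_partition(x, 8)             # (2 * 2 * 2, 8, 8, 3)
    assert windows.shape == (8, 8, 8, 3)
    assert torch.equal(window_reverse(windows, 8, 16, 16), x)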


class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
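

def _demo_window_attention():
    # Illustrative sketch, not part of the original file: apply W-MSA to a batch
    # of flattened 8x8 windows; the dummy shapes follow the forward() contract above.
    attn = WindowAttention(dim=60, window_size=to_2tuple(8), num_heads=6)
    x = torch.randn(4, 8 * 8, 60)                # num_windows*B, N, C
    assert attn(x, mask=None).shape == (4, 64, 60)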
161
-
162
-
163
- class SwinTransformerBlock(nn.Module):
164
- r""" Swin Transformer Block.
165
-
166
- Args:
167
- dim (int): Number of input channels.
168
- input_resolution (tuple[int]): Input resulotion.
169
- num_heads (int): Number of attention heads.
170
- window_size (int): Window size.
171
- shift_size (int): Shift size for SW-MSA.
172
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
173
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
174
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
175
- drop (float, optional): Dropout rate. Default: 0.0
176
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
177
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
178
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
179
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
180
- """
181
-
182
- def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
183
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
184
- act_layer=nn.GELU, norm_layer=nn.LayerNorm):
185
- super().__init__()
186
- self.dim = dim
187
- self.input_resolution = input_resolution
188
- self.num_heads = num_heads
189
- self.window_size = window_size
190
- self.shift_size = shift_size
191
- self.mlp_ratio = mlp_ratio
192
- if min(self.input_resolution) <= self.window_size:
193
- # if window size is larger than input resolution, we don't partition windows
194
- self.shift_size = 0
195
- self.window_size = min(self.input_resolution)
196
- assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
197
-
198
- self.norm1 = norm_layer(dim)
199
- self.attn = WindowAttention(
200
- dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
201
- qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
202
-
203
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
204
- self.norm2 = norm_layer(dim)
205
- mlp_hidden_dim = int(dim * mlp_ratio)
206
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
207
-
208
- if self.shift_size > 0:
209
- attn_mask = self.calculate_mask(self.input_resolution)
210
- else:
211
- attn_mask = None
212
-
213
- self.register_buffer("attn_mask", attn_mask)
214
-
215
- def calculate_mask(self, x_size):
216
- # calculate attention mask for SW-MSA
217
- H, W = x_size
218
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
219
- h_slices = (slice(0, -self.window_size),
220
- slice(-self.window_size, -self.shift_size),
221
- slice(-self.shift_size, None))
222
- w_slices = (slice(0, -self.window_size),
223
- slice(-self.window_size, -self.shift_size),
224
- slice(-self.shift_size, None))
225
- cnt = 0
226
- for h in h_slices:
227
- for w in w_slices:
228
- img_mask[:, h, w, :] = cnt
229
- cnt += 1
230
-
231
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
232
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
233
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
234
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
235
-
236
- return attn_mask
237
-
238
- def forward(self, x, x_size):
239
- H, W = x_size
240
- B, L, C = x.shape
241
- # assert L == H * W, "input feature has wrong size"
242
-
243
- shortcut = x
244
- x = self.norm1(x)
245
- x = x.view(B, H, W, C)
246
-
247
- # cyclic shift
248
- if self.shift_size > 0:
249
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
250
- else:
251
- shifted_x = x
252
-
253
- # partition windows
254
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
255
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
256
-
257
- # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
258
- if self.input_resolution == x_size:
259
- attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
260
- else:
261
- attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
262
-
263
- # merge windows
264
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
265
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
266
-
267
- # reverse cyclic shift
268
- if self.shift_size > 0:
269
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
270
- else:
271
- x = shifted_x
272
- x = x.view(B, H * W, C)
273
-
274
- # FFN
275
- x = shortcut + self.drop_path(x)
276
- x = x + self.drop_path(self.mlp(self.norm2(x)))
277
-
278
- return x
279
-
280
- def extra_repr(self) -> str:
281
- return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
282
- f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
283
-
284
- def flops(self):
285
- flops = 0
286
- H, W = self.input_resolution
287
- # norm1
288
- flops += self.dim * H * W
289
- # W-MSA/SW-MSA
290
- nW = H * W / self.window_size / self.window_size
291
- flops += nW * self.attn.flops(self.window_size * self.window_size)
292
- # mlp
293
- flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
294
- # norm2
295
- flops += self.dim * H * W
296
- return flops
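

def _demo_swin_block():
    # Illustrative sketch, not part of the original file: a shifted block on a
    # 32x32 feature map. x is the flattened (B, H*W, C) token sequence and
    # x_size carries the spatial size, matching forward() above.
    blk = SwinTransformerBlock(dim=60, input_resolution=(32, 32), num_heads=6,
                               window_size=8, shift_size=4)
    x = torch.randn(1, 32 * 32, 60)
    assert blk(x, x_size=(32, 32)).shape == (1, 1024, 60)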


class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."

        x = x.view(B, H, W, C)

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops
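

def _demo_patch_merging():
    # Illustrative sketch, not part of the original file (SwinIR itself builds
    # its layers with downsample=None): PatchMerging halves each spatial side
    # and doubles the channel dimension.
    merge = PatchMerging(input_resolution=(32, 32), dim=60)
    x = torch.randn(1, 32 * 32, 60)
    assert merge(x).shape == (1, 16 * 16, 120)   # (B, H/2*W/2, 2*C)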


class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, x_size):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops


class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        img_size (int): Input image size.
        patch_size (int): Patch size.
        resi_connection (str): The convolutional block before the residual connection.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv'):
        super(RSTB, self).__init__()

        self.dim = dim
        self.input_resolution = input_resolution

        self.residual_group = BasicLayer(dim=dim,
                                         input_resolution=input_resolution,
                                         depth=depth,
                                         num_heads=num_heads,
                                         window_size=window_size,
                                         mlp_ratio=mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop, attn_drop=attn_drop,
                                         drop_path=drop_path,
                                         norm_layer=norm_layer,
                                         downsample=downsample,
                                         use_checkpoint=use_checkpoint)

        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                                      nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim, 3, 1, 1))

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x

    def flops(self):
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()
        return flops


class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x

    def flops(self):
        flops = 0
        H, W = self.img_size
        if self.norm is not None:
            flops += H * W * self.embed_dim
        return flops


class PatchUnEmbed(nn.Module):
    r""" Image to Patch Unembedding

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        B, HW, C = x.shape
        x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
        return x

    def flops(self):
        flops = 0
        return flops
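

def _demo_patch_embed_roundtrip():
    # Illustrative sketch, not part of the original file: with patch_size=1 (as
    # SwinIR uses below), PatchEmbed only flattens B C H W to B H*W C, and
    # PatchUnEmbed restores the spatial layout.
    embed = PatchEmbed(img_size=32, patch_size=1, embed_dim=60)
    unembed = PatchUnEmbed(img_size=32, patch_size=1, embed_dim=60)
    x = torch.randn(1, 60, 32, 32)
    tokens = embed(x)                            # (1, 1024, 60)
    assert torch.equal(unembed(tokens, (32, 32)), x)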


class Upsample(nn.Sequential):
    """Upsample module.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                m.append(nn.PixelShuffle(2))
        elif scale == 3:
            m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            m.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)


class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module (unlike Upsample, it always uses a single conv
    followed by one pixelshuffle). Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        self.num_feat = num_feat
        self.input_resolution = input_resolution
        m = []
        m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
        m.append(nn.PixelShuffle(scale))
        super(UpsampleOneStep, self).__init__(*m)

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.num_feat * 3 * 9
        return flops
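

def _demo_upsamplers():
    # Illustrative sketch, not part of the original file: both upsamplers take
    # B C H W features; Upsample keeps the channel count, while UpsampleOneStep
    # maps straight to num_out_ch with a single conv + pixelshuffle.
    x = torch.randn(1, 60, 16, 16)
    assert Upsample(scale=4, num_feat=60)(x).shape == (1, 60, 64, 64)
    assert UpsampleOneStep(scale=4, num_feat=60, num_out_ch=3)(x).shape == (1, 3, 64, 64)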


class SwinIR(nn.Module):
    r""" SwinIR
    A PyTorch implementation of `SwinIR: Image Restoration Using Swin Transformer`, based on the Swin Transformer.

    Args:
        img_size (int | tuple(int)): Input image size. Default: 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
        img_range (float): Image range. 1. or 255.
        upsampler (str): The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection (str): The convolutional block before the residual connection. '1conv'/'3conv'
    """

    def __init__(self, img_size=64, patch_size=1, in_chans=3,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler

        #####################################################################################################
        ################################### 1, shallow feature extraction ###################################
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)

        #####################################################################################################
        ################################### 2, deep feature extraction ######################################
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         norm_layer=norm_layer,
                         downsample=None,
                         use_checkpoint=use_checkpoint,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)

        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))

        #####################################################################################################
        ################################ 3, high quality image reconstruction ################################
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (fewer artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def forward_features(self, x):
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)

        for layer in self.layers:
            x = layer(x, x_size)

        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)

        return x

    def forward(self, x):
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range

        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)

        x = x / self.img_range + self.mean

        return x

    def flops(self):
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()
        return flops


if __name__ == '__main__':
    upscale = 4
    window_size = 8
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    model = SwinIR(upscale=upscale, img_size=(height, width),
                   window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
                   embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
    print(model)
    print(height, width, model.flops() / 1e9)

    x = torch.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
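
    # Illustrative sketch, not part of the original file: the same backbone with
    # the 'pixelshuffle' reconstruction path used for classical SR. These
    # hyper-parameters are illustrative, not the paper's exact settings.
    model_classical = SwinIR(upscale=upscale, img_size=(height, width),
                             window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
                             embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2,
                             upsampler='pixelshuffle')
    y = model_classical(torch.randn(1, 3, height, width))
    print(y.shape)  # 4x the input resolution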