Spaces: Running on T4
Upload realesrgan.py
realesrgan.py +56 -0
realesrgan.py ADDED @@ -0,0 +1,56 @@
import torch
from torch.nn import functional as F
from PIL import Image
import numpy as np
import cv2

from rrdbnet_arch import RRDBNet
from utils_sr import *


class RealESRGAN:
    def __init__(self, device, scale=4):
        self.device = device
        self.scale = scale
        # RRDBNet backbone used by Real-ESRGAN (23 residual-in-residual dense blocks)
        self.model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=scale)

    def load_weights(self, model_path):
        # Checkpoints may store the weights under 'params' or 'params_ema', or as a bare state dict
        loadnet = torch.load(model_path)
        if 'params' in loadnet:
            self.model.load_state_dict(loadnet['params'], strict=True)
        elif 'params_ema' in loadnet:
            self.model.load_state_dict(loadnet['params_ema'], strict=True)
        else:
            self.model.load_state_dict(loadnet, strict=True)
        self.model.eval()
        self.model.to(self.device)

    @torch.cuda.amp.autocast()
    def predict(self, lr_image, batch_size=4, patches_size=192,
                padding=24, pad_size=15):
        scale = self.scale
        device = self.device
        lr_image = np.array(lr_image)
        # Reflect-pad the borders, then tile the image into overlapping patches
        lr_image = pad_reflect(lr_image, pad_size)

        patches, p_shape = split_image_into_overlapping_patches(lr_image, patch_size=patches_size,
                                                                padding_size=padding)
        img = torch.FloatTensor(patches/255).permute((0,3,1,2)).to(device).detach()

        # Run the network over the patches in batches to bound memory use
        with torch.no_grad():
            res = self.model(img[0:batch_size])
            for i in range(batch_size, img.shape[0], batch_size):
                res = torch.cat((res, self.model(img[i:i+batch_size])), 0)

        sr_image = res.permute((0,2,3,1)).clamp_(0, 1).cpu()
        np_sr_image = sr_image.numpy()

        # Stitch the upscaled patches back together and remove the reflection padding
        padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,)
        scaled_image_shape = tuple(np.multiply(lr_image.shape[0:2], scale)) + (3,)
        np_sr_image = stich_together(np_sr_image, padded_image_shape=padded_size_scaled,
                                     target_shape=scaled_image_shape, padding_size=padding * scale)
        sr_img = (np_sr_image*255).astype(np.uint8)
        sr_img = unpad_image(sr_img, pad_size*scale)
        sr_img = Image.fromarray(sr_img)

        return sr_img
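
For reference, a minimal usage sketch of the class added in this commit. The checkpoint filename (RealESRGAN_x4.pth) and the image paths are placeholders, not part of the upload; only the RealESRGAN constructor, load_weights, and predict calls come from the file above.

import torch
from PIL import Image

from realesrgan import RealESRGAN

# Pick a GPU if available (the Space runs on a T4), otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = RealESRGAN(device, scale=4)
model.load_weights('RealESRGAN_x4.pth')   # assumed local path to a 4x checkpoint

lr_image = Image.open('input.png').convert('RGB')   # placeholder input image
sr_image = model.predict(lr_image)                  # returns a PIL.Image upscaled 4x
sr_image.save('output_x4.png')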