Commit c1fd008

Duplicate from havas79/Real-ESRGAN_Demo

Co-authored-by: George Mastrakoulis <havas79@users.noreply.huggingface.co>
- .gitattributes +31 -0
- README.md +13 -0
- app.py +226 -0
- requirements.txt +11 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Real-ESRGAN Demo for Image Restoration and Upscaling
+emoji: 🖼️
+colorFrom: blue
+colorTo: indigo
+sdk: gradio
+sdk_version: 3.3.1
+app_file: app.py
+pinned: true
+duplicated_from: havas79/Real-ESRGAN_Demo
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,226 @@
+import gradio as gr
+import cv2
+import numpy
+import os
+import random
+from basicsr.archs.rrdbnet_arch import RRDBNet
+from basicsr.utils.download_util import load_file_from_url
+
+from realesrgan import RealESRGANer
+from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+
+
+last_file = None
+img_mode = "RGBA"
+
+
+def realesrgan(img, model_name, denoise_strength, face_enhance, outscale):
+    """Real-ESRGAN function to restore (and upscale) images.
+    """
+    if not img:
+        return
+
+    # Define model parameters
+    if model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
+        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+        netscale = 4
+        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
+    elif model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
+        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+        netscale = 4
+        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
+    elif model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
+        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
+        netscale = 4
+        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
+    elif model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
+        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
+        netscale = 2
+        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
+    elif model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
+        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+        netscale = 4
+        file_url = [
+            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
+            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
+        ]
+
+    # Determine model paths
+    model_path = os.path.join('weights', model_name + '.pth')
+    if not os.path.isfile(model_path):
+        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+        for url in file_url:
+            # model_path will be updated
+            model_path = load_file_from_url(
+                url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)
+
+    # Use dni to control the denoise strength
+    dni_weight = None
+    if model_name == 'realesr-general-x4v3' and denoise_strength != 1:
+        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
+        model_path = [model_path, wdn_model_path]
+        dni_weight = [denoise_strength, 1 - denoise_strength]
+
+    # Restorer Class
+    upsampler = RealESRGANer(
+        scale=netscale,
+        model_path=model_path,
+        dni_weight=dni_weight,
+        model=model,
+        tile=0,
+        tile_pad=10,
+        pre_pad=10,
+        half=False,
+        gpu_id=None
+    )
+
+    # Use GFPGAN for face enhancement
+    if face_enhance:
+        from gfpgan import GFPGANer
+        face_enhancer = GFPGANer(
+            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
+            upscale=outscale,
+            arch='clean',
+            channel_multiplier=2,
+            bg_upsampler=upsampler)
+
+    # Convert the input PIL image to cv2 image, so that it can be processed by realesrgan
+    cv_img = numpy.array(img)
+    img = cv2.cvtColor(cv_img, cv2.COLOR_RGBA2BGRA)
+
+    # Apply restoration
+    try:
+        if face_enhance:
+            _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+        else:
+            output, _ = upsampler.enhance(img, outscale=outscale)
+    except RuntimeError as error:
+        print('Error', error)
+        print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
+    else:
+        # Save restored image and return it to the output Image component
+        if img_mode == 'RGBA':  # RGBA images should be saved in png format
+            extension = 'png'
+        else:
+            extension = 'jpg'
+
+        out_filename = f"output_{rnd_string(8)}.{extension}"
+        cv2.imwrite(out_filename, output)
+        global last_file
+        last_file = out_filename
+        return out_filename
+
+
+def rnd_string(x):
+    """Returns a string of 'x' random characters
+    """
+    characters = "abcdefghijklmnopqrstuvwxyz_0123456789"
+    result = "".join((random.choice(characters)) for i in range(x))
+    return result
+
+
+def reset():
+    """Resets the Image components of the Gradio interface and deletes
+    the last processed image
+    """
+    global last_file
+    if last_file:
+        print(f"Deleting {last_file} ...")
+        os.remove(last_file)
+        last_file = None
+    return gr.update(value=None), gr.update(value=None)
+
+
+def has_transparency(img):
+    """This function works by first checking to see if a "transparency" property is defined
+    in the image's info -- if so, we return "True". Then, if the image is using indexed colors
+    (such as in GIFs), it gets the index of the transparent color in the palette
+    (img.info.get("transparency", -1)) and checks if it's used anywhere in the canvas
+    (img.getcolors()). If the image is in RGBA mode, then presumably it has transparency in
+    it, but it double-checks by getting the minimum and maximum values of every color channel
+    (img.getextrema()), and checks if the alpha channel's smallest value falls below 255.
+    https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent
+    """
+    if img.info.get("transparency", None) is not None:
+        return True
+    if img.mode == "P":
+        transparent = img.info.get("transparency", -1)
+        for _, index in img.getcolors():
+            if index == transparent:
+                return True
+    elif img.mode == "RGBA":
+        extrema = img.getextrema()
+        if extrema[3][0] < 255:
+            return True
+    return False
+
+
+def image_properties(img):
+    """Returns the dimensions (width and height) and color mode of the input image and
+    also sets the global img_mode variable to be used by the realesrgan function
+    """
+    global img_mode
+    if img:
+        if has_transparency(img):
+            img_mode = "RGBA"
+        else:
+            img_mode = "RGB"
+        properties = f"Width: {img.size[0]}, Height: {img.size[1]} | Color Mode: {img_mode}"
+        return properties
+
+
+def main():
+    # Gradio Interface
+    with gr.Blocks(title="Real-ESRGAN Gradio Demo", theme="dark") as demo:
+
+        gr.Markdown(
+            """# <div align="center"> Real-ESRGAN Demo for Image Restoration and Upscaling </div>
+            <div align="center"><img width="200" height="74" src="https://github.com/xinntao/Real-ESRGAN/raw/master/assets/realesrgan_logo.png"></div>
+
+            This Gradio Demo was built as my Final Project for **CS50's Introduction to Programming with Python**.
+            Please visit the [Real-ESRGAN GitHub page](https://github.com/xinntao/Real-ESRGAN) for detailed information about the project.
+            """
+        )
+
+        with gr.Accordion("Options/Parameters"):
+            with gr.Row():
+                model_name = gr.Dropdown(label="Real-ESRGAN inference model to be used",
+                                         choices=["RealESRGAN_x4plus", "RealESRNet_x4plus", "RealESRGAN_x4plus_anime_6B",
+                                                  "RealESRGAN_x2plus", "realesr-general-x4v3"],
+                                         value="realesr-general-x4v3", show_label=True)
+                denoise_strength = gr.Slider(label="Denoise Strength (Used only with the realesr-general-x4v3 model)",
+                                             minimum=0, maximum=1, step=0.1, value=0.5)
+                outscale = gr.Slider(label="Image Upscaling Factor",
+                                     minimum=1, maximum=10, step=1, value=2, show_label=True)
+                face_enhance = gr.Checkbox(label="Face Enhancement using GFPGAN (Doesn't work for anime images)",
+                                           value=False, show_label=True)
+
+        with gr.Row():
+            with gr.Group():
+                input_image = gr.Image(label="Source Image", type="pil", image_mode="RGBA")
+                input_image_properties = gr.Textbox(label="Image Properties", max_lines=1)
+            output_image = gr.Image(label="Restored Image", image_mode="RGBA")
+        with gr.Row():
+            restore_btn = gr.Button("Restore Image")
+            reset_btn = gr.Button("Reset")
+
+        # Event listeners:
+        input_image.change(fn=image_properties, inputs=input_image, outputs=input_image_properties)
+        restore_btn.click(fn=realesrgan,
+                          inputs=[input_image, model_name, denoise_strength, face_enhance, outscale],
+                          outputs=output_image)
+        reset_btn.click(fn=reset, inputs=[], outputs=[output_image, input_image])
+        # reset_btn.click(None, inputs=[], outputs=[input_image], _js="() => (null)\n")
+        # Undocumented method to clear a component's value using Javascript
+
+        gr.Markdown(
+            """*Please note that support for animated GIFs is not yet implemented. Should an animated GIF be chosen for restoration,
+            the demo will output only the first frame saved in PNG format (to preserve probable transparency).*
+            """
+        )
+
+    demo.launch()
+
+
+if __name__ == "__main__":
+    main()
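For context on how the Denoise Strength slider takes effect in realesrgan() above: when realesr-general-x4v3 is selected, two checkpoints (the plain model and its "wdn" with-denoise variant) are passed to RealESRGANer together with dni_weight, and the weights are interpolated. Below is a minimal sketch of the same call path outside Gradio, reusing only calls that appear in app.py; the local weight paths and the input/output filenames are assumptions for illustration (the app itself fetches the checkpoints with load_file_from_url):

# Sketch only: assumes the two checkpoints were already downloaded to ./weights,
# e.g. by load_file_from_url() as in realesrgan() above.
import cv2
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

denoise_strength = 0.5  # same semantics as the Gradio slider
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')

# Blend the plain and the "wdn" (with-denoise) checkpoints via dni interpolation.
model_path = ['weights/realesr-general-x4v3.pth', 'weights/realesr-general-wdn-x4v3.pth']
dni_weight = [denoise_strength, 1 - denoise_strength]

upsampler = RealESRGANer(scale=4, model_path=model_path, dni_weight=dni_weight, model=model,
                         tile=0, tile_pad=10, pre_pad=10, half=False, gpu_id=None)

img = cv2.imread('input.png', cv2.IMREAD_UNCHANGED)  # hypothetical input file
output, _ = upsampler.enhance(img, outscale=2)
cv2.imwrite('output_upscaled.png', output)           # hypothetical output file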
requirements.txt
ADDED
@@ -0,0 +1,11 @@
+torch
+torchvision
+numpy
+opencv-python
+Pillow
+basicsr
+facexlib
+gfpgan
+tqdm
+gradio
+realesrgan
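As a quick way to confirm that the pinned packages above cover everything app.py imports (a convenience sketch, not part of the commit; note that the distributions opencv-python and Pillow import as cv2 and PIL):

# Hypothetical sanity check: try importing the module behind each requirement.
import importlib

modules = [
    "torch", "torchvision", "numpy", "cv2",   # opencv-python installs the cv2 module
    "PIL", "basicsr", "facexlib", "gfpgan",   # Pillow installs the PIL package
    "tqdm", "gradio", "realesrgan",
]

for name in modules:
    try:
        importlib.import_module(name)
        print(f"ok       {name}")
    except ImportError as err:
        print(f"missing  {name}: {err}")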