Didisoftwares vicalloy committed on
Commit
cd5f849
0 Parent(s):

Duplicate from vicalloy/GFPGAN


Co-authored-by: vicalloy <vicalloy@users.noreply.huggingface.co>

Files changed (5)
  1. .gitattributes +31 -0
  2. README.md +14 -0
  3. app.py +175 -0
  4. packages.txt +3 -0
  5. requirements.txt +13 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: GFPGAN
+ emoji: 😁
+ colorFrom: yellow
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.1.7
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: vicalloy/GFPGAN
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,175 @@
+ import os
+
+ import cv2
+ import gradio as gr
+ import torch
+ from basicsr.archs.srvgg_arch import SRVGGNetCompact
+ from gfpgan.utils import GFPGANer
+ from realesrgan.utils import RealESRGANer
+
+ os.system("pip freeze")
+ # download weights
+ if not os.path.exists('realesr-general-x4v3.pth'):
+     os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
+ if not os.path.exists('GFPGANv1.2.pth'):
+     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .")
+ if not os.path.exists('GFPGANv1.3.pth'):
+     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .")
+ if not os.path.exists('GFPGANv1.4.pth'):
+     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
+ if not os.path.exists('RestoreFormer.pth'):
+     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .")
+ if not os.path.exists('CodeFormer.pth'):
+     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth -P .")
+
+ if not os.path.exists('HanamichiSakuragi.jpg'):
+     torch.hub.download_url_to_file(
+         'https://haoluobo.com/wp-content/uploads/2023/01/%E6%A8%B1%E6%9C%A8%E8%8A%B1%E9%81%93.jpg',
+         'HanamichiSakuragi.jpg')
+     torch.hub.download_url_to_file(
+         'https://haoluobo.com/wp-content/uploads/2023/01/%E6%9D%8E%E4%B8%96%E6%B0%91.jpg',
+         'LiShiming.jpg')
+     torch.hub.download_url_to_file(
+         'https://haoluobo.com/wp-content/uploads/2023/01/%E4%B9%BE%E9%9A%86.jpg',
+         'QianLong.jpg')
+     torch.hub.download_url_to_file(
+         'https://user-images.githubusercontent.com/17445847/187401133-8a3bf269-5b4d-4432-b2f0-6d26ee1d3307.png',
+         '10045.png')
+
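# A minimal sketch, not part of the committed app.py above: the same model
# weights could be fetched with torch.hub.download_url_to_file (the helper
# already used above for the sample images), which avoids shelling out to a
# system wget binary. The helper name fetch_weight is illustrative only.
def fetch_weight(url, filename):
    if not os.path.exists(filename):
        torch.hub.download_url_to_file(url, filename)

# e.g. fetch_weight('https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth', 'GFPGANv1.4.pth')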
+ # background enhancer with RealESRGAN
+ model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+ model_path = 'realesr-general-x4v3.pth'
+ half = True if torch.cuda.is_available() else False
+ upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
+
+ os.makedirs('output', exist_ok=True)
+
+
+ # def inference(img, version, scale, weight):
+ def inference(img, version, scale, blur_face):
+     # cv2.GaussianBlur needs an odd kernel size >= 3; smaller values disable the blur
+     blur_face = int(blur_face)
+     if blur_face % 2 != 1:
+         blur_face += 1
+     if blur_face < 3:
+         blur_face = 0
+     # weight /= 100
+     print(img, version, scale)
+     if scale > 4:
+         scale = 4  # avoid too large scale value
+     try:
+         extension = os.path.splitext(os.path.basename(str(img)))[1]
+         img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
+         if len(img.shape) == 3 and img.shape[2] == 4:
+             img_mode = 'RGBA'
+         elif len(img.shape) == 2:  # for gray inputs
+             img_mode = None
+             img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+         else:
+             img_mode = None
+
+         h, w = img.shape[0:2]
+         if h < 300:
+             img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
+
+         if version == 'v1.2':
+             face_enhancer = GFPGANer(
+                 model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'v1.3':
+             face_enhancer = GFPGANer(
+                 model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'v1.4':
+             face_enhancer = GFPGANer(
+                 model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'RestoreFormer':
+             face_enhancer = GFPGANer(
+                 model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler)
+         # elif version == 'CodeFormer':
+         #     face_enhancer = GFPGANer(
+         #         model_path='CodeFormer.pth', upscale=2, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler)
+
+         try:
+             # _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight)
+             # wrap align_warp_face so the cropped faces can be blurred before GFPGAN restores them
+             face_helper = face_enhancer.face_helper
+             align_warp_face = face_helper.align_warp_face
+
+             def new_align_warp_face(*args, **kwargs):
+                 align_warp_face(*args, **kwargs)  # save_cropped_path
+                 face_helper.org_cropped_faces = face_helper.cropped_faces
+                 if blur_face >= 3:
+                     face_helper.cropped_faces = [cv2.GaussianBlur(e, (blur_face, blur_face), 0) for e in face_helper.cropped_faces]
+                 print("found face count:", len(face_helper.cropped_faces))
+
+             face_helper.align_warp_face = new_align_warp_face
+             _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+         except RuntimeError as error:
+             print('Error', error)
+
+         try:
+             if scale != 2:
+                 interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
+                 h, w = img.shape[0:2]
+                 output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
+         except Exception as error:
+             print('wrong scale input.', error)
+         if img_mode == 'RGBA':  # RGBA images should be saved in png format
+             extension = 'png'
+         else:
+             extension = 'jpg'
+         save_path = f'output/out.{extension}'
+         cv2.imwrite(save_path, output)
+
+         output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
+         return (
+             output,
+             save_path,
+             [cv2.cvtColor(e, cv2.COLOR_BGR2RGB) for e in face_enhancer.face_helper.org_cropped_faces],
+             [cv2.cvtColor(e, cv2.COLOR_BGR2RGB) for e in face_enhancer.face_helper.restored_faces]
+         )
+     except Exception as error:
+         print('global exception', error)
+         return None, None, None, None  # one value per Gradio output
+
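# A standalone sketch of the face-blur step used in inference() above, assuming
# a BGR uint8 face crop such as those in face_helper.cropped_faces. It shows why
# blur_face is rounded up to the next odd number: cv2.GaussianBlur requires an
# odd kernel size >= 3, and smaller values mean "do not blur". The function name
# blur_cropped_face is illustrative, not part of the committed file.
def blur_cropped_face(face_bgr, blur_face=25):
    blur_face = int(blur_face)
    if blur_face % 2 != 1:
        blur_face += 1
    if blur_face < 3:
        return face_bgr  # no blur: keep the original crop
    return cv2.GaussianBlur(face_bgr, (blur_face, blur_face), 0)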
+
+ title = "GFPGAN: Practical Face Restoration Algorithm"
+ description = r"""Gradio demo for <a href='https://github.com/TencentARC/GFPGAN' target='_blank'><b>GFPGAN: Towards Real-World Blind Face Restoration with Generative Facial Prior</b></a>.<br>
+ It can be used to restore your **old photos** or improve **AI-generated faces**.<br>
+ To use it, simply upload your image.<br>
+ If GFPGAN is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/GFPGAN' target='_blank'>Github Repo</a> and recommend it to your friends 😊<br>
+ This demo was forked by [vicalloy](https://github.com/vicalloy) and adds a `face blur` parameter to improve the enhancement of painted or drawn faces.
+ """
+ article = r"""
+
+ [![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases)
+ [![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN)
+ [![arXiv](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)](https://arxiv.org/abs/2101.04061)
+
+ If you have any questions, please email 📧 `xintao.wang@outlook.com` or `xintaowang@tencent.com`.
+ """
+ with gr.Blocks() as demo:
+     gr.Markdown("<center><h1>%s</h1></center>" % title)
+     gr.Markdown(description)
+     with gr.Row(equal_height=False):
+         with gr.Column():
+             file_path = gr.components.Image(type="filepath", label="Input")
+             version = gr.components.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], type="value", value='v1.4', label='version')
+             rescaling_factor = gr.components.Number(label="Rescaling factor", value=2)
+             blur_face = gr.components.Number(label="Blur face", value=25)
+             submit = gr.Button("Submit")
+         with gr.Column():
+             output_img = gr.components.Image(type="numpy", label="Output (The whole image)")
+             download = gr.components.File(label="Download the output image")
+     with gr.Row():
+         with gr.Column():
+             input_faces = gr.Gallery(label="Input faces").style(height="auto")
+         with gr.Column():
+             output_faces = gr.Gallery(label="Output faces").style(height="auto")
+     gr.Examples([['HanamichiSakuragi.jpg', 'v1.4', 2, 31], ['LiShiming.jpg', 'v1.4', 2, 3], ['QianLong.jpg', 'v1.4', 2, 3],
+                  ['10045.png', 'v1.4', 2, 0]], [file_path, version, rescaling_factor, blur_face])
+     gr.Markdown(article)
+     submit.click(
+         inference,
+         inputs=[file_path, version, rescaling_factor, blur_face],
+         outputs=[output_img, download, input_faces, output_faces]
+     )
+ demo.queue(concurrency_count=4)
+ demo.launch()
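For reference, a minimal usage sketch of calling inference() directly, outside the Gradio UI, assuming the weights and the HanamichiSakuragi.jpg example above have already been downloaded; the function returns the restored image, the saved file path, and the original and restored face crops:

    # hedged sketch; variable names are illustrative
    restored, save_path, faces_in, faces_out = inference('HanamichiSakuragi.jpg', 'v1.4', 2, 25)
    print(save_path)  # e.g. output/out.jpg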
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ torch>=1.7
+ basicsr>=1.4.2
+ facexlib>=0.2.5
+ gfpgan>=1.3.7
+ realesrgan>=0.2.5
+ numpy
+ opencv-python
+ torchvision
+ scipy
+ tqdm
+ lmdb
+ pyyaml
+ yapf
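A minimal smoke test, assuming these pinned packages install cleanly in the Space's environment, is to confirm that the imports used by app.py resolve before launching:

    # quick import check; only prints versions, does not load any weights
    import torch, cv2, basicsr, facexlib, gfpgan, realesrgan
    print('torch', torch.__version__, '| CUDA available:', torch.cuda.is_available())
    print('opencv', cv2.__version__)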