Upload 3 files
#1
by John6666 · opened
Files changed:
- README.md (+1 -1)
- app.py (+8 -7)
- requirements.txt (+5 -5)
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 📈
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version:
+sdk_version: 5.6.0
 app_file: app.py
 pinned: false
 license: apache-2.0
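Note: the sdk_version field in the Space's README front matter pins the Gradio release that Hugging Face Spaces installs at build time, so this bump moves the runtime to Gradio 5.6.0. A minimal local sanity check, assuming Gradio is installed in your development environment (the check itself is illustrative, not part of the Space):

# Illustrative check: compare the locally installed Gradio against the
# sdk_version pinned in the README front matter.
import gradio as gr

PINNED = "5.6.0"  # value added in this diff
if gr.__version__ != PINNED:
    print(f"local Gradio is {gr.__version__}, the Space will run {PINNED}")
else:
    print(f"local Gradio matches the pinned sdk_version ({PINNED})")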
app.py
CHANGED
@@ -122,21 +122,22 @@ article = r"""
 """
 demo = gr.Interface(
     inference, [
-        gr.
+        gr.Image(type="filepath", label="Input"),
         # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'),
-        gr.
-        gr.
+        gr.Radio(choices=['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", value='v1.4', label='version'),
+        gr.Number(label="Rescaling factor", value=2),
         # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50)
     ], [
-        gr.
-        gr.
+        gr.Image(type="numpy", label="Output (The whole image)"),
+        gr.File(label="Download the output image")
     ],
     title=title,
     description=description,
     article=article,
     # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50],
     # ['10045.png', 'v1.4', 2, 50]]).launch()
-    examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]]
+    examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]],
+    cache_examples=False)
 
-demo.queue(
+demo.queue()
 demo.launch()
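Taken together, the app.py changes port the interface from the removed gr.inputs/gr.outputs namespaces to the flat Gradio 4/5 component API, add cache_examples=False, and call demo.queue() without the old concurrency_count argument, which Gradio 4+ no longer accepts. Below is a minimal runnable sketch of the patched interface, assuming Gradio 5.x; the inference stub is only a placeholder for the Space's real GFPGAN/Real-ESRGAN function, and the examples list is omitted so the sketch runs without the bundled sample images:

import gradio as gr

def inference(img_path, version, scale):
    # Placeholder: the real app restores the image with GFPGAN / Real-ESRGAN.
    # Returning the input twice mirrors the (image, downloadable file) outputs.
    return img_path, img_path

demo = gr.Interface(
    inference,
    [
        gr.Image(type="filepath", label="Input"),
        gr.Radio(choices=['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer', 'RealESR-General-x4v3'],
                 value='v1.4', label='version'),
        gr.Number(label="Rescaling factor", value=2),
    ],
    [
        gr.Image(type="numpy", label="Output (The whole image)"),
        gr.File(label="Download the output image"),
    ],
    title="GFPGAN (sketch)",
    cache_examples=False,
)

if __name__ == "__main__":
    demo.queue()
    demo.launch()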
requirements.txt
CHANGED
@@ -1,9 +1,9 @@
 torch>=1.7
-
-facexlib
-
-
-numpy
+git+https://github.com/XPixelGroup/BasicSR
+git+https://github.com/xinntao/facexlib
+git+https://github.com/TencentARC/GFPGAN
+git+https://github.com/xinntao/Real-ESRGAN
+numpy<2
 opencv-python
 torchvision
 scipy
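The requirements changes swap the package pins for git installs of BasicSR, facexlib, GFPGAN and Real-ESRGAN, and cap numpy below 2, presumably because parts of that stack built against NumPy 1.x break under NumPy 2. An illustrative post-install check, assuming the requirements above were installed with pip (the module names are those packages' standard import names):

import importlib

import numpy as np

# The cap in requirements.txt is numpy<2; fail loudly if a 2.x build slipped in.
assert int(np.__version__.split(".")[0]) < 2, f"numpy {np.__version__} violates numpy<2"

# Import names corresponding to the four git-installed packages.
for module in ("basicsr", "facexlib", "gfpgan", "realesrgan"):
    try:
        importlib.import_module(module)
        print(f"{module}: OK")
    except ImportError as exc:
        print(f"{module}: missing ({exc})")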