Upload folder using huggingface_hub
- app.py +44 -43
- inference.py +1 -1
- requirements.txt +1 -0
app.py
CHANGED
@@ -6,6 +6,7 @@ import spaces
 import torch
 import numpy as np
 import gradio as gr
+from gradio_imageslider import ImageSlider
 
 from utils import preprocess_img, preprocess_img_from_path, postprocess_img
 from vgg19 import VGG_19
@@ -57,8 +58,7 @@ def run(content_image, style_name, style_strength=5, progress=gr.Progress(track_
     et = time.time()
     print('TIME TAKEN:', et-st)
 
-    yield postprocess_img(generated_img, original_size)
-
+    yield (content_image, postprocess_img(generated_img, original_size))
 
 def set_slider(value):
     return gr.update(value=value)
@@ -66,54 +66,55 @@ def set_slider(value):
 css = """
 #container {
     margin: 0 auto;
-    max-width:
+    max-width: 1100px;
 }
 """
 
 with gr.Blocks(css=css) as demo:
     gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer</h1>")
-    with gr.
+    with gr.Row(elem_id='container'):
+        with gr.Column():
+            content_image = gr.Image(label='Content', type='pil', sources=['upload', 'webcam', 'clipboard'], format='jpg', show_download_button=False)
+            style_dropdown = gr.Radio(choices=list(style_options.keys()), label='Style', value='Starry Night', type='value')
+            with gr.Group():
+                style_strength_slider = gr.Slider(label='Style Strength', minimum=1, maximum=10, step=1, value=5)
+            submit_button = gr.Button('Submit', variant='primary')
+
+            examples = gr.Examples(
+                examples=[
+                    ['./content_images/Bridge.jpg', 'Starry Night'],
+                    ['./content_images/GoldenRetriever.jpg', 'Great Wave'],
+                    ['./content_images/CameraGirl.jpg', 'Bokeh']
+                ],
+                inputs=[content_image, style_dropdown]
+            )
 
-    ).then(
-        fn=lambda: gr.update(visible=True),
-        outputs=[download_button]
-    )
-
-    content_and_output.change(
-        fn=lambda _: gr.update(visible=False),
-        inputs=[content_and_output],
-        outputs=[download_button]
-    )
+        with gr.Column():
+            output_image = ImageSlider(position=0.15, label='Output', show_label=False, type='pil', interactive=False, show_download_button=False)
+            download_button = gr.DownloadButton(label='Download Image', visible=False)
+
+            def save_image(img_tuple):
+                filename = 'generated.jpg'
+                img_tuple[1].save(filename)
+                return filename
+
+            submit_button.click(
+                fn=lambda: gr.update(visible=False),
+                outputs=[download_button]
+            )
 
+            submit_button.click(
+                fn=run,
+                inputs=[content_image, style_dropdown, style_strength_slider],
+                outputs=[output_image]
+            ).then(
+                fn=save_image,
+                inputs=[output_image],
+                outputs=[download_button]
+            ).then(
+                fn=lambda: gr.update(visible=True),
+                outputs=[download_button]
+            )
 
 demo.queue = False
 demo.config['queue'] = False
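Note on the output change: the ImageSlider component from gradio_imageslider takes a pair of images, which is why run() now yields a (content, generated) tuple and save_image writes the second element of that tuple. A minimal sketch of the pattern, assuming only gradio, gradio_imageslider, and Pillow are installed; stylize is a hypothetical stand-in for the Space's actual run() function, not its real model code:

import gradio as gr
from gradio_imageslider import ImageSlider
from PIL import Image, ImageOps

def stylize(img: Image.Image):
    # Stand-in for run(): ImageSlider displays a (before, after) pair,
    # so return both the original and the processed image.
    return (img, ImageOps.grayscale(img).convert('RGB'))

with gr.Blocks() as demo:
    content = gr.Image(type='pil', label='Content')
    output = ImageSlider(type='pil', label='Output')
    gr.Button('Submit', variant='primary').click(fn=stylize, inputs=[content], outputs=[output])

if __name__ == '__main__':
    demo.launch()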
inference.py
CHANGED
@@ -26,7 +26,7 @@ def inference(
     content_image,
     style_features,
     lr,
-    iterations=
+    iterations=101,
     optim_caller=optim.AdamW,
     alpha=1,
     beta=1
requirements.txt
CHANGED
@@ -3,5 +3,6 @@ torch
 torchvision
 pillow
 gradio
+gradio_imageslider
 spaces
 tqdm