Upload folder using huggingface_hub
- app.py +1 -1
- inference.py +19 -10
app.py
CHANGED
@@ -103,7 +103,7 @@ css = """
 """
 
 with gr.Blocks(css=css) as demo:
-    gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer w/ Salient Object
+    gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer w/ Salient Object Detection")
     with gr.Row(elem_id='container'):
         with gr.Column():
             content_image = gr.Image(label='Content', type='pil', sources=['upload', 'webcam', 'clipboard'], format='jpg', show_download_button=False)
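For orientation, a minimal sketch of the Blocks layout this hunk sits in, assuming Gradio 4.x; the CSS string, the launch call, and the overall structure are placeholders, and only the title banner and the content-image input mirror the lines shown above.

import gradio as gr

# Placeholder CSS; the real rules live in app.py's css string.
css = """
#container {max-width: 100%;}
"""

with gr.Blocks(css=css) as demo:
    # Title banner completed by this commit ("... Salient Object Detection").
    gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer w/ Salient Object Detection")
    with gr.Row(elem_id='container'):
        with gr.Column():
            content_image = gr.Image(
                label='Content',
                type='pil',
                sources=['upload', 'webcam', 'clipboard'],
                format='jpg',
                show_download_button=False,
            )

if __name__ == '__main__':
    demo.launch()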
inference.py
CHANGED
@@ -1,9 +1,14 @@
+import os
+from tqdm import tqdm
+
 import torch
 import torch.optim as optim
 import torch.nn.functional as F
-from torch.utils.tensorboard import SummaryWriter
 from torchvision.transforms.functional import gaussian_blur
 
+DEV_MODE = os.environ.get('DEV_MODE', None)
+print('DEV MODE:', True if DEV_MODE else False)
+
 def _gram_matrix(feature):
     batch_size, n_feature_maps, height, width = feature.size()
     new_feature = feature.view(batch_size * n_feature_maps, height * width)
@@ -44,7 +49,9 @@ def inference(
     alpha=1,
     beta=1,
 ):
-    writer = SummaryWriter()
+    if DEV_MODE:
+        from torch.utils.tensorboard import SummaryWriter
+        writer = SummaryWriter()
     generated_image = content_image.clone().requires_grad_(True)
     optimizer = optim_caller([generated_image], lr=lr)
     min_losses = [float('inf')] * iterations
@@ -78,16 +85,17 @@ def inference(
         total_loss.backward()
 
         # log loss
-        writer.add_scalars(f'style-{"background" if apply_to_background else "image"}', {
-            'Loss/content': content_loss.item(),
-            'Loss/style': style_loss.item(),
-            'Loss/total': total_loss.item()
-        }, iter)
+        if DEV_MODE:
+            writer.add_scalars(f'style-{"background" if apply_to_background else "image"}', {
+                'Loss/content': content_loss.item(),
+                'Loss/style': style_loss.item(),
+                'Loss/total': total_loss.item()
+            }, iter)
         min_losses[iter] = min(min_losses[iter], total_loss.item())
 
         return total_loss
 
-    for iter in range(iterations):
+    for iter in tqdm(range(iterations)):
         optimizer.step(lambda: closure(iter))
 
         if apply_to_background:
@@ -95,6 +103,7 @@ def inference(
             foreground_mask_resized = F.interpolate(foreground_mask.unsqueeze(1), size=generated_image.shape[2:], mode='nearest')
             generated_image.data = generated_image.data * (1 - foreground_mask_resized) + content_image.data * foreground_mask_resized
 
-    writer.flush()
-    writer.close()
+    if DEV_MODE:
+        writer.flush()
+        writer.close()
     return generated_image, background_ratio
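The net effect of the inference.py changes is that TensorBoard becomes an opt-in dev dependency: the SummaryWriter import, the per-iteration add_scalars logging, and the final flush/close only run when the DEV_MODE environment variable is set, and tqdm wraps the optimization loop for progress display. Below is a minimal, self-contained sketch of that gating pattern under those assumptions; the toy quadratic loss and Adam optimizer are stand-ins, not the Space's actual style-transfer loop.

import os

import torch

# Any non-empty value enables dev logging, mirroring the diff's
# DEV_MODE = os.environ.get('DEV_MODE', None) check.
DEV_MODE = os.environ.get('DEV_MODE', None)

def optimize(iterations=50):
    writer = None
    if DEV_MODE:
        # Imported lazily so production installs do not need tensorboard.
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter()  # writes event files to ./runs by default

    x = torch.zeros(1, requires_grad=True)      # stand-in for generated_image
    optimizer = torch.optim.Adam([x], lr=0.1)

    for step in range(iterations):
        optimizer.zero_grad()
        loss = (x - 3.0).pow(2).sum()           # stand-in for content + style loss
        loss.backward()
        optimizer.step()
        if writer is not None:
            writer.add_scalar('Loss/total', loss.item(), step)

    if writer is not None:
        writer.flush()
        writer.close()
    return x

if __name__ == '__main__':
    print(optimize())

Run it as DEV_MODE=1 python sketch.py (the file name is arbitrary) and point tensorboard --logdir runs at the output to inspect the curves; with the variable unset, the TensorBoard import is skipped entirely.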