Spaces: Running on Zero

Upload folder using huggingface_hub

- app.py +2 -2
- inference.py +4 -4
app.py CHANGED
@@ -40,7 +40,7 @@ for style_name, style_img_path in style_options.items():
     style_features = (model(style_img_512), model(style_img_1024))
     cached_style_features[style_name] = style_features
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=10)
 def run(content_image, style_name, style_strength, output_quality, progress=gr.Progress(track_tqdm=True)):
     img_size = 1024 if output_quality else 512
     content_img, original_size = preprocess_img(content_image, img_size)
@@ -66,7 +66,7 @@ def run(content_image, style_name, style_strength, output_quality, progress=gr.Progress(track_tqdm=True)):
     et = time.time()
     print('TIME TAKEN:', et-st)
 
-
+    yield postprocess_img(generated_img, original_size)
 
 
 def set_slider(value):
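Note on the app.py changes: the decorator now requests a ZeroGPU allocation of 10 seconds per call, and the result is yielded rather than returned so Gradio treats run() as a generator and streams the image back to the UI. Below is a minimal, self-contained sketch of that pattern; fake_style_transfer and the bare-bones Interface are placeholders for this Space's real preprocessing, inference, and UI code, not the repo's actual implementation.

import time

import gradio as gr
import spaces  # ZeroGPU helper package available on Hugging Face Spaces

def fake_style_transfer(image):
    # Placeholder for the real preprocess -> inference -> postprocess pipeline.
    time.sleep(1)
    return image

@spaces.GPU(duration=10)  # cap the ZeroGPU allocation at ~10 s per call, as in this commit
def run(content_image, progress=gr.Progress(track_tqdm=True)):
    st = time.time()
    generated_img = fake_style_transfer(content_image)
    print('TIME TAKEN:', time.time() - st)
    # Yielding turns run() into a generator, so Gradio can stream the
    # output to the client as soon as it is ready.
    yield generated_img

demo = gr.Interface(fn=run, inputs=gr.Image(), outputs=gr.Image())
# demo.launch()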
inference.py CHANGED
@@ -30,8 +30,8 @@ def inference(
     alpha=1,
     beta=1
 ):
-
-    optimizer = optim.AdamW([
+    generated_image = content_image.clone().requires_grad_(True)
+    optimizer = optim.AdamW([generated_image], lr=lr)
 
     with torch.no_grad():
         content_features = model(content_image)
@@ -39,10 +39,10 @@ def inference(
     for _ in tqdm(range(iterations), desc='The magic is happening ✨'):
         optimizer.zero_grad()
 
-        generated_features = model(
+        generated_features = model(generated_image)
         total_loss = _compute_loss(generated_features, content_features, style_features, alpha, beta)
 
         total_loss.backward()
         optimizer.step()
 
-    return
+    return generated_image
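Note on the inference.py changes: the optimization target is the image itself. A gradient-enabled copy of the content image is the only tensor AdamW updates, and that optimized tensor is returned at the end. The sketch below reproduces the loop in runnable form under stated assumptions: toy_inference, the Conv2d "feature extractor", the MSE-based loss, and the default iterations/lr values are stand-ins, not this repo's model or _compute_loss.

import torch
import torch.optim as optim
from tqdm import tqdm

def toy_inference(model, content_image, style_features, iterations=50, lr=0.05, alpha=1, beta=1):
    # The pixels of a copy of the content image are the parameters being optimized.
    generated_image = content_image.clone().requires_grad_(True)
    optimizer = optim.AdamW([generated_image], lr=lr)

    with torch.no_grad():
        content_features = model(content_image)

    for _ in tqdm(range(iterations), desc='The magic is happening ✨'):
        optimizer.zero_grad()
        generated_features = model(generated_image)
        # Toy stand-in for _compute_loss: alpha weights the content term,
        # beta the style term (both plain MSE here).
        total_loss = (alpha * torch.nn.functional.mse_loss(generated_features, content_features)
                      + beta * torch.nn.functional.mse_loss(generated_features, style_features))
        total_loss.backward()
        optimizer.step()

    return generated_image

# Usage with dummy tensors and a trivial stand-in "feature extractor":
model = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
content = torch.rand(1, 3, 64, 64)
with torch.no_grad():
    style_feats = model(torch.rand(1, 3, 64, 64))
result = toy_inference(model, content, style_feats, iterations=10)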