Spaces:
Runtime error
Runtime error
Upload app2.py
Browse files
app2.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import gradio as gr
|
3 |
+
import numpy as np
|
4 |
+
import onnxruntime
|
5 |
+
from scipy.ndimage.filters import gaussian_filter
|
6 |
+
from skimage.color import rgb2gray
|
7 |
+
|
8 |
+
|
9 |
+
def xdog(im, gamma=0.98, phi=200, eps=-0.1, k=1.6, sigma=1):
    """Extract a sketch from an image with the XDoG (eXtended Difference of
    Gaussians) filter.

    Source   : https://github.com/CemalUnal/XDoG-Filter
    Reference: XDoG: An eXtended difference-of-Gaussians compendium including
               advanced image stylization
    Link     : http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.151&rep=rep1&type=pdf

    Parameters
    ----------
    im : ndarray
        Input image, HxW grayscale, HxWx3 RGB, or HxWx4 RGBA
        (values assumed in [0, 255] or [0, 1]; only relative contrast matters
        because the result is re-normalized).
    gamma, phi, eps, k, sigma : float
        Standard XDoG parameters (DoG weight, tanh sharpness, threshold,
        sigma ratio, base blur).

    Returns
    -------
    ndarray
        HxWx3 float32 BGR image with values in [0, 1].
    """
    # Reduce color input to a single channel; original code indexed
    # im.shape[2] unconditionally and crashed on 2-D grayscale input.
    if im.ndim == 3:
        if im.shape[2] == 4:
            # Drop the alpha channel before grayscale conversion.
            im = im[:, :, :3]
        if im.shape[2] == 3:
            im = rgb2gray(im)
        else:
            im = im[:, :, 0]

    imf1 = gaussian_filter(im, sigma)
    imf2 = gaussian_filter(im, sigma * k)
    imdiff = imf1 - gamma * imf2
    # Soft-threshold: 1 below eps, 1 + tanh(phi * d) at/above it.
    imdiff = (imdiff < eps) * 1.0 + (imdiff >= eps) * (1.0 + np.tanh(phi * imdiff))

    # Normalize to [0, 255]; guard against a constant image (max == 0 after
    # the min subtraction would otherwise divide by zero).
    imdiff -= imdiff.min()
    peak = imdiff.max()
    if peak > 0:
        imdiff /= peak
    imdiff *= 255.0
    imdiff = imdiff.astype('uint8')

    # Expand back to 3 channels so the result displays like a color image.
    imdiff = cv2.cvtColor(imdiff, cv2.COLOR_GRAY2BGR)
    return (imdiff / 255.0).astype('float32')
|
25 |
+
|
26 |
+
|
27 |
+
def swin_model(img):
    """Extract a sketch from an anime image with an ONNX Swin-Transformer model.

    Parameters
    ----------
    img : ndarray
        HxWx3 uint8 RGB image (as delivered by ``gr.Image``).

    Returns
    -------
    ndarray
        Single-channel float sketch in [0, 1] at the original resolution.
    """
    h, w = img.shape[0], img.shape[1]

    # Cap the longer side at 512 px before inference.
    factor = max(h / 512, w / 512)
    if factor > 1:
        # cv2.resize takes dsize as (width, height); the original code passed
        # (height, width), distorting every non-square downscaled image.
        img = cv2.resize(img, (int(w / factor), int(h / factor)))

    # RGB -> BGR, HWC -> NCHW, scale to [0, 1].
    img = np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1)) / 255.0
    img = img[None, ...].astype(np.float32)

    # Swin windows are 8x8: pad bottom/right of the *current* (possibly
    # resized) spatial dims up to a multiple of 8. The original code used the
    # pre-resize h/w and padded every axis (batch and channel included).
    window_size = 8
    cur_h, cur_w = img.shape[2], img.shape[3]
    mod_pad_h = (window_size - cur_h % window_size) % window_size
    mod_pad_w = (window_size - cur_w % window_size) % window_size
    img = np.pad(img, ((0, 0), (0, 0), (0, mod_pad_h), (0, mod_pad_w)), 'reflect')

    # CPU-only ONNX inference; the model file ships alongside the app.
    ort_session = onnxruntime.InferenceSession('model.onnx', providers=['CPUExecutionProvider'])
    ort_inputs = {ort_session.get_inputs()[0].name: img}
    output = ort_session.run(None, ort_inputs)[0]

    # Strip the padding we added, then drop the singleton dims.
    _, _, a, b = output.shape
    output = output[:, :, 0:a - mod_pad_h, 0:b - mod_pad_w]
    output = np.squeeze(output).clip(0, 1)

    if factor > 1:
        # Restore the original resolution; again dsize is (width, height).
        # The original call cv2.resize(output, h, w) is an invalid signature
        # (h would be taken as dsize) and raised at runtime.
        output = cv2.resize(output, (w, h))
    return output
|
50 |
+
|
51 |
+
|
52 |
+
# Gradio UI: one input image, two output slots, and one button per method so
# the classic XDoG filter can be compared against the learning-based model.
demo = gr.Blocks()
with demo:
    gr.Markdown(
        """
# Hello,World!
This is an sketch extraction using in anime art by swin_transformer, you can see the difference between X-dog and learning-based algorithm.
""")
    input_image = gr.Image()   # source image uploaded by the user
    output_xdog = gr.Image()   # result of the XDoG filter
    output_deep = gr.Image()   # result of the ONNX Swin model
    b_xdog = gr.Button("X_dog")
    b_learning_based = gr.Button("learning based")
    # Each button routes the same input through a different extractor.
    b_xdog.click(xdog, inputs=input_image, outputs=output_xdog)
    b_learning_based.click(swin_model, inputs=input_image, outputs=output_deep)
demo.launch()
|