Ahsen Khaliq committed · Commit 39dde7a · Parent(s): 55602bc
Create app.py
app.py
ADDED
@@ -0,0 +1,67 @@
import matplotlib.pyplot as plt
import cv2
import kornia as K
import kornia.feature as KF
import numpy as np
import torch
from kornia_moons.feature import *
import gradio as gr

def load_torch_image(fname):
    # Read with OpenCV (BGR, HWC, uint8) and convert to a float RGB tensor in [0, 1], shape (1, 3, H, W).
    img = K.image_to_tensor(cv2.imread(fname), False).float() / 255.
    img = K.color.bgr_to_rgb(img)
    return img

def inference(file1, file2):
    fname1 = file1.name
    fname2 = file2.name

    img1 = load_torch_image(fname1)
    img2 = load_torch_image(fname2)

    matcher = KF.LoFTR(pretrained='outdoor')

    input_dict = {"image0": K.color.rgb_to_grayscale(img1),  # LoFTR works on grayscale images only
                  "image1": K.color.rgb_to_grayscale(img2)}

    with torch.no_grad():
        correspondences = matcher(input_dict)
    mkpts0 = correspondences['keypoints0'].cpu().numpy()
    mkpts1 = correspondences['keypoints1'].cpu().numpy()
    # Filter the tentative matches with MAGSAC++ robust fundamental-matrix estimation.
    Fm, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    inliers = inliers > 0
    fig, ax = plt.subplots()

    # Visualize inlier matches between the two images with kornia_moons' draw_LAF_matches.
    draw_LAF_matches(
        KF.laf_from_center_scale_ori(torch.from_numpy(mkpts0).view(1, -1, 2),
                                     torch.ones(mkpts0.shape[0]).view(1, -1, 1, 1),
                                     torch.ones(mkpts0.shape[0]).view(1, -1, 1)),
        KF.laf_from_center_scale_ori(torch.from_numpy(mkpts1).view(1, -1, 2),
                                     torch.ones(mkpts1.shape[0]).view(1, -1, 1, 1),
                                     torch.ones(mkpts1.shape[0]).view(1, -1, 1)),
        torch.arange(mkpts0.shape[0]).view(-1, 1).repeat(1, 2),
        K.tensor_to_image(img1),
        K.tensor_to_image(img2),
        inliers,
        draw_dict={'inlier_color': (0.2, 1, 0.2),
                   'tentative_color': None,
                   'feature_color': (0.2, 0.5, 1), 'vertical': False}, ax=ax)

    fig.savefig('example.jpg', dpi=199)
    return 'example.jpg'

title = "Kornia-Loftr"
description = "Gradio demo for Kornia LoFTR: detector-free local feature matching with transformers. To use it, simply upload two images of the same scene, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.00680'>LoFTR: Detector-Free Local Feature Matching with Transformers</a> | <a href='https://github.com/zju3dv/LoFTR'>Github Repo</a></p>"

gr.Interface(
    inference,
    [gr.inputs.Image(type="file", label="Input1"), gr.inputs.Image(type="file", label="Input2")],
    gr.outputs.Image(type="file", label="Output"),
    title=title,
    description=description,
    article=article,
    enable_queue=True
).launch(debug=True)
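
A minimal companion sketch (not part of the commit) that exercises the same LoFTR matching core without the Gradio wrapper; it can be used to check locally that the kornia stack loads and produces correspondences. The image paths "left.jpg" and "right.jpg" are placeholders for any two overlapping outdoor photos.

import cv2
import torch
import kornia as K
import kornia.feature as KF

def count_loftr_matches(path0, path1):
    # Load each image as a float RGB tensor in [0, 1] with shape (1, 3, H, W).
    def load(path):
        img = K.image_to_tensor(cv2.imread(path), False).float() / 255.0
        return K.color.bgr_to_rgb(img)

    # LoFTR consumes grayscale input, so convert before matching, mirroring app.py.
    matcher = KF.LoFTR(pretrained="outdoor")
    input_dict = {"image0": K.color.rgb_to_grayscale(load(path0)),
                  "image1": K.color.rgb_to_grayscale(load(path1))}
    with torch.no_grad():
        correspondences = matcher(input_dict)
    return correspondences["keypoints0"].shape[0]

if __name__ == "__main__":
    # Placeholder paths; replace with two photos of the same scene.
    print(count_loftr_matches("left.jpg", "right.jpg"), "tentative LoFTR matches")

The same pattern could be extended with the cv2.findFundamentalMat filtering step from inference() if geometric verification of the matches is needed.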