import cv2
import gradio as gr
import kornia as K
import kornia.feature as KF
import matplotlib.pyplot as plt
import numpy as np
import torch
from kornia_moons.viz import draw_LAF_matches

# Build the LoFTR matcher once at import time rather than on every request;
# the pretrained "outdoor" weights are downloaded on first use.
matcher = KF.LoFTR(pretrained="outdoor").eval()

def load_torch_image(img):
    if isinstance(img, np.ndarray):
        # Gradio passes uploaded images as HxWxC uint8 numpy arrays
        img_tensor = K.image_to_tensor(img).float() / 255.0
    else:
        # Otherwise treat the input as a file path and load it with kornia
        img_tensor = K.io.load_image(img, K.io.ImageLoadType.RGB32)
    img_tensor = img_tensor.unsqueeze(0)  # add batch dimension: 1xCxHxW
    img_tensor = K.geometry.resize(img_tensor, (700, 700))
    return img_tensor
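
# Example (assuming an image file sits next to this script):
# load_torch_image("kn_church-2.jpg") returns a float tensor of shape
# [1, 3, 700, 700] with values in [0, 1].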

def inference(img1, img2):
    img1_tensor = load_torch_image(img1)
    img2_tensor = load_torch_image(img2)
    # LoFTR works on grayscale images only
    input_dict = {
        "image0": K.color.rgb_to_grayscale(img1_tensor),
        "image1": K.color.rgb_to_grayscale(img2_tensor),
    }
    with torch.no_grad():
        correspondences = matcher(input_dict)
    mkpts0 = correspondences["keypoints0"].cpu().numpy()
    mkpts1 = correspondences["keypoints1"].cpu().numpy()
    # Filter the putative matches with MAGSAC++; note this estimates a
    # fundamental matrix (Fm), not a homography
    Fm, inliers = cv2.findFundamentalMat(
        mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000
    )
    # findFundamentalMat returns None when there are too few matches
    inliers = inliers > 0 if Fm is not None else np.zeros((len(mkpts0), 1), bool)
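    # If finer-grained filtering were needed, one could additionally score each
    # match by its epipolar error, e.g. via
    # K.geometry.epipolar.symmetrical_epipolar_distance (a hypothetical extra
    # step; the boolean inlier mask is enough for visualization here).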
    fig, ax = plt.subplots()
    # Wrap the bare keypoints into local affine frames (LAFs) with unit scale
    # and orientation so kornia_moons can draw them
    draw_LAF_matches(
        KF.laf_from_center_scale_ori(
            torch.from_numpy(mkpts0).view(1, -1, 2),
            torch.ones(mkpts0.shape[0]).view(1, -1, 1, 1),
            torch.ones(mkpts0.shape[0]).view(1, -1, 1),
        ),
        KF.laf_from_center_scale_ori(
            torch.from_numpy(mkpts1).view(1, -1, 2),
            torch.ones(mkpts1.shape[0]).view(1, -1, 1, 1),
            torch.ones(mkpts1.shape[0]).view(1, -1, 1),
        ),
        torch.arange(mkpts0.shape[0]).view(-1, 1).repeat(1, 2),
        K.tensor_to_image(img1_tensor.squeeze()),
        K.tensor_to_image(img2_tensor.squeeze()),
        inliers,
        draw_dict={
            "inlier_color": (0.2, 1, 0.2),
            "tentative_color": None,
            "feature_color": (0.2, 0.5, 1),
            "vertical": False,
        },
        ax=ax,
    )
    plt.axis("off")
    return fig
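
# Optional local sanity check (assumes the example images ship alongside this
# script); uncomment to run one match without launching the Gradio UI:
# fig = inference("kn_church-2.jpg", "kn_church-8.jpg")
# fig.savefig("matches.png", bbox_inches="tight")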

title = "Kornia-LoFTR"
description = "Gradio demo for Kornia-LoFTR: Detector-Free Local Feature Matching with Transformers. To use it, upload two images of the same scene, or click the example below to load a pair. Read more at the links below."
article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Open Source Differentiable Computer Vision Library</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://github.com/zju3dv/LoFTR' target='_blank'>LoFTR Github</a> | <a href='https://arxiv.org/abs/2104.00680' target='_blank'>LoFTR: Detector-Free Local Feature Matching with Transformers</a></p>"
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
examples = [['kn_church-2.jpg','kn_church-8.jpg']]
iface = gr.Interface(
    inference,
    inputs=[
        gr.Image(type="numpy", label="Input1"),
        gr.Image(type="numpy", label="Input2"),
    ],
    outputs=gr.Plot(label="Feature Matches"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
)
iface.launch(debug=True)