Commit a9f0f33 by leonelhs
1 Parent(s): 035c6f2

missing files

Files changed (4):
  1. .gitattributes +1 -0
  2. app.py +19 -18
  3. delf.py +108 -0
  4. requirements.txt +1 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
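
For reference, an entry like the one added above is what running git lfs track "*.jpg" would normally append to .gitattributes, so the example JPEG images committed to the repo are stored through Git LFS rather than as plain Git blobs.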
app.py CHANGED
@@ -1,17 +1,17 @@
 import gradio as gr
-from style_transfer import StyleTransfer
+from delf import DeepLocalFeatures
 
-style = StyleTransfer()
+delf = DeepLocalFeatures()
 
 
-def predict(content_image, style_image):
-    return style.transfer(content_image, style_image)
+def predict(image_a, image_b):
+    return delf.match(image_a, image_b)
 
 
 footer = r"""
 <center>
 <b>
-Demo for <a href='https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization'>Style Transfer</a>
+Demo for <a href='https://www.tensorflow.org/hub/tutorials/tf_hub_delf_module'>DELF</a>
 </b>
 </center>
 """
@@ -25,28 +25,29 @@ coffee&emoji=&slug=leonelhs&button_colour=FFDD00&font_colour=000000&font_family=
 </center>
 """
 
-with gr.Blocks(title="Style Transfer") as app:
-    gr.HTML("<center><h1>Style Transfer</h1></center>")
-    gr.HTML("<center><h3>Fast Style Transfer for Arbitrary Styles</h3></center>")
+with gr.Blocks(title="DELF") as app:
+    gr.HTML("<center><h1>Match images using DELF</h1></center>")
+    gr.HTML("<center><h3>Neural network and logic for processing images to identify keypoints and their "
+            "descriptors.</h3></center>")
     with gr.Row(equal_height=False):
         with gr.Column():
-            content_img = gr.Image(type="filepath", label="Content image")
-            style_img = gr.Image(type="filepath", label="Style image")
+            input_img_a = gr.Image(type="pil", label="Input image A")
+            input_img_b = gr.Image(type="pil", label="Input image B")
             run_btn = gr.Button(variant="primary")
         with gr.Column():
             output_img = gr.Image(type="pil", label="Output image")
-            gr.ClearButton(components=[content_img, style_img, output_img], variant="stop")
+            gr.ClearButton(components=[input_img_a, input_img_b, output_img], variant="stop")
 
-    run_btn.click(predict, [content_img, style_img], [output_img])
+    run_btn.click(predict, [input_img_a, input_img_b], [output_img])
 
     with gr.Row():
-        blobs_c = [[f"examples/contents/{x:02d}.jpg"] for x in range(1, 4)]
-        examples_c = gr.Dataset(components=[content_img], samples=blobs_c)
-        examples_c.click(lambda x: x[0], [examples_c], [content_img])
+        blobs_a = [[f"examples/image_a/{x:02d}.jpg"] for x in range(1, 5)]
+        examples_a = gr.Dataset(components=[input_img_a], samples=blobs_a)
+        examples_a.click(lambda x: x[0], [examples_a], [input_img_a])
     with gr.Row():
-        blobs_s = [[f"examples/styles/{x:02d}.jpg"] for x in range(1, 12)]
-        examples_s = gr.Dataset(components=[style_img], samples=blobs_s)
-        examples_s.click(lambda x: x[0], [examples_s], [style_img])
+        blobs_b = [[f"examples/image_b/{x:02d}.jpg"] for x in range(1, 5)]
+        examples_b = gr.Dataset(components=[input_img_b], samples=blobs_b)
+        examples_b.click(lambda x: x[0], [examples_b], [input_img_b])
 
     with gr.Row():
         gr.HTML(footer)
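
The hunk above ends at the footer row, so the call that actually starts the interface is not part of this diff. As a minimal sketch for running the demo locally, assuming the remainder of app.py launches the Blocks object in the usual Gradio way:

    # Hypothetical snippet at the end of app.py; the real launch call is outside this hunk.
    if __name__ == "__main__":
        app.launch()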
delf.py CHANGED
@@ -0,0 +1,108 @@
+#############################################################################
+#
+# Source from:
+# https://www.tensorflow.org/hub/tutorials/tf_hub_delf_module
+# Forked from:
+# https://www.tensorflow.org/hub/tutorials/tf_hub_delf_module
+# Reimplemented by: Leonel Hernández
+#
+##############################################################################
+import matplotlib.pyplot as plt
+import numpy as np
+import tensorflow as tf
+from PIL import Image, ImageOps
+from huggingface_hub import snapshot_download
+from scipy.spatial import cKDTree
+from skimage.feature import plot_matches
+from skimage.measure import ransac
+from skimage.transform import AffineTransform
+
+DELF_REPO_ID = "leonelhs/delf"
+
+
+def match_images(image1, image2, result1, result2):
+    distance_threshold = 0.8
+
+    # Read features.
+    num_features_1 = result1['locations'].shape[0]
+    print("Loaded image 1's %d features" % num_features_1)
+
+    num_features_2 = result2['locations'].shape[0]
+    print("Loaded image 2's %d features" % num_features_2)
+
+    # Find nearest-neighbor matches using a KD tree.
+    d1_tree = cKDTree(result1['descriptors'])
+    _, indices = d1_tree.query(
+        result2['descriptors'],
+        distance_upper_bound=distance_threshold)
+
+    # Select feature locations for putative matches.
+    locations_2_to_use = np.array([
+        result2['locations'][i,]
+        for i in range(num_features_2)
+        if indices[i] != num_features_1
+    ])
+    locations_1_to_use = np.array([
+        result1['locations'][indices[i],]
+        for i in range(num_features_2)
+        if indices[i] != num_features_1
+    ])
+
+    # Perform geometric verification using RANSAC.
+    _, inliers = ransac(
+        (locations_1_to_use, locations_2_to_use),
+        AffineTransform,
+        min_samples=3,
+        residual_threshold=20,
+        max_trials=1000)
+
+    print('Found %d inliers' % sum(inliers))
+
+    # Visualize correspondences.
+    fig, ax = plt.subplots()
+    inlier_idxs = np.nonzero(inliers)[0]
+    stack = np.column_stack((inlier_idxs, inlier_idxs))
+    plot_matches(
+        ax,
+        image1,
+        image2,
+        locations_1_to_use,
+        locations_2_to_use,
+        stack,
+        matches_color='b')
+    ax.axis('off')
+    ax.set_title('DELF correspondences')
+
+    fig.canvas.draw()
+    image_array = np.array(fig.canvas.renderer.buffer_rgba())
+    image = Image.fromarray(image_array)
+    return image.convert("RGB")
+
+
+def crop_image(image, width=256, height=256):
+    return ImageOps.fit(image, (width, height), Image.LANCZOS)
+
+
+class DeepLocalFeatures:
+
+    def __init__(self):
+        model_path = snapshot_download(DELF_REPO_ID)
+        self.model = tf.saved_model.load(model_path).signatures['default']
+
+    def run_delf(self, image):
+        np_image = np.array(image)
+        float_image = tf.image.convert_image_dtype(np_image, tf.float32)
+
+        return self.model(
+            image=float_image,
+            score_threshold=tf.constant(100.0),
+            image_scales=tf.constant([0.25, 0.3536, 0.5, 0.7071, 1.0, 1.4142, 2.0]),
+            max_feature_num=tf.constant(1000))
+
+    def match(self, image_a, image_b):
+        image_a = crop_image(image_a)
+        image_b = crop_image(image_b)
+        result_a = self.run_delf(image_a)
+        result_b = self.run_delf(image_b)
+        return match_images(image_a, image_b, result_a, result_b)
+
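
As a usage sketch outside the Gradio UI, the new module can be exercised directly; the image paths below mirror the examples/ folders referenced in app.py and are otherwise illustrative (any two photos of the same scene should work):

    from PIL import Image
    from delf import DeepLocalFeatures

    # Example paths follow the layout used by app.py (examples/image_a, examples/image_b).
    image_a = Image.open("examples/image_a/01.jpg").convert("RGB")
    image_b = Image.open("examples/image_b/01.jpg").convert("RGB")

    delf = DeepLocalFeatures()  # downloads the leonelhs/delf SavedModel via snapshot_download
    matches = delf.match(image_a, image_b)  # returns a PIL image of the RANSAC-verified matches
    matches.save("delf_matches.jpg")  # illustrative output filename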
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 pillow>=10.0.1
 numpy>=1.23.5
 tensorflow>=2.15.0
+scikit-image>=0.22.0