Realcat committed on
Commit
a44851c
1 Parent(s): 84efff1

add: resize image

Browse files
Files changed (4) hide show
  1. hloc/extract_features.py +32 -0
  2. hloc/match_dense.py +20 -2
  3. ui/app_class.py +56 -3
  4. ui/utils.py +15 -0
hloc/extract_features.py CHANGED
@@ -73,6 +73,10 @@ confs = {
73
  "preprocessing": {
74
  "grayscale": True,
75
  "resize_max": 1600,
 
 
 
 
76
  },
77
  },
78
  "r2d2": {
@@ -102,6 +106,10 @@ confs = {
102
  "preprocessing": {
103
  "grayscale": False,
104
  "resize_max": 1600,
 
 
 
 
105
  },
106
  },
107
  "d2net-ms": {
@@ -114,6 +122,10 @@ confs = {
114
  "preprocessing": {
115
  "grayscale": False,
116
  "resize_max": 1600,
 
 
 
 
117
  },
118
  },
119
  "rord": {
@@ -126,6 +138,10 @@ confs = {
126
  "preprocessing": {
127
  "grayscale": False,
128
  "resize_max": 1600,
 
 
 
 
129
  },
130
  },
131
  "rootsift": {
@@ -201,6 +217,10 @@ confs = {
201
  "preprocessing": {
202
  "grayscale": False,
203
  "resize_max": 1600,
 
 
 
 
204
  },
205
  },
206
  "xfeat": {
@@ -212,6 +232,10 @@ confs = {
212
  "preprocessing": {
213
  "grayscale": False,
214
  "resize_max": 1600,
 
 
 
 
215
  },
216
  },
217
  "alike": {
@@ -228,6 +252,10 @@ confs = {
228
  "preprocessing": {
229
  "grayscale": False,
230
  "resize_max": 1600,
 
 
 
 
231
  },
232
  },
233
  "lanet": {
@@ -240,6 +268,10 @@ confs = {
240
  "preprocessing": {
241
  "grayscale": False,
242
  "resize_max": 1600,
 
 
 
 
243
  },
244
  },
245
  "darkfeat": {
 
73
  "preprocessing": {
74
  "grayscale": True,
75
  "resize_max": 1600,
76
+ "force_resize": True,
77
+ "width": 640,
78
+ "height": 480,
79
+ "dfactor": 8,
80
  },
81
  },
82
  "r2d2": {
 
106
  "preprocessing": {
107
  "grayscale": False,
108
  "resize_max": 1600,
109
+ "force_resize": True,
110
+ "width": 640,
111
+ "height": 480,
112
+ "dfactor": 8,
113
  },
114
  },
115
  "d2net-ms": {
 
122
  "preprocessing": {
123
  "grayscale": False,
124
  "resize_max": 1600,
125
+ "force_resize": True,
126
+ "width": 640,
127
+ "height": 480,
128
+ "dfactor": 8,
129
  },
130
  },
131
  "rord": {
 
138
  "preprocessing": {
139
  "grayscale": False,
140
  "resize_max": 1600,
141
+ "force_resize": True,
142
+ "width": 640,
143
+ "height": 480,
144
+ "dfactor": 8,
145
  },
146
  },
147
  "rootsift": {
 
217
  "preprocessing": {
218
  "grayscale": False,
219
  "resize_max": 1600,
220
+ "force_resize": True,
221
+ "width": 640,
222
+ "height": 480,
223
+ "dfactor": 8,
224
  },
225
  },
226
  "xfeat": {
 
232
  "preprocessing": {
233
  "grayscale": False,
234
  "resize_max": 1600,
235
+ "force_resize": True,
236
+ "width": 640,
237
+ "height": 480,
238
+ "dfactor": 8,
239
  },
240
  },
241
  "alike": {
 
252
  "preprocessing": {
253
  "grayscale": False,
254
  "resize_max": 1600,
255
+ "force_resize": True,
256
+ "width": 640,
257
+ "height": 480,
258
+ "dfactor": 8,
259
  },
260
  },
261
  "lanet": {
 
268
  "preprocessing": {
269
  "grayscale": False,
270
  "resize_max": 1600,
271
+ "force_resize": True,
272
+ "width": 640,
273
+ "height": 480,
274
+ "dfactor": 8,
275
  },
276
  },
277
  "darkfeat": {
hloc/match_dense.py CHANGED
@@ -91,7 +91,14 @@ confs = {
91
  "max_keypoints": 2000,
92
  "match_threshold": 0.2,
93
  },
94
- "preprocessing": {"grayscale": True, "resize_max": 1024, "dfactor": 8},
 
 
 
 
 
 
 
95
  "max_error": 2, # max error for assigned keypoints (in px)
96
  "cell_size": 8, # size of quantization patch (max 1 kp/patch)
97
  },
@@ -104,7 +111,14 @@ confs = {
104
  "max_keypoints": 2000,
105
  "match_threshold": 0.2,
106
  },
107
- "preprocessing": {"grayscale": True, "resize_max": 1024, "dfactor": 8},
 
 
 
 
 
 
 
108
  "max_error": 4, # max error for assigned keypoints (in px)
109
  "cell_size": 4, # size of quantization patch (max 1 kp/patch)
110
  },
@@ -251,6 +265,10 @@ confs = {
251
  "resize_max": 1024,
252
  "dfactor": 8,
253
  "force_resize": False,
 
 
 
 
254
  },
255
  },
256
  "sold2": {
 
91
  "max_keypoints": 2000,
92
  "match_threshold": 0.2,
93
  },
94
+ "preprocessing": {
95
+ "grayscale": True,
96
+ "resize_max": 1024,
97
+ "dfactor": 8,
98
+ "width": 640,
99
+ "height": 480,
100
+ "force_resize": True,
101
+ },
102
  "max_error": 2, # max error for assigned keypoints (in px)
103
  "cell_size": 8, # size of quantization patch (max 1 kp/patch)
104
  },
 
111
  "max_keypoints": 2000,
112
  "match_threshold": 0.2,
113
  },
114
+ "preprocessing": {
115
+ "grayscale": True,
116
+ "resize_max": 1024,
117
+ "dfactor": 8,
118
+ "width": 640,
119
+ "height": 480,
120
+ "force_resize": True,
121
+ },
122
  "max_error": 4, # max error for assigned keypoints (in px)
123
  "cell_size": 4, # size of quantization patch (max 1 kp/patch)
124
  },
 
265
  "resize_max": 1024,
266
  "dfactor": 8,
267
  "force_resize": False,
268
+ "resize_max": 1024,
269
+ "width": 640,
270
+ "height": 480,
271
+ "dfactor": 8,
272
  },
273
  },
274
  "sold2": {
ui/app_class.py CHANGED
@@ -109,6 +109,29 @@ class ImageMatchingApp:
109
  )
110
 
111
  with gr.Accordion("Advanced Setting", open=False):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  with gr.Accordion("Matching Setting", open=True):
113
  with gr.Row():
114
  match_setting_threshold = gr.Slider(
@@ -195,7 +218,12 @@ class ImageMatchingApp:
195
  "setting_geometry"
196
  ],
197
  )
198
-
 
 
 
 
 
199
  # collect inputs
200
  state_cache = gr.State({})
201
  inputs = [
@@ -211,7 +239,9 @@ class ImageMatchingApp:
211
  ransac_max_iter,
212
  choice_geometry_type,
213
  gr.State(self.matcher_zoo),
214
- # state_cache,
 
 
215
  ]
216
 
217
  # Add some examples
@@ -295,7 +325,6 @@ class ImageMatchingApp:
295
  inputs=match_image_src,
296
  outputs=input_image1,
297
  )
298
-
299
  # collect outputs
300
  outputs = [
301
  output_keypoints,
@@ -336,6 +365,7 @@ class ImageMatchingApp:
336
  ransac_max_iter,
337
  choice_geometry_type,
338
  output_pred,
 
339
  ]
340
  button_reset.click(
341
  fn=self.ui_reset_state,
@@ -422,6 +452,27 @@ class ImageMatchingApp:
422
  "source": choice, # The list of image sources to be displayed
423
  }
424
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
  def ui_reset_state(
426
  self,
427
  *args: Any,
@@ -446,6 +497,7 @@ class ImageMatchingApp:
446
  int,
447
  float,
448
  int,
 
449
  ]:
450
  """
451
  Reset the state of the UI.
@@ -487,6 +539,7 @@ class ImageMatchingApp:
487
  self.cfg["defaults"]["ransac_max_iter"], # ransac_max_iter: int
488
  self.cfg["defaults"]["setting_geometry"], # geometry: str
489
  None, # predictions
 
490
  )
491
 
492
  def display_supported_algorithms(self, style="tab"):
 
109
  )
110
 
111
  with gr.Accordion("Advanced Setting", open=False):
112
+ with gr.Accordion("Image Setting", open=True):
113
+ with gr.Row():
114
+ image_force_resize_cb = gr.Checkbox(
115
+ label="Force Resize",
116
+ value=False,
117
+ interactive=True,
118
+ )
119
+ image_setting_height = gr.Slider(
120
+ minimum=48,
121
+ maximum=2048,
122
+ step=16,
123
+ label="Image Height",
124
+ value=480,
125
+ visible=False,
126
+ )
127
+ image_setting_width = gr.Slider(
128
+ minimum=64,
129
+ maximum=2048,
130
+ step=16,
131
+ label="Image Width",
132
+ value=640,
133
+ visible=False,
134
+ )
135
  with gr.Accordion("Matching Setting", open=True):
136
  with gr.Row():
137
  match_setting_threshold = gr.Slider(
 
218
  "setting_geometry"
219
  ],
220
  )
221
+ # image resize
222
+ image_force_resize_cb.select(
223
+ fn=self._on_select_force_resize,
224
+ inputs=image_force_resize_cb,
225
+ outputs=[image_setting_width, image_setting_height],
226
+ )
227
  # collect inputs
228
  state_cache = gr.State({})
229
  inputs = [
 
239
  ransac_max_iter,
240
  choice_geometry_type,
241
  gr.State(self.matcher_zoo),
242
+ image_force_resize_cb,
243
+ image_setting_width,
244
+ image_setting_height,
245
  ]
246
 
247
  # Add some examples
 
325
  inputs=match_image_src,
326
  outputs=input_image1,
327
  )
 
328
  # collect outputs
329
  outputs = [
330
  output_keypoints,
 
365
  ransac_max_iter,
366
  choice_geometry_type,
367
  output_pred,
368
+ image_force_resize_cb,
369
  ]
370
  button_reset.click(
371
  fn=self.ui_reset_state,
 
452
  "source": choice, # The list of image sources to be displayed
453
  }
454
 
455
+ def _on_select_force_resize(self, visible: bool = False):
456
+ image_height = gr.Slider(
457
+ minimum=48,
458
+ maximum=2048,
459
+ step=16,
460
+ label="Image Height",
461
+ value=480,
462
+ visible=visible,
463
+ interactive=True,
464
+ )
465
+ image_width = gr.Slider(
466
+ minimum=64,
467
+ maximum=2048,
468
+ step=16,
469
+ label="Image Width",
470
+ value=640,
471
+ visible=visible,
472
+ interactive=True,
473
+ )
474
+ return image_width, image_height
475
+
476
  def ui_reset_state(
477
  self,
478
  *args: Any,
 
497
  int,
498
  float,
499
  int,
500
+ bool,
501
  ]:
502
  """
503
  Reset the state of the UI.
 
539
  self.cfg["defaults"]["ransac_max_iter"], # ransac_max_iter: int
540
  self.cfg["defaults"]["setting_geometry"], # geometry: str
541
  None, # predictions
542
+ False,
543
  )
544
 
545
  def display_supported_algorithms(self, style="tab"):
ui/utils.py CHANGED
@@ -829,6 +829,9 @@ def run_matching(
829
  ransac_max_iter: int = DEFAULT_RANSAC_MAX_ITER,
830
  choice_geometry_type: str = DEFAULT_SETTING_GEOMETRY,
831
  matcher_zoo: Dict[str, Any] = None,
 
 
 
832
  use_cached_model: bool = False,
833
  ) -> Tuple[
834
  np.ndarray,
@@ -902,6 +905,12 @@ def run_matching(
902
  t1 = time.time()
903
 
904
  if model["dense"]:
 
 
 
 
 
 
905
  pred = match_dense.match_images(
906
  matcher, image0, image1, match_conf["preprocessing"], device=DEVICE
907
  )
@@ -925,6 +934,12 @@ def run_matching(
925
  else:
926
  extractor = get_feature_model(extract_conf)
927
 
 
 
 
 
 
 
928
  pred0 = extract_features.extract(
929
  extractor, image0, extract_conf["preprocessing"]
930
  )
 
829
  ransac_max_iter: int = DEFAULT_RANSAC_MAX_ITER,
830
  choice_geometry_type: str = DEFAULT_SETTING_GEOMETRY,
831
  matcher_zoo: Dict[str, Any] = None,
832
+ force_resize: bool = False,
833
+ image_width: int = 640,
834
+ image_height: int = 480,
835
  use_cached_model: bool = False,
836
  ) -> Tuple[
837
  np.ndarray,
 
905
  t1 = time.time()
906
 
907
  if model["dense"]:
908
+ match_conf["preprocessing"]["force_resize"] = force_resize
909
+ if force_resize:
910
+ match_conf["preprocessing"]["height"] = image_height
911
+ match_conf["preprocessing"]["width"] = image_width
912
+ logger.info(f"Force resize to {image_width}x{image_height}")
913
+
914
  pred = match_dense.match_images(
915
  matcher, image0, image1, match_conf["preprocessing"], device=DEVICE
916
  )
 
934
  else:
935
  extractor = get_feature_model(extract_conf)
936
 
937
+ extract_conf["preprocessing"]["force_resize"] = force_resize
938
+ if force_resize:
939
+ extract_conf["preprocessing"]["height"] = image_height
940
+ extract_conf["preprocessing"]["width"] = image_width
941
+ logger.info(f"Force resize to {image_width}x{image_height}")
942
+
943
  pred0 = extract_features.extract(
944
  extractor, image0, extract_conf["preprocessing"]
945
  )