diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..c7a348215c21dadf3a71e7e378b7542cfd6a1abc --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 80 +extend-ignore = E203,E501,E402 +exclude = .git,__pycache__,build,.venv/,third_party \ No newline at end of file diff --git a/.github/.stale.yml b/.github/.stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..dc90e5a1c3aad4818a813606b52fdecd2fdf6782 --- /dev/null +++ b/.github/.stale.yml @@ -0,0 +1,17 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 60 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 7 +# Issues with these labels will never be considered stale +exemptLabels: + - pinned + - security +# Label to use when marking an issue as stale +staleLabel: wontfix +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. +# Comment to post when closing a stale issue. Set to `false` to disable +closeComment: false diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000000000000000000000000000000000..036bffc7cbe7f88ef6c4657752a24a73e4bf9a76 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,30 @@ +--- +name: πŸ› Bug report +about: If something isn't working πŸ”§ +title: "" +labels: bug +assignees: +--- + +## πŸ› Bug Report + + + +## πŸ”¬ How To Reproduce + +Steps to reproduce the behavior: + +1. ... + +### Environment + +- OS: [e.g. Linux / Windows / macOS] +- Python version, get it with: + +```bash +python --version +``` + +## πŸ“Ž Additional context + + diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..8f2da5489e290e6e55426eaeac2234c61f21f638 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,3 @@ +# Configuration: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository + +blank_issues_enabled: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000000000000000000000000000000..7ce8c1277148183d41908db966ee2f0978c7a02a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,15 @@ +--- +name: πŸš€ Feature request +about: Suggest an idea for this project πŸ– +title: "" +labels: enhancement +assignees: +--- + +## πŸš€ Feature Request + + + +## πŸ“Ž Additional context + + diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000000000000000000000000000000000000..0b624eefe6041c776f1ab4a6aba44d3d1c8cdd83 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,25 @@ +--- +name: ❓ Question +about: Ask a question about this project πŸŽ“ +title: "" +labels: question +assignees: +--- + +## Checklist + + + +- [ ] I've searched the project's [`issues`] + +## ❓ Question + + + +How can I [...]? + +Is it possible to [...]? 
+ +## πŸ“Ž Additional context + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..4dab74cab6b6b173c9a0a15e1d7652f75bc29818 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +## Description + + + +## Related Issue + + diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 0000000000000000000000000000000000000000..fc4b3c3f511d20f0dd255f76f3fea1fe0dbd1ce5 --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,24 @@ +# Release drafter configuration https://github.com/release-drafter/release-drafter#configuration +# Emojis were chosen to match the https://gitmoji.carloscuesta.me/ + +name-template: "v$RESOLVED_VERSION" +tag-template: "v$RESOLVED_VERSION" + +categories: + - title: ":rocket: Features" + labels: [enhancement, feature] + - title: ":wrench: Fixes" + labels: [bug, bugfix, fix] + - title: ":toolbox: Maintenance & Refactor" + labels: [refactor, refactoring, chore] + - title: ":package: Build System & CI/CD & Test" + labels: [build, ci, testing, test] + - title: ":pencil: Documentation" + labels: [documentation] + - title: ":arrow_up: Dependencies updates" + labels: [dependencies] + +template: | + ## What’s Changed + + $CHANGES diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..40470011ab9b70e7c4f456a72c3c9fc41a68a83c --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,32 @@ +name: CI CPU + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.10" + + - name: Install dependencies + run: | + pip install -r requirements.txt + sudo apt-get update && sudo apt-get install ffmpeg libsm6 libxext6 -y + + - name: Run tests + run: python test_app_cli.py diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 0000000000000000000000000000000000000000..bfabd5ff589e0d0b74884621b74618c50d2cab50 --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,24 @@ +name: Format and Lint Checks +on: + push: + branches: + - main + paths: + - '*.py' + pull_request: + types: [ assigned, opened, synchronize, reopened ] +jobs: + check: + name: Format and Lint Checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + - run: python -m pip install --upgrade pip + - run: python -m pip install .[dev] + - run: python -m flake8 common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py + - run: python -m isort common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py --check-only --diff + - run: python -m black common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py --check --diff \ No newline at end of file diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml new file mode 100644 index 0000000000000000000000000000000000000000..58efdb9b1e39b9ae044c71ea6dcece2f23db4823 --- /dev/null +++ b/.github/workflows/release-drafter.yml @@ -0,0 +1,16 @@ +name: Release Drafter + +on: + push: + # branches to consider in the event; optional, defaults to all + branches: + - master + +jobs: + update_release_draft: + runs-on: ubuntu-latest + steps: + # Drafts 
your next Release notes as Pull Requests are merged into "master" + - uses: release-drafter/release-drafter@v5.23.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 859cf52a947ac96811227b6c869dd15da1d92179..267f939700419fe8a7cf502f11669f06a36b24e9 100644 --- a/.gitignore +++ b/.gitignore @@ -14,9 +14,11 @@ experiments third_party/REKD hloc/matchers/dedode.py gradio_cached_examples - +*.mp4 hloc/matchers/quadtree.py third_party/QuadTreeAttention desktop.ini +*.egg-info +output.pkl experiments* gen_example.py \ No newline at end of file diff --git a/README.md b/README.md index 55715e079853f433feb99d04e3ff18906313eb29..dd25bf69bf39c337a82d0fdcce7fb5b048e5bc84 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,8 @@ The tool currently supports various popular image matching algorithms, namely: - [ ] [DUSt3R](https://github.com/naver/dust3r), arXiv 2023 - [x] [LightGlue](https://github.com/cvg/LightGlue), ICCV 2023 - [x] [DarkFeat](https://github.com/THU-LYJ-Lab/DarkFeat), AAAI 2023 +- [x] [SFD2](https://github.com/feixue94/sfd2), CVPR 2023 +- [x] [IMP](https://github.com/feixue94/imp-release), CVPR 2023 - [ ] [ASTR](https://github.com/ASTR2023/ASTR), CVPR 2023 - [ ] [SEM](https://github.com/SEM2023/SEM), CVPR 2023 - [ ] [DeepLSD](https://github.com/cvg/DeepLSD), CVPR 2023 diff --git a/common/api.py b/common/api.py index 3ddc02e879cd2cfd2a2cf49c46529d383ccdebf7..c56503dcd24ebad6d841a2d6760930cb2817f459 100644 --- a/common/api.py +++ b/common/api.py @@ -1,26 +1,23 @@ -import cv2 -import torch import warnings -import numpy as np from pathlib import Path -from typing import Dict, Any, Optional, Tuple, List, Union -from hloc import logger -from hloc import match_dense, match_features, extract_features +from typing import Any, Dict, Optional + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import torch + +from hloc import extract_features, logger, match_dense, match_features from hloc.utils.viz import add_text, plot_keypoints + from .utils import ( - load_config, - get_model, - get_feature_model, - filter_matches, - device, ROOT, + filter_matches, + get_feature_model, + get_model, + load_config, ) -from .viz import ( - fig2im, - plot_images, - display_matches, -) -import matplotlib.pyplot as plt +from .viz import display_matches, fig2im, plot_images warnings.simplefilter("ignore") @@ -109,7 +106,7 @@ class ImageMatchingAPI(torch.nn.Module): "match_threshold" ] = match_threshold except TypeError as e: - breakpoint() + logger.error(e) else: self.conf["feature"]["model"]["max_keypoints"] = max_keypoints self.conf["feature"]["model"][ @@ -137,7 +134,9 @@ class ImageMatchingAPI(torch.nn.Module): self.match_conf["preprocessing"], device=self.device, ) - last_fixed = "{}".format(self.match_conf["model"]["name"]) + last_fixed = "{}".format( # noqa: F841 + self.match_conf["model"]["name"] + ) else: pred0 = extract_features.extract( self.extractor, img0, self.extract_conf["preprocessing"] @@ -290,7 +289,5 @@ class ImageMatchingAPI(torch.nn.Module): if __name__ == "__main__": - import argparse - config = load_config(ROOT / "common/config.yaml") - test_api(config) + api = ImageMatchingAPI(config) diff --git a/common/app_class.py b/common/app_class.py index 058e82512a7d4b35b0815a5ed81ef8ba90191d25..ebba28aead5551b9155d545782c8770730abc9b1 100644 --- a/common/app_class.py +++ b/common/app_class.py @@ -1,22 +1,21 @@ -import argparse -import numpy as np -import gradio as gr from pathlib import Path -from typing import Dict, Any, Optional, Tuple, List, 
Union +from typing import Any, Dict, Optional, Tuple + +import gradio as gr +import numpy as np + from common.utils import ( - ransac_zoo, + GRADIO_VERSION, + gen_examples, generate_warp_images, - load_config, get_matcher_zoo, + load_config, + ransac_zoo, run_matching, run_ransac, send_to_match, - gen_examples, - GRADIO_VERSION, - ROOT, ) - DESCRIPTION = """ # Image Matching WebUI This Space demonstrates [Image Matching WebUI](https://github.com/Vincentqyw/image-matching-webui) by vincent qin. Feel free to play with it, or duplicate to run image matching without a queue! @@ -132,12 +131,14 @@ class ImageMatchingApp: label="Keypoint thres.", value=0.015, ) - detect_line_threshold = gr.Slider( - minimum=0.1, - maximum=1, - step=0.01, - label="Line thres.", - value=0.2, + detect_line_threshold = ( # noqa: F841 + gr.Slider( + minimum=0.1, + maximum=1, + step=0.01, + label="Line thres.", + value=0.2, + ) ) # matcher_lists = gr.Radio( # ["NN-mutual", "Dual-Softmax"], diff --git a/common/config.yaml b/common/config.yaml index f4f435df02e5b7ab79e5862d30c50d193d2fb8bf..32438bc24059f3cc8cfca28ac1c085a5d95f09b8 100644 --- a/common/config.yaml +++ b/common/config.yaml @@ -30,20 +30,22 @@ matcher_zoo: DUSt3R: # TODO: duster is under development enable: true + skip_ci: true matcher: duster dense: true - info: + info: name: DUSt3R #dispaly name source: "CVPR 2024" github: https://github.com/naver/dust3r paper: https://arxiv.org/abs/2312.14132 project: https://dust3r.europe.naverlabs.com - display: true + display: true GIM(dkm): enable: true + skip_ci: true matcher: gim(dkm) dense: true - info: + info: name: GIM(DKM) #dispaly name source: "ICLR 2024" github: https://github.com/xuelunshen/gim @@ -52,8 +54,9 @@ matcher_zoo: display: true RoMa: matcher: roma + skip_ci: true dense: true - info: + info: name: RoMa #dispaly name source: "CVPR 2024" github: https://github.com/Parskatt/RoMa @@ -62,8 +65,9 @@ matcher_zoo: display: true dkm: matcher: dkm + skip_ci: true dense: true - info: + info: name: DKM #dispaly name source: "CVPR 2023" github: https://github.com/Parskatt/DKM @@ -73,7 +77,7 @@ matcher_zoo: loftr: matcher: loftr dense: true - info: + info: name: LoFTR #dispaly name source: "CVPR 2021" github: https://github.com/zju3dv/LoFTR @@ -82,6 +86,7 @@ matcher_zoo: display: true cotr: enable: false + skip_ci: true matcher: cotr dense: true info: @@ -363,3 +368,31 @@ matcher_zoo: paper: https://arxiv.org/abs/2104.03362 project: null display: true + + sfd2+imp: + matcher: imp + feature: sfd2 + enable: false + dense: false + skip_ci: true + info: + name: SFD2+IMP #dispaly name + source: "CVPR 2023" + github: https://github.com/feixue94/imp-release + paper: https://arxiv.org/pdf/2304.14837 + project: https://feixue94.github.io/ + display: true + + sfd2+mnn: + matcher: NN-mutual + feature: sfd2 + enable: false + dense: false + skip_ci: true + info: + name: SFD2+MNN #dispaly name + source: "CVPR 2023" + github: https://github.com/feixue94/sfd2 + paper: https://arxiv.org/abs/2304.14845 + project: https://feixue94.github.io/ + display: true diff --git a/common/utils.py b/common/utils.py index a5dd496686cd4fb3ea55c005950b931af50b787a..b6c52c5cf092888126a6cd6e954169a16ac4717b 100644 --- a/common/utils.py +++ b/common/utils.py @@ -1,35 +1,35 @@ import os -import cv2 -import sys -import torch +import pickle import random -import psutil import shutil -import numpy as np -import gradio as gr -from PIL import Image +import time +import warnings +from itertools import combinations from pathlib import Path +from typing 
import Any, Callable, Dict, List, Optional, Tuple, Union + +import cv2 +import gradio as gr +import matplotlib.pyplot as plt +import numpy as np import poselib -from itertools import combinations -from typing import Callable, Dict, Any, Optional, Tuple, List, Union -from hloc import matchers, extractors, logger -from hloc.utils.base_model import dynamic_load -from hloc import match_dense, match_features, extract_features -from .viz import ( - fig2im, - plot_images, - display_matches, - display_keypoints, - plot_color_line_matches, +import psutil +from PIL import Image + +from hloc import ( + DEVICE, + extract_features, + extractors, + logger, + match_dense, + match_features, + matchers, ) -import time -import matplotlib.pyplot as plt -import warnings -import tempfile -import pickle +from hloc.utils.base_model import dynamic_load + +from .viz import display_keypoints, display_matches, fig2im, plot_images warnings.simplefilter("ignore") -device = "cuda" if torch.cuda.is_available() else "cpu" ROOT = Path(__file__).parent.parent # some default values @@ -91,14 +91,13 @@ class ModelCache: host_colocation = int(os.environ.get("HOST_COLOCATION", "1")) vm = psutil.virtual_memory() du = shutil.disk_usage(".") - vm_ratio = host_colocation * vm.used / vm.total if verbose: logger.info( f"RAM: {vm.used / 1e9:.1f}/{vm.total / host_colocation / 1e9:.1f}GB" ) - # logger.info( - # f"DISK: {du.used / 1e9:.1f}/{du.total / host_colocation / 1e9:.1f}GB" - # ) + logger.info( + f"DISK: {du.used / 1e9:.1f}/{du.total / host_colocation / 1e9:.1f}GB" + ) return vm.used / 1e9 def print_memory_usage(self): @@ -173,7 +172,7 @@ def get_model(match_conf: Dict[str, Any]): A matcher model instance. """ Model = dynamic_load(matchers, match_conf["model"]["name"]) - model = Model(match_conf["model"]).eval().to(device) + model = Model(match_conf["model"]).eval().to(DEVICE) return model @@ -188,7 +187,7 @@ def get_feature_model(conf: Dict[str, Dict[str, Any]]): A feature extraction model instance. """ Model = dynamic_load(extractors, conf["model"]["name"]) - model = Model(conf["model"]).eval().to(device) + model = Model(conf["model"]).eval().to(DEVICE) return model @@ -423,7 +422,7 @@ def _filter_matches_poselib( elif geometry_type == "Fundamental": M, info = poselib.estimate_fundamental(kp0, kp1, ransac_options) else: - raise notImplementedError("Not Implemented") + raise NotImplementedError return M, np.array(info["inliers"]) @@ -464,7 +463,7 @@ def proc_ransac_matches( geometry_type, ) else: - raise notImplementedError("Not Implemented") + raise NotImplementedError def filter_matches( @@ -617,7 +616,9 @@ def compute_geometry( geo_info["H1"] = H1.tolist() geo_info["H2"] = H2.tolist() except cv2.error as e: - logger.error(f"StereoRectifyUncalibrated failed, skip!") + logger.error( + f"StereoRectifyUncalibrated failed, skip! 
error: {e}" + ) return geo_info else: return {} @@ -643,7 +644,6 @@ def wrap_images( """ h0, w0, _ = img0.shape h1, w1, _ = img1.shape - result_matrix: Optional[np.ndarray] = None if geo_info is not None and len(geo_info) != 0: rectified_image0 = img0 rectified_image1 = None @@ -656,7 +656,6 @@ def wrap_images( title: List[str] = [] if geom_type == "Homography": rectified_image1 = cv2.warpPerspective(img1, H, (w0, h0)) - result_matrix = H title = ["Image 0", "Image 1 - warped"] elif geom_type == "Fundamental": if geom_type not in geo_info: @@ -666,7 +665,6 @@ def wrap_images( H1, H2 = np.array(geo_info["H1"]), np.array(geo_info["H2"]) rectified_image0 = cv2.warpPerspective(img0, H1, (w0, h0)) rectified_image1 = cv2.warpPerspective(img1, H2, (w1, h1)) - result_matrix = np.array(geo_info["Fundamental"]) title = ["Image 0 - warped", "Image 1 - warped"] else: print("Error: Unknown geometry type") @@ -705,7 +703,7 @@ def generate_warp_images( ): return None, None geom_info = matches_info["geom_info"] - wrapped_images = None + warped_image = None if choice != "No": wrapped_image_pair, warped_image = wrap_images( input_image0, input_image1, geom_info, choice @@ -805,7 +803,7 @@ def run_ransac( with open(tmp_state_cache, "wb") as f: pickle.dump(state_cache, f) - logger.info(f"Dump results done!") + logger.info("Dump results done!") return ( output_matches_ransac, @@ -880,7 +878,7 @@ def run_matching( output_matches_ransac = None # super slow! - if "roma" in key.lower() and device == "cpu": + if "roma" in key.lower() and DEVICE == "cpu": gr.Info( f"Success! Please be patient and allow for about 2-3 minutes." f" Due to CPU inference, {key} is quiet slow." @@ -905,7 +903,7 @@ def run_matching( if model["dense"]: pred = match_dense.match_images( - matcher, image0, image1, match_conf["preprocessing"], device=device + matcher, image0, image1, match_conf["preprocessing"], device=DEVICE ) del matcher extract_conf = None @@ -1000,7 +998,7 @@ def run_matching( tmp_state_cache = "output.pkl" with open(tmp_state_cache, "wb") as f: pickle.dump(state_cache, f) - logger.info(f"Dump results done!") + logger.info("Dump results done!") return ( output_keypoints, output_matches_raw, diff --git a/common/viz.py b/common/viz.py index 53103bd63d0d96b771a6177ebc98ef91fdb0367b..e7fb8fb23644910042c7c5cd582745054d7523f5 100644 --- a/common/viz.py +++ b/common/viz.py @@ -1,11 +1,13 @@ -import cv2 import typing +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import cv2 import matplotlib +import matplotlib.pyplot as plt import numpy as np import seaborn as sns -import matplotlib.pyplot as plt -from pathlib import Path -from typing import Dict, Any, Optional, Tuple, List, Union + from hloc.utils.viz import add_text, plot_keypoints diff --git a/docker/build_docker.bat b/docker/build_docker.bat new file mode 100644 index 0000000000000000000000000000000000000000..9f3fc687e1185de2866a1dbe221599549abdbce8 --- /dev/null +++ b/docker/build_docker.bat @@ -0,0 +1,3 @@ +docker build -t image-matching-webui:latest . 
--no-cache +# docker tag image-matching-webui:latest vincentqin/image-matching-webui:latest +# docker push vincentqin/image-matching-webui:latest diff --git a/run_docker.sh b/docker/run_docker.bat old mode 100755 new mode 100644 similarity index 100% rename from run_docker.sh rename to docker/run_docker.bat diff --git a/docker/run_docker.sh b/docker/run_docker.sh new file mode 100644 index 0000000000000000000000000000000000000000..da7686293c14465f0899c4b022f89fcc03db93b3 --- /dev/null +++ b/docker/run_docker.sh @@ -0,0 +1 @@ +docker run -it -p 7860:7860 vincentqin/image-matching-webui:latest python app.py --server_name "0.0.0.0" --server_port=7860 diff --git a/env-docker.txt b/env-docker.txt deleted file mode 100644 index 794a2db5d10e9637dee25ddd1f3c09588c31fff5..0000000000000000000000000000000000000000 --- a/env-docker.txt +++ /dev/null @@ -1,33 +0,0 @@ -e2cnn==0.2.3 -einops==0.6.1 -gdown==4.7.1 -gradio==4.28.3 -gradio_client==0.16.0 -h5py==3.9.0 -imageio==2.31.1 -Jinja2==3.1.2 -kornia==0.6.12 -loguru==0.7.0 -matplotlib==3.7.1 -numpy==1.23.5 -omegaconf==2.3.0 -opencv-contrib-python==4.6.0.66 -opencv-python==4.6.0.66 -pandas==2.0.3 -plotly==5.15.0 -protobuf==4.23.2 -pycolmap==0.5.0 -pytlsd==0.0.2 -pytorch-lightning==1.4.9 -PyYAML==6.0 -scikit-image==0.21.0 -scikit-learn==1.2.2 -scipy==1.11.1 -seaborn==0.12.2 -shapely==2.0.1 -tensorboardX==2.6.1 -torchmetrics==0.6.0 -torchvision==0.17.1 -tqdm==4.65.0 -yacs==0.1.8 -onnxruntime \ No newline at end of file diff --git a/format.sh b/format.sh new file mode 100644 index 0000000000000000000000000000000000000000..278a456265a1878cb68457fae0db38fda7116775 --- /dev/null +++ b/format.sh @@ -0,0 +1,3 @@ +python -m flake8 common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py +python -m isort common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py +python -m black common/*.py hloc/*.py hloc/matchers/*.py hloc/extractors/*.py \ No newline at end of file diff --git a/hloc/__init__.py b/hloc/__init__.py index 172b949b16c98b5e3eb9765d64fabd56906b38cf..7c2e3dd4a877ca73db43cf0181aa8e6193daa2e8 100644 --- a/hloc/__init__.py +++ b/hloc/__init__.py @@ -1,4 +1,5 @@ import logging + import torch from packaging import version diff --git a/hloc/extract_features.py b/hloc/extract_features.py index cd0f2ba932e3a8ff6a50655f8a6349dfd9f9e892..1e66f03f970adf2e3cd4cf057ff59af634528ad1 100644 --- a/hloc/extract_features.py +++ b/hloc/extract_features.py @@ -1,21 +1,22 @@ import argparse -import torch +import collections.abc as collections +import pprint from pathlib import Path -from typing import Dict, List, Union, Optional -import h5py from types import SimpleNamespace +from typing import Dict, List, Optional, Union + import cv2 +import h5py import numpy as np -from tqdm import tqdm -import pprint -import collections.abc as collections import PIL.Image +import torch import torchvision.transforms.functional as F +from tqdm import tqdm + from . 
import extractors, logger from .utils.base_model import dynamic_load +from .utils.io import list_h5_names, read_image from .utils.parsers import parse_image_lists -from .utils.io import read_image, list_h5_names - """ A set of standard configurations that can be directly selected from the command @@ -290,6 +291,23 @@ confs = { "dfactor": 8, }, }, + "sfd2": { + "output": "feats-sfd2-n4096-r1600", + "model": { + "name": "sfd2", + "max_keypoints": 4096, + }, + "preprocessing": { + "grayscale": False, + "force_resize": True, + "resize_max": 1600, + "width": 640, + "height": 480, + "conf_th": 0.001, + "multiscale": False, + "scales": [1.0], + }, + }, # Global descriptors "dir": { "output": "global-feats-dir", @@ -460,7 +478,7 @@ def extract(model, image_0, conf): # image0 = image_0[:, :, ::-1] # BGR to RGB data = preprocess(image0, conf) pred = model({"image": data["image"]}) - pred["image_size"] = original_size = data["original_size"] + pred["image_size"] = data["original_size"] pred = {**pred, **data} return pred diff --git a/hloc/extractors/alike.py b/hloc/extractors/alike.py index a61142da8fab182845d62c9d090b34f79054fed4..2f6ae550c443551f44baba44cc4612e9a1f048cc 100644 --- a/hloc/extractors/alike.py +++ b/hloc/extractors/alike.py @@ -1,10 +1,12 @@ import sys from pathlib import Path + import torch -from ..utils.base_model import BaseModel from hloc import logger +from ..utils.base_model import BaseModel + alike_path = Path(__file__).parent / "../../third_party/ALIKE" sys.path.append(str(alike_path)) from alike import ALike as Alike_ @@ -34,7 +36,7 @@ class Alike(BaseModel): scores_th=conf["detection_threshold"], n_limit=conf["max_keypoints"], ) - logger.info(f"Load Alike model done.") + logger.info("Load Alike model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/d2net.py b/hloc/extractors/d2net.py index 624e31f9539b97e925ec57e20f78136bac6b5b62..3f92437714dcf63b1f81fa28ee86e5d3d1a9cddb 100644 --- a/hloc/extractors/d2net.py +++ b/hloc/extractors/d2net.py @@ -1,17 +1,17 @@ +import subprocess import sys from pathlib import Path -import subprocess + import torch -from ..utils.base_model import BaseModel from hloc import logger -d2net_path = Path(__file__).parent / "../../third_party" -sys.path.append(str(d2net_path)) -from d2net.lib.model_test import D2Net as _D2Net -from d2net.lib.pyramid import process_multiscale +from ..utils.base_model import BaseModel d2net_path = Path(__file__).parent / "../../third_party/d2net" +sys.path.append(str(d2net_path)) +from lib.model_test import D2Net as _D2Net +from lib.pyramid import process_multiscale class D2Net(BaseModel): @@ -30,6 +30,7 @@ class D2Net(BaseModel): model_file.parent.mkdir(exist_ok=True) cmd = [ "wget", + "--quiet", "https://dusmanu.com/files/d2-net/" + conf["model_name"], "-O", str(model_file), @@ -39,7 +40,7 @@ class D2Net(BaseModel): self.net = _D2Net( model_file=model_file, use_relu=conf["use_relu"], use_cuda=False ) - logger.info(f"Load D2Net model done.") + logger.info("Load D2Net model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/darkfeat.py b/hloc/extractors/darkfeat.py index 2e461e7b0e94587b10fd03772cd5b6291215ac9e..b596ac007fa692dce6fccb00bf46aa1b8dcc12f9 100644 --- a/hloc/extractors/darkfeat.py +++ b/hloc/extractors/darkfeat.py @@ -1,9 +1,12 @@ import sys from pathlib import Path + import subprocess -from ..utils.base_model import BaseModel + from hloc import logger +from ..utils.base_model import BaseModel + darkfeat_path = Path(__file__).parent / 
"../../third_party/DarkFeat" sys.path.append(str(darkfeat_path)) from darkfeat import DarkFeat as DarkFeat_ @@ -43,7 +46,7 @@ class DarkFeat(BaseModel): raise e self.net = DarkFeat_(model_path) - logger.info(f"Load DarkFeat model done.") + logger.info("Load DarkFeat model done.") def _forward(self, data): pred = self.net({"image": data["image"]}) diff --git a/hloc/extractors/dedode.py b/hloc/extractors/dedode.py index 6e700c5b9ed0487cd4e4794fd9efb446a96f5d24..a1d7130a1d6c0db65fbb2e1e40fddb90bc2e3096 100644 --- a/hloc/extractors/dedode.py +++ b/hloc/extractors/dedode.py @@ -1,16 +1,18 @@ +import subprocess import sys from pathlib import Path -import subprocess + import torch -from PIL import Image -from ..utils.base_model import BaseModel -from hloc import logger import torchvision.transforms as transforms +from hloc import logger + +from ..utils.base_model import BaseModel + dedode_path = Path(__file__).parent / "../../third_party/DeDoDe" sys.path.append(str(dedode_path)) -from DeDoDe import dedode_detector_L, dedode_descriptor_B +from DeDoDe import dedode_descriptor_B, dedode_detector_L from DeDoDe.utils import to_pixel_coords device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -49,14 +51,14 @@ class DeDoDe(BaseModel): if not model_detector_path.exists(): model_detector_path.parent.mkdir(exist_ok=True) link = self.weight_urls[conf["model_detector_name"]] - cmd = ["wget", link, "-O", str(model_detector_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_detector_path)] logger.info(f"Downloading the DeDoDe detector model with `{cmd}`.") subprocess.run(cmd, check=True) if not model_descriptor_path.exists(): model_descriptor_path.parent.mkdir(exist_ok=True) link = self.weight_urls[conf["model_descriptor_name"]] - cmd = ["wget", link, "-O", str(model_descriptor_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_descriptor_path)] logger.info( f"Downloading the DeDoDe descriptor model with `{cmd}`." 
) @@ -73,7 +75,7 @@ class DeDoDe(BaseModel): self.descriptor = dedode_descriptor_B( weights=weights_descriptor, device=device ) - logger.info(f"Load DeDoDe model done.") + logger.info("Load DeDoDe model done.") def _forward(self, data): """ diff --git a/hloc/extractors/dir.py b/hloc/extractors/dir.py index d1fa39e45e68355c3f06accfc6327d0ab50cf999..2d47256b1a4f2d74a99fc0320293ba1b3bf88bb4 100644 --- a/hloc/extractors/dir.py +++ b/hloc/extractors/dir.py @@ -1,10 +1,11 @@ +import os import sys from pathlib import Path -import torch from zipfile import ZipFile -import os -import sklearn + import gdown +import sklearn +import torch from ..utils.base_model import BaseModel @@ -13,8 +14,8 @@ sys.path.append( ) os.environ["DB_ROOT"] = "" # required by dirtorch -from dirtorch.utils import common # noqa: E402 from dirtorch.extract_features import load_model # noqa: E402 +from dirtorch.utils import common # noqa: E402 # The DIR model checkpoints (pickle files) include sklearn.decomposition.pca, # which has been deprecated in sklearn v0.24 diff --git a/hloc/extractors/disk.py b/hloc/extractors/disk.py index 8a9aca79c8efd2725b5aff77672841fbe3e85802..d8d512e5203fcf6675d9e904a79a7542ecee0bba 100644 --- a/hloc/extractors/disk.py +++ b/hloc/extractors/disk.py @@ -15,7 +15,7 @@ class DISK(BaseModel): def _init(self, conf): self.model = kornia.feature.DISK.from_pretrained(conf["weights"]) - logger.info(f"Load DISK model done.") + logger.info("Load DISK model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/dog.py b/hloc/extractors/dog.py index aff261039136e008816baeb49c60e5bb0635ea8b..b280bbc42376f3af827002bb85ff4996ccdf50b4 100644 --- a/hloc/extractors/dog.py +++ b/hloc/extractors/dog.py @@ -1,15 +1,14 @@ import kornia +import numpy as np +import pycolmap +import torch from kornia.feature.laf import ( - laf_from_center_scale_ori, extract_patches_from_pyramid, + laf_from_center_scale_ori, ) -import numpy as np -import torch -import pycolmap from ..utils.base_model import BaseModel - EPS = 1e-6 diff --git a/hloc/extractors/example.py b/hloc/extractors/example.py index 9992eff12d207d2379f47db9e1859cb79a8bf6b3..3d952c4014e006d74409a8f32ee7159d58305de5 100644 --- a/hloc/extractors/example.py +++ b/hloc/extractors/example.py @@ -1,9 +1,9 @@ import sys from pathlib import Path -import subprocess + import torch -from .. import logger +from .. 
import logger from ..utils.base_model import BaseModel example_path = Path(__file__).parent / "../../third_party/example" @@ -35,7 +35,7 @@ class Example(BaseModel): # self.net = ExampleNet(is_test=True) state_dict = torch.load(model_path, map_location="cpu") self.net.load_state_dict(state_dict["model_state"]) - logger.info(f"Load example model done.") + logger.info("Load example model done.") def _forward(self, data): # data: dict, keys: 'image' diff --git a/hloc/extractors/fire.py b/hloc/extractors/fire.py index d677f421dc3e0014dab4e5ca017c1788a7dff121..980f18e63d1a395835891c8e6595cfc66c21db2d 100644 --- a/hloc/extractors/fire.py +++ b/hloc/extractors/fire.py @@ -1,7 +1,8 @@ -from pathlib import Path -import subprocess import logging +import subprocess import sys +from pathlib import Path + import torch import torchvision.transforms as tvf @@ -42,11 +43,11 @@ class FIRe(BaseModel): if not model_path.exists(): model_path.parent.mkdir(exist_ok=True) link = self.fire_models[conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_path)] logger.info(f"Downloading the FIRe model with `{cmd}`.") subprocess.run(cmd, check=True) - logger.info(f"Loading fire model...") + logger.info("Loading fire model...") # Load net state = torch.load(model_path) diff --git a/hloc/extractors/fire_local.py b/hloc/extractors/fire_local.py index b66ea57428e444237c6a0f7207e3c0d10ed48be8..a8e9ba9f4c3d86280e8232f61263b729ccb933be 100644 --- a/hloc/extractors/fire_local.py +++ b/hloc/extractors/fire_local.py @@ -1,11 +1,12 @@ -from pathlib import Path import subprocess import sys +from pathlib import Path + import torch import torchvision.transforms as tvf -from ..utils.base_model import BaseModel from .. import logger +from ..utils.base_model import BaseModel fire_path = Path(__file__).parent / "../../third_party/fire" @@ -13,10 +14,6 @@ sys.path.append(str(fire_path)) import fire_network -from lib.how.how.stages.evaluate import eval_asmk_fire, load_dataset_fire - -from lib.asmk import asmk -from asmk import io_helpers, asmk_method, kernel as kern_pkg EPS = 1e-6 @@ -44,18 +41,18 @@ class FIRe(BaseModel): # Config paths model_path = fire_path / "model" / conf["model_name"] - config_path = fire_path / conf["config_name"] - asmk_bin_path = fire_path / "model" / conf["asmk_name"] + config_path = fire_path / conf["config_name"] # noqa: F841 + asmk_bin_path = fire_path / "model" / conf["asmk_name"] # noqa: F841 # Download the model. 
if not model_path.exists(): model_path.parent.mkdir(exist_ok=True) link = self.fire_models[conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_path)] logger.info(f"Downloading the FIRe model with `{cmd}`.") subprocess.run(cmd, check=True) - logger.info(f"Loading fire model...") + logger.info("Loading fire model...") # Load net state = torch.load(model_path) diff --git a/hloc/extractors/lanet.py b/hloc/extractors/lanet.py index 42700a066acf9b53e4f8bd9227b2746197089fa4..6d0d3b2e7b9037c0af0628a4571a7cf6fe0d59f1 100644 --- a/hloc/extractors/lanet.py +++ b/hloc/extractors/lanet.py @@ -1,14 +1,17 @@ +import subprocess import sys from pathlib import Path -import subprocess + import torch +from hloc import logger from ..utils.base_model import BaseModel -from hloc import logger + +lib_path = Path(__file__).parent / "../../third_party" +sys.path.append(str(lib_path)) +from lanet.network_v0.model import PointModel lanet_path = Path(__file__).parent / "../../third_party/lanet" -sys.path.append(str(lanet_path)) -from network_v0.model import PointModel device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -26,11 +29,11 @@ class LANet(BaseModel): lanet_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth' ) if not model_path.exists(): - print(f"No model found at {model_path}") + logger.warning(f"No model found at {model_path}, start downloading") self.net = PointModel(is_test=True) state_dict = torch.load(model_path, map_location="cpu") self.net.load_state_dict(state_dict["model_state"]) - logger.info(f"Load LANet model done.") + logger.info("Load LANet model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/netvlad.py b/hloc/extractors/netvlad.py index 81bc63bc74bc51c1d8de55ee6393bc3371fc7657..c7938820d0ea0c84b738ef5564aa1dbad5532236 100644 --- a/hloc/extractors/netvlad.py +++ b/hloc/extractors/netvlad.py @@ -1,5 +1,6 @@ -from pathlib import Path import subprocess +from pathlib import Path + import numpy as np import torch import torch.nn as nn @@ -7,8 +8,8 @@ import torch.nn.functional as F import torchvision.models as models from scipy.io import loadmat -from ..utils.base_model import BaseModel from .. 
import logger +from ..utils.base_model import BaseModel EPS = 1e-6 @@ -60,7 +61,7 @@ class NetVLAD(BaseModel): if not checkpoint.exists(): checkpoint.parent.mkdir(exist_ok=True, parents=True) link = self.dir_models[conf["model_name"]] - cmd = ["wget", link, "-O", str(checkpoint)] + cmd = ["wget", "--quiet", link, "-O", str(checkpoint)] logger.info(f"Downloading the NetVLAD model with `{cmd}`.") subprocess.run(cmd, check=True) diff --git a/hloc/extractors/r2d2.py b/hloc/extractors/r2d2.py index 0ced44e420278ed080e59bdfe242097ac55f0302..359d89c96a5590764bae0604989c2d738c814bd9 100644 --- a/hloc/extractors/r2d2.py +++ b/hloc/extractors/r2d2.py @@ -1,14 +1,15 @@ import sys from pathlib import Path + import torchvision.transforms as tvf -from ..utils.base_model import BaseModel from hloc import logger -base_path = Path(__file__).parent / "../../third_party" -sys.path.append(str(base_path)) +from ..utils.base_model import BaseModel + r2d2_path = Path(__file__).parent / "../../third_party/r2d2" -from r2d2.extract import load_network, NonMaxSuppression, extract_multiscale +sys.path.append(str(r2d2_path)) +from extract import NonMaxSuppression, extract_multiscale, load_network class R2D2(BaseModel): @@ -35,7 +36,7 @@ class R2D2(BaseModel): rel_thr=conf["reliability_threshold"], rep_thr=conf["repetability_threshold"], ) - logger.info(f"Load R2D2 model done.") + logger.info("Load R2D2 model done.") def _forward(self, data): img = data["image"] diff --git a/hloc/extractors/rekd.py b/hloc/extractors/rekd.py index e3816dbbc2d909bed5d73e1547e6feeaee0bfdc2..c4fbb5fd583d0371c1dba900c5e3719391bed3e0 100644 --- a/hloc/extractors/rekd.py +++ b/hloc/extractors/rekd.py @@ -1,11 +1,12 @@ import sys from pathlib import Path -import subprocess + import torch -from ..utils.base_model import BaseModel from hloc import logger +from ..utils.base_model import BaseModel + rekd_path = Path(__file__).parent / "../../third_party" sys.path.append(str(rekd_path)) from REKD.training.model.REKD import REKD as REKD_ @@ -29,7 +30,7 @@ class REKD(BaseModel): self.net = REKD_(is_test=True) state_dict = torch.load(model_path, map_location="cpu") self.net.load_state_dict(state_dict["model_state"]) - logger.info(f"Load REKD model done.") + logger.info("Load REKD model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/rord.py b/hloc/extractors/rord.py index d7c3f360ab9af1312db38ad04c6bb0fb5392337e..cfc48e659bda7b9f3a88b3518aed887dd9710a05 100644 --- a/hloc/extractors/rord.py +++ b/hloc/extractors/rord.py @@ -3,9 +3,10 @@ from pathlib import Path import subprocess import torch -from ..utils.base_model import BaseModel from hloc import logger +from ..utils.base_model import BaseModel + rord_path = Path(__file__).parent / "../../third_party" sys.path.append(str(rord_path)) from RoRD.lib.model_test import D2Net as _RoRD @@ -42,11 +43,10 @@ class RoRD(BaseModel): subprocess.run(cmd, check=True) except subprocess.CalledProcessError as e: logger.error(f"Failed to download the RoRD model.") - raise e self.net = _RoRD( model_file=model_path, use_relu=conf["use_relu"], use_cuda=False ) - logger.info(f"Load RoRD model done.") + logger.info("Load RoRD model done.") def _forward(self, data): image = data["image"] diff --git a/hloc/extractors/sfd2.py b/hloc/extractors/sfd2.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb76eddd29347be56be162afc346b0ab9bb934a --- /dev/null +++ b/hloc/extractors/sfd2.py @@ -0,0 +1,43 @@ +# -*- coding: UTF-8 -*- +import sys +from pathlib import Path + 
+import torchvision.transforms as tvf + +from .. import logger +from ..utils.base_model import BaseModel + +pram_path = Path(__file__).parent / "../../third_party/pram" +sys.path.append(str(pram_path)) + +from nets.sfd2 import load_sfd2 + + +class SFD2(BaseModel): + default_conf = { + "max_keypoints": 4096, + "model_name": "sfd2_20230511_210205_resnet4x.79.pth", + "conf_th": 0.001, + } + required_inputs = ["image"] + + def _init(self, conf): + self.conf = {**self.default_conf, **conf} + self.norm_rgb = tvf.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + model_fn = pram_path / "weights" / self.conf["model_name"] + self.net = load_sfd2(weight_path=model_fn).eval() + + logger.info("Load SFD2 model done.") + + def _forward(self, data): + pred = self.net.extract_local_global( + data={"image": self.norm_rgb(data["image"])}, config=self.conf + ) + out = { + "keypoints": pred["keypoints"][0][None], + "scores": pred["scores"][0][None], + "descriptors": pred["descriptors"][0][None], + } + return out diff --git a/hloc/extractors/sift.py b/hloc/extractors/sift.py index ddf507954981f2bae5d2a88bade604faa482e3a6..09576f98355595ea1c8e0105bac98887a320b675 100644 --- a/hloc/extractors/sift.py +++ b/hloc/extractors/sift.py @@ -4,14 +4,15 @@ import cv2 import numpy as np import torch from kornia.color import rgb_to_grayscale -from packaging import version from omegaconf import OmegaConf +from packaging import version try: import pycolmap except ImportError: pycolmap = None from hloc import logger + from ..utils.base_model import BaseModel @@ -140,7 +141,7 @@ class SIFT(BaseModel): f"Unknown backend: {backend} not in " f"{{{','.join(backends)}}}." ) - logger.info(f"Load SIFT model done.") + logger.info("Load SIFT model done.") def extract_single_image(self, image: torch.Tensor): image_np = image.cpu().numpy().squeeze(0) diff --git a/hloc/extractors/superpoint.py b/hloc/extractors/superpoint.py index 9be16bf9cb1e8c14102a31bce0960ea59d9c3f9f..ee618392ae9d976b40d1c43a6628892a09d993fd 100644 --- a/hloc/extractors/superpoint.py +++ b/hloc/extractors/superpoint.py @@ -1,10 +1,12 @@ import sys from pathlib import Path + import torch -from ..utils.base_model import BaseModel from hloc import logger +from ..utils.base_model import BaseModel + sys.path.append(str(Path(__file__).parent / "../../third_party")) from SuperGluePretrainedNetwork.models import superpoint # noqa E402 @@ -43,7 +45,7 @@ class SuperPoint(BaseModel): if conf["fix_sampling"]: superpoint.sample_descriptors = sample_descriptors_fix_sampling self.net = superpoint.SuperPoint(conf) - logger.info(f"Load SuperPoint model done.") + logger.info("Load SuperPoint model done.") def _forward(self, data): return self.net(data, self.conf) diff --git a/hloc/extractors/xfeat.py b/hloc/extractors/xfeat.py index 3b9184bf686e64c2135342c10e9a86dd1115b0da..5dc230f247a79021db8b194ac5ce1d0ff7f37e89 100644 --- a/hloc/extractors/xfeat.py +++ b/hloc/extractors/xfeat.py @@ -1,6 +1,7 @@ import torch -from pathlib import Path + from hloc import logger + from ..utils.base_model import BaseModel @@ -18,7 +19,7 @@ class XFeat(BaseModel): pretrained=True, top_k=self.conf["max_keypoints"], ) - logger.info(f"Load XFeat(sparse) model done.") + logger.info("Load XFeat(sparse) model done.") def _forward(self, data): pred = self.net.detectAndCompute( diff --git a/hloc/match_dense.py b/hloc/match_dense.py index e47af662912d8c0362fc364a15de1d25a2e66cc9..ac95d937523adb3e1bd28b8a301517bda35028a6 100644 --- a/hloc/match_dense.py +++ b/hloc/match_dense.py @@ -1,9 +1,11 
@@ +from types import SimpleNamespace + +import cv2 import numpy as np import torch import torchvision.transforms.functional as F -from types import SimpleNamespace + from .extract_features import read_image, resize_image -import cv2 device = "cuda" if torch.cuda.is_available() else "cpu" diff --git a/hloc/match_features.py b/hloc/match_features.py index 28ba3a2ea264b49f6b6d263350461189bc800df9..2d4b0bd0cc3078789ac980177479c224129ad4cc 100644 --- a/hloc/match_features.py +++ b/hloc/match_features.py @@ -1,18 +1,19 @@ import argparse -from typing import Union, Optional, Dict, List, Tuple -from pathlib import Path import pprint +from functools import partial +from pathlib import Path from queue import Queue from threading import Thread -from functools import partial -from tqdm import tqdm +from typing import Dict, List, Optional, Tuple, Union + import h5py +import numpy as np import torch +from tqdm import tqdm -from . import matchers, logger +from . import logger, matchers from .utils.base_model import dynamic_load from .utils.parsers import names_to_pair, names_to_pair_old, parse_retrieval -import numpy as np """ A set of standard configurations that can be directly selected from the command @@ -162,6 +163,13 @@ confs = { "match_threshold": 0.2, }, }, + "imp": { + "output": "matches-imp", + "model": { + "name": "imp", + "match_threshold": 0.2, + }, + }, } diff --git a/hloc/matchers/adalam.py b/hloc/matchers/adalam.py index b51491ad7ed46a467811a793d819c7da8e63b3d4..7820428a5a087d0b5d6855de15c0230327ce7dc1 100644 --- a/hloc/matchers/adalam.py +++ b/hloc/matchers/adalam.py @@ -1,10 +1,9 @@ import torch - -from ..utils.base_model import BaseModel - from kornia.feature.adalam import AdalamFilter from kornia.utils.helpers import get_cuda_device_if_available +from ..utils.base_model import BaseModel + class AdaLAM(BaseModel): # See https://kornia.readthedocs.io/en/latest/_modules/kornia/feature/adalam/adalam.html. diff --git a/hloc/matchers/aspanformer.py b/hloc/matchers/aspanformer.py index c24209122ed14bcb34c098227e445b6461158e90..a24ba558921410e2a6b12f4ce3fd60885301be91 100644 --- a/hloc/matchers/aspanformer.py +++ b/hloc/matchers/aspanformer.py @@ -1,17 +1,16 @@ +import subprocess import sys -import torch -from ..utils.base_model import BaseModel -from ..utils import do_system from pathlib import Path -import subprocess + +import torch from .. 
import logger +from ..utils.base_model import BaseModel sys.path.append(str(Path(__file__).parent / "../../third_party")) from ASpanFormer.src.ASpanFormer.aspanformer import ASpanFormer as _ASpanFormer from ASpanFormer.src.config.default import get_cfg_defaults from ASpanFormer.src.utils.misc import lower_config -from ASpanFormer.demo import demo_utils aspanformer_path = Path(__file__).parent / "../../third_party/ASpanFormer" @@ -85,7 +84,7 @@ class ASpanFormer(BaseModel): "state_dict" ] self.net.load_state_dict(state_dict, strict=False) - logger.info(f"Loaded Aspanformer model") + logger.info("Loaded Aspanformer model") def _forward(self, data): data_ = { diff --git a/hloc/matchers/cotr.py b/hloc/matchers/cotr.py index bfb92d41cd98e900529ce2d61754f3c54f187e5c..44d74f642339133eea2da5beae15d1899e5920bc 100644 --- a/hloc/matchers/cotr.py +++ b/hloc/matchers/cotr.py @@ -1,19 +1,19 @@ -import sys import argparse -import torch -import warnings -import numpy as np +import sys from pathlib import Path + +import numpy as np +import torch from torchvision.transforms import ToPILImage + from ..utils.base_model import BaseModel sys.path.append(str(Path(__file__).parent / "../../third_party/COTR")) -from COTR.utils import utils as utils_cotr -from COTR.models import build_model -from COTR.options.options import * -from COTR.options.options_utils import * -from COTR.inference.inference_helper import triangulate_corr from COTR.inference.sparse_engine import SparseEngine +from COTR.models import build_model +from COTR.options.options import * # noqa: F403 +from COTR.options.options_utils import * # noqa: F403 +from COTR.utils import utils as utils_cotr utils_cotr.fix_randomness(0) torch.set_grad_enabled(False) @@ -33,7 +33,7 @@ class COTR(BaseModel): def _init(self, conf): parser = argparse.ArgumentParser() - set_COTR_arguments(parser) + set_COTR_arguments(parser) # noqa: F405 opt = parser.parse_args() opt.command = " ".join(sys.argv) opt.load_weights_path = str( diff --git a/hloc/matchers/dkm.py b/hloc/matchers/dkm.py index fc268a547e5503c904d9fb876ea2389193c139ed..f4d702e01500421f526d70a13e40d91d1f5f7096 100644 --- a/hloc/matchers/dkm.py +++ b/hloc/matchers/dkm.py @@ -1,10 +1,12 @@ +import subprocess import sys from pathlib import Path + import torch from PIL import Image -import subprocess -from ..utils.base_model import BaseModel + from .. 
import logger +from ..utils.base_model import BaseModel sys.path.append(str(Path(__file__).parent / "../../third_party")) from DKM.dkm import DKMv3_outdoor @@ -37,11 +39,11 @@ class DKMv3(BaseModel): if not model_path.exists(): model_path.parent.mkdir(exist_ok=True) link = self.dkm_models[conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_path)] logger.info(f"Downloading the DKMv3 model with `{cmd}`.") subprocess.run(cmd, check=True) self.net = DKMv3_outdoor(path_to_weights=str(model_path), device=device) - logger.info(f"Loading DKMv3 model done") + logger.info("Loading DKMv3 model done") def _forward(self, data): img0 = data["image0"].cpu().numpy().squeeze() * 255 diff --git a/hloc/matchers/dual_softmax.py b/hloc/matchers/dual_softmax.py index b42887c099998aaf9c48a32ee541f8637ea7d279..1c073ae66fdd064a27140e0cb566aa1d78ad2e6e 100644 --- a/hloc/matchers/dual_softmax.py +++ b/hloc/matchers/dual_softmax.py @@ -1,13 +1,13 @@ +import numpy as np import torch from ..utils.base_model import BaseModel -import numpy as np # borrow from dedode def dual_softmax_matcher( - desc_A: tuple["B", "C", "N"], - desc_B: tuple["B", "C", "M"], + desc_A: tuple["B", "C", "N"], # noqa: F821 + desc_B: tuple["B", "C", "M"], # noqa: F821 threshold=0.1, inv_temperature=20, normalize=True, diff --git a/hloc/matchers/duster.py b/hloc/matchers/duster.py index a9f15b82a231e3be70551cf2687347ac1cc35a0b..2243d8aad04ee52df61044a5a945ec55414860f9 100644 --- a/hloc/matchers/duster.py +++ b/hloc/matchers/duster.py @@ -1,21 +1,22 @@ import os import sys -import torch -from pathlib import Path -import torchvision.transforms as tfm -import torch.nn.functional as F import urllib.request +from pathlib import Path + import numpy as np -from ..utils.base_model import BaseModel +import torch +import torchvision.transforms as tfm + from .. import logger +from ..utils.base_model import BaseModel duster_path = Path(__file__).parent / "../../third_party/dust3r" sys.path.append(str(duster_path)) -from dust3r.inference import inference -from dust3r.model import load_model, AsymmetricCroCo3DStereo +from dust3r.cloud_opt import GlobalAlignerMode, global_aligner from dust3r.image_pairs import make_pairs -from dust3r.cloud_opt import global_aligner, GlobalAlignerMode +from dust3r.inference import inference +from dust3r.model import AsymmetricCroCo3DStereo from dust3r.utils.geometry import find_reciprocal_matches, xy_grid device = torch.device("cuda" if torch.cuda.is_available() else "cpu") @@ -38,7 +39,7 @@ class Duster(BaseModel): self.model_path # "naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt" ).to(device) - logger.info(f"Loaded Dust3r model") + logger.info("Loaded Dust3r model") def download_weights(self): url = "https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth" diff --git a/hloc/matchers/gim.py b/hloc/matchers/gim.py index 61c7d899ccae4a1fea81a7540f3f3eef1b7651cd..d0ccffa145314570a9419af3fd021740916d177f 100644 --- a/hloc/matchers/gim.py +++ b/hloc/matchers/gim.py @@ -1,11 +1,12 @@ -import os -import sys -import torch import subprocess -import gdown +import sys from pathlib import Path -from ..utils.base_model import BaseModel + +import gdown +import torch + from .. 
import logger +from ..utils.base_model import BaseModel gim_path = Path(__file__).parent / "../../third_party/gim" sys.path.append(str(gim_path)) @@ -43,9 +44,9 @@ class GIM(BaseModel): if "drive.google.com" in model_link: gdown.download(model_link, output=str(model_path), fuzzy=True) else: - cmd = ["wget", model_link, "-O", str(model_path)] + cmd = ["wget", "--quiet", model_link, "-O", str(model_path)] subprocess.run(cmd, check=True) - logger.info(f"Downloaded GIM model succeeed!") + logger.info("Downloaded GIM model succeeed!") self.aspect_ratio = 896 / 672 model = DKMv3(None, 672, 896, upsample_preds=True) @@ -60,7 +61,7 @@ class GIM(BaseModel): model.load_state_dict(state_dict) self.net = model - logger.info(f"Loaded GIM model") + logger.info("Loaded GIM model") def pad_image(self, image, aspect_ratio): new_width = max(image.shape[3], int(image.shape[2] * aspect_ratio)) diff --git a/hloc/matchers/gluestick.py b/hloc/matchers/gluestick.py index 093ba3665c95ac881ae22682497fb5af5722a55b..b14614e23f58fd9d1bcb9a39d73a18d5d12ee6df 100644 --- a/hloc/matchers/gluestick.py +++ b/hloc/matchers/gluestick.py @@ -1,9 +1,11 @@ +import subprocess import sys from pathlib import Path -import subprocess + import torch -from ..utils.base_model import BaseModel + from .. import logger +from ..utils.base_model import BaseModel gluestick_path = Path(__file__).parent / "../../third_party/GlueStick" sys.path.append(str(gluestick_path)) @@ -42,10 +44,10 @@ class GlueStick(BaseModel): if not model_path.exists(): model_path.parent.mkdir(exist_ok=True) link = self.gluestick_models[conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_path)] logger.info(f"Downloading the Gluestick model with `{cmd}`.") subprocess.run(cmd, check=True) - logger.info(f"Loading GlueStick model...") + logger.info("Loading GlueStick model...") gluestick_conf = { "name": "two_view_pipeline", diff --git a/hloc/matchers/imp.py b/hloc/matchers/imp.py new file mode 100644 index 0000000000000000000000000000000000000000..b79e21af20942e7ac27e932d65b3828994c5d8fa --- /dev/null +++ b/hloc/matchers/imp.py @@ -0,0 +1,47 @@ +# -*- coding: UTF-8 -*- +import sys +from pathlib import Path + +import torch + +from .. 
import device, logger +from ..utils.base_model import BaseModel + +pram_path = Path(__file__).parent / "../../third_party/pram" +sys.path.append(str(pram_path)) + +from nets.gml import GML + + +class IMP(BaseModel): + default_conf = { + "match_threshold": 0.2, + "features": "sfd2", + "model_name": "imp_gml.920.pth", + "sinkhorn_iterations": 20, + } + required_inputs = [ + "image0", + "keypoints0", + "scores0", + "descriptors0", + "image1", + "keypoints1", + "scores1", + "descriptors1", + ] + + def _init(self, conf): + self.conf = {**self.default_conf, **conf} + weight_path = pram_path / "weights" / self.conf["model_name"] + self.net = GML(self.conf).eval().to(device) + self.net.load_state_dict( + torch.load(weight_path, map_location="cpu")["model"], strict=True + ) + logger.info("Load IMP model done.") + + def _forward(self, data): + data["descriptors0"] = data["descriptors0"].transpose(2, 1).float() + data["descriptors1"] = data["descriptors1"].transpose(2, 1).float() + + return self.net.produce_matches(data, p=0.2) diff --git a/hloc/matchers/lightglue.py b/hloc/matchers/lightglue.py index 3cfbbcf278709af26d9ffd03af3d7b0f291414a8..4a36be64b4e4dbe95d45bb1f52c869fe067de58f 100644 --- a/hloc/matchers/lightglue.py +++ b/hloc/matchers/lightglue.py @@ -1,7 +1,8 @@ import sys from pathlib import Path -from ..utils.base_model import BaseModel + from .. import logger +from ..utils.base_model import BaseModel lightglue_path = Path(__file__).parent / "../../third_party/LightGlue" sys.path.append(str(lightglue_path)) @@ -36,7 +37,7 @@ class LightGlue(BaseModel): conf["weights"] = str(weight_path) conf["filter_threshold"] = conf["match_threshold"] self.net = LG(**conf) - logger.info(f"Load lightglue model done.") + logger.info("Load lightglue model done.") def _forward(self, data): input = {} diff --git a/hloc/matchers/loftr.py b/hloc/matchers/loftr.py index fe7f6f372f6e74508edd3bf3603190b4adbccf3a..a1405b7073a80ab946ec8d724642a8f8ab9de9ba 100644 --- a/hloc/matchers/loftr.py +++ b/hloc/matchers/loftr.py @@ -1,8 +1,11 @@ -import torch import warnings -from kornia.feature.loftr.loftr import default_cfg + +import torch from kornia.feature import LoFTR as LoFTR_ +from kornia.feature.loftr.loftr import default_cfg + from hloc import logger + from ..utils.base_model import BaseModel diff --git a/hloc/matchers/mickey.py b/hloc/matchers/mickey.py new file mode 100644 index 0000000000000000000000000000000000000000..3d60ff5f229ba31a0922406fe54c0588fd6a4273 --- /dev/null +++ b/hloc/matchers/mickey.py @@ -0,0 +1,67 @@ +import subprocess +import sys +from pathlib import Path + +import torch + +from .. import logger +from ..utils.base_model import BaseModel + +mickey_path = Path(__file__).parent / "../../third_party" +sys.path.append(str(mickey_path)) + +from mickey.config.default import cfg +from mickey.lib.models.builder import build_model + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +class Mickey(BaseModel): + default_conf = { + "config_path": "config.yaml", + "model_name": "mickey.ckpt", + "max_keypoints": 3000, + } + required_inputs = [ + "image0", + "image1", + ] + weight_urls = "https://storage.googleapis.com/niantic-lon-static/research/mickey/assets/mickey_weights.zip" + + # Initialize the line matcher + def _init(self, conf): + model_path = mickey_path / "mickey/mickey_weights" / conf["model_name"] + zip_path = mickey_path / "mickey/mickey_weights.zip" + config_path = model_path.parent / self.conf["config_path"] + # Download the model. 
+ if not model_path.exists(): + model_path.parent.mkdir(exist_ok=True, parents=True) + link = self.weight_urls + if not zip_path.exists(): + cmd = ["wget", "--quiet", link, "-O", str(zip_path)] + logger.info(f"Downloading the Mickey model with {cmd}.") + subprocess.run(cmd, check=True) + cmd = ["unzip", "-d", str(model_path.parent.parent), str(zip_path)] + logger.info(f"Running {cmd}.") + subprocess.run(cmd, check=True) + + logger.info("Loading mickey model...") + cfg.merge_from_file(config_path) + self.net = build_model(cfg, checkpoint=model_path) + logger.info("Load Mickey model done.") + + def _forward(self, data): + # data['K_color0'] = torch.from_numpy(K['im0.jpg']).unsqueeze(0).to(device) + # data['K_color1'] = torch.from_numpy(K['im1.jpg']).unsqueeze(0).to(device) + pred = self.net(data) + pred = { + **pred, + **data, + } + inliers = data["inliers_list"] + pred = { + "keypoints0": inliers[:, :2], + "keypoints1": inliers[:, 2:4], + } + + return pred diff --git a/hloc/matchers/omniglue.py b/hloc/matchers/omniglue.py index abf09a5b03ce18b68661acbce4ad93f4c5a03b6a..c02d7f35f10706565d109987b12daa166703113e 100644 --- a/hloc/matchers/omniglue.py +++ b/hloc/matchers/omniglue.py @@ -1,9 +1,10 @@ -import sys -import torch import subprocess -import numpy as np +import sys from pathlib import Path +import numpy as np +import torch + from .. import logger from ..utils.base_model import BaseModel @@ -25,7 +26,7 @@ class OmniGlue(BaseModel): } def _init(self, conf): - logger.info(f"Loading OmniGlue model") + logger.info("Loading OmniGlue model") og_model_path = omniglue_path / "models" / "omniglue.onnx" sp_model_path = omniglue_path / "models" / "sp_v6.onnx" dino_model_path = ( @@ -34,7 +35,7 @@ class OmniGlue(BaseModel): if not dino_model_path.exists(): link = self.dino_v2_link_dict.get(dino_model_path.name, None) if link is not None: - cmd = ["wget", link, "-O", str(dino_model_path)] + cmd = ["wget", "--quiet", link, "-O", str(dino_model_path)] logger.info(f"Downloading the dinov2 model with `{cmd}`.") subprocess.run(cmd, check=True) else: @@ -45,7 +46,7 @@ class OmniGlue(BaseModel): dino_export=str(dino_model_path), max_keypoints=self.conf["max_keypoints"], ) - logger.info(f"Loaded OmniGlue model done!") + logger.info("Loaded OmniGlue model done!") def _forward(self, data): image0_rgb_np = data["image0"][0].permute(1, 2, 0).cpu().numpy() * 255 @@ -61,7 +62,6 @@ class OmniGlue(BaseModel): for i in range(match_kp0.shape[0]): if match_confidences[i] > match_threshold: keep_idx.append(i) - num_filtered_matches = len(keep_idx) scores = torch.from_numpy(match_confidences[keep_idx]).reshape(-1, 1) pred = { "keypoints0": torch.from_numpy(match_kp0[keep_idx]), diff --git a/hloc/matchers/roma.py b/hloc/matchers/roma.py index 30964a735b372f9e38158193bc0d1748537ae8b8..01949160f98478c6c1620f60ddfa83cf555490f8 100644 --- a/hloc/matchers/roma.py +++ b/hloc/matchers/roma.py @@ -1,10 +1,12 @@ +import subprocess import sys from pathlib import Path -import subprocess + import torch from PIL import Image -from ..utils.base_model import BaseModel + from .. 
import logger +from ..utils.base_model import BaseModel roma_path = Path(__file__).parent / "../../third_party/RoMa" sys.path.append(str(roma_path)) @@ -19,7 +21,6 @@ class Roma(BaseModel): "model_name": "roma_outdoor.pth", "model_utils_name": "dinov2_vitl14_pretrain.pth", "max_keypoints": 3000, - "tiny_roma": None, } required_inputs = [ "image0", @@ -42,18 +43,18 @@ class Roma(BaseModel): if not model_path.exists(): model_path.parent.mkdir(exist_ok=True) link = self.weight_urls["roma"][conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] + cmd = ["wget", "--quiet", link, "-O", str(model_path)] logger.info(f"Downloading the Roma model with `{cmd}`.") subprocess.run(cmd, check=True) if not dinov2_weights.exists(): dinov2_weights.parent.mkdir(exist_ok=True) link = self.weight_urls[conf["model_utils_name"]] - cmd = ["wget", link, "-O", str(dinov2_weights)] + cmd = ["wget", "--quiet", link, "-O", str(dinov2_weights)] logger.info(f"Downloading the dinov2 model with `{cmd}`.") subprocess.run(cmd, check=True) - logger.info(f"Loading Roma model") + logger.info("Loading Roma model") # load the model weights = torch.load(model_path, map_location="cpu") dinov2_weights = torch.load(dinov2_weights, map_location="cpu") @@ -67,7 +68,7 @@ class Roma(BaseModel): # temp fix issue: https://github.com/Parskatt/RoMa/issues/26 amp_dtype=torch.float32, ) - logger.info(f"Load Roma model done.") + logger.info("Load Roma model done.") def _forward(self, data): img0 = data["image0"].cpu().numpy().squeeze() * 255 diff --git a/hloc/matchers/sgmnet.py b/hloc/matchers/sgmnet.py index 7931a6bd51c415a374ba44d3f83775a1a2879f9f..cc28221073518e8e7f297bd701619dfcdb30ef5b 100644 --- a/hloc/matchers/sgmnet.py +++ b/hloc/matchers/sgmnet.py @@ -1,11 +1,12 @@ +import subprocess import sys +from collections import OrderedDict, namedtuple from pathlib import Path -import subprocess + import torch -from PIL import Image -from collections import OrderedDict, namedtuple -from ..utils.base_model import BaseModel + from .. import logger +from ..utils.base_model import BaseModel sgmnet_path = Path(__file__).parent / "../../third_party/SGMNet" sys.path.append(str(sgmnet_path)) @@ -89,7 +90,7 @@ class SGMNet(BaseModel): new_stat_dict[key[7:]] = value checkpoint["state_dict"] = new_stat_dict self.net.load_state_dict(checkpoint["state_dict"]) - logger.info(f"Load SGMNet model done.") + logger.info("Load SGMNet model done.") def _forward(self, data): x1 = data["keypoints0"].squeeze() # N x 2 diff --git a/hloc/matchers/sold2.py b/hloc/matchers/sold2.py index a75c371082cc413318f828f7e06e2cbaca312399..e7ac07f6a4e1c3f4af0ab79fd908cd6a350503d8 100644 --- a/hloc/matchers/sold2.py +++ b/hloc/matchers/sold2.py @@ -1,11 +1,11 @@ +import subprocess import sys from pathlib import Path -from ..utils.base_model import BaseModel + import torch -from ..utils.base_model import BaseModel from .. 
import logger -import subprocess +from ..utils.base_model import BaseModel sold2_path = Path(__file__).parent / "../../third_party/SOLD2" sys.path.append(str(sold2_path)) @@ -43,7 +43,7 @@ class SOLD2(BaseModel): if not checkpoint_path.exists(): checkpoint_path.parent.mkdir(exist_ok=True) link = self.weight_urls[conf["weights"]] - cmd = ["wget", link, "-O", str(checkpoint_path)] + cmd = ["wget", "--quiet", link, "-O", str(checkpoint_path)] logger.info(f"Downloading the SOLD2 model with `{cmd}`.") subprocess.run(cmd, check=True) diff --git a/hloc/matchers/topicfm.py b/hloc/matchers/topicfm.py index c42e6fd81a48892b6181e8999774f1562933ba7a..2d4701cc0dbe4952712f4718e26256022dd0b522 100644 --- a/hloc/matchers/topicfm.py +++ b/hloc/matchers/topicfm.py @@ -1,12 +1,13 @@ -import torch -import warnings -from ..utils.base_model import BaseModel import sys from pathlib import Path +import torch + +from ..utils.base_model import BaseModel + sys.path.append(str(Path(__file__).parent / "../../third_party")) -from TopicFM.src.models.topic_fm import TopicFM as _TopicFM from TopicFM.src import get_model_cfg +from TopicFM.src.models.topic_fm import TopicFM as _TopicFM topicfm_path = Path(__file__).parent / "../../third_party/TopicFM" diff --git a/hloc/matchers/xfeat_dense.py b/hloc/matchers/xfeat_dense.py index 19ffbb343d0cae77ebc433c6fc6001d0f17a217c..00d660fed15530b78b4445299059cc152eeeea33 100644 --- a/hloc/matchers/xfeat_dense.py +++ b/hloc/matchers/xfeat_dense.py @@ -1,6 +1,7 @@ import torch -from pathlib import Path + from hloc import logger + from ..utils.base_model import BaseModel @@ -21,7 +22,7 @@ class XFeatDense(BaseModel): pretrained=True, top_k=self.conf["max_keypoints"], ) - logger.info(f"Load XFeat(dense) model done.") + logger.info("Load XFeat(dense) model done.") def _forward(self, data): # Compute coarse feats diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..4e77a75ac08b6320f9da8cf2cfe53fc9ec3bcd4a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,36 @@ +[project] +name = "ImageMatchingWebui" +description = "Image Matching Webui: A tool for matching images using sota algorithms with a Gradio UI" +version = "1.0" +authors = [ + {name = "vincentqyw"}, +] +readme = "README.md" +requires-python = ">=3.8" +license = {file = "LICENSE"} +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", +] +urls = {Repository = "https://github.com/Vincentqyw/image-matching-webui"} +dynamic = ["dependencies"] + +[project.optional-dependencies] +dev = ["black", "flake8", "isort"] + +[tool.setuptools.packages.find] +include = ["hloc*", "common",] + +[tool.setuptools.package-data] +common = ["*.yaml"] + +[tool.setuptools.dynamic] +dependencies = {file = ["requirements.txt"]} + +[tool.black] +line-length = 80 + +[tool.isort] +profile = "black" +line_length = 80 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 28194fea5246b17c39c2f8b9bfe2738081b9be0c..ad5e055f627232bd10e3b69f6d3ee08450b07e31 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -torch==2.2.1 e2cnn==0.2.3 einops==0.6.1 gdown==4.7.1 @@ -33,4 +32,6 @@ tqdm==4.65.0 yacs==0.1.8 onnxruntime poselib -roma #dust3r \ No newline at end of file +roma #dust3r +huggingface_hub +psutil \ No newline at end of file diff --git a/style.css b/style.css deleted file mode 100644 index 
98fbaf2b4b9579149b90210ec4a1ba9632aea49b..0000000000000000000000000000000000000000 --- a/style.css +++ /dev/null @@ -1,19 +0,0 @@ -h1 { - text-align: center; - display:block; - } - - #duplicate-button { - margin: auto; - color: white; - background: #1565c0; - border-radius: 100vh; - } - - #component-0 { - /* max-width: 900px; */ - margin: auto; - padding-top: 1.5rem; - } - - footer {visibility: hidden} \ No newline at end of file diff --git a/test_app_cli.py b/test_app_cli.py index 9329bc02915116e59d535faae506ade9bdc370b3..e9ca772ef1de22659ad2667173cace11a69ac804 100644 --- a/test_app_cli.py +++ b/test_app_cli.py @@ -6,7 +6,7 @@ from hloc import logger from common.utils import ( get_matcher_zoo, load_config, - device, + DEVICE, ROOT, ) from common.api import ImageMatchingAPI @@ -23,15 +23,17 @@ def test_all(config: dict = None): if image0 is None or image1 is None: logger.error("Error: No images found! Please upload two images.") enable = config["matcher_zoo"][k].get("enable", True) - if enable: + skip_ci = config["matcher_zoo"][k].get("skip_ci", False) + if enable and not skip_ci: logger.info(f"Testing {k} ...") - api = ImageMatchingAPI(conf=v, device=device) + api = ImageMatchingAPI(conf=v, device=DEVICE) api(image0, image1) log_path = ROOT / "experiments" / "all" log_path.mkdir(exist_ok=True, parents=True) api.visualize(log_path=log_path) else: logger.info(f"Skipping {k} ...") + return 0 def test_one(): @@ -68,7 +70,7 @@ def test_one(): }, "dense": False, } - api = ImageMatchingAPI(conf=conf, device=device) + api = ImageMatchingAPI(conf=conf, device=DEVICE) api(image0, image1) log_path = ROOT / "experiments" / "one" log_path.mkdir(exist_ok=True, parents=True) @@ -98,16 +100,15 @@ def test_one(): "dense": True, } - api = ImageMatchingAPI(conf=conf, device=device) + api = ImageMatchingAPI(conf=conf, device=DEVICE) api(image0, image1) log_path = ROOT / "experiments" / "one" log_path.mkdir(exist_ok=True, parents=True) api.visualize(log_path=log_path) + return 0 if __name__ == "__main__": - import argparse - config = load_config(ROOT / "common/config.yaml") test_one() test_all(config)
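
Note on the revised CLI test above: the sketch below shows how a single matcher can be driven through `ImageMatchingAPI` outside of CI, mirroring `test_app_cli.py` as modified in this patch. It is only a sketch: the `"loftr"` zoo key, the image paths, and the `cv2`-based loading are illustrative assumptions, not defined by the patch; `load_config`, `get_matcher_zoo`, `DEVICE`, `ROOT`, and `ImageMatchingAPI` are the names imported by the test itself.

```python
# Hedged sketch, not part of the patch: drive one matcher via the API entry
# points used by test_app_cli.py. The "loftr" key, the image paths, and the
# cv2-based loading are placeholders/assumptions for illustration only.
import cv2

from common.api import ImageMatchingAPI
from common.utils import DEVICE, ROOT, get_matcher_zoo, load_config

# Load the same config file the CLI test reads in its __main__ block.
config = load_config(ROOT / "common/config.yaml")
matcher_zoo = get_matcher_zoo(config["matcher_zoo"])  # assumed call, mirrors the test
conf = matcher_zoo["loftr"]  # placeholder key; any enabled entry should work

# Two input images loaded as arrays (paths are placeholders).
image0 = cv2.imread(str(ROOT / "path/to/image0.jpg"))
image1 = cv2.imread(str(ROOT / "path/to/image1.jpg"))

# Run matching on the chosen device and dump visualizations, as test_all does.
api = ImageMatchingAPI(conf=conf, device=DEVICE)
api(image0, image1)

log_path = ROOT / "experiments" / "demo"
log_path.mkdir(exist_ok=True, parents=True)
api.visualize(log_path=log_path)
```

Entries flagged with `skip_ci` in the matcher zoo are now excluded from `test_all` by this patch, so heavyweight matchers can still be exercised manually along these lines.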