XingyiHe committed
Commit 8c3344c · 1 Parent(s): 993eeba
imcui/datasets/multi_modality_pairs/rgb_2.png ADDED
imcui/datasets/multi_modality_pairs/thermal_1.jpg ADDED
imcui/third_party/place_holder.txt CHANGED
@@ -1 +0,0 @@
-
imcui/ui/app_class.py CHANGED
@@ -19,9 +19,11 @@ from .utils import (
     send_to_match,
 )
 import os
+GITHUB_TOKEN = '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'
+GOOGLE_TOKEN = '1qnxd5DKomsYUH9_8dQ4Xvwatg_vI-vsh'
 if not (Path(__file__).parent / "../third_party/MatchAnything").exists():
     print("**********************************")
-    os.system(f"cd {str(Path(__file__).parent / '../third_party')} && git clone https://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@github.com/hxy-123/MatchAnything_HF.git && mv MatchAnything_HF MatchAnything && cd MatchAnything && gdown 1qnxd5DKomsYUH9_8dQ4Xvwatg_vI-vsh && unzip weights.zip")
+    os.system(f"cd {str(Path(__file__).parent / '../third_party')} && git clone https://{GITHUB_TOKEN}@github.com/hxy-123/MatchAnything_HF.git && mv MatchAnything_HF MatchAnything && cd MatchAnything && gdown {GOOGLE_TOKEN} && unzip weights.zip")
 
 DESCRIPTION = '''
 <center><b><font size='10'><font color='78aa58'>Match</font><font color='6589bf'>Anything</font></font></b></center>
@@ -31,7 +33,7 @@ DESCRIPTION = '''
 <center><b>
 <a href=https://zju3dv.github.io/MatchAnything>Project Page</a>
 |
-<a href=https://arxiv.org/abs/2409>Paper</a>
+<a href=https://arxiv.org/abs/2501.07556>Paper</a>
 </b></center>
 
 > MatchAnything: Universal Cross-Modality Image Matching with Large-Scale Pre-Training
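As an aside, the clone-and-download step touched in `app_class.py` above could also be expressed with `subprocess` and secrets supplied via environment variables. This is a minimal, illustrative sketch only; the variable names `MATCHANYTHING_GITHUB_TOKEN` and `MATCHANYTHING_GDRIVE_ID` are assumptions and are not part of this commit:

```python
# Illustrative sketch -- not part of this commit. Assumes the GitHub token and
# Google Drive file id are provided as environment variables (e.g. Space secrets)
# instead of being hard-coded in the source.
import os
import subprocess
from pathlib import Path

third_party = Path(__file__).parent / "../third_party"
repo_dir = third_party / "MatchAnything"

if not repo_dir.exists():
    token = os.environ["MATCHANYTHING_GITHUB_TOKEN"]   # assumed secret name
    gdrive_id = os.environ["MATCHANYTHING_GDRIVE_ID"]  # assumed secret name
    # Clone the weights repo directly into its target directory name.
    subprocess.run(
        ["git", "clone",
         f"https://{token}@github.com/hxy-123/MatchAnything_HF.git",
         str(repo_dir)],
        check=True,
    )
    # Download the weight archive by its Drive id and unpack it in place.
    subprocess.run(["gdown", gdrive_id], cwd=repo_dir, check=True)
    subprocess.run(["unzip", "weights.zip"], cwd=repo_dir, check=True)
```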
imcui/ui/utils.py CHANGED
@@ -234,7 +234,7 @@ def gen_examples():
         [str(images_dir / "MTV_thermal_vis_pair2_1.jpg"), str(images_dir / "MTV_thermal_vis_pair2_2.jpg")],
     ]
 
-    pairs_special = [[str(images_dir / "mri_ut_1.jpg"), str(images_dir / "mri_ut_2.jpg")], [str(images_dir / "01.png"), str(images_dir / "02.png")]]
+    pairs_special = [[str(images_dir / "mri_ut_1.jpg"), str(images_dir / "mri_ut_2.jpg")], [str(images_dir / "01.png"), str(images_dir / "02.png")], [str(images_dir / 'thermal_1.png'), str(images_dir / 'rgb_2.png')]]
 
     match_setting_threshold = DEFAULT_SETTING_THRESHOLD
     match_setting_max_features = DEFAULT_SETTING_MAX_FEATURES
@@ -312,6 +312,20 @@ def gen_examples():
             ransac_max_iter,
             "Fundamental",
         ])
+
+    input_lists.insert(0, [
+        pairs_special[2][0],
+        pairs_special[2][1],
+        0.1,
+        match_setting_max_features,
+        detect_keypoints_threshold,
+        "matchanything_roma",
+        ransac_method,
+        4,
+        ransac_confidence,
+        ransac_max_iter,
+        "Fundamental",
+    ])
     return input_lists
 
 
@@ -353,14 +367,6 @@ def _filter_matches_opencv(
         Tuple[np.ndarray, np.ndarray]: Homography matrix and mask.
     """
     if geometry_type == "Homography":
-        # M, mask = cv2.findHomography(
-        #     kp0,
-        #     kp1,
-        #     method=method,
-        #     ransacReprojThreshold=reproj_threshold,
-        #     confidence=confidence,
-        #     maxIters=max_iter,
-        # )
         M, mask = cv2.estimateAffine2D(kp0, kp1, ransacReprojThreshold=reproj_threshold, confidence=confidence, method=method, maxIters=max_iter)
         M = np.concatenate([M, np.array([[0, 0, 1]])], axis=0)  # 3 * 3
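For context on the `_filter_matches_opencv` change above: `cv2.estimateAffine2D` returns a 2x3 affine matrix plus an inlier mask, so the code appends a `[0, 0, 1]` row to obtain a 3x3 matrix usable wherever a homography is expected. Below is a minimal, self-contained sketch of that lift; the synthetic points and variable names are illustrative and not taken from the repo:

```python
# Illustrative sketch: estimate a 2x3 affine transform with RANSAC and lift it
# to a 3x3 homogeneous matrix, as the updated _filter_matches_opencv does.
import cv2
import numpy as np

# Synthetic correspondences: kp1 is kp0 under a known affine map.
rng = np.random.default_rng(0)
kp0 = rng.uniform(0, 640, size=(100, 2)).astype(np.float32)
A_true = np.array([[0.9, -0.1, 30.0],
                   [0.1,  0.9, -20.0]], dtype=np.float32)
kp1 = (kp0 @ A_true[:, :2].T + A_true[:, 2]).astype(np.float32)

# RANSAC-based affine estimation; mask marks inlier correspondences.
M, mask = cv2.estimateAffine2D(
    kp0, kp1,
    method=cv2.RANSAC,
    ransacReprojThreshold=3.0,
    confidence=0.999,
    maxIters=2000,
)

# Append [0, 0, 1] so the 2x3 result can be used like a 3x3 homography.
M = np.concatenate([M, np.array([[0, 0, 1]])], axis=0)

# Check: warping kp0 with the 3x3 matrix should reproduce kp1.
warped = cv2.perspectiveTransform(kp0.reshape(-1, 1, 2), M).reshape(-1, 2)
print("max reprojection error:", np.abs(warped - kp1).max())
```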