Commit d2b85e2 — Update testing scripts and README.md

angshine committed Jun 17, 2021 · 1 parent d41f04f

Showing 16 changed files with 132 additions and 125 deletions.
11 changes: 9 additions & 2 deletions README.md
@@ -127,6 +127,13 @@ cd demo
</details>

### Reproduce the testing results with pytorch-lightning
+You need to set up the testing subsets of ScanNet and MegaDepth first. We create symlinks from the previously downloaded datasets to `data/{scannet,megadepth}/test`.
+
+```shell
+# set up symlinks
+ln -s /path/to/scannet-1500-testset/* /path/to/LoFTR/data/scannet/test
+ln -s /path/to/megadepth-1500-testset/* /path/to/LoFTR/data/megadepth/test
+```

```shell
conda activate loftr
@@ -142,7 +149,7 @@ For visualizing the results, please refer to `notebooks/visualize_dump_results.ipynb`.
<br/>


-### Image pair info for training on ScanNet
+<!-- ### Image pair info for training on ScanNet
You can download the data [here](https://drive.google.com/file/d/1fC2BezUSsSQy7_H65A0ZfrYK0RB3TXXj/view?usp=sharing).
<details>
@@ -175,7 +182,7 @@ Out[19]: 1684276
`data['name']` is the image pair info, organized as [`scene_id`, `seq_id`, `image0_id`, `image1_id`].
`data['score']` is the overlapping score defined in [SuperGlue](https://arxiv.org/pdf/1911.11763) (Page 12).
-</details>
+</details> -->


## Training
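An aside on the pair-info section being commented out above: a minimal sketch of how one might inspect the downloaded index file. The key names and the pair count come straight from the README text; the local filename is a placeholder.

```python
import numpy as np

# Placeholder path for the file downloaded from the Google Drive link above.
data = np.load("scannet_image_pair_info.npz")

# Per the README: data['name'] rows are [scene_id, seq_id, image0_id, image1_id],
# and data['score'] is the SuperGlue-style overlap score for each pair.
print(len(data["name"]))   # 1684276, matching the Out[19] shown above
scene_id, seq_id, img0_id, img1_id = data["name"][0]
print(scene_id, seq_id, img0_id, img1_id, float(data["score"][0]))
```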
10 changes: 5 additions & 5 deletions assets/megadepth_test_1500_scene_info/megadepth_test_1500.txt
@@ -1,5 +1,5 @@
-0022_0.1_0.3.npz
-0015_0.1_0.3.npz
-0015_0.3_0.5.npz
-0022_0.3_0.5.npz
-0022_0.5_0.7.npz
+0022_0.1_0.3
+0015_0.1_0.3
+0015_0.3_0.5
+0022_0.3_0.5
+0022_0.5_0.7
4 changes: 2 additions & 2 deletions configs/data/megadepth_test_1500.py
@@ -3,9 +3,9 @@
TEST_BASE_PATH = "assets/megadepth_test_1500_scene_info"

cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
-cfg.DATASET.TEST_DATA_ROOT = "/data/MegaDepth/megadepth_test_1500"
+cfg.DATASET.TEST_DATA_ROOT = "data/megadepth/test"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/megadepth_test_1500.txt"

cfg.DATASET.MGDPT_IMG_RESIZE = 840
-cfg.DATASET.MIN_OVERLAP_SCORE = 0.0
+cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
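The list entries above lose their `.npz` suffix while `TEST_NPZ_ROOT` still points at the same directory, which suggests the dataset code now appends the extension when resolving scene-info files. A sketch of that resolution step, assuming logic not shown in this diff — the function name and the suffix handling are illustrative, not the repository's actual loader:

```python
from pathlib import Path

def resolve_scene_npzs(list_path: str, npz_root: str) -> list[Path]:
    # Hypothetical helper: map bare names like "0022_0.1_0.3" back to
    # assets/megadepth_test_1500_scene_info/0022_0.1_0.3.npz
    names = Path(list_path).read_text().split()
    return [Path(npz_root) / f"{name}.npz" for name in names]

npz_files = resolve_scene_npzs(
    "assets/megadepth_test_1500_scene_info/megadepth_test_1500.txt",
    "assets/megadepth_test_1500_scene_info",
)
print(npz_files[:2])
```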
4 changes: 2 additions & 2 deletions configs/data/scannet_test_1500.py
@@ -3,9 +3,9 @@
TEST_BASE_PATH = "assets/scannet_test_1500"

cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
-cfg.DATASET.TEST_DATA_ROOT = "/data/scannet/scannet_test_1500"
+cfg.DATASET.TEST_DATA_ROOT = "data/scannet/test"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scannet_test.txt"
cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"

-cfg.DATASET.MIN_OVERLAP_SCORE = 0.0
+cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
1 change: 0 additions & 1 deletion data/scannet/test

This file was deleted.

3 changes: 3 additions & 0 deletions data/scannet/test/.gitignore
@@ -0,0 +1,3 @@
+*
+*/
+!.gitignore
4 changes: 2 additions & 2 deletions docs/TRAINING.md
@@ -37,8 +37,8 @@ ln -s /path/to/scannet_indices/* /path/to/LoFTR/data/scannet/index

# megadepth
# -- # train and test dataset (train and test share the same dataset)
-ln -s /path/to/megadepth/Undistorted_SfM/* /path/to/LoFTR/data/megadepth/train
-ln -s /path/to/megadepth/Undistorted_SfM/* /path/to/LoFTR/data/megadepth/test
+ln -s /path/to/megadepth/Undistorted_SfM /path/to/LoFTR/data/megadepth/train
+ln -s /path/to/megadepth/Undistorted_SfM /path/to/LoFTR/data/megadepth/test
# -- # dataset indices
ln -s /path/to/megadepth_indices/* /path/to/LoFTR/data/megadepth/index
```
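The fix links the `Undistorted_SfM` directory itself instead of globbing its contents. A small, purely illustrative sanity check of the resulting layout — what counts as the scene root depends on whether `data/megadepth/train` already existed when the symlink was made:

```python
from pathlib import Path

train = Path("data/megadepth/train")
# `ln -s .../Undistorted_SfM .../train` makes `train` the link itself if it
# didn't exist, or drops an `Undistorted_SfM` link inside it if it did.
root = train / "Undistorted_SfM" if (train / "Undistorted_SfM").exists() else train

print(root.resolve())                              # where the link points
print(sorted(p.name for p in root.iterdir())[:5])  # first few scene folders
```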
100 changes: 51 additions & 49 deletions notebooks/demo_single_pair.ipynb

Large diffs are not rendered by default.

92 changes: 47 additions & 45 deletions notebooks/visualize_dump_results.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion scripts/reproduce_test/indoor_ds.sh
@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
cd $PROJECT_DIR

data_cfg_path="configs/data/scannet_test_1500.py"
main_cfg_path="configs/loftr/loftr_ds.py"
main_cfg_path="configs/loftr/indoor/loftr_ds.py"
ckpt_path="weights/indoor_ds.ckpt"
dump_dir="dump/loftr_ds_indoor"
profiler_name="inference"
2 changes: 1 addition & 1 deletion scripts/reproduce_test/indoor_ot.sh
@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
cd $PROJECT_DIR

data_cfg_path="configs/data/scannet_test_1500.py"
main_cfg_path="configs/loftr/loftr_ot.py"
main_cfg_path="configs/loftr/indoor/loftr_ot.py"
ckpt_path="weights/indoor_ot.ckpt"
dump_dir="dump/loftr_ot_indoor"
profiler_name="inference"
2 changes: 1 addition & 1 deletion scripts/reproduce_test/outdoor_ds.sh
@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
cd $PROJECT_DIR

data_cfg_path="configs/data/megadepth_test_1500.py"
main_cfg_path="configs/loftr/loftr_ds.py"
main_cfg_path="configs/loftr/outdoor/loftr_ds.py"
ckpt_path="weights/outdoor_ds.ckpt"
dump_dir="dump/loftr_ds_outdoor"
profiler_name="inference"
2 changes: 1 addition & 1 deletion scripts/reproduce_test/outdoor_ot.sh
@@ -8,7 +8,7 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
cd $PROJECT_DIR

data_cfg_path="configs/data/megadepth_test_1500.py"
main_cfg_path="configs/loftr/loftr_ot.py"
main_cfg_path="configs/loftr/outdoor/loftr_ot.py"
ckpt_path="weights/outdoor_ot.ckpt"
dump_dir="dump/loftr_ot_outdoor"
profiler_name="inference"
1 change: 1 addition & 0 deletions src/config/default.py
@@ -99,6 +99,7 @@

############## Trainer ##############
_CN.TRAINER = CN()
+_CN.TRAINER.WORLD_SIZE = 1
_CN.TRAINER.CANONICAL_BS = 64
_CN.TRAINER.CANONICAL_LR = 6e-3
_CN.TRAINER.SCALING = None # this will be calculated automatically
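The new `WORLD_SIZE` field slots in next to `CANONICAL_BS` and `CANONICAL_LR`, which points at the usual linear learning-rate scaling rule. A sketch of how these fields plausibly combine — the formula below is the standard convention, stated as an assumption rather than read off the repository's trainer code:

```python
def scaled_lr(world_size: int, batch_size: int,
              canonical_bs: int = 64, canonical_lr: float = 6e-3) -> float:
    """Assumed linear scaling: lr = canonical_lr * (total batch / canonical batch)."""
    scaling = world_size * batch_size / canonical_bs  # what TRAINER.SCALING likely holds
    return canonical_lr * scaling

# e.g. 8 GPUs x per-GPU batch size 1 -> 6e-3 * 8 / 64 = 7.5e-4
print(scaled_lr(world_size=8, batch_size=1))
```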
4 changes: 2 additions & 2 deletions src/lightning/data.py
@@ -75,13 +75,13 @@ def __init__(self, args, config):
        self.train_loader_params = {
            'batch_size': args.batch_size,
            'num_workers': args.num_workers,
-            'pin_memory': args.pin_memory,
+            'pin_memory': getattr(args, 'pin_memory', True)
        }
        self.val_loader_params = {
            'batch_size': 1,
            'shuffle': False,
            'num_workers': args.num_workers,
-            'pin_memory': args.pin_memory,
+            'pin_memory': getattr(args, 'pin_memory', True)
        }
        self.test_loader_params = {
            'batch_size': 1,
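The `getattr` change makes the data module tolerate argument namespaces that never defined `pin_memory`, defaulting to `True`. A minimal illustration of the failure mode being avoided; the namespace contents are made up:

```python
from argparse import Namespace

# Hypothetical test-time args that never defined --pin_memory.
args = Namespace(batch_size=1, num_workers=4)

# args.pin_memory would raise AttributeError here;
# the getattr form falls back to True instead.
pin_memory = getattr(args, 'pin_memory', True)
print(pin_memory)  # True
```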
15 changes: 4 additions & 11 deletions src/utils/plotting.py
@@ -16,10 +16,6 @@ def _compute_conf_thresh(data):


# --- VISUALIZATION --- #
-def plot_keypoints(axes, kpts0, kpts1, color='w', ps=2):
-    axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
-    axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps)
-

def make_matching_figure(
        img0, img1, mkpts0, mkpts1, color,
@@ -38,8 +34,8 @@

    if kpts0 is not None:
        assert kpts1 is not None
-        # plot_keypoints(axes, kpts0, kpts1, color='k', ps=4)
-        plot_keypoints(axes, kpts0, kpts1, color='w', ps=2)
+        axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c='w', s=2)
+        axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c='w', s=2)

    # draw matches
    if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
@@ -98,7 +94,7 @@ def _make_evaluation_figure(data, b_id, alpha='dynamic'):
    color = error_colormap(epi_errs, conf_thr, alpha=alpha)

    text = [
-        f'Matches {len(kpts0)}',
+        f'#Matches {len(kpts0)}',
        f'Precision({conf_thr:.2e}) ({100 * precision:.1f}%): {n_correct}/{len(kpts0)}',
        f'Recall({conf_thr:.2e}) ({100 * recall:.1f}%): {n_correct}/{n_gt_matches}'
    ]
@@ -115,15 +111,12 @@ def _make_confidence_figure(data, b_id):

def make_matching_figures(data, config, mode='evaluation'):
    """ Make matching figures for a batch.
    Args:
        data (Dict): a batch updated by PL_LoFTR.
        config (Dict): matcher config
    Returns:
        figures (Dict[str, List[plt.figure]]
-    TODO:
-        - confidence mode plotting
-        - parallel plotting
-        - evaluation mode & confidence mode at the same time
    """
    assert mode in ['evaluation', 'confidence']  # 'confidence'
    figures = {mode: []}
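With the keypoint scatter now inlined, a short usage sketch of `make_matching_figure` may help. It assumes the signature visible in the hunks above (grayscale images, matched keypoint arrays, a per-match RGBA `color` array, optional `kpts0`/`kpts1` and `text`); the random data is purely illustrative:

```python
import numpy as np
import matplotlib.pyplot as plt
from src.utils.plotting import make_matching_figure  # path per this repo's layout

rng = np.random.default_rng(0)
img0 = rng.random((480, 640))            # stand-in grayscale images
img1 = rng.random((480, 640))
mkpts0 = rng.uniform(0, 480, (50, 2))    # fake "matched" keypoints
mkpts1 = mkpts0 + rng.normal(0, 2, (50, 2))
color = np.tile([0.0, 1.0, 0.0, 0.6], (50, 1))  # per-match RGBA (all green)

fig = make_matching_figure(img0, img1, mkpts0, mkpts1, color,
                           kpts0=mkpts0, kpts1=mkpts1,
                           text=['#Matches 50'])
plt.show()
```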
