/*
 * Copyright (c) 2011-2013, Peter Abeles. All Rights Reserved.
 *
 * This file is part of BoofCV (http://boofcv.org).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package boofcv.examples.sfm;

import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
import boofcv.abst.feature.tracker.PointTrackerTwoPass;
import boofcv.abst.sfm.AccessPointTracks3D;
import boofcv.abst.sfm.d3.DepthVisualOdometry;
import boofcv.abst.sfm.d3.VisualOdometry;
import boofcv.alg.distort.DoNothingPixelTransform_F32;
import boofcv.alg.sfm.DepthSparse3D;
import boofcv.alg.tracker.klt.PkltConfig;
import boofcv.factory.feature.tracker.FactoryPointTrackerTwoPass;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.io.MediaManager;
import boofcv.io.UtilIO;
import boofcv.io.image.SimpleImageSequence;
import boofcv.io.wrapper.DefaultMediaManager;
import boofcv.struct.calib.VisualDepthParameters;
import boofcv.struct.image.*;
import georegression.struct.point.Vector3D_F64;
import georegression.struct.se.Se3_F64;
import org.ddogleg.struct.GrowQueue_I8;
import java.io.IOException;

/**
 * Bare-bones example showing how to estimate the camera's ego-motion using a depth camera system, e.g. a Kinect.
 * Additional information about the scene can optionally be extracted from the algorithm if it implements
 * {@link AccessPointTracks3D}.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryDepth {

	public static void main( String args[] ) throws IOException {

		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = "../data/applet/kinect/straight/";

		// load camera description and the video sequence
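		// VisualDepthParameters is assumed to contain the intrinsic parameters of the visual (RGB) camera,
		// which are passed to the odometry algorithm further below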
		VisualDepthParameters param = UtilIO.loadXML(media.openFile(directory + "visualdepth.xml"));

		// specify how the image features are going to be tracked
		PkltConfig configKlt = new PkltConfig();
		configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
		configKlt.templateRadius = 3;

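		// Create the KLT tracker. The detector arguments (600, 3, 1) are assumed to be the maximum number of
		// features, the non-maximum suppression radius, and the minimum detection intensity; ImageSInt16 is
		// the type used for the image derivatives.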
		PointTrackerTwoPass<ImageUInt8> tracker =
				FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1),
						ImageUInt8.class, ImageSInt16.class);

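		// Converts the depth image into 3D coordinates at sparse pixel locations. The 1e-3 scale factor is
		// assumed to convert the Kinect's millimeter depth values into meters.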
		DepthSparse3D<ImageUInt16> sparseDepth = new DepthSparse3D.I<ImageUInt16>(1e-3);

		// declares the algorithm
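		// The numeric arguments are assumed to mean: inlier pixel tolerance (1.5), spawn new tracks when the
		// track count drops below 120, drop tracks that have not been an inlier for 2 frames, 200 RANSAC
		// iterations, and 50 refinement iterations; 'true' enables the tracker's two-pass mode.
		// Consult the FactoryVisualOdometry Javadoc for the authoritative description of each parameter.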
		DepthVisualOdometry<ImageUInt8,ImageUInt16> visualOdometry =
				FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true,
						sparseDepth, tracker, ImageUInt8.class, ImageUInt16.class);

		// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
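		// DoNothingPixelTransform_F32 presumably indicates that the visual and depth images are already
		// registered, i.e. no transform is needed to go from visual pixels to depth pixels.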
		visualOdometry.setCalibration(param.visualParam, new DoNothingPixelTransform_F32());

		// image with depth information
		ImageUInt16 depth = new ImageUInt16(1,1);

		// image with color information
		MultiSpectral<ImageUInt8> rgb = new MultiSpectral<ImageUInt8>(ImageUInt8.class,1,1,3);
		ImageUInt8 gray = new ImageUInt8(1,1);

		// work space
		GrowQueue_I8 data = new GrowQueue_I8();

		// Process the video sequence and output the location plus number of inliers
		SimpleImageSequence<ImageUInt8> videoVisual =
				media.openVideo(directory + "rgb.mjpeg", ImageType.single(ImageUInt8.class));
		SimpleImageSequence<ImageUInt16> videoDepth =
				media.openVideo(directory + "depth.mpng", ImageType.single(ImageUInt16.class));

		while( videoVisual.hasNext() ) {
			ImageUInt8 imageVisual = videoVisual.next();
			ImageUInt16 imageDepth = videoDepth.next();

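			// process() returns false when it fails to estimate the motion for this frame pair;
			// this bare-bones example simply gives up in that case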
			if( !visualOdometry.process(imageVisual, imageDepth) ) {
				throw new RuntimeException("VO Failed!");
			}

			Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = cameraToWorld.getT();

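			// T is the camera's location in the world frame; given the 1e-3 depth scale above it should be in meters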
			System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
		}
	}

	/**
	 * If the algorithm implements {@link AccessPointTracks3D}, compute the percentage of its tracks which
	 * are inliers and return it as a string. Otherwise an empty string is returned.
	 */
	public static String inlierPercent(VisualOdometry alg) {
		if( !(alg instanceof AccessPointTracks3D) )
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getAllTracks().size();
		for( int i = 0; i < N; i++ ) {
			if( access.isInlier(i) )
				count++;
		}

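		// "%%" escapes to a literal '%' character, so the output looks like e.g. "%75.000"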
		return String.format("%%%5.3f", 100.0 * count / N);
	}
}