Added set of minor bug fixes, and added code for infrastructure-based calibration.
hengli committed Sep 16, 2013
1 parent 67623f4 commit b9c8e2a
Showing 17 changed files with 2,184 additions and 136 deletions.
4 changes: 4 additions & 0 deletions README.md
@@ -75,6 +75,10 @@ Go to the build folder where the executables corresponding to the examples are located
Note: Extrinsic calibration requires the use of a vocabulary tree. The vocabulary data
corresponding to 64-bit SURF descriptors can be found in data/vocabulary/surf64.yml.gz.
This file has to be located in the directory from which you run the executable.

4. Infrastructure-based calibration

Details to be released soon!

[1]: https://github.com/hengli/camodocal/blob/master/src/examples/intrinsic_calib.cc "src/examples/intrinsic_calib.cc"
[2]: https://github.com/hengli/camodocal/blob/master/src/examples/stereo_calib.cc "src/examples/stereo_calib.cc"
Binary file added data/vocabulary/surf64.yml.gz
Binary file not shown.
153 changes: 153 additions & 0 deletions include/camodocal/infrastr_calib/InfrastructureCalibration.h
@@ -0,0 +1,153 @@
#ifndef INFRASTRUCTURECALIBRATION_H
#define INFRASTRUCTURECALIBRATION_H

#include <boost/thread.hpp>

#include "camodocal/camera_models/Camera.h"
#include "camodocal/camera_systems/CameraRigExtrinsics.h"
#include "camodocal/sparse_graph/SparseGraph.h"

#ifdef VCHARGE_VIZ
#include "../../../../../visualization/overlay/GLOverlayExtended.h"
#endif

namespace camodocal
{

// forward declaration
class LocationRecognition;

class InfrastructureCalibration
{
public:
InfrastructureCalibration(std::vector<CameraPtr>& cameras,
bool verbose = false);

bool loadMap(const std::string& mapDirectory);

void addFrameSet(const std::vector<cv::Mat>& images,
uint64_t timestamp, bool preprocess);
void addOdometry(double x, double y, double yaw, uint64_t timestamp);

void reset(void);
void run(void);

void loadFrameSets(const std::string& filename);
void saveFrameSets(const std::string& filename) const;

const CameraRigExtrinsics& extrinsics(void) const;

private:
void estimateCameraPose(const cv::Mat& image, uint64_t timestamp,
FramePtr& frame, bool preprocess = false);
void optimize(bool optimizeScenePoints);

cv::Mat buildDescriptorMat(const std::vector<Point2DFeaturePtr>& features,
std::vector<size_t>& indices) const;
std::vector<cv::DMatch> matchFeatures(const std::vector<Point2DFeaturePtr>& queryFeatures,
const std::vector<Point2DFeaturePtr>& trainFeatures) const;
void rectifyImagePoint(const CameraConstPtr& camera,
const cv::Point2f& src, cv::Point2f& dst) const;

double reprojectionError(const CameraConstPtr& camera,
const Eigen::Vector3d& P,
const Eigen::Quaterniond& cam_ref_q,
const Eigen::Vector3d& cam_ref_t,
const Eigen::Vector3d& ref_p,
const Eigen::Vector3d& ref_att,
const Eigen::Vector2d& observed_p) const;

void frameReprojectionError(const FramePtr& frame,
const CameraConstPtr& camera,
const Pose& T_cam_ref,
double& minError, double& maxError, double& avgError,
size_t& featureCount) const;

void frameReprojectionError(const FramePtr& frame,
const CameraConstPtr& camera,
double& minError, double& maxError, double& avgError,
size_t& featureCount) const;

void reprojectionError(double& minError, double& maxError,
double& avgError, size_t& featureCount) const;

// Compute the quaternion average using the Markley SVD method
template <typename FloatT>
Eigen::Quaternion<FloatT> quaternionAvg(const std::vector<Eigen::Quaternion<FloatT> >& points) const;

#ifdef VCHARGE_VIZ
enum MapType
{
REFERENCE_MAP,
REFERENCE_POINTS
};

void visualizeMap(const std::string& overlayName, MapType type) const;
void visualizeCameraPose(const FrameConstPtr& frame,
bool showScenePoints);
void visualizeCameraPoses(bool showScenePoints);
void visualizeExtrinsics(void) const;
void visualizeOdometry(void) const;
#endif

typedef struct
{
uint64_t timestamp;
std::vector<FramePtr> frames;
} FrameSet;

// inputs
std::vector<CameraPtr> m_cameras;
SparseGraph m_refGraph;

// working data
boost::shared_ptr<LocationRecognition> m_locrec;
boost::mutex m_feature3DMapMutex;
boost::unordered_map<Point3DFeature*, Point3DFeaturePtr> m_feature3DMap;
std::vector<FrameSet> m_framesets;
double m_x_last;
double m_y_last;
double m_distance;
bool m_verbose;

#ifdef VCHARGE_VIZ
vcharge::GLOverlayExtended m_overlay;
#endif

// output
CameraRigExtrinsics m_extrinsics;

// parameters
const float k_maxDistanceRatio;
const int k_minCorrespondences2D2D;
const int k_minCorrespondences2D3D;
const double k_minKeyFrameDistance;
const int k_nearestImageMatches;
const double k_nominalFocalLength;
const double k_reprojErrorThresh;
};

template <typename FloatT>
Eigen::Quaternion<FloatT>
InfrastructureCalibration::quaternionAvg(const std::vector<Eigen::Quaternion<FloatT> >& points) const
{
using namespace Eigen;

Matrix<FloatT, 3, 3> sum;
sum.setZero();
for (int i = 0, end = points.size(); i != end; ++i)
{
sum += points[i].toRotationMatrix();
}

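    // Project the arithmetic mean of the rotation matrices back onto SO(3) via an SVD;
    // the diag(1, 1, det(U) * det(V)) factor guarantees that the result is a proper
    // rotation with determinant +1.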
JacobiSVD<Matrix<FloatT, 3, 3> > svd(sum, ComputeFullU | ComputeFullV);

Matrix<FloatT, 3, 3> result = svd.matrixU()
* (Matrix<FloatT, 3, 1>() << 1, 1, svd.matrixU().determinant()*svd.matrixV().determinant()).finished().asDiagonal()
* svd.matrixV().transpose();
return Quaternion<FloatT>(result);
}

}

#endif
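
To make the new interface concrete, here is a minimal driver sketch built only from the public methods declared above. The function name `calibrateAgainstMap`, the map directory, the image batches, and the timestamps are placeholders supplied by the caller; the sketch also omits the optional `addOdometry(x, y, yaw, timestamp)` hook, which can be fed alongside the frame sets when planar odometry is available.

```cpp
// Hypothetical end-to-end driver for InfrastructureCalibration, using only the
// public interface declared above. Inputs and the helper name are placeholders.
#include <stdexcept>
#include <stdint.h>
#include <string>
#include <vector>

#include <opencv2/core/core.hpp>

#include "camodocal/infrastr_calib/InfrastructureCalibration.h"

camodocal::CameraRigExtrinsics
calibrateAgainstMap(std::vector<camodocal::CameraPtr>& cameras,         // intrinsics already calibrated
                    const std::string& mapDirectory,                    // previously built sparse map
                    const std::vector<std::vector<cv::Mat> >& frameSets,// one image per camera per timestamp
                    const std::vector<uint64_t>& timestamps)
{
    camodocal::InfrastructureCalibration calib(cameras, /* verbose */ true);

    if (!calib.loadMap(mapDirectory))
    {
        throw std::runtime_error("Unable to load map from " + mapDirectory);
    }

    for (size_t i = 0; i < frameSets.size(); ++i)
    {
        // preprocess = false: images are assumed ready for feature extraction
        calib.addFrameSet(frameSets.at(i), timestamps.at(i), false);
    }

    calib.run();

    return calib.extrinsics();   // camera-rig extrinsics estimated against the map
}
```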
2 changes: 2 additions & 0 deletions include/camodocal/sparse_graph/Odometry.h
@@ -11,6 +11,8 @@ namespace camodocal
class Odometry
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

Odometry();

uint64_t& timeStamp(void);
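
For context on the `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` addition above, a minimal sketch with a hypothetical `PoseSample` class standing in for `Odometry`: any class holding fixed-size, vectorizable Eigen members that is created with `new` needs 16-byte-aligned storage, which the macro provides by overloading the class's `operator new`/`delete`.

```cpp
// Minimal sketch (hypothetical class name): why the macro is needed for
// heap-allocated objects containing fixed-size, vectorizable Eigen members.
#include <Eigen/Core>
#include <Eigen/Geometry>

class PoseSample
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    Eigen::Quaterniond attitude;   // internally a fixed-size, 16-byte-aligned Vector4d
    Eigen::Vector4d position;      // homogeneous position, also 16-byte aligned
};

int main(void)
{
    // Without the macro, plain operator new only guarantees the default alignment,
    // and vectorized Eigen code touching these members could crash.
    PoseSample* sample = new PoseSample;
    sample->attitude.setIdentity();
    delete sample;

    return 0;
}
```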
2 changes: 2 additions & 0 deletions include/camodocal/sparse_graph/SparseGraph.h
@@ -209,6 +209,8 @@ class Point2DFeatureRight: public Point2DFeature
class Point3DFeature
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

Point3DFeature();

Eigen::Vector3d& point(void);
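
The same macro is added to `Point3DFeature` above. A companion sketch, again with a hypothetical type, of the related Eigen rule: storing such a class by value in an STL container additionally calls for `Eigen::aligned_allocator`, since `std::allocator` only guarantees the default alignment. In camodocal these objects appear to be handled through smart-pointer typedefs such as `Point3DFeaturePtr`, so the class-level `operator new` is the relevant part; the container form is shown for completeness.

```cpp
// Companion sketch (hypothetical type name): by-value STL storage of a class
// with fixed-size, vectorizable Eigen members needs an aligned allocator.
#include <vector>

#include <Eigen/Core>
#include <Eigen/StdVector>

struct LandmarkSample
{
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    Eigen::Vector4d homogeneousPoint;
};

int main(void)
{
    std::vector<LandmarkSample, Eigen::aligned_allocator<LandmarkSample> > samples;
    samples.resize(100);
    samples.front().homogeneousPoint = Eigen::Vector4d(0.0, 0.0, 0.0, 1.0);

    return 0;
}
```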
1 change: 1 addition & 0 deletions src/CMakeLists.txt
@@ -8,6 +8,7 @@ add_subdirectory(dbow2)
add_subdirectory(examples)
add_subdirectory(features2d)
add_subdirectory(gpl)
add_subdirectory(infrastr_calib)
add_subdirectory(npoint)
add_subdirectory(pugixml)
add_subdirectory(sparse_graph)
118 changes: 43 additions & 75 deletions src/calib/CameraRigBA.cc
@@ -34,7 +34,7 @@ CameraRigBA::CameraRigBA(const std::vector<CameraPtr>& cameras,
, kMaxPoint3DDistance(20.0)
, kMaxReprojErr(2.0)
, kMinLoopCorrespondences2D2D(50)
, kMinInterCorrespondences2D2D(10)
, kMinInterCorrespondences2D2D(8)
, kNearestImageMatches(15)
, kNominalFocalLength(300.0)
, mVerbose(false)
@@ -310,35 +310,6 @@ CameraRigBA::run(int beginStage, bool findLoopClosures, bool optimizeIntrinsics,
<< " px | max = " << maxError << " px | count = " << featureCount << std::endl;
}

if (!findLoopClosures)
{
std::cout << "# INFO: Running BA on odometry data..." << std::endl;

if (optimizeIntrinsics)
{
// perform BA to optimize intrinsics, extrinsics, odometry poses, and scene points
optimize(CAMERA_INTRINSICS | CAMERA_ODOMETRY_EXTRINSICS | ODOMETRY_6D_EXTRINSICS | POINT_3D, true);
}
else
{
// perform BA to optimize extrinsics, odometry poses, and scene points
optimize(CAMERA_ODOMETRY_EXTRINSICS | ODOMETRY_6D_EXTRINSICS | POINT_3D, true);
}

reprojectionError(minError, maxError, avgError, featureCount, ODOMETRY);

prune(PRUNE_BEHIND_CAMERA | PRUNE_FARAWAY | PRUNE_HIGH_REPROJ_ERR);

if (mVerbose)
{
std::cout << "# INFO: Done." << std::endl;
std::cout << "# INFO: Reprojection error after inter-map linking: avg = " << avgError
<< " px | max = " << maxError << " px | count = " << featureCount << std::endl;
}

std::cout << "# INFO: Done." << std::endl;
}

#ifdef VCHARGE_VIZ
visualize("opt2-BA-", ODOMETRY);

@@ -347,19 +318,6 @@ CameraRigBA::run(int beginStage, bool findLoopClosures, bool optimizeIntrinsics,

if (saveWorkingData)
{
if (!findLoopClosures)
{
for (int i = 0; i < mCameras.size(); ++i)
{
std::ostringstream oss;
oss << "tmp_intrinsic_" << i << ".yaml";

boost::filesystem::path intrinsicPath(dataDir);
intrinsicPath /= oss.str();
mCameras.at(i)->writeParameters(intrinsicPath.string());
}
}

boost::filesystem::path extrinsicPath(dataDir);
extrinsicPath /= "tmp_extrinsic_2.txt";
mExtrinsics.writeToFile(extrinsicPath.string());
@@ -370,45 +328,49 @@ CameraRigBA::run(int beginStage, bool findLoopClosures, bool optimizeIntrinsics,
}
}

if (findLoopClosures && beginStage <= 3)
if (beginStage <= 3)
{
buildVocTree();

if (mVerbose)
if (findLoopClosures)
{
std::cout << "# INFO: Finding loop closures... " << std::endl;
}

std::vector<boost::tuple<int, int, FramePtr, FramePtr> > loopClosureFrameFrame;
std::vector<Correspondence3D3D> loopClosure3D3D;
findLoopClosure3D3D(loopClosureFrameFrame, loopClosure3D3D);
buildVocTree();

for (size_t i = 0; i < loopClosure3D3D.size(); ++i)
{
Point3DFeaturePtr p1 = loopClosure3D3D.at(i).get<4>();
Point3DFeaturePtr p2 = loopClosure3D3D.at(i).get<5>();
if (mVerbose)
{
std::cout << "# INFO: Finding loop closures... " << std::endl;
}

p2->features2D().insert(p2->features2D().end(), p1->features2D().begin(), p1->features2D().end());
std::sort(p2->features2D().begin(), p2->features2D().end());
p2->features2D().erase(std::unique(p2->features2D().begin(), p2->features2D().end()), p2->features2D().end());
std::vector<boost::tuple<int, int, FramePtr, FramePtr> > loopClosureFrameFrame;
std::vector<Correspondence3D3D> loopClosure3D3D;
findLoopClosure3D3D(loopClosureFrameFrame, loopClosure3D3D);

std::vector<Point2DFeaturePtr> features2D = p2->features2D();
for (size_t j = 0; j < features2D.size(); ++j)
for (size_t i = 0; i < loopClosure3D3D.size(); ++i)
{
features2D.at(j)->feature3D() = p2;
Point3DFeaturePtr p1 = loopClosure3D3D.at(i).get<4>();
Point3DFeaturePtr p2 = loopClosure3D3D.at(i).get<5>();

p2->features2D().insert(p2->features2D().end(), p1->features2D().begin(), p1->features2D().end());
std::sort(p2->features2D().begin(), p2->features2D().end());
p2->features2D().erase(std::unique(p2->features2D().begin(), p2->features2D().end()), p2->features2D().end());

std::vector<Point2DFeaturePtr> features2D = p2->features2D();
for (size_t j = 0; j < features2D.size(); ++j)
{
features2D.at(j)->feature3D() = p2;
}
}
}

#ifdef VCHARGE_VIZ
visualizeFrameFrameCorrespondences("loop-p-p", loopClosureFrameFrame);
visualizeFrameFrameCorrespondences("loop-p-p", loopClosureFrameFrame);
#endif

if (mVerbose)
{
std::cout << "# INFO: Done." << std::endl;
std::cout << "# INFO: Running BA on odometry data... " << std::endl;
if (mVerbose)
{
std::cout << "# INFO: Done." << std::endl;
}
}

std::cout << "# INFO: Running BA on odometry data... " << std::endl;

// perform BA to optimize intrinsics, extrinsics and scene points
if (optimizeIntrinsics)
{
Expand All @@ -433,7 +395,18 @@ CameraRigBA::run(int beginStage, bool findLoopClosures, bool optimizeIntrinsics,
if (mVerbose)
{
std::cout << "# INFO: Done." << std::endl;
std::cout << "# INFO: Reprojection error after loop closures: avg = " << avgError
std::cout << "# INFO: Reprojection error after ";

if (findLoopClosures)
{
std::cout << "loop closures";
}
else
{
std::cout << "inter-map linking";
}

std::cout << ": avg = " << avgError
<< " px | max = " << maxError << " px | count = " << featureCount << std::endl;
}

@@ -1286,11 +1259,6 @@ CameraRigBA::findLocalInterMap2D2DCorrespondences(std::vector<Correspondence2D2D
{
++pathNodeIdx;

// if (pathNodeIdx > 60)
// {
// return;
// }

std::vector<FramePtr>& frames = it->second;

std::vector<FramePtr> localFrameMaps[mCameras.size()];
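
The `run()` log messages above report the average and maximum reprojection error together with a feature count. As background, a self-contained sketch of how such a summary can be accumulated; the `Observation` struct and the plain pinhole `projectToImage()` are illustrative stand-ins, since the real code projects through camodocal's camera models and its `Frame`/`Point2DFeature` structures.

```cpp
// Background sketch of a min/avg/max reprojection-error summary. Types and the
// pinhole projection are illustrative; 300 px matches the nominal focal length
// used elsewhere in the calibration code, the principal point is arbitrary.
#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

#include <Eigen/Core>
#include <Eigen/StdVector>

struct Observation
{
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    Eigen::Vector3d scenePoint;   // 3D point expressed in the camera frame
    Eigen::Vector2d imagePoint;   // measured pixel location of the feature
};

Eigen::Vector2d
projectToImage(const Eigen::Vector3d& P)
{
    const double f = 300.0, cx = 320.0, cy = 240.0;
    return Eigen::Vector2d(f * P.x() / P.z() + cx,
                           f * P.y() / P.z() + cy);
}

void
reprojectionErrorStats(const std::vector<Observation, Eigen::aligned_allocator<Observation> >& observations,
                       double& minError, double& maxError, double& avgError,
                       size_t& featureCount)
{
    minError = 0.0;
    maxError = 0.0;
    avgError = 0.0;
    featureCount = observations.size();

    if (featureCount == 0)
    {
        return;
    }

    minError = std::numeric_limits<double>::max();
    double totalError = 0.0;

    for (size_t i = 0; i < observations.size(); ++i)
    {
        double error = (projectToImage(observations.at(i).scenePoint)
                        - observations.at(i).imagePoint).norm();

        minError = std::min(minError, error);
        maxError = std::max(maxError, error);
        totalError += error;
    }

    avgError = totalError / featureCount;
}
```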
6 changes: 3 additions & 3 deletions src/calib/CameraRigBA.h
@@ -92,16 +92,16 @@ class CameraRigBA
const std::vector<Point2DFeaturePtr>& features2) const;

void findLocalInterMap2D2DCorrespondences(std::vector<Correspondence2D2D>& correspondences2D2D,
double reprojErrorThresh = 3.0);
double reprojErrorThresh = 2.0);
void matchFrameToWindow(int cameraIdx1, int cameraIdx2,
FramePtr& frame1,
std::vector<FramePtr>& window,
std::vector<Correspondence2D2D>* correspondences2D2D,
double reprojErrorThresh = 3.0);
double reprojErrorThresh = 2.0);
void matchFrameToFrame(int cameraIdx1, int cameraIdx2,
FramePtr& frame1, FramePtr& frame2,
std::vector<Correspondence2D2D>* corr2D2D,
double reprojErrorThresh = 3.0);
double reprojErrorThresh = 2.0);

cv::Mat buildDescriptorMat(const std::vector<Point2DFeaturePtr>& features,
std::vector<size_t>& indices) const;
2 changes: 1 addition & 1 deletion src/camera_systems/CameraRigExtrinsics.cc
@@ -24,7 +24,7 @@ CameraRigExtrinsics::reset(void)
{
for (int i = 0; i < m_cameraCount; ++i)
{
setGlobalCameraPose(i, Eigen::Matrix4d::Identity());
m_globalPoses.at(i).setIdentity();
}
}

