diff --git a/.gitignore b/.gitignore
index fa30a74..070bb21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,4 @@
 *bin
 *~
 src/ar_track_alvar
-
+*.user
\ No newline at end of file
diff --git a/include/MultiMarker.h b/include/MultiMarker.h
index ec5a615..401446c 100644
--- a/include/MultiMarker.h
+++ b/include/MultiMarker.h
@@ -36,6 +36,7 @@
 #include "Camera.h"
 #include "Filter.h"
 #include "FileFormat.h"
+#include 
 
 namespace alvar {
 
@@ -61,7 +62,7 @@ class ALVAR_EXPORT MultiMarker {
 	std::map<int, CvPoint3D64f> pointcloud;
 	std::vector<int> marker_indices; // The marker id's to be used in marker field (first being the base)
 	std::vector<int> marker_status;  // 0: not in point cloud, 1: in point cloud, 2: used in GetPose()
-	std::vector< std::vector<btVector3> > rel_corners; //The coords of the master marker relative to each child marker in marker_indices
+	std::vector< std::vector<tf::Vector3> > rel_corners; //The coords of the master marker relative to each child marker in marker_indices
 
 	int pointcloud_index(int marker_id, int marker_corner, bool add_if_missing=false);
 	int get_id_index(int id, bool add_if_missing=false);
diff --git a/nodes/FindMarkerBundles.cpp b/nodes/FindMarkerBundles.cpp
index 4991add..474b0ce 100644
--- a/nodes/FindMarkerBundles.cpp
+++ b/nodes/FindMarkerBundles.cpp
@@ -63,8 +63,7 @@
 #include 
 #include 
-#include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -255,7 +254,7 @@ int InferCorners(const ARCloud &cloud, MultiMarkerBundle &master, ARCloud &bund_
     //Grab the precomputed corner coords and correct for the weird Alvar coord system
     for(int j = 0; j < 4; ++j)
     {
-      btVector3 corner_coord = master.rel_corners[index][j];
+      tf::Vector3 corner_coord = master.rel_corners[index][j];
       gm::PointStamped p, output_p;
       p.header.frame_id = marker_frame;
       p.point.x = corner_coord.y()/100.0;
@@ -679,17 +678,17 @@ void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg)
 //p1-->p2 should point in Alvar's pos Y direction
 int makeMasterTransform (const CvPoint3D64f& p0, const CvPoint3D64f& p1,
                          const CvPoint3D64f& p2, const CvPoint3D64f& p3,
-                         btTransform &retT)
+                         tf::Transform &retT)
 {
-  const btVector3 q0(p0.x, p0.y, p0.z);
-  const btVector3 q1(p1.x, p1.y, p1.z);
-  const btVector3 q2(p2.x, p2.y, p2.z);
-  const btVector3 q3(p3.x, p3.y, p3.z);
+  const tf::Vector3 q0(p0.x, p0.y, p0.z);
+  const tf::Vector3 q1(p1.x, p1.y, p1.z);
+  const tf::Vector3 q2(p2.x, p2.y, p2.z);
+  const tf::Vector3 q3(p3.x, p3.y, p3.z);
 
   // (inverse) matrix with the given properties
-  const btVector3 v = (q1-q0).normalized();
-  const btVector3 w = (q2-q1).normalized();
-  const btVector3 n = v.cross(w);
+  const tf::Vector3 v = (q1-q0).normalized();
+  const tf::Vector3 w = (q2-q1).normalized();
+  const tf::Vector3 n = v.cross(w);
   btMatrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]);
   m = m.inverse();
@@ -711,15 +710,15 @@ int makeMasterTransform (const CvPoint3D64f& p0, const CvPoint3D64f& p1,
   btScalar ey = eig_quat.y();
   btScalar ez = eig_quat.z();
   btScalar ew = eig_quat.w();
-  btQuaternion quat(ex,ey,ez,ew);
+  tf::Quaternion quat(ex,ey,ez,ew);
   quat = quat.normalized();
 
   double qx = (q0.x() + q1.x() + q2.x() + q3.x()) / 4.0;
   double qy = (q0.y() + q1.y() + q2.y() + q3.y()) / 4.0;
   double qz = (q0.z() + q1.z() + q2.z() + q3.z()) / 4.0;
-  btVector3 origin (qx,qy,qz);
+  tf::Vector3 origin (qx,qy,qz);
 
-  btTransform tform (quat, origin);  //transform from master to marker
+  tf::Transform tform (quat, origin);  //transform from master to marker
   retT = tform;
 
   return 0;
@@ -731,7 +730,7 @@ int makeMasterTransform (const CvPoint3D64f& p0, const CvPoint3D64f& p1,
 int calcAndSaveMasterCoords(MultiMarkerBundle &master)
 {
   int mast_id = master.master_id;
-  std::vector<btVector3> rel_corner_coords;
+  std::vector<tf::Vector3> rel_corner_coords;
 
   //Go through all the markers associated with this bundle
   for (size_t i=0; i<master.marker_indices.size(); i++){
[...]
diff --git a/nodes/FindMarkerBundlesNoKinect.cpp b/nodes/FindMarkerBundlesNoKinect.cpp
--- a/nodes/FindMarkerBundlesNoKinect.cpp
+++ b/nodes/FindMarkerBundlesNoKinect.cpp
[...]
-#include <cv_bridge/CvBridge.h>
+#include <cv_bridge/cv_bridge.h>
 #include 
 #include 
 #include 
+#include <sensor_msgs/image_encodings.h>
 
 using namespace alvar;
 using namespace std;
@@ -53,8 +54,7 @@ using namespace std;
 #define GHOST_MARKER 3
 
 Camera *cam;
-IplImage *capture_;
-sensor_msgs::CvBridge bridge_;
+cv_bridge::CvImagePtr cv_ptr_;
 image_transport::Subscriber cam_sub_;
 ros::Publisher arMarkerPub_;
 ros::Publisher rvizMarkerPub_;
@@ -112,13 +112,13 @@ void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_
     qw = p.quaternion[0];
 
     //Get the marker pose in the camera frame
-    btQuaternion rotation (qx,qy,qz,qw);
-    btVector3 origin (px,py,pz);
-    btTransform t (rotation, origin);  //transform from cam to marker
+    tf::Quaternion rotation (qx,qy,qz,qw);
+    tf::Vector3 origin (px,py,pz);
+    tf::Transform t (rotation, origin);  //transform from cam to marker
 
-    btVector3 markerOrigin (0, 0, 0);
-    btTransform m (btQuaternion::getIdentity (), markerOrigin);
-    btTransform markerPose = t * m;
+    tf::Vector3 markerOrigin (0, 0, 0);
+    tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
+    tf::Transform markerPose = t * m;
 
     //Publish the cam to marker transform for main marker in each bundle
     if(type==MAIN_MARKER){
@@ -191,12 +191,6 @@ void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_
 //Callback to handle getting video frames and processing them
 void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
 {
-  if(init){
-    CvSize sz_ = cvSize (cam->x_res, cam->y_res);
-    capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
-    init = false;
-  }
-
   //If we've already gotten the cam info, then go ahead
   if(cam->getCamInfo_){
     try{
@@ -214,11 +208,17 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
       ar_track_alvar::AlvarMarker ar_pose_marker;
       arPoseMarkers_.markers.clear ();
+
       //Convert the image
-      capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
+      cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
 
       //Get the estimated pose of the main markers by using all the markers in each bundle
-      GetMultiMarkerPoses(capture_);
+
+      // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives
+      // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I
+      // do this conversion here -jbinney
+      IplImage ipl_image = cv_ptr_->image;
+      GetMultiMarkerPoses(&ipl_image);
 
       //Draw the observed markers that are visible and note which bundles have at least 1 marker seen
       for(int i=0; i<n_bundles; i++){
[...]
-    catch (sensor_msgs::CvBridgeException & e){
+    catch (cv_bridge::Exception& e){
       ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
     }
 }
diff --git a/nodes/IndividualMarkers.cpp b/nodes/IndividualMarkers.cpp
index 764b26e..03cb7d1 100644
--- a/nodes/IndividualMarkers.cpp
+++ b/nodes/IndividualMarkers.cpp
@@ -38,10 +38,11 @@
 #include "CvTestbed.h"
 #include "MarkerDetector.h"
 #include "Shared.h"
-#include <cv_bridge/CvBridge.h>
+#include <cv_bridge/cv_bridge.h>
 #include 
 #include 
 #include 
+#include <sensor_msgs/image_encodings.h>
 
 namespace gm=geometry_msgs;
 namespace ata=ar_track_alvar;
@@ -55,8 +56,7 @@ using boost::make_shared;
 bool init=true;
 
 Camera *cam;
-IplImage *capture_;
-sensor_msgs::CvBridge bridge_;
+cv_bridge::CvImagePtr cv_ptr_;
 image_transport::Subscriber cam_sub_;
 ros::Subscriber cloud_sub_;
 ros::Publisher arMarkerPub_;
@@ -310,12 +310,6 @@ void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg)
 {
   sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
 
-  if(init){
-    CvSize sz_ = cvSize (cam->x_res, cam->y_res);
-    capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
-    init = false;
-  }
-
   //If we've already gotten the cam info, then go ahead
   if(cam->getCamInfo_){
     //Convert cloud to PCL
@@ -326,13 +320,22 @@ void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg)
     pcl::toROSMsg (cloud, *image_msg);
     image_msg->header.stamp = msg->header.stamp;
     image_msg->header.frame_id = msg->header.frame_id;
-
+
     //Convert the image
-    capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
+    cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
+
+    //Get the estimated pose of the main markers by using all the markers in each bundle
+
+    // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives
+    // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I
+    // do this conversion here -jbinney
+    IplImage ipl_image = cv_ptr_->image;
+
     //Use the kinect to improve the pose
     Pose ret_pose;
-    GetMarkerPoses(capture_, cloud);
+    GetMarkerPoses(&ipl_image, cloud);
 
     try{
       tf::StampedTransform CamToOutput;
@@ -359,12 +362,12 @@ void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg)
         double qz = p.quaternion[3];
         double qw = p.quaternion[0];
 
-        btQuaternion rotation (qx,qy,qz,qw);
-        btVector3 origin (px,py,pz);
-        btTransform t (rotation, origin);
-        btVector3 markerOrigin (0, 0, 0);
-        btTransform m (btQuaternion::getIdentity (), markerOrigin);
-        btTransform markerPose = t * m; // marker pose in the camera frame
+        tf::Quaternion rotation (qx,qy,qz,qw);
+        tf::Vector3 origin (px,py,pz);
+        tf::Transform t (rotation, origin);
+        tf::Vector3 markerOrigin (0, 0, 0);
+        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
+        tf::Transform markerPose = t * m; // marker pose in the camera frame
 
         //Publish the transform from the camera to the marker
         std::string markerFrame = "ar_marker_";
@@ -442,7 +445,7 @@ void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg)
     }
     arMarkerPub_.publish (arPoseMarkers_);
   }
-  catch (sensor_msgs::CvBridgeException & e){
+  catch (cv_bridge::Exception& e){
    ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
   }
 }
diff --git a/nodes/IndividualMarkersNoKinect.cpp b/nodes/IndividualMarkersNoKinect.cpp
index 2d17b55..4580873 100644
--- a/nodes/IndividualMarkersNoKinect.cpp
+++ b/nodes/IndividualMarkersNoKinect.cpp
@@ -38,18 +38,18 @@
 #include "CvTestbed.h"
 #include "MarkerDetector.h"
 #include "Shared.h"
-#include <cv_bridge/CvBridge.h>
+#include <cv_bridge/cv_bridge.h>
 #include 
 #include 
 #include 
+#include <sensor_msgs/image_encodings.h>
 
 using namespace alvar;
 using namespace std;
 
 bool init=true;
 
 Camera *cam;
-IplImage *capture_;
-sensor_msgs::CvBridge bridge_;
+cv_bridge::CvImagePtr cv_ptr_;
 image_transport::Subscriber cam_sub_;
 ros::Publisher arMarkerPub_;
 ros::Publisher rvizMarkerPub_;
@@ -71,12 +71,6 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg);
 
 void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
 {
-  if(init){
-    CvSize sz_ = cvSize (cam->x_res, cam->y_res);
-    capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
-    init = false;
-  }
-
   //If we've already gotten the cam info, then go ahead
   if(cam->getCamInfo_){
     try{
@@ -89,8 +83,18 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
       ROS_ERROR("%s",ex.what());
     }
 
-    capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
-    marker_detector.Detect(capture_, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true);
+
+    //Convert the image
+    cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
+
+    //Get the estimated pose of the main markers by using all the markers in each bundle
+
+    // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives
+    // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I
+    // do this conversion here -jbinney
+    IplImage ipl_image = cv_ptr_->image;
+
+    marker_detector.Detect(&ipl_image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true);
 
     arPoseMarkers_.markers.clear ();
     for (size_t i=0; i<marker_detector.markers->size(); i++)
@@ -106,12 +110,12 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
         double qz = p.quaternion[3];
         double qw = p.quaternion[0];
 
-        btQuaternion rotation (qx,qy,qz,qw);
-        btVector3 origin (px,py,pz);
-        btTransform t (rotation, origin);
-        btVector3 markerOrigin (0, 0, 0);
-        btTransform m (btQuaternion::getIdentity (), markerOrigin);
-        btTransform markerPose = t * m; // marker pose in the camera frame
+        tf::Quaternion rotation (qx,qy,qz,qw);
+        tf::Vector3 origin (px,py,pz);
+        tf::Transform t (rotation, origin);
+        tf::Vector3 markerOrigin (0, 0, 0);
+        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
+        tf::Transform markerPose = t * m; // marker pose in the camera frame
 
         //Publish the transform from the camera to the marker
         std::string markerFrame = "ar_marker_";
@@ -189,7 +193,7 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
     }
     arMarkerPub_.publish (arPoseMarkers_);
   }
-  catch (sensor_msgs::CvBridgeException & e){
+  catch (cv_bridge::Exception& e){
    ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
   }
 }
diff --git a/nodes/TrainMarkerBundle.cpp b/nodes/TrainMarkerBundle.cpp
index 05ed118..b5d6c4f 100644
--- a/nodes/TrainMarkerBundle.cpp
+++ b/nodes/TrainMarkerBundle.cpp
@@ -40,10 +40,11 @@
 #include "MultiMarkerBundle.h"
 #include "MultiMarkerInitializer.h"
 #include "Shared.h"
-#include <cv_bridge/CvBridge.h>
+#include <cv_bridge/cv_bridge.h>
 #include 
 #include 
 #include 
+#include <sensor_msgs/image_encodings.h>
 
 using namespace alvar;
 using namespace std;
@@ -53,8 +54,7 @@ using namespace std;
 #define GHOST_MARKER 3
 
 Camera *cam;
-IplImage *capture_;
-sensor_msgs::CvBridge bridge_;
+cv_bridge::CvImagePtr cv_ptr_;
 image_transport::Subscriber cam_sub_;
 ros::Publisher arMarkerPub_;
 ros::Publisher rvizMarkerPub_;
@@ -175,13 +175,13 @@ void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_
     qw = p.quaternion[0];
 
     //Get the marker pose in the camera frame
-    btQuaternion rotation (qx,qy,qz,qw);
-    btVector3 origin (px,py,pz);
-    btTransform t (rotation, origin);
+    tf::Quaternion rotation (qx,qy,qz,qw);
+    tf::Vector3 origin (px,py,pz);
+    tf::Transform t (rotation, origin);
 
-    btVector3 markerOrigin (0, 0, 0);
-    btTransform m (btQuaternion::getIdentity (), markerOrigin);
-    btTransform markerPose = t * m;
+    tf::Vector3 markerOrigin (0, 0, 0);
+    tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
+    tf::Transform markerPose = t * m;
 
     //Publish the transform from the camera to the marker
     if(type==MAIN_MARKER){
@@ -242,12 +242,6 @@ void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_
 
 void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
 {
-  if(init){
-    CvSize sz_ = cvSize (cam->x_res, cam->y_res);
-    capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
-    init = false;
-  }
-
   //Check if automatic measurement collection should be triggered
   if(auto_collect){
     auto_count++;
@@ -273,12 +267,19 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
       ar_track_alvar::AlvarMarker ar_pose_marker;
       arPoseMarkers_.markers.clear ();
 
-      //Convert the image
-      capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
+      //Convert the image
+      cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
+
+      //Get the estimated pose of the main markers by using all the markers in each bundle
+
+      // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives
+      // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I
+      // do this conversion here -jbinney
+      IplImage ipl_image = cv_ptr_->image;
 
-      //Get the estimated pose of the main marker using the whole bundle
+      //Get the estimated pose of the main marker using the whole bundle
       static Pose bundlePose;
-      double error = GetMultiMarkerPose(capture_, bundlePose);
+      double error = GetMultiMarkerPose(&ipl_image, bundlePose);
 
       if (optimize_done){
         //Draw the main marker
@@ -301,7 +302,7 @@ void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
     }
     arMarkerPub_.publish (arPoseMarkers_);
   }
-  catch (sensor_msgs::CvBridgeException & e){
+  catch (cv_bridge::Exception& e){
    ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
   }
 }
diff --git a/src/kinect_filtering.cpp b/src/kinect_filtering.cpp
index d7152cd..25cdf6a 100644
--- a/src/kinect_filtering.cpp
+++ b/src/kinect_filtering.cpp
@@ -39,6 +39,7 @@
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -149,11 +150,11 @@ namespace ar_track_alvar
   }
 
   // Project point onto plane
-  btVector3 project (const ARPoint& p, const double a, const double b,
+  tf::Vector3 project (const ARPoint& p, const double a, const double b,
                      const double c, const double d)
   {
     const double t = a*p.x + b*p.y + c*p.z + d;
-    return btVector3(p.x-t*a, p.y-t*b, p.z-t*c);
+    return tf::Vector3(p.x-t*a, p.y-t*b, p.z-t*c);
   }
 
   ostream& operator<< (ostream& str, const btMatrix3x3& m)
@@ -164,14 +165,14 @@ namespace ar_track_alvar
     return str;
   }
 
-  ostream& operator<< (ostream& str, const btQuaternion& q)
+  ostream& operator<< (ostream& str, const tf::Quaternion& q)
   {
     str << "[(" << q.x() << ", " << q.y() << ", " << q.z() << "), " << q.w() << "]";
     return str;
   }
 
-  ostream& operator<< (ostream& str, const btVector3& v)
+  ostream& operator<< (ostream& str, const tf::Vector3& v)
   {
     str << "(" << v.x() << ", " << v.y() << ", " << v.z() << ")";
     return str;
   }
@@ -187,23 +188,23 @@ namespace ar_track_alvar
     if(getCoeffs(coeffs, &a, &b, &c, &d) < 0)
       return -1;
 
-    const btVector3 q1 = project(p1, a, b, c, d);
-    const btVector3 q2 = project(p2, a, b, c, d);
-    const btVector3 q3 = project(p3, a, b, c, d);
-    const btVector3 q4 = project(p4, a, b, c, d);
+    const tf::Vector3 q1 = project(p1, a, b, c, d);
+    const tf::Vector3 q2 = project(p2, a, b, c, d);
+    const tf::Vector3 q3 = project(p3, a, b, c, d);
+    const tf::Vector3 q4 = project(p4, a, b, c, d);
 
     // Make sure points aren't the same so things are well-defined
     if((q2-q1).length() < 1e-3)
       return -1;
 
     // (inverse) matrix with the given properties
-    const btVector3 v = (q2-q1).normalized();
-    const btVector3 n(a, b, c);
-    const btVector3 w = -v.cross(n);
+    const tf::Vector3 v = (q2-q1).normalized();
+    const tf::Vector3 n(a, b, c);
+    const tf::Vector3 w = -v.cross(n);
     btMatrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]);
 
     // Possibly flip things based on third point
-    const btVector3 diff = (q4-q3).normalized();
+    const tf::Vector3 diff = (q4-q3).normalized();
     //ROS_INFO_STREAM("w = " << w << " and d = " << diff);
     if (w.dot(diff)<0)
     {
@@ -220,7 +221,7 @@ namespace ar_track_alvar
   }
 
 
-  int getQuaternion (const btMatrix3x3& m, btQuaternion &retQ)
+  int getQuaternion (const btMatrix3x3& m, tf::Quaternion &retQ)
   {
     if(m.determinant() <= 0)
       return -1;
@@ -243,7 +244,7 @@ namespace ar_track_alvar
     btScalar ey = eig_quat.y();
     btScalar ez = eig_quat.z();
     btScalar ew = eig_quat.w();
-    btQuaternion quat(ex,ey,ez,ew);
+    tf::Quaternion quat(ex,ey,ez,ew);
     retQ = quat.normalized();
 
     return 0;
@@ -258,7 +259,7 @@ namespace ar_track_alvar
     btMatrix3x3 m;
     if(extractFrame(coeffs, p1, p2, p3, p4, m) < 0)
       return -1;
-    btQuaternion q;
+    tf::Quaternion q;
     if(getQuaternion(m,q) < 0)
       return -1;
     retQ.x = q.x();
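
Not part of the patch above: a minimal standalone sketch of the cv_bridge conversion pattern this diff applies in every image callback (sensor_msgs::CvBridge::imgMsgToCv replaced by cv_bridge::toCvCopy, then a temporary IplImage header for ALVAR's legacy C-API calls). The callback name handleImage and the commented-out legacy_alvar_call are hypothetical; the sketch assumes a groovy-era OpenCV 2.x where cv::Mat still converts implicitly to IplImage.

#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv/cv.h>  // IplImage (OpenCV 1.x/2.x C API)

void handleImage(const sensor_msgs::ImageConstPtr& image_msg)  // hypothetical callback
{
  cv_bridge::CvImagePtr cv_ptr;
  try {
    // toCvCopy returns a mutable copy of the image in the requested encoding.
    cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
  }
  catch (cv_bridge::Exception& e) {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  // Converting cv::Mat to IplImage builds only a header over the same pixel
  // buffer, so the IplImage is valid exactly as long as cv_ptr stays alive.
  IplImage ipl_image = cv_ptr->image;
  // legacy_alvar_call(&ipl_image);  // hypothetical stand-in for ALVAR's IplImage* API
}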
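
Also not part of the patch: a sketch of the bt* to tf:: rename the diff performs on the pose math. In ROS groovy the Bullet-derived math types live in namespace tf (tf::Quaternion, tf::Vector3, tf::Transform), so the old btQuaternion/btVector3/btTransform code maps one-to-one. The function publishMarkerTf and its parameters are hypothetical; it only mirrors the makeMarkerMsgs logic visible in the diff.

#include <string>
#include <ros/ros.h>
#include <tf/tf.h>
#include <tf/transform_broadcaster.h>

void publishMarkerTf(double px, double py, double pz,
                     double qx, double qy, double qz, double qw,
                     const std::string& camera_frame, const std::string& marker_frame,
                     const ros::Time& stamp, tf::TransformBroadcaster& broadcaster)
{
  tf::Quaternion rotation(qx, qy, qz, qw);   // was btQuaternion
  tf::Vector3 origin(px, py, pz);            // was btVector3
  tf::Transform t(rotation, origin);         // was btTransform; camera -> marker

  // Same composition as makeMarkerMsgs: compose with an identity marker offset.
  tf::Transform m(tf::Quaternion::getIdentity(), tf::Vector3(0, 0, 0));
  tf::Transform markerPose = t * m;

  broadcaster.sendTransform(
      tf::StampedTransform(markerPose, stamp, camera_frame, marker_frame));
}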