
Using struct for SearchParams instead of map<string,any> for better efficiency
1 parent 1b356b9 commit 0550a12814c623974e8cf2c1ce8eb9d2d552b1e4 @mariusmuja committed Dec 20, 2011
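
The commit replaces the string-keyed parameter map with plain struct members. The fields and defaults below are reconstructed from the call sites touched in this diff (the old get_param fallbacks: checks=32, eps=0.0f, sorted=true, max_neighbors=-1, use_heap=false, matrices_in_gpu_ram=false); the actual SearchParams definition in FLANN may differ, so treat this as an illustrative sketch only.

// Illustrative sketch of the struct implied by this diff, not the exact
// definition shipped in FLANN. Defaults mirror the old get_param() fallbacks.
struct SearchParams
{
    SearchParams(int checks_ = 32, float eps_ = 0.0f, bool sorted_ = true)
        : checks(checks_), eps(eps_), sorted(sorted_),
          max_neighbors(-1), use_heap(false), matrices_in_gpu_ram(false)
    {}

    int checks;               // leaves/branches to visit; FLANN_CHECKS_UNLIMITED and FLANN_CHECKS_AUTOTUNED are special values
    float eps;                // approximation margin; searches use an error factor of 1 + eps
    bool sorted;              // return neighbors sorted by distance
    int max_neighbors;        // cap on radius-search results; -1 means no cap
    bool use_heap;            // heap-based result set in the CUDA kernels
    bool matrices_in_gpu_ram; // query/result matrices already reside in GPU memory
};

Reading a member is now a direct field access (params.checks) instead of a map lookup plus an any-cast, which is where the efficiency gain in the commit title comes from.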
@@ -746,7 +746,7 @@ \subsubsection{flann::KdTreeCuda3dIndex}
flann::Matrix<float> dists_gpu(gpu_pointer_dists,n_points, knn, stride);
flann::SearchParams params;
-params["matrices_in_gpu_ram"]=true;
+params.matrices_in_gpu_ram = true;
flannindex.knnSearch( queries_gpu ,indices_gpu,dists_gpu,knn, params);
\end{Verbatim}
@@ -1,119 +0,0 @@
-/***********************************************************************
- * Software License Agreement (BSD License)
- *
- * Copyright 2008-2010 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
- * Copyright 2008-2010 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *************************************************************************/
-
-
-#ifndef QUERIES_H_
-#define QUERIES_H_
-
-#include <flann/util/matrix.h>
-#include <boost/archive/binary_iarchive.hpp>
-#include <boost/archive/binary_oarchive.hpp>
-
-
-namespace boost {
-namespace serialization {
-
-template<class Archive, class T>
-void serialize(Archive & ar, flann::Matrix<T> & matrix, const unsigned int version)
-{
- ar & matrix.rows & matrix.cols & matrix.stride;
- if (Archive::is_loading::value) {
- matrix.data = new T[matrix.rows*matrix.cols];
- }
- ar & boost::serialization::make_array(matrix.data, matrix.rows*matrix.cols);
-}
-
-}
-}
-
-namespace flann
-{
-
-template<typename T>
-struct Request
-{
- flann::Matrix<T> queries;
- int nn;
- int checks;
-
- template<typename Archive>
- void serialize(Archive& ar, const unsigned int version)
- {
- ar & queries & nn & checks;
- }
-};
-
-template<typename T>
-struct Response
-{
- flann::Matrix<int> indices;
- flann::Matrix<T> dists;
-
- template<typename Archive>
- void serialize(Archive& ar, const unsigned int version)
- {
- ar & indices & dists;
- }
-};
-
-
-using boost::asio::ip::tcp;
-
-template <typename T>
-void read_object(tcp::socket& sock, T& val)
-{
- uint32_t size;
- boost::asio::read(sock, boost::asio::buffer(&size, sizeof(size)));
- size = ntohl(size);
-
- boost::asio::streambuf archive_stream;
- boost::asio::read(sock, archive_stream, boost::asio::transfer_at_least(size));
-
- boost::archive::binary_iarchive archive(archive_stream);
- archive >> val;
-}
-
-template <typename T>
-void write_object(tcp::socket& sock, const T& val)
-{
- boost::asio::streambuf archive_stream;
- boost::archive::binary_oarchive archive(archive_stream);
- archive << val;
-
- uint32_t size = archive_stream.size();
- size = htonl(size);
- boost::asio::write(sock, boost::asio::buffer(&size, sizeof(size)));
- boost::asio::write(sock, archive_stream);
-
-}
-
-}
-
-
-
-#endif /* QUERIES_H_ */
@@ -123,7 +123,7 @@ class AutotunedIndex : public NNIndex<Distance>
{
save_value(stream, (int)bestIndex_->getType());
bestIndex_->saveIndex(stream);
- save_value(stream, get_param<int>(bestSearchParams_, "checks"));
+ save_value(stream, bestSearchParams_.checks);
}
/**
@@ -138,18 +138,15 @@ class AutotunedIndex : public NNIndex<Distance>
params["algorithm"] = (flann_algorithm_t)index_type;
bestIndex_ = create_index_by_type<Distance>(dataset_, params, distance_);
bestIndex_->loadIndex(stream);
- int checks;
- load_value(stream, checks);
- bestSearchParams_["checks"] = checks;
+ load_value(stream, bestSearchParams_.checks);
}
/**
* Method that searches for nearest-neighbors
*/
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
{
- int checks = get_param(searchParams,"checks",(int)FLANN_CHECKS_AUTOTUNED);
- if (checks == FLANN_CHECKS_AUTOTUNED) {
+ if (searchParams.checks == FLANN_CHECKS_AUTOTUNED) {
bestIndex_->findNeighbors(result, vec, bestSearchParams_);
}
else {
@@ -540,7 +537,7 @@ class AutotunedIndex : public NNIndex<Distance>
}
Logger::info("Required number of checks: %d \n", checks);
- searchParams["checks"] = checks;
+ searchParams.checks = checks;
speedup = linear / searchTime;
@@ -416,7 +416,7 @@ class HierarchicalClusteringIndex : public NNIndex<Distance>
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
{
- int maxChecks = get_param(searchParams,"checks",32);
+ int maxChecks = searchParams.checks;
// Priority queue storing intermediate branches in the best-bin-first search
Heap<BranchSt>* heap = new Heap<BranchSt>(size_);
@@ -280,14 +280,14 @@ void KDTreeCuda3dIndex<Distance>::knnSearchGpu(const Matrix<ElementType>& querie
assert(int(indices.cols) >= knn);
assert( dists.cols == indices.cols && dists.stride==indices.stride );
- bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
+ bool matrices_on_gpu = params.matrices_in_gpu_ram;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
- float epsError = 1+get_param(params,"eps",0.0f);
- bool sorted = get_param(params,"sorted",true);
- bool use_heap = get_param(params,"use_heap",false);
+ float epsError = 1+params.eps;
+ bool sorted = params.sorted;
+ bool use_heap = params.use_heap;
typename GpuDistance<Distance>::type distance;
// std::cout<<" search: "<<std::endl;
@@ -437,9 +437,9 @@ int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& que
// assert(indices.roasdfws >= queries.rows);
// assert(dists.rows >= queries.rows);
- int max_neighbors = get_param(params, "max_neighbors", -1);
- bool sorted = get_param(params, "sorted", true);
- bool use_heap = get_param(params, "use_heap", false);
+ int max_neighbors = params.max_neighbors;
+ bool sorted = params.sorted;
+ bool use_heap = params.use_heap;
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
@@ -506,7 +506,7 @@ int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& que
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
- queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
+ queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),params.sorted), distance);
}
else {
if( use_heap ) {
@@ -521,7 +521,7 @@ int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& que
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
- queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
+ queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),params.sorted), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
@@ -535,7 +535,7 @@ int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& que
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
- queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
+ queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),params.sorted), distance);
}
}
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
@@ -566,17 +566,17 @@ struct isNotMinusOne
template< typename Distance>
int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
{
- int max_neighbors = get_param(params, "max_neighbors", -1);
+ int max_neighbors = params.max_neighbors;
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows || max_neighbors==0 );
assert(indices.stride==dists.stride || max_neighbors==0 );
assert( indices.cols==indices.stride );
assert(dists.rows >= queries.rows || max_neighbors==0 );
- bool sorted = get_param(params, "sorted", true);
- bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
- float epsError = 1+get_param(params,"eps",0.0f);
- bool use_heap = get_param(params,"use_heap",false);
+ bool sorted = params.sorted;
+ bool matrices_on_gpu = params.matrices_in_gpu_ram;
+ float epsError = 1+params.eps;
+ bool use_heap = params.use_heap;
if( max_neighbors<0 ) max_neighbors=indices.cols;
@@ -610,7 +610,6 @@ int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& qu
thrust::device_vector<float> distsDev(queries.rows* max_neighbors);
thrust::device_vector<int> indicesDev(queries.rows* max_neighbors);
- // bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
@@ -675,7 +674,6 @@ int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& qu
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.ptr() );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
- // bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
@@ -199,13 +199,7 @@ class KDTreeCuda3dIndex : public NNIndex<Distance>
*/
virtual int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params)
{
- if( get_param(params,"use_cpu",false) ) {
- throw FLANNException( "CPU search not supported!" );
- // return NNIndex<Distance>::knnSearch(queries,indices, dists, knn, params);
- }
- else {
- knnSearchGpu(queries,indices, dists, knn, params);
- }
+ knnSearchGpu(queries,indices, dists, knn, params);
return knn*queries.rows; // hack...
}
@@ -223,13 +217,7 @@ class KDTreeCuda3dIndex : public NNIndex<Distance>
size_t knn,
const SearchParams& params)
{
- if( get_param(params,"use_cpu",false) ) {
- throw FLANNException( "CPU search not supported!" );
- // return NNIndex<Distance>::knnSearch(queries,indices, dists, knn, params);
- }
- else {
- knnSearchGpu(queries,indices, dists, knn, params);
- }
+ knnSearchGpu(queries,indices, dists, knn, params);
return knn*queries.rows; // hack...
}
@@ -268,25 +256,13 @@ class KDTreeCuda3dIndex : public NNIndex<Distance>
virtual int radiusSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists,
float radius, const SearchParams& params)
{
- if( get_param(params,"use_cpu",false) ) {
- throw FLANNException( "CPU search not supported!" );
- // return NNIndex<Distance>::radiusSearch(queries,indices, dists, radius, params);
- }
- else {
- return radiusSearchGpu(queries,indices, dists, radius, params);
- }
+ return radiusSearchGpu(queries,indices, dists, radius, params);
}
virtual int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params)
{
- if( get_param(params,"use_cpu",false) ) {
- throw FLANNException( "CPU search not supported!" );
- return NNIndex<Distance>::radiusSearch(queries,indices, dists, radius, params);
- }
- else {
- return radiusSearchGpu(queries,indices, dists, radius, params);
- }
+ return radiusSearchGpu(queries,indices, dists, radius, params);
}
int radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists,
@@ -199,8 +199,8 @@ class KDTreeIndex : public NNIndex<Distance>
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
{
- int maxChecks = get_param(searchParams,"checks", 32);
- float epsError = 1+get_param(searchParams,"eps",0.0f);
+ int maxChecks = searchParams.checks;
+ float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
getExactNeighbors(result, vec, epsError);
@@ -213,7 +213,7 @@ class KDTreeSingleIndex : public NNIndex<Distance>
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
{
- float epsError = 1+get_param(searchParams,"eps",0.0f);
+ float epsError = 1+searchParams.eps;
std::vector<DistanceType> dists(dim_,0);
DistanceType distsq = computeInitialDistances(vec, dists);
@@ -517,14 +517,13 @@ class KDTreeSingleIndex : public NNIndex<Distance>
{
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
-// DistanceType worst_dist = result_set.worstDist();
+ DistanceType worst_dist = result_set.worstDist();
for (int i=node->left; i<node->right; ++i) {
int index = reorder_ ? i : vind_[i];
- DistanceType dist = distance_(vec, data_[index], dim_);
-// DistanceType dist = distance_(vec, data_[index], dim_, worst_dist);
-// if (dist<worst_dist) {
+ DistanceType dist = distance_(vec, data_[index], dim_, worst_dist);
+ if (dist<worst_dist) {
result_set.addPoint(dist,vind_[i]);
-// }
+ }
}
return;
}
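
The KDTreeSingleIndex hunk above re-enables the worst-distance pruning that was previously commented out: the leaf scan passes the current worst result distance to the distance functor so it can bail out early, and only points that beat that bound are added to the result set. A standalone sketch of the pattern (the simple two-best "result set" and point data are made up for illustration; note that the diff reads worstDist() once before the loop, while this toy refreshes it each iteration):

#include <cstddef>
#include <cstdio>
#include <limits>
#include <vector>
#include <algorithm>

// Squared L2 distance with an early-exit bound, mirroring the
// distance_(vec, data_[index], dim_, worst_dist) call in the diff.
float l2_bounded(const float* a, const float* b, size_t dim, float worst_dist)
{
    float sum = 0.0f;
    for (size_t i = 0; i < dim; ++i) {
        float d = a[i] - b[i];
        sum += d * d;
        if (sum > worst_dist) return sum; // already worse than the k-th best: stop summing
    }
    return sum;
}

int main()
{
    const size_t dim = 3;
    std::vector<std::vector<float>> points = {
        {0, 0, 0}, {1, 1, 1}, {5, 5, 5}, {0.5f, 0.5f, 0.5f}};
    float query[dim] = {0, 0, 0};

    // Toy "result set": keep the two best squared distances seen so far.
    std::vector<float> best(2, std::numeric_limits<float>::max());

    for (const auto& p : points) {
        float worst = best.back();                          // current k-th best distance
        float d = l2_bounded(query, p.data(), dim, worst);  // may exit early
        if (d < worst) {                                     // same check as the diff's if (dist < worst_dist)
            best.back() = d;
            std::sort(best.begin(), best.end());
        }
    }
    std::printf("best: %f %f\n", best[0], best[1]);
    return 0;
}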
@@ -426,7 +426,7 @@ class KMeansIndex : public NNIndex<Distance>
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
{
- int maxChecks = get_param(searchParams,"checks",32);
+ int maxChecks = searchParams.checks;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
findExactNN(root_, result, vec);
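
Taken together, the index-side changes mean user code now sets struct members instead of map keys. A minimal CPU-side usage sketch under that assumption (dataset values, tree count, and the checks value are arbitrary; the Index/Matrix API follows the FLANN manual excerpt at the top of this commit):

#include <flann/flann.hpp>
#include <vector>

int main()
{
    // Toy dataset: 4 points in 3-D (values are arbitrary placeholders).
    std::vector<float> data = {0,0,0, 1,1,1, 2,2,2, 5,5,5};
    flann::Matrix<float> dataset(data.data(), 4, 3);

    flann::Index<flann::L2<float>> index(dataset, flann::KDTreeIndexParams(4));
    index.buildIndex();

    std::vector<float> q = {0.1f, 0.1f, 0.1f};
    flann::Matrix<float> query(q.data(), 1, 3);

    const int knn = 2;
    std::vector<int> idx_buf(knn);
    std::vector<float> dist_buf(knn);
    flann::Matrix<int> indices(idx_buf.data(), 1, knn);
    flann::Matrix<float> dists(dist_buf.data(), 1, knn);

    // After this commit, parameters are plain struct members instead of
    // string-keyed map entries (previously params["checks"] = 64; etc.).
    flann::SearchParams params;
    params.checks = 64;   // or FLANN_CHECKS_UNLIMITED / FLANN_CHECKS_AUTOTUNED
    params.eps = 0.0f;    // search error factor is 1 + eps
    params.sorted = true; // distance-sorted results

    index.knnSearch(query, indices, dists, knn, params);
    return 0;
}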