replaced SGVector::scale() with linalg::scale() #2928

Closed · wants to merge 8 commits
Changes from 7 commits
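In one line, the change this PR makes at every call site: the SGVector<T>::scale() member is removed, and callers use the free function from the new linalg module, choosing the backend explicitly. A minimal before/after sketch (the NATIVE backend tag is the form used throughout this diff):

// before this PR: member method on SGVector
vec.scale(alpha);

// after this PR: free function from shogun/mathematics/linalg/linalg.h
linalg::scale<linalg::Backend::NATIVE>(vec, alpha);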
45 changes: 45 additions & 0 deletions benchmarks/sgvector_add_operator_benchmark.cpp
@@ -0,0 +1,45 @@
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGSparseVector.h>
#include <shogun/mathematics/linalg/linalg.h>
#include <algorithm>
#include <hayai/hayai.hpp>
#include <iostream>

using namespace shogun;

/**
* Instructions:
* 1. Install benchmarking toolkit "hayai" (https://github.com/nickbruun/hayai)
* 2. Compile against libhayai_main, e.g.
* g++ -O3 -std=c++11 sgvector_add_operator_benchmark.cpp -I/usr/include/eigen3 -I/usr/local/include/viennacl -lshogun -lhayai_main -lOpenCL -o benchmark
* 3. ./benchmark
*/

/** Generate data only once */
struct Data
{
Data()
{
init();
}

void init()
{
m_vec = SGVector<float32_t>(num_elems);
std::iota(m_vec.data(), m_vec.data()+m_vec.size(), 1);
}

SGVector<float32_t> m_vec;
static constexpr index_t num_elems=1000;
};

Data data;


BENCHMARK(SGVector, addoperator_SGVector, 10, 100000000)
{
SGVector<float32_t> test_vec = SGVector<float32_t>(data.num_elems);
std::iota(test_vec.data(), test_vec.data()+test_vec.size(), 1);
data.m_vec += test_vec;
}
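A companion benchmark one could add to compare the overloaded operator against the explicit linalg call it delegates to (a sketch only, using the three-argument add<Backend::NATIVE>(a, b, result) form this PR uses elsewhere):

BENCHMARK(SGVector, addoperator_linalg_add, 10, 100000000)
{
	SGVector<float32_t> test_vec = SGVector<float32_t>(data.num_elems);
	std::iota(test_vec.data(), test_vec.data()+test_vec.size(), 1);
	// explicit call, bypassing operator+=
	linalg::add<linalg::Backend::NATIVE>(data.m_vec, test_vec, data.m_vec);
}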

2 changes: 1 addition & 1 deletion doc/OpenCV_docs/eigenfaces.cpp
@@ -167,7 +167,7 @@ int main()
mean.scale_vector(-1, mean.vector, mean.vlen);


testimage_sgvec.add(mean);
add<linalg::Backend::NATIVE>(testimage_sgvec, mean, testimage_sgvec);



2 changes: 1 addition & 1 deletion doc/OpenCV_docs/fisherfaces.cpp
@@ -198,7 +198,7 @@ int main()
SGVector<float64_t> testimage_sgvec(temp2.get_column_vector(0),
temp2.num_cols, false);
mean.scale_vector(-1, mean.vector, mean.vlen);
testimage_sgvec.add(mean);
add<linalg::Backend::NATIVE>(testimage_sgvec, mean, testimage_sgvec);

// now we must project it into the PCA subspace. This is done by performing
// the Dot product between testimage and the WFINAL.
3 changes: 2 additions & 1 deletion src/shogun/ensemble/MeanRule.cpp
@@ -11,6 +11,7 @@
#include <shogun/ensemble/MeanRule.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;

@@ -35,7 +36,7 @@ SGVector<float64_t> CMeanRule::combine(const SGMatrix<float64_t>& ensemble_resul
SGVector<float64_t> mean_labels(row_sum, ensemble_result.num_rows);

float64_t scale = 1/(float64_t)ensemble_result.num_cols;
Member: @vigsterkr what's your suggestion here? Ignore linalg, make a double implementation, or make this class unavailable for non-C++11?

mean_labels.scale(scale);
linalg::scale<linalg::Backend::NATIVE>(mean_labels, scale);
Member: you have already written using namespace linalg, so you can omit the linalg:: here, I suppose.

Author: Fixed. linalg:: was necessary to avoid a name conflict with float64_t scale.


return mean_labels;
}
16 changes: 10 additions & 6 deletions src/shogun/lib/SGVector.cpp
@@ -25,6 +25,7 @@
#include <algorithm>

#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/linalg/linalg.h>
Member: needs to be guarded.
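A sketch of what the guard could look like (HAVE_CXX11 is an assumption here; the actual configure macro shogun uses to fence the linalg module may differ):

#ifdef HAVE_CXX11
#include <shogun/mathematics/linalg/linalg.h>
#endif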


#define COMPLEX128_ERROR_NOARG(function) \
template <> \
@@ -277,6 +278,15 @@ SGVector<T> SGVector<T>::operator+ (SGVector<T> x)
return result;
}


template<class T>
SGVector<T> SGVector<T>::operator+= (SGVector<T> x)
{
linalg::add<linalg::Backend::NATIVE>(*this, x, *this);
Member: what is the reason for NATIVE here?

Author: @karlnapf I did that according to @lambday: "So, for now you should just use NATIVE backend explicitly for the task." Am I missing something in this situation?

return *this;
}


template<class T>
void SGVector<T>::add(const SGVector<T> x)
{
@@ -839,12 +849,6 @@ void SGVector<float32_t>::scale_vector(float32_t alpha, float32_t* vec, int32_t
}
#endif

template<class T>
void SGVector<T>::scale(T alpha)
{
scale_vector(alpha, vector, vlen);
}

template<class T> void SGVector<T>::load(CFile* loader)
{
REQUIRE(loader, "Require a valid 'c FILE pointer'\n");
9 changes: 1 addition & 8 deletions src/shogun/lib/SGVector.h
@@ -286,11 +286,7 @@ template<class T> class SGVector : public SGReferencedData
SGVector<T> operator+ (SGVector<T> x);

/** Inplace addition operator */
SGVector<T> operator+= (SGVector<T> x)
{
add(x);
return *this;
}
SGVector<T> operator+= (SGVector<T> x);

/** Inplace addition operator for sparse vector */
SGVector<T> operator+= (SGSparseVector<T>& x)
@@ -429,9 +425,6 @@
return idx;
}

/// Scale vector inplace
void scale(T alpha);

/** Load vector from file
*
* @param loader File object via which to load data
4 changes: 3 additions & 1 deletion src/shogun/machine/gp/EPInferenceMethod.cpp
@@ -42,9 +42,11 @@
#include <shogun/lib/DynamicArray.h>

#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace Eigen;
using namespace linalg;

// try to use previously allocated memory for SGVector
#define CREATE_SGVECTOR(vec, len, sg_type) \
@@ -179,7 +181,7 @@ void CEPInferenceMethod::update()

// get and scale diagonal of the kernel matrix
SGVector<float64_t> ktrtr_diag=m_ktrtr.get_diagonal_vector();
ktrtr_diag.scale(CMath::exp(m_log_scale*2.0));
scale<linalg::Backend::NATIVE>(ktrtr_diag, CMath::exp(m_log_scale*2.0));
Member: This could be easier to read if we overloaded the *= operator of SGVector.

Author: @karlnapf you mean this overload should look like SGVector::operator+= (with scale() called inside the operator)?
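For reference, a sketch of the reviewer's suggestion, written to mirror the operator+= added in this PR (not part of this changeset):

template<class T>
SGVector<T> SGVector<T>::operator*= (T alpha)
{
	// delegate to linalg, exactly as operator+= delegates to linalg::add
	linalg::scale<linalg::Backend::NATIVE>(*this, alpha);
	return *this;
}

The call site above would then read ktrtr_diag *= CMath::exp(m_log_scale*2.0);.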


// marginal likelihood for ttau = tnu = 0
float64_t nlZ0=-SGVector<float64_t>::sum(m_model->get_log_zeroth_moments(
9 changes: 5 additions & 4 deletions src/shogun/machine/gp/SingleFITCLaplacianInferenceMethod.cpp
@@ -37,8 +37,10 @@
#include <shogun/lib/external/brent.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/features/DotFeatures.h>
#include <shogun/mathematics/linalg/linalg.h>
Member: guard, also below.


using namespace shogun;
using namespace linalg;
using namespace Eigen;

namespace shogun
@@ -80,8 +82,7 @@ class CFITCPsiLine : public func_base
(*dlp)=lik->get_log_probability_derivative_f(lab, (*f), 1);

(*W)=lik->get_log_probability_derivative_f(lab, (*f), 2);
W->scale(-1.0);

scale<linalg::Backend::NATIVE>(*W, -1.0);
// compute psi=alpha'*(f-m)/2-lp
float64_t result = eigen_alpha.dot(eigen_f-eigen_m)/2.0-
SGVector<float64_t>::sum(lik->get_log_probability_f(lab, *f));
@@ -360,7 +361,7 @@ void CSingleFITCLaplacianInferenceMethod::update_alpha()

// compute W = -d2lp
m_W=m_model->get_log_probability_derivative_f(m_labels, m_mu, 2);
m_W.scale(-1.0);
scale<linalg::Backend::NATIVE>(m_W, -1.0);

//n-by-1 vector
Map<VectorXd> eigen_al(m_al.vector, m_al.vlen);
@@ -469,7 +470,7 @@ void CSingleFITCLaplacianInferenceMethod::update_chol()

// W = -d2lp
m_W=m_d2lp.clone();
m_W.scale(-1.0);
scale<linalg::Backend::NATIVE>(m_W, -1.0);

Map<VectorXd> eigen_W(m_W.vector, m_W.vlen);
m_sW=SGVector<float64_t>(m_W.vlen);
10 changes: 7 additions & 3 deletions src/shogun/machine/gp/SingleLaplacianInferenceMethod.cpp
@@ -21,8 +21,10 @@
#include <shogun/mathematics/Math.h>
#include <shogun/lib/external/brent.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace linalg;
using namespace Eigen;

namespace shogun
@@ -59,7 +61,8 @@ class CPsiLine : public func_base
(*dlp)=lik->get_log_probability_derivative_f(lab, (*f), 1);

(*W)=lik->get_log_probability_derivative_f(lab, (*f), 2);
W->scale(-1.0);
scale<linalg::Backend::NATIVE>(*W, -1.0);


// compute psi=alpha'*(f-m)/2-lp
float64_t result = (*alpha).dot(eigen_f-eigen_m)/2.0-
@@ -188,7 +191,7 @@ void CSingleLaplacianInferenceMethod::update_chol()

// W = -d2lp
m_W=m_d2lp.clone();
m_W.scale(-1.0);
scale<linalg::Backend::NATIVE>(m_W, -1.0);
m_sW=SGVector<float64_t>(m_W.vlen);

// compute sW
@@ -312,7 +315,8 @@ void CSingleLaplacianInferenceMethod::update_alpha()

// compute W = -d2lp
m_W=m_model->get_log_probability_derivative_f(m_labels, m_mu, 2);
m_W.scale(-1.0);
scale<linalg::Backend::NATIVE>(m_W, -1.0);


Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

5 changes: 4 additions & 1 deletion src/shogun/statistics/QuadraticTimeMMD.cpp
@@ -36,8 +36,10 @@
#include <shogun/kernel/Kernel.h>
#include <shogun/kernel/CombinedKernel.h>
#include <shogun/kernel/CustomKernel.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace linalg;

#ifdef HAVE_EIGEN3
#include <shogun/mathematics/eigen3.h>
@@ -1023,7 +1025,8 @@ SGVector<float64_t> CQuadraticTimeMMD::sample_null_spectrum_DEPRECATED(

/* when m=n, return m*MMD^2 instead */
if (m==n)
null_samples.scale(0.5);
scale<linalg::Backend::NATIVE>(null_samples, 0.5);


return null_samples;
}
10 changes: 6 additions & 4 deletions src/shogun/structure/CCSOSVM.cpp
@@ -12,8 +12,10 @@
#include <shogun/mathematics/Mosek.h>
#include <shogun/lib/SGSparseVector.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace linalg;

CCCSOSVM::CCCSOSVM()
: CLinearStructuredOutputMachine()
@@ -537,9 +539,9 @@ SGSparseVector<float64_t> CCCSOSVM::find_cutting_plane(float64_t* margin)
CResultSet* result = m_model->argmax(m_w, i);
if (result->psi_computed)
{
new_constraint.add(result->psi_truth);
result->psi_pred.scale(-1.0);
new_constraint.add(result->psi_pred);
add<linalg::Backend::NATIVE>(new_constraint, result->psi_truth, new_constraint);
Member: here we have the + operator, why not use it?
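If that refers to the operator+= overload introduced in this PR, the branch could read as follows (a sketch; it assumes new_constraint is a dense SGVector<float64_t>, as the surrounding calls suggest):

new_constraint += result->psi_truth;
scale<linalg::Backend::NATIVE>(result->psi_pred, -1.0);
new_constraint += result->psi_pred;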

scale<linalg::Backend::NATIVE>(result->psi_pred, -1.0);
add<linalg::Backend::NATIVE>(new_constraint, result->psi_pred, new_constraint);
}
else if(result->psi_computed_sparse)
{
Expand All @@ -563,7 +565,7 @@ SGSparseVector<float64_t> CCCSOSVM::find_cutting_plane(float64_t* margin)
}
/* scaling */
float64_t scale = 1/(float64_t)num_samples;
new_constraint.scale(scale);
linalg::scale<linalg::Backend::NATIVE>(new_constraint, scale);
Member: same as above.

Author: Didn't remove linalg:: because of a name conflict with float64_t scale.

*margin *= scale;

/* find the nnz elements in new_constraint */
8 changes: 6 additions & 2 deletions src/shogun/structure/FWSOSVM.cpp
@@ -12,8 +12,11 @@
#include <shogun/structure/FWSOSVM.h>
#include <shogun/labels/LabelsFactory.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace linalg;


CFWSOSVM::CFWSOSVM()
: CLinearStructuredOutputMachine()
@@ -142,14 +145,15 @@ bool CFWSOSVM::train_machine(CFeatures* data)
ASSERT(loss_i - CMath::dot(m_w.vector, psi_i.vector, m_w.vlen) >= -1e-12);

// 4) update w_s and ell_s
w_s.add(psi_i);
add<linalg::Backend::NATIVE>(w_s, psi_i, w_s);
ell_s += loss_i;

SG_UNREF(result);

} // end si

w_s.scale(1.0 / (N*m_lambda));
scale<linalg::Backend::NATIVE>(w_s, 1.0 / (N*m_lambda));

ell_s /= N;

// 5) duality gap
7 changes: 5 additions & 2 deletions src/shogun/structure/FactorGraphModel.cpp
@@ -11,7 +11,8 @@
#include <shogun/structure/FactorGraphModel.h>
#include <shogun/structure/Factor.h>
#include <shogun/features/FactorGraphFeatures.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/linalg/linalg.h>

#ifdef HAVE_STD_UNORDERED_MAP
#include <unordered_map>
@@ -22,6 +23,8 @@
#endif

using namespace shogun;
using namespace linalg;


CFactorGraphModel::CFactorGraphModel()
: CStructuredModel()
@@ -271,7 +274,7 @@ SGVector< float64_t > CFactorGraphModel::get_joint_feature_vector(int32_t feat_i
}

// negation (-E(x,y) = <w,phi(x,y)>)
psi.scale(-1.0);
scale<linalg::Backend::NATIVE>(psi, -1.0);

SG_UNREF(facs);
SG_UNREF(fg);
5 changes: 4 additions & 1 deletion src/shogun/structure/StochasticSOSVM.cpp
@@ -12,8 +12,11 @@
#include <shogun/structure/StochasticSOSVM.h>
#include <shogun/labels/LabelsFactory.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;
using namespace linalg;


CStochasticSOSVM::CStochasticSOSVM()
: CLinearStructuredOutputMachine()
@@ -146,7 +149,7 @@ bool CStochasticSOSVM::train_machine(CFeatures* data)
}

w_s = psi_i.clone();
w_s.scale(1.0 / (N*m_lambda));
scale<linalg::Backend::NATIVE>(w_s, 1.0 / (N*m_lambda));

// 4) step-size gamma
float64_t gamma = 1.0 / (k+1.0);
6 changes: 5 additions & 1 deletion tests/unit/ensemble/MeanRule_unittest.cc
@@ -1,9 +1,12 @@
#include <shogun/ensemble/MeanRule.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/linalg/linalg.h>
#include <gtest/gtest.h>

using namespace shogun;
using namespace linalg;


void generate_random_ensemble_matrix(SGMatrix<float64_t>& em)
{
@@ -31,7 +34,8 @@ TEST(MeanRule, combine_matrix)
SGVector<float64_t> rv = ensemble_matrix.get_row_vector(i);
expected[i] = SGVector<float64_t>::sum(rv, ensemble_matrix.num_cols);
}
expected.scale(1/(float64_t)num_classifiers);

scale<linalg::Backend::NATIVE>(expected, 1/(float64_t)num_classifiers);

SGVector<float64_t> combined = mr->combine(ensemble_matrix);
