
Commit

replaced SGVector::add with linalg::add; moved SGVector::operator+=(SGVector) to .cpp file; added benchmark for SGVector::operator+=(SGVector)
nginn committed Dec 2, 2015
1 parent a6c2269 commit fc0e3f5
Showing 9 changed files with 63 additions and 12 deletions.
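
The pattern repeated across the changed files: call sites that previously used the in-place member function SGVector::add now go through the free linalg::add wrapper with an explicit backend, and the destination vector is passed as the third argument. A minimal before/after sketch, assuming a Shogun build with the linalg framework available (the function and vector names below are illustrative, not taken from the diff):

#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/linalg/linalg.h>

using namespace shogun;

// Illustrative helper, not part of the commit.
void add_in_place(SGVector<float64_t> v, SGVector<float64_t> x)
{
    // before this commit: member function, accumulates x into v
    // v.add(x);

    // after this commit: linalg wrapper with an explicit backend;
    // the third argument is the destination, so v is overwritten with v + x
    linalg::add<linalg::Backend::NATIVE>(v, x, v);
}
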
45 changes: 45 additions & 0 deletions benchmarks/sgvector_add_operator_benchmark.cpp
@@ -0,0 +1,45 @@
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGSparseVector.h>
#include <shogun/mathematics/linalg/linalg.h>
#include <algorithm>
#include <hayai/hayai.hpp>
#include <iostream>

using namespace shogun;

/**
* Instructions :
* 1. Install benchmarking toolkit "hayai" (https://github.com/nickbruun/hayai)
* 2. Compile against libhayai_main, e.g.
* g++ -O3 -std=c++11 sgvector_add_operator_benchmark.cpp -I/usr/include/eigen3 -I/usr/local/include/viennacl -lshogun -lhayai_main -lOpenCL -o benchmark
* 3. ./benchmark
*/

/** Generate data only once */
struct Data
{
    Data()
    {
        init();
    }

    void init()
    {
        m_vec = SGVector<float32_t>(num_elems);
        std::iota(m_vec.data(), m_vec.data()+m_vec.size(), 1);
    }

    SGVector<float32_t> m_vec;
    static constexpr index_t num_elems=1000;
};

Data data;


BENCHMARK(SGVector, addoperator_SGVector, 10, 100000000)
{
    SGVector<float32_t> test_vec = SGVector<float32_t>(data.num_elems);
    std::iota(test_vec.data(), test_vec.data()+test_vec.size(), 1);
    data.m_vec += test_vec;
}

2 changes: 1 addition & 1 deletion doc/OpenCV_docs/eigenfaces.cpp
@@ -167,7 +167,7 @@ int main()
mean.scale_vector(-1, mean.vector, mean.vlen);


testimage_sgvec.add(mean);
add<linalg::Backend::NATIVE>(testimage_sgvec, mean, testimage_sgvec);



2 changes: 1 addition & 1 deletion doc/OpenCV_docs/fisherfaces.cpp
@@ -198,7 +198,7 @@ int main()
SGVector<float64_t> testimage_sgvec(temp2.get_column_vector(0),
temp2.num_cols, false);
mean.scale_vector(-1, mean.vector, mean.vlen);
testimage_sgvec.add(mean);
add<linalg::Backend::NATIVE>(testimage_sgvec, mean, testimage_sgvec);

// now we must project it into the PCA subspace. This is done by performing
// the Dot product between testimage and the WFINAL.
10 changes: 10 additions & 0 deletions src/shogun/lib/SGVector.cpp
@@ -25,6 +25,7 @@
#include <algorithm>

#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/linalg/linalg.h>

#define COMPLEX128_ERROR_NOARG(function) \
template <> \
@@ -277,6 +278,15 @@ SGVector<T> SGVector<T>::operator+ (SGVector<T> x)
return result;
}


template<class T>
SGVector<T> SGVector<T>::operator+= (SGVector<T> x)
{
    linalg::add<linalg::Backend::NATIVE>(*this, x, *this);
    return *this;
}


template<class T>
void SGVector<T>::add(const SGVector<T> x)
{
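
For dense vectors of equal length, the observable effect of the three-argument linalg::add used above on the NATIVE backend is an element-wise sum into the destination. The helper below is a hypothetical illustration of that effect, not Shogun's actual implementation:

#include <shogun/lib/SGVector.h>
#include <cassert>

using namespace shogun;

// Hypothetical helper (not Shogun API): what linalg::add(a, b, result)
// on the NATIVE backend effectively computes for dense, equally sized vectors.
template <class T>
void add_effect_sketch(SGVector<T> a, SGVector<T> b, SGVector<T> result)
{
    assert(a.vlen == b.vlen && b.vlen == result.vlen);
    for (index_t i = 0; i < a.vlen; ++i)
        result[i] = a[i] + b[i];
}
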
6 changes: 1 addition & 5 deletions src/shogun/lib/SGVector.h
@@ -286,11 +286,7 @@ template<class T> class SGVector : public SGReferencedData
SGVector<T> operator+ (SGVector<T> x);

/** Inplace addition operator */
SGVector<T> operator+= (SGVector<T> x)
{
add(x);
return *this;
}
SGVector<T> operator+= (SGVector<T> x);

/** Inplace addition operator for sparse vector */
SGVector<T> operator+= (SGSparseVector<T>& x)
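
From a caller's perspective nothing changes: += on two SGVectors reads the same and still accumulates element-wise; it now just dispatches to linalg::add out of line. A minimal usage sketch, assuming the usual init_shogun_with_defaults() setup:

#include <shogun/base/init.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    SGVector<float64_t> a(10), b(10);
    a.set_const(1.0);
    b.set_const(2.0);

    a += b;   // forwards to linalg::add<linalg::Backend::NATIVE>(a, b, a);
              // every element of a is now 3.0

    exit_shogun();
    return 0;
}
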
4 changes: 2 additions & 2 deletions src/shogun/structure/CCSOSVM.cpp
@@ -539,9 +539,9 @@ SGSparseVector<float64_t> CCCSOSVM::find_cutting_plane(float64_t* margin)
CResultSet* result = m_model->argmax(m_w, i);
if (result->psi_computed)
{
new_constraint.add(result->psi_truth);
add<linalg::Backend::NATIVE>(new_constraint, result->psi_truth, new_constraint);
scale<linalg::Backend::NATIVE>(result->psi_pred, -1.0);
new_constraint.add(result->psi_pred);
add<linalg::Backend::NATIVE>(new_constraint, result->psi_pred, new_constraint);
}
else if(result->psi_computed_sparse)
{
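
A note on the scale/add pair above: as before this commit, psi_pred is negated in place and then accumulated, so the net effect is to subtract the original psi_pred from new_constraint (and leave psi_pred negated). Assuming both are dense vectors of equal length, as the surrounding code suggests, a hypothetical sketch of that combined effect:

#include <shogun/lib/SGVector.h>

using namespace shogun;

// Hypothetical helper (not the committed code): the combined effect of
// scale<NATIVE>(psi_pred, -1.0) followed by
// add<NATIVE>(new_constraint, psi_pred, new_constraint).
void subtract_prediction_sketch(SGVector<float64_t> new_constraint,
                                SGVector<float64_t> psi_pred)
{
    for (index_t i = 0; i < new_constraint.vlen; ++i)
    {
        psi_pred[i] = -psi_pred[i];        // in-place negation
        new_constraint[i] += psi_pred[i];  // i.e. subtract the original value
    }
}
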
2 changes: 1 addition & 1 deletion src/shogun/structure/FWSOSVM.cpp
@@ -145,7 +145,7 @@ bool CFWSOSVM::train_machine(CFeatures* data)
ASSERT(loss_i - CMath::dot(m_w.vector, psi_i.vector, m_w.vlen) >= -1e-12);

// 4) update w_s and ell_s
w_s.add(psi_i);
add<linalg::Backend::NATIVE>(w_s, psi_i, w_s);
ell_s += loss_i;

SG_UNREF(result);
2 changes: 1 addition & 1 deletion tests/unit/features/DataGenerators_unittest.cc
@@ -119,7 +119,7 @@ TEST(MeanShiftDataGenerator,get_next_example)
for (index_t i=0; i<num_runs; ++i)
{
gen->get_next_example();
avg.add(gen->get_vector());
add<linalg::Backend::NATIVE>(avg, gen->get_vector(), avg);
gen->release_example();
}

2 changes: 1 addition & 1 deletion tests/unit/lib/SGVector_unittest.cc
@@ -76,7 +76,7 @@ TEST(SGVectorTest,add)
float64_t* b_clone = SGVector<float64_t>::clone_vector(b.vector, b.vlen);
SGVector<float64_t> c(b_clone, 10);

c.add(a);
add<linalg::Backend::NATIVE>(c, a, c);
for (int i=0; i < c.vlen; ++i)
EXPECT_EQ(c[i], a[i]+b[i]);


1 comment on commit fc0e3f5

@karlnapf
Member

@lambday can you comment?
