Merge pull request #3358 from OXPHOS/linalg_scale
LinalgRefactor - SGVector & SGMatrix - scale
vigsterkr committed Aug 11, 2016
2 parents 4f4dc47 + cc1d863 commit d9f35fc
Showing 6 changed files with 331 additions and 0 deletions.
29 changes: 29 additions & 0 deletions src/shogun/mathematics/linalg/LinalgBackendBase.h
@@ -157,6 +157,35 @@ class LinalgBackendBase
BACKEND_GENERIC_COMPLEX_MEAN(SGMatrix)
#undef BACKEND_GENERIC_COMPLEX_MEAN

/**
* Wrapper method of the scale operation B = alpha * A.
*
* @see linalg::scale
*/
#define BACKEND_GENERIC_SCALE(Type, Container) \
virtual Container<Type> scale(const Container<Type>& a, Type alpha) const \
{ \
SG_SNOTIMPLEMENTED; \
return 0; \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGMatrix)
#undef BACKEND_GENERIC_SCALE

/**
* Wrapper method of the in-place scale operation result = alpha * A.
*
* @see linalg::scale
*/
#define BACKEND_GENERIC_IN_PLACE_SCALE(Type, Container) \
virtual void scale(Container<Type>& a, Type alpha, Container<Type>& result) const \
{ \
SG_SNOTIMPLEMENTED; \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_SCALE

/**
* Wrapper method of vector sum that works with generic vectors.
*
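For orientation, each wrapper above is stamped out once per primitive type. A rough sketch of what a single expansion of BACKEND_GENERIC_SCALE might look like, assuming DEFINE_FOR_ALL_PTYPE simply applies the macro to each supported type (e.g. float64_t):

// Hypothetical expansion of BACKEND_GENERIC_SCALE(float64_t, SGVector);
// the base class only provides a not-implemented fallback that concrete
// backends (Eigen3, ViennaCL) override.
virtual SGVector<float64_t> scale(const SGVector<float64_t>& a, float64_t alpha) const
{
	SG_SNOTIMPLEMENTED;
	return 0;
}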
64 changes: 64 additions & 0 deletions src/shogun/mathematics/linalg/LinalgBackendEigen.h
@@ -124,6 +124,26 @@ class LinalgBackendEigen : public LinalgBackendBase
BACKEND_GENERIC_COMPLEX_MEAN(SGMatrix)
#undef BACKEND_GENERIC_COMPLEX_MEAN

/** Implementation of @see linalg::scale */
#define BACKEND_GENERIC_SCALE(Type, Container) \
virtual Container<Type> scale(const Container<Type>& a, Type alpha) const \
{ \
return scale_impl(a, alpha); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGMatrix)
#undef BACKEND_GENERIC_SCALE

/** Implementation of @see linalg::scale */
#define BACKEND_GENERIC_IN_PLACE_SCALE(Type, Container) \
virtual void scale(Container<Type>& a, Type alpha, Container<Type>& result) const \
{ \
scale_impl(a, alpha, result); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_SCALE

/** Implementation of @see LinalgBackendBase::sum */
#define BACKEND_GENERIC_SUM(Type, Container) \
virtual Type sum(const Container<Type>& a, bool no_diag) const \
@@ -252,6 +272,50 @@ class LinalgBackendEigen : public LinalgBackendBase
return sum_impl(a)/(complex128_t(a.size()));
}

/** Eigen3 vector scale method: B = alpha * A */
template <typename T>
SGVector<T> scale_impl(const SGVector<T>& a, T alpha) const
{
SGVector<T> b(a.vlen);
typename SGVector<T>::EigenVectorXtMap a_eig = a;
typename SGVector<T>::EigenVectorXtMap b_eig = b;

b_eig = alpha * a_eig;
return b;
}

/** Eigen3 matrix scale method: B = alpha * A */
template <typename T>
SGMatrix<T> scale_impl(const SGMatrix<T>& a, T alpha) const
{
SGMatrix<T> b(a.num_rows, a.num_cols);
typename SGMatrix<T>::EigenMatrixXtMap a_eig = a;
typename SGMatrix<T>::EigenMatrixXtMap b_eig = b;

b_eig = alpha * a_eig;
return b;
}

/** Eigen3 vector inplace scale method: result = alpha * A */
template <typename T>
void scale_impl(SGVector<T>& a, T alpha, SGVector<T>& result) const
{
typename SGVector<T>::EigenVectorXtMap a_eig = a;
typename SGVector<T>::EigenVectorXtMap result_eig = result;

result_eig = alpha * a_eig;
}

/** Eigen3 matrix inplace scale method: result = alpha * A */
template <typename T>
void scale_impl(SGMatrix<T>& a, T alpha, SGMatrix<T>& result) const
{
typename SGMatrix<T>::EigenMatrixXtMap a_eig = a;
typename SGMatrix<T>::EigenMatrixXtMap result_eig = result;

result_eig = alpha * a_eig;
}

/** Eigen3 vector sum method */
template <typename T>
T sum_impl(const SGVector<T>& vec, bool no_diag=false) const
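The Eigen3 implementations above wrap the Shogun containers in Eigen maps and assign the scaled expression through the map, so no copies are made beyond the result buffer. A minimal standalone sketch of the same idea in raw Eigen (function and buffer names here are illustrative, not Shogun API):

#include <Eigen/Dense>

// Scale len doubles from in into out; both maps view existing memory.
void scale_buffer(const double* in, double* out, int len, double alpha)
{
	Eigen::Map<const Eigen::VectorXd> a_eig(in, len);
	Eigen::Map<Eigen::VectorXd> b_eig(out, len);
	b_eig = alpha * a_eig;   // written straight through the map into out
}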
61 changes: 61 additions & 0 deletions src/shogun/mathematics/linalg/LinalgBackendViennacl.h
@@ -103,6 +103,26 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_MEAN, SGMatrix)
#undef BACKEND_GENERIC_MEAN

/** Implementation of @see linalg::scale */
#define BACKEND_GENERIC_SCALE(Type, Container) \
virtual Container<Type> scale(const Container<Type>& a, Type alpha) const \
{ \
return scale_impl(a, alpha); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_SCALE, SGMatrix)
#undef BACKEND_GENERIC_SCALE

/** Implementation of @see linalg::scale */
#define BACKEND_GENERIC_IN_PLACE_SCALE(Type, Container) \
virtual void scale(Container<Type>& a, Type alpha, Container<Type>& result) const \
{ \
scale_impl(a, result, alpha); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_SCALE, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_SCALE

/** Implementation of @see LinalgBackendBase::sum */
#define BACKEND_GENERIC_SUM(Type, Container) \
virtual Type sum(const Container<Type>& a, bool no_diag) const \
@@ -228,6 +248,47 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
return sum_impl(a)/float64_t(a.size());
}

/** ViennaCL vector scale method: B = alpha * A */
template <typename T>
SGVector<T> scale_impl(const SGVector<T>& a, T alpha) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* b_gpu = new GPUMemoryViennaCL<T>(a.size());
b_gpu->data_vector(a.size()) = alpha * a_gpu->data_vector(a.size());
return SGVector<T>(b_gpu, a.size());
}

/** ViennaCL matrix scale method: B = alpha * A */
template <typename T>
SGMatrix<T> scale_impl(const SGMatrix<T>& a, T alpha) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* b_gpu = new GPUMemoryViennaCL<T>(a.size());
b_gpu->data_matrix(a.num_rows, a.num_cols) = alpha * a_gpu->data_matrix(a.num_rows, a.num_cols);
return SGMatrix<T>(b_gpu, a.num_rows, a.num_cols);
}

/** ViennaCL vector inplace scale method: result = alpha * A */
template <typename T>
void scale_impl(SGVector<T>& a, SGVector<T>& result, T alpha) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* result_gpu = cast_to_viennacl(result);

result_gpu->data_vector(a.size()) = alpha * a_gpu->data_vector(a.size());
}

/** ViennaCL matrix inplace scale method: result = alpha * A */
template <typename T>
void scale_impl(SGMatrix<T>& a, SGMatrix<T>& result, T alpha) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* result_gpu = cast_to_viennacl(result);

result_gpu->data_matrix(a.num_rows, a.num_cols) =
alpha * a_gpu->data_matrix(a.num_rows, a.num_cols);
}

/** ViennaCL vector sum method. */
template <typename T>
T sum_impl(const SGVector<T>& vec, bool no_diag=false) const
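The ViennaCL backend performs the same scaling on device memory, exposing the GPU buffers through the data_vector()/data_matrix() proxies. As a rough standalone sketch with plain ViennaCL (stock viennacl::vector API only; assumed for illustration, not taken from this diff):

#include <viennacl/vector.hpp>
#include <vector>

void viennacl_scale_demo()
{
	std::vector<float> host(5, 1.0f);
	viennacl::vector<float> a(5), b(5);

	viennacl::copy(host, a);   // host -> device transfer
	b = 0.3f * a;              // scalar * vector, evaluated on the device
	viennacl::copy(b, host);   // device -> host transfer
}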
44 changes: 44 additions & 0 deletions src/shogun/mathematics/linalg/LinalgNamespace.h
@@ -220,6 +220,44 @@ complex128_t mean(const Container<complex128_t>& a)
return infer_backend(a)->mean(a);
}

/**
* Performs the operation B = alpha * A on vectors or matrices
*
* @param a first vector/matrix
* @param alpha scale factor
* @return vector or matrix of alpha * A
*/
template<typename T, template<typename> class Container>
Container<T> scale(const Container<T>& a, T alpha=1)
{
return infer_backend(a)->scale(a, alpha);
}

/**
* Performs the operation result = alpha * A on vectors
*
* @param a first vector
* @param alpha scale factor
* @param result the vector of alpha * A
*/
template <typename T>
void scale(SGVector<T>& a, SGVector<T>& result, T alpha=1)
{
REQUIRE(result.vlen == a.vlen, "Length of vector result (%d) doesn't match vector a (%d).\n", result.vlen, a.vlen);
infer_backend(a, result)->scale(a, alpha, result);
}

/**
* Performs the operation result = alpha * A on matrices
*
* @param a first matrix
* @param alpha scale factor
* @param result the matrix of alpha * A
*/
template <typename T>
void scale(SGMatrix<T>& a, SGMatrix<T>& result, T alpha=1)
{
REQUIRE((a.num_rows == result.num_rows), "Number of rows of matrix a (%d) must match matrix result (%d).\n",
a.num_rows, result.num_rows);
REQUIRE((a.num_cols == result.num_cols), "Number of columns of matrix a (%d) must match matrix result (%d).\n",
a.num_cols, result.num_cols);
infer_backend(a, result)->scale(a, alpha, result);
}

/**
* Method that computes the sum of vectors or matrices
*
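Taken together, the namespace functions above give callers an out-of-place and an in-place variant. A brief usage sketch, assuming the linalg namespace is in scope as in the unit tests below:

SGVector<float64_t> a(5);
a.range_fill(0);

auto b = scale(a, 2.0);        // out-of-place: b = 2.0 * a

SGVector<float64_t> result(5);
scale(a, result, 2.0);         // in-place form: result = 2.0 * a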
@@ -125,6 +125,63 @@ TEST(LinalgBackendEigen, SGMatrix_mean)
EXPECT_NEAR(result, 2.5, 1E-15);
}

TEST(LinalgBackendEigen, SGVector_scale)
{
const index_t size = 5;
const float64_t alpha = 0.3;
SGVector<float64_t> a(size);
a.range_fill(0);

auto result = scale(a, alpha);

for (index_t i = 0; i < size; ++i)
EXPECT_NEAR(alpha * a[i], result[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_scale)
{
const float64_t alpha = 0.3;
const index_t nrows = 2, ncols = 3;
SGMatrix<float64_t> A(nrows, ncols);

for (index_t i = 0; i < nrows*ncols; ++i)
A[i] = i;

auto result = scale(A, alpha);

for (index_t i = 0; i < nrows*ncols; ++i)
EXPECT_NEAR(alpha*A[i], result[i], 1e-15);
}

TEST(LinalgBackendEigen, SGVector_scale_in_place)
{
const index_t size = 5;
const float64_t alpha = 0.3;
SGVector<float64_t> a(size);
a.range_fill(0);

scale(a, a, alpha);

for (index_t i = 0; i < size; ++i)
EXPECT_NEAR(alpha * i, a[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_scale_in_place)
{
const float64_t alpha = 0.3;
const index_t nrows = 2, ncols = 3;

SGMatrix<float64_t> A(nrows, ncols);

for (index_t i = 0; i < nrows*ncols; ++i)
A[i] = i;

scale(A, A, alpha);

for (index_t i = 0; i < nrows*ncols; ++i)
EXPECT_NEAR(alpha*i, A[i], 1e-15);
}

TEST(LinalgBackendEigen, SGVector_sum)
{
const index_t size = 10;
@@ -165,6 +165,82 @@ TEST(LinalgBackendViennaCL, SGMatrix_mean)
EXPECT_NEAR(result, 2.5, 1E-15);
}

TEST(LinalgBackendViennaCL, SGVector_scale)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());

const index_t size = 5;
const float32_t alpha = 0.3;
SGVector<float32_t> a(size), a_gpu;
a.range_fill(0);

a_gpu = to_gpu(a);

SGVector<float32_t> result_gpu = scale(a_gpu, alpha);
SGVector<float32_t> result = from_gpu(result_gpu);

for (index_t i = 0; i < size; ++i)
EXPECT_NEAR(alpha * a[i], result[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_scale)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());

const float32_t alpha = 0.3;
const index_t nrows = 2, ncols = 3;
SGMatrix<float32_t> A(nrows, ncols), A_gpu;
for (index_t i = 0; i < nrows*ncols; ++i)
A[i] = i;

A_gpu = to_gpu(A);

auto result_gpu = scale(A_gpu, alpha);
auto result = from_gpu(result_gpu);

for (index_t i = 0; i < nrows*ncols; ++i)
EXPECT_NEAR(alpha*A[i], result[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGVector_scale_in_place)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());

const index_t size = 5;
const float32_t alpha = 0.3;
SGVector<float32_t> a(size), a_gpu;
a.range_fill(0);

a_gpu = to_gpu(a);

scale(a_gpu, a_gpu, alpha);
a = from_gpu(a_gpu);

for (index_t i = 0; i < size; ++i)
EXPECT_NEAR(alpha * i, a[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_scale_in_place)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());

const float32_t alpha = 0.3;
const index_t nrows = 2, ncols = 3;
SGMatrix<float32_t> A(nrows, ncols);
SGMatrix<float32_t> A_gpu;

for (index_t i = 0; i < nrows*ncols; ++i)
A[i] = i;

A_gpu = to_gpu(A);

scale(A_gpu, A_gpu, alpha);
A = from_gpu(A_gpu);

for (index_t i = 0; i < nrows*ncols; ++i)
EXPECT_NEAR(alpha*i, A[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGVector_sum)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());
