Skip to content

Commit

Permalink
Merge pull request #3657 from OXPHOS/linalg_temp
Browse files Browse the repository at this point in the history
LinalgRefactor - clean up documentation and styles
  • Loading branch information
OXPHOS committed Feb 27, 2017
2 parents c1d17d9 + b772c94 commit cfa931a
Show file tree
Hide file tree
Showing 10 changed files with 248 additions and 210 deletions.
4 changes: 2 additions & 2 deletions src/shogun/mathematics/linalg/GPUMemoryBase.h
Expand Up @@ -36,7 +36,7 @@
namespace shogun
{

/** @brief interface for GPU memory libraries */
/** @brief Interface for GPU memory libraries */
template <typename T>
struct GPUMemoryBase
{
Expand All @@ -48,7 +48,7 @@ struct GPUMemoryBase
/** Clone GPU memory, i.e. vector or matrix
*
* @param GPUMemoryBase structure pointer
* @return a deep-copy of GPUMemoryBase structure pointer
* @return A deep-copy of GPUMemoryBase structure pointer
*/
virtual GPUMemoryBase<T>* clone_vector(GPUMemoryBase<T>* gpu_ptr,
index_t vlen) const = 0;
Expand Down
25 changes: 18 additions & 7 deletions src/shogun/mathematics/linalg/GPUMemoryViennaCL.h
Expand Up @@ -68,9 +68,9 @@ struct GPUMemoryViennaCL : public GPUMemoryBase<T>
init();
};

/** Creates a new vector
/** Create a new vector
*
* @param length Number of elements
* @param len Number of elements
*/
GPUMemoryViennaCL(index_t len): m_data(new VCLMemoryArray())
{
Expand All @@ -79,9 +79,9 @@ struct GPUMemoryViennaCL : public GPUMemoryBase<T>
viennacl::context());
}

/** Wraps a vector around an existing memory segment
/** Wrap a vector around an existing memory segment
*
* @param vector GPUMemoryBase pointer
* @param gpu_ptr GPUMemoryBase pointer
*/
GPUMemoryViennaCL(GPUMemoryBase<T>* gpu_ptr) : m_data(new VCLMemoryArray())
{
Expand All @@ -91,7 +91,11 @@ struct GPUMemoryViennaCL : public GPUMemoryBase<T>
m_offset = temp_ptr->m_offset;
};

/** Clone GPU vector */
/** Clone GPU vector
*
* @param vector GPUMemoryBase pointer
* @param vlen Length of the vector
*/
GPUMemoryBase<T>* clone_vector(GPUMemoryBase<T>* vector, index_t vlen) const
{
GPUMemoryViennaCL<T>* src_ptr = static_cast<GPUMemoryViennaCL<T>*>(vector);
Expand All @@ -105,13 +109,20 @@ struct GPUMemoryViennaCL : public GPUMemoryBase<T>
return gpu_ptr;
}

/** ViennaCL Vector structure that saves the data */
/** ViennaCL Vector structure that saves the data
*
* @param len Number of elements
*/
VCLVectorBase data_vector(index_t len)
{
return VCLVectorBase(*m_data, len, m_offset, 1);
}

/** ViennaCL Vector structure that saves the data */
/** ViennaCL Matrix structure that saves the data
*
* @param nrows Row number of the matrix
* @param ncols Column number of the matrix
*/
VCLMatrixBase data_matrix(index_t nrows, index_t ncols)
{
#if VIENNACL_VERSION >= 10600
Expand Down
8 changes: 4 additions & 4 deletions src/shogun/mathematics/linalg/LinalgBackendBase.h
Expand Up @@ -30,6 +30,9 @@
* Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
*/

#ifndef LINALG_BACKEND_BASE_H__
#define LINALG_BACKEND_BASE_H__

#include <shogun/lib/config.h>
#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>
Expand All @@ -39,9 +42,6 @@
#include <shogun/mathematics/linalg/internal/Block.h>
#include <memory>

#ifndef LINALG_BACKEND_BASE_H__
#define LINALG_BACKEND_BASE_H__

namespace shogun
{

Expand Down Expand Up @@ -103,7 +103,7 @@ class LinalgBackendBase
#undef BACKEND_GENERIC_IN_PLACE_ADD

/**
* Wrapper Cholesky decomposition.
* Wrapper method of Cholesky decomposition.
*
* @see linalg::cholesky_factor
*/
Expand Down
21 changes: 9 additions & 12 deletions src/shogun/mathematics/linalg/LinalgBackendEigen.h
Expand Up @@ -30,18 +30,18 @@
* Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
*/

#ifndef LINALG_BACKEND_EIGEN_H__
#define LINALG_BACKEND_EIGEN_H__

#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/linalg/LinalgBackendBase.h>
#include <numeric>

#ifndef LINALG_BACKEND_EIGEN_H__
#define LINALG_BACKEND_EIGEN_H__

namespace shogun
{

/** @brief linalg methods with Eigen3 backend */
/** @brief Linalg methods with Eigen3 backend */
class LinalgBackendEigen : public LinalgBackendBase
{
public:
Expand Down Expand Up @@ -84,7 +84,8 @@ class LinalgBackendEigen : public LinalgBackendBase

/** Implementation of @see LinalgBackendBase::add */
#define BACKEND_GENERIC_IN_PLACE_ADD(Type, Container) \
virtual void add(Container<Type>& a, Container<Type>& b, Type alpha, Type beta, Container<Type>& result) const \
virtual void add(Container<Type>& a, Container<Type>& b, Type alpha, \
Type beta, Container<Type>& result) const \
{ \
add_impl(a, b, alpha, beta, result); \
}
Expand Down Expand Up @@ -182,11 +183,7 @@ class LinalgBackendEigen : public LinalgBackendBase
BACKEND_GENERIC_COMPLEX_MEAN(SGMatrix)
#undef BACKEND_GENERIC_COMPLEX_MEAN

/**
* Wrapper method that range fills a vector of matrix.
*
* @see linalg::range_fill
*/
/** Implementation of @see LinalgBackendBase::range_fill */
#define BACKEND_GENERIC_RANGE_FILL(Type, Container) \
virtual void range_fill(Container<Type>& a, const Type start) const \
{ \
Expand Down Expand Up @@ -403,7 +400,7 @@ class LinalgBackendEigen : public LinalgBackendBase

result_eig = a_block.array() * b_block.array();
}

/** Eigen3 matrix * vector in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGVector<T>& b, SGVector<T>& result,
Expand Down Expand Up @@ -498,7 +495,7 @@ class LinalgBackendEigen : public LinalgBackendBase
result_eig = alpha * a_eig;
}

/** Set const method */
/** Eigen3 set const method */
template <typename T, template <typename> class Container>
void set_const_impl(Container<T>& a, T value) const
{
Expand Down
6 changes: 3 additions & 3 deletions src/shogun/mathematics/linalg/LinalgBackendGPUBase.h
Expand Up @@ -30,16 +30,16 @@
* Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
*/

#ifndef LINALG_BACKEND_GPU_BASE_H__
#define LINALG_BACKEND_GPU_BASE_H__

#include <shogun/lib/config.h>
#include <shogun/lib/common.h>
#include <shogun/lib/SGVector.h>
#include <shogun/io/SGIO.h>
#include <shogun/mathematics/linalg/GPUMemoryBase.h>
#include <memory>

#ifndef LINALG_BACKEND_GPU_BASE_H__
#define LINALG_BACKEND_GPU_BASE_H__

namespace shogun
{

Expand Down
40 changes: 23 additions & 17 deletions src/shogun/mathematics/linalg/LinalgBackendViennacl.h
Expand Up @@ -30,12 +30,12 @@
* Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
*/

#include <shogun/mathematics/linalg/LinalgBackendGPUBase.h>
#include <shogun/mathematics/linalg/LinalgBackendViennaclKernels.h>

#ifndef LINALG_BACKEND_VIENNACL_H__
#define LINALG_BACKEND_VIENNACL_H__

#include <shogun/mathematics/linalg/LinalgBackendGPUBase.h>
#include <shogun/mathematics/linalg/LinalgBackendViennaclKernels.h>

#ifdef HAVE_VIENNACL

#include <viennacl/vector.hpp>
Expand Down Expand Up @@ -66,8 +66,10 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
METHODNAME(float32_t, Container); \
METHODNAME(float64_t, Container); \

/** Implementation of @see LinalgBackendBase::add */
#define BACKEND_GENERIC_IN_PLACE_ADD(Type, Container) \
virtual void add(Container<Type>& a, Container<Type>& b, Type alpha, Type beta, Container<Type>& result) const \
virtual void add(Container<Type>& a, Container<Type>& b, Type alpha, \
Type beta, Container<Type>& result) const \
{ \
add_impl(a, b, alpha, beta, result); \
}
Expand Down Expand Up @@ -125,7 +127,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_MEAN, SGMatrix)
#undef BACKEND_GENERIC_MEAN

/** Implementation of @see linalg::scale */
/** Implementation of @see LinalgBackendBase::scale */
#define BACKEND_GENERIC_IN_PLACE_SCALE(Type, Container) \
virtual void scale(Container<Type>& a, Type alpha, Container<Type>& result) const \
{ \
Expand Down Expand Up @@ -205,14 +207,14 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
#undef DEFINE_FOR_ALL_PTYPE

private:
/** static cast GPUMemoryBase class to GPUMemoryViennaCL */
/** Static cast @see GPUMemoryBase class to @see GPUMemoryViennaCL */
template <typename T, template<typename> class Container>
GPUMemoryViennaCL<T>* cast_to_viennacl(const Container<T> &a) const
{
return static_cast<GPUMemoryViennaCL<T>*>(a.gpu_ptr.get());
}

/** ViennaCL vector result = alpha*A + beta*B method */
/** ViennaCL vector result = alpha * A + beta * B method */
template <typename T>
void add_impl(SGVector<T>& a, SGVector<T>& b, T alpha, T beta, SGVector<T>& result) const
{
Expand All @@ -224,7 +226,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
alpha * a_gpu->data_vector(a.size()) + beta * b_gpu->data_vector(b.size());
}

/** ViennaCL matrix result = alpha*A + beta*B method */
/** ViennaCL matrix result = alpha * A + beta * B method */
template <typename T>
void add_impl(SGMatrix<T>& a, SGMatrix<T>& b, T alpha, T beta, SGMatrix<T>& result) const
{
Expand All @@ -244,7 +246,8 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* b_gpu = cast_to_viennacl(b);

return viennacl::linalg::inner_prod(a_gpu->data_vector(a.size()), b_gpu->data_vector(b.size()));
return viennacl::linalg::inner_prod(
a_gpu->data_vector(a.size()), b_gpu->data_vector(b.size()));
}

/** ViennaCL matrix in-place elementwise product method */
Expand All @@ -255,8 +258,9 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
GPUMemoryViennaCL<T>* b_gpu = cast_to_viennacl(b);
GPUMemoryViennaCL<T>* result_gpu = cast_to_viennacl(result);

result_gpu->data_matrix(a.num_rows, a.num_cols) = viennacl::linalg::element_prod(
a_gpu->data_matrix(a.num_rows, a.num_cols), b_gpu->data_matrix(a.num_rows, a.num_cols));
result_gpu->data_matrix(a.num_rows, a.num_cols) =
viennacl::linalg::element_prod(a_gpu->data_matrix(a.num_rows,
a.num_cols), b_gpu->data_matrix(a.num_rows, a.num_cols));
}

/** ViennaCL matrix * vector in-place product method */
Expand All @@ -277,7 +281,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
a_gpu->data_matrix(a.num_rows, a.num_cols), b_gpu->data_vector(b.vlen));
}

/** ViennaCL matrix in-place product method */
/** ViennaCL matrices in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result,
bool transpose_A, bool transpose_B) const
Expand Down Expand Up @@ -309,6 +313,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
b_gpu->data_matrix(b.num_rows, b.num_cols));
}

/** ViennaCL max method */
template <typename T, template<typename> class Container>
T max_impl(const Container<T>& a) const
{
Expand All @@ -329,7 +334,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
return result[0];
}

/** Eigen3 vector and matrix mean method */
/** ViennaCL vectors or matrices mean method */
template <typename T, template <typename> class Container>
float64_t mean_impl(const Container<T>& a) const
{
Expand Down Expand Up @@ -362,10 +367,11 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
void set_const_impl(Container<T>& a, T value) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
typename GPUMemoryViennaCL<T>::VCLVectorBase vcl_vector = a_gpu->data_vector(a.size());
typename GPUMemoryViennaCL<T>::VCLVectorBase vcl_vector =
a_gpu->data_vector(a.size());
viennacl::linalg::vector_assign(vcl_vector, value);
}

/** ViennaCL vector sum method. */
template <typename T>
T sum_impl(const SGVector<T>& vec, bool no_diag=false) const
Expand Down Expand Up @@ -435,7 +441,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
return SGVector<T>(result_gpu, mat.num_rows);
}

/** Transfers data to GPU with ViennaCL method. */
/** Transfer data to GPU with ViennaCL method. */
template <typename T, template<typename> class Container>
GPUMemoryBase<T>* to_gpu_impl(const Container<T>& a) const
{
Expand All @@ -449,7 +455,7 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
return gpu_ptr;
}

/** Fetches data from GPU with ViennaCL method. */
/** Fetch data from GPU with ViennaCL method. */
template <typename T, template<typename> class Container>
void from_gpu_impl(const Container<T>& a, T* data) const
{
Expand Down

0 comments on commit cfa931a

Please sign in to comment.