add matrix*vector method
OXPHOS committed Mar 13, 2017
1 parent 6759d9c commit 2b429f2
Showing 6 changed files with 320 additions and 5 deletions.
3 changes: 2 additions & 1 deletion src/shogun/mathematics/linalg/LinalgBackendBase.h
@@ -145,11 +145,12 @@ class LinalgBackendBase
* @see linalg::matrix_prod
*/
#define BACKEND_GENERIC_IN_PLACE_MATRIX_PROD(Type, Container) \
virtual void matrix_prod(Container<Type>& a, Container<Type>& b,\
virtual void matrix_prod(SGMatrix<Type>& a, Container<Type>& b,\
Container<Type>& result, bool transpose_A, bool transpose_B) const \
{ \
SG_SNOTIMPLEMENTED; \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_MATRIX_PROD

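For orientation (not part of the commit): with the SGVector container, DEFINE_FOR_ALL_PTYPE stamps out one such virtual overload per primitive type. For float64_t the expansion is roughly:

// Sketch of the macro expansion for Type = float64_t, Container = SGVector.
// In the base class the default implementation simply reports "not implemented".
virtual void matrix_prod(SGMatrix<float64_t>& a, SGVector<float64_t>& b,
	SGVector<float64_t>& result, bool transpose_A, bool transpose_B) const
{
	SG_SNOTIMPLEMENTED;
}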
18 changes: 17 additions & 1 deletion src/shogun/mathematics/linalg/LinalgBackendEigen.h
@@ -116,11 +116,12 @@ class LinalgBackendEigen : public LinalgBackendBase

/** Implementation of @see LinalgBackendBase::matrix_prod */
#define BACKEND_GENERIC_IN_PLACE_MATRIX_PROD(Type, Container) \
virtual void matrix_prod(Container<Type>& a, Container<Type>& b,\
virtual void matrix_prod(SGMatrix<Type>& a, Container<Type>& b,\
Container<Type>& result, bool transpose_A, bool transpose_B) const \
{ \
matrix_prod_impl(a, b, result, transpose_A, transpose_B); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_MATRIX_PROD

@@ -322,6 +323,21 @@ class LinalgBackendEigen : public LinalgBackendBase
result_eig = a_eig.array() * b_eig.array();
}

/** Eigen3 matrix * vector in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGVector<T>& b, SGVector<T>& result,
bool transpose, bool transpose_B=false) const
{
typename SGMatrix<T>::EigenMatrixXtMap a_eig = a;
typename SGVector<T>::EigenVectorXtMap b_eig = b;
typename SGVector<T>::EigenVectorXtMap result_eig = result;

if (transpose)
result_eig = a_eig.transpose() * b_eig;
else
result_eig = a_eig * b_eig;
}

/** Eigen3 matrix in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result,
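As a side note (not part of the commit), the Eigen3 matrix*vector implementation above reduces to mapping the Shogun containers onto Eigen types (roughly what the EigenMatrixXtMap / EigenVectorXtMap typedefs provide) and letting Eigen's operator* do the work. A minimal standalone sketch, assuming only the Eigen headers and column-major raw buffers:

#include <Eigen/Dense>

int main()
{
	// 3x3 column-major matrix and a length-3 vector, mapped from raw memory
	// the way SGMatrix/SGVector hand their buffers to Eigen.
	double a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
	double b_data[] = {0, 0.5, 1};
	Eigen::Map<Eigen::MatrixXd> A(a_data, 3, 3);
	Eigen::Map<Eigen::VectorXd> b(b_data, 3);

	Eigen::VectorXd x = A * b;             // non-transposed branch: x = A b
	Eigen::VectorXd y = A.transpose() * b; // transposed branch:     y = A^T b
	return 0;
}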
21 changes: 20 additions & 1 deletion src/shogun/mathematics/linalg/LinalgBackendViennacl.h
@@ -106,11 +106,12 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase

/** Implementation of @see LinalgBackendBase::matrix_prod */
#define BACKEND_GENERIC_IN_PLACE_MATRIX_PROD(Type, Container) \
virtual void matrix_prod(Container<Type>& a, Container<Type>& b,\
virtual void matrix_prod(SGMatrix<Type>& a, Container<Type>& b,\
Container<Type>& result, bool transpose_A, bool transpose_B) const \
{ \
matrix_prod_impl(a, b, result, transpose_A, transpose_B); \
}
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGVector)
DEFINE_FOR_ALL_PTYPE(BACKEND_GENERIC_IN_PLACE_MATRIX_PROD, SGMatrix)
#undef BACKEND_GENERIC_IN_PLACE_MATRIX_PROD

@@ -294,6 +295,24 @@ class LinalgBackendViennaCL : public LinalgBackendGPUBase
a_gpu->data_matrix(a.num_rows, a.num_cols), b_gpu->data_matrix(a.num_rows, a.num_cols));
}

/** ViennaCL matrix * vector in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGVector<T>& b, SGVector<T>& result,
bool transpose, bool transpose_B=false) const
{
GPUMemoryViennaCL<T>* a_gpu = cast_to_viennacl(a);
GPUMemoryViennaCL<T>* b_gpu = cast_to_viennacl(b);
GPUMemoryViennaCL<T>* result_gpu = cast_to_viennacl(result);

if (transpose)
result_gpu->data_vector(result.vlen) = viennacl::linalg::prod(
viennacl::trans(a_gpu->data_matrix(a.num_rows, a.num_cols)),
b_gpu->data_vector(b.vlen));
else
result_gpu->data_vector(result.vlen) = viennacl::linalg::prod(
a_gpu->data_matrix(a.num_rows, a.num_cols), b_gpu->data_vector(b.vlen));
}

/** ViennaCL matrix in-place product method */
template <typename T>
void matrix_prod_impl(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result,
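For context (not part of the commit), the GPU path is exercised exactly the way the ViennaCL unit tests below do it: register the ViennaCL backend, move the operands to the device with to_gpu, call matrix_prod so it dispatches to the implementation above, and copy the result back with from_gpu. A condensed sketch, assuming the sg_linalg global used in those tests:

sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());

SGMatrix<float64_t> A(4, 3);
SGVector<float64_t> b(3);
// ... fill A and b ...

SGMatrix<float64_t> A_gpu = to_gpu(A);
SGVector<float64_t> b_gpu = to_gpu(b);
SGVector<float64_t> x_gpu = matrix_prod(A_gpu, b_gpu); // runs via the ViennaCL backend
SGVector<float64_t> x = from_gpu(x_gpu);               // transfer the result back to host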
69 changes: 69 additions & 0 deletions src/shogun/mathematics/linalg/LinalgNamespace.h
@@ -361,6 +361,75 @@ SGMatrix<T> element_prod(SGMatrix<T>& a, SGMatrix<T>& b)
return result;
}

/** Performs the matrix-vector product \f$x = Ab\f$.
*
* This version writes the result in-place.
* The user should pass an appropriately allocated result vector.
*
* @param A The matrix
* @param b The vector
* @param transpose Whether to transpose matrix A. Default: false
* @param result Result vector
*/
template <typename T>
void matrix_prod(SGMatrix<T>& A, SGVector<T>& b, SGVector<T>& result, bool transpose = false)
{
if (transpose)
{
REQUIRE(A.num_rows == b.vlen, "Row number of Matrix A (%d) doesn't match \
length of vector b (%d).\n", A.num_rows, b.vlen);
REQUIRE(result.vlen == A.num_cols, "Length of vector result (%d) doesn't match \
column number of Matrix A (%d).\n", result.vlen, A.num_cols);
}
else
{
REQUIRE(A.num_cols == b.vlen, "Column number of Matrix A (%d) doesn't match \
length of vector b (%d).\n", A.num_cols, b.vlen);
REQUIRE(result.vlen == A.num_rows, "Length of vector result (%d) doesn't match \
row number of Matrix A (%d).\n", result.vlen, A.num_rows);
}

REQUIRE(!(result.on_gpu()^A.on_gpu()),
"Cannot operate with vector result on_gpu (%d) and matrix A on_gpu (%d).\n", result.on_gpu(), A.on_gpu());
REQUIRE(!(result.on_gpu()^b.on_gpu()),
"Cannot operate with vector result on_gpu (%d) and vector b on_gpu (%d).\n", result.on_gpu(), b.on_gpu());

infer_backend(A, SGMatrix<T>(b))->matrix_prod(A, b, result, transpose, false);
}
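/* Usage illustration only (not part of this commit), mirroring the unit tests
 * added further below: the caller pre-allocates the result vector and the
 * product is written into it.
 *
 *   SGMatrix<float64_t> A(4, 3);
 *   SGVector<float64_t> b(3);
 *   SGVector<float64_t> x(4);       // appropriately allocated result
 *   // ... fill A and b ...
 *   matrix_prod(A, b, x);           // x = A * b
 *
 * With transpose = true the product is x = A^T * b, which instead requires
 * b.vlen == A.num_rows and result.vlen == A.num_cols.
 */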

/** Performs the matrix-vector product \f$x = Ab\f$.
*
* This version allocates and returns the result vector.
*
* @param A The matrix
* @param b The vector
* @param transpose Whether to transpose matrix A. Default: false
* @return Result vector
*/
template <typename T>
SGVector<T> matrix_prod(SGMatrix<T>& A, SGVector<T>& b, bool transpose = false)
{
SGVector<T> result;
if (transpose)
{
REQUIRE(A.num_rows == b.vlen, "Row number of Matrix A (%d) doesn't match \
length of vector b (%d).\n", A.num_rows, b.vlen);

result.resize_vector(A.num_cols);
}
else
{
REQUIRE(A.num_cols == b.vlen, "Column number of Matrix A (%d) doesn't match \
length of vector b (%d).\n", A.num_cols, b.vlen);

result.resize_vector(A.num_rows);
}

if (A.on_gpu())
result = to_gpu(result);

matrix_prod(A, b, result, transpose);
return result;
}
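/* Usage illustration only (not part of this commit): the allocating overload
 * sizes the result itself (A.num_rows entries, or A.num_cols when
 * transpose = true) and keeps it on the GPU if A lives there.
 *
 *   SGMatrix<float64_t> A(3, 4);    // 3x4 matrix
 *   SGVector<float64_t> b(3);
 *   // ... fill A and b ...
 *   SGVector<float64_t> x = matrix_prod(A, b, true);  // x = A^T * b, length 4
 */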

/** Performs the operation C = A * B where "*" denotes matrix multiplication.
*
* This version returns the result in-place.
@@ -142,6 +142,102 @@ TEST(LinalgBackendEigen, SGMatrix_elementwise_product_in_place)
EXPECT_NEAR(C[i]*B[i], A[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_SGVector_matrix_prod)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(rows, cols);
SGVector<float64_t> b(cols);

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(j, i) = i * rows + j;
b[i]=0.5 * i;
}

auto x = matrix_prod(A, b);

float64_t ref[] = {10, 11.5, 13, 14.5};

EXPECT_EQ(x.vlen, A.num_rows);
for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}
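// Worked check of the reference values above (illustration, not part of the
// commit): with A(j, i) = i * rows + j the columns of A are [0 1 2 3],
// [4 5 6 7] and [8 9 10 11], and b = [0, 0.5, 1], so
//   x = 0.5 * [4 5 6 7] + 1 * [8 9 10 11] = [10, 11.5, 13, 14.5],
// which matches ref.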

TEST(LinalgBackendEigen, SGMatrix_SGVector_matrix_prod_transpose)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(cols, rows);
SGVector<float64_t> b(cols);

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(i, j) = i * cols + j;
b[i] = 0.5 * i;
}

auto x = matrix_prod(A, b, true);

float64_t ref[] = {7.5, 9, 10.5, 14.5};

EXPECT_EQ(x.vlen, A.num_cols);
for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_SGVector_matrix_prod_in_place)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(rows, cols);
SGVector<float64_t> b(cols);
SGVector<float64_t> x(rows);

for (index_t i = 0; i<cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(j, i) = i * rows + j;
b[i] = 0.5 * i;
}

matrix_prod(A, b, x);

float64_t ref[] = {10, 11.5, 13, 14.5};

for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_SGVector_matrix_prod_in_place_transpose)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(cols, rows);
SGVector<float64_t> b(cols);
SGVector<float64_t> x(rows);

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(i, j) = i * cols + j;
b[i] = 0.5 * i;
}

matrix_prod(A, b, x, true);

float64_t ref[] = {7.5, 9, 10.5, 14.5};

for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendEigen, SGMatrix_matrix_product)
{
const index_t dim1 = 2, dim2 = 4, dim3 = 3;
@@ -131,7 +131,7 @@ TEST(LinalgBackendViennaCL, SGVector_dot)

EXPECT_NEAR(result, 5, 1E-15);
}
/*

TEST(LinalgBackendViennaCL, SGMatrix_elementwise_product)
{
sg_linalg->set_gpu_backend(new LinalgBackendViennaCL());
@@ -179,7 +179,121 @@ TEST(LinalgBackendViennaCL, SGMatrix_elementwise_product_in_place)
for (index_t i = 0; i < 9; ++i)
EXPECT_NEAR(C[i]*B[i], A[i], 1e-15);
}
*/

TEST(LinalgBackendViennaCL, SGMatrix_SGVector_matrix_prod)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(rows, cols), A_gpu;
SGVector<float64_t> b(cols), b_gpu;

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(j, i) = i * rows + j;
b[i] = 0.5 * i;
}

A_gpu = to_gpu(A);
b_gpu = to_gpu(b);
auto x_gpu = matrix_prod(A_gpu, b_gpu);
auto x = from_gpu(x_gpu);

float64_t ref[] = {10, 11.5, 13, 14.5};

EXPECT_EQ(x.vlen, A.num_rows);
for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_SGVector_matrix_prod_transpose)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(cols, rows), A_gpu;
SGVector<float64_t> b(cols), b_gpu;

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(i, j) = i * cols + j;
b[i] = 0.5 * i;
}

A_gpu = to_gpu(A);
b_gpu = to_gpu(b);
auto x_gpu = matrix_prod(A_gpu, b_gpu, true);
auto x = from_gpu(x_gpu);

float64_t ref[] = {7.5, 9, 10.5, 14.5};

EXPECT_EQ(x.vlen, A.num_cols);
for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_SGVector_matrix_prod_in_place)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(rows, cols), A_gpu;
SGVector<float64_t> b(cols), b_gpu;
SGVector<float64_t> x(rows), x_gpu;

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(j, i) = i * rows + j;
b[i] = 0.5 * i;
}
x.zero();
x_gpu = to_gpu(x);

A_gpu = to_gpu(A);
b_gpu = to_gpu(b);
matrix_prod(A_gpu, b_gpu, x_gpu);
x = from_gpu(x_gpu);

float64_t ref[] = {10, 11.5, 13, 14.5};

EXPECT_EQ(x.vlen, A.num_rows);
for (index_t i=0; i<cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_SGVector_matrix_prod_in_place_transpose)
{
const index_t rows=4;
const index_t cols=3;

SGMatrix<float64_t> A(cols, rows), A_gpu;
SGVector<float64_t> b(cols), b_gpu;
SGVector<float64_t> x(rows), x_gpu;

for (index_t i = 0; i < cols; ++i)
{
for (index_t j = 0; j < rows; ++j)
A(i, j) = i * cols + j;
b[i] = 0.5 * i;
}

x.zero();
x_gpu = to_gpu(x);

A_gpu = to_gpu(A);
b_gpu = to_gpu(b);
matrix_prod(A_gpu, b_gpu, x_gpu, true);
x = from_gpu(x_gpu);

float64_t ref[] = {7.5, 9, 10.5, 14.5};

for (index_t i = 0; i < cols; ++i)
EXPECT_NEAR(x[i], ref[i], 1e-15);
}

TEST(LinalgBackendViennaCL, SGMatrix_matrix_product)
{
const index_t dim1 = 2, dim2 = 4, dim3 = 3;
