Skip to content

Commit

Permalink
Initial refactor of linalg/eigsolver classes
Browse files Browse the repository at this point in the history
starting work on issue #1930
  • Loading branch information
vigsterkr committed Mar 8, 2014
1 parent 75ea8d3 commit cb82500
Show file tree
Hide file tree
Showing 12 changed files with 68 additions and 89 deletions.
8 changes: 4 additions & 4 deletions doc/ipython-notebooks/logdet/logdet.ipynb
Expand Up @@ -101,7 +101,7 @@
"op = RealSparseMatrixOperator(A.tocsc())\n",
"\n",
"# Lanczos iterative Eigensolver to compute the min/max Eigenvalues which is required to compute the shifts\n",
"eigen_solver = LanczosEigenSolver(op)\n",
"eigen_solver = LanczosEigenSolver()\n",
"# we set the iteration limit high to compute the eigenvalues more accurately, default iteration limit is 1000\n",
"eigen_solver.set_max_iteration_limit(2000)\n",
"\n",
Expand Down Expand Up @@ -265,7 +265,7 @@
"collapsed": false,
"input": [
"op = RealSparseMatrixOperator(B)\n",
"eigen_solver = LanczosEigenSolver(op)\n",
"eigen_solver = LanczosEigenSolver()\n",
"\n",
"# computing log-det estimates using probing sampler\n",
"probing_sampler = ProbingSampler(op)\n",
Expand Down Expand Up @@ -373,7 +373,7 @@
"def log_det(A):\n",
" op = RealSparseMatrixOperator(A)\n",
" engine = SerialComputationEngine()\n",
" eigen_solver = LanczosEigenSolver(op)\n",
" eigen_solver = LanczosEigenSolver()\n",
" probing_sampler = ProbingSampler(op)\n",
" cgm = CGMShiftedFamilySolver()\n",
" cgm.set_iteration_limit(100)\n",
Expand Down Expand Up @@ -724,4 +724,4 @@
"metadata": {}
}
]
}
}

This comment has been minimized.

Copy link
@lambday

lambday Mar 9, 2014

Member

This might create a problem :(

2 changes: 1 addition & 1 deletion examples/undocumented/python_modular/mathematics_logdet.py
Expand Up @@ -32,7 +32,7 @@ def mathematics_logdet (matrix=mtx,max_iter_eig=1000,max_iter_lin=1000,num_sampl
# creating the linear operator, eigen-solver
op=RealSparseMatrixOperator(A.tocsc())

eig_solver=LanczosEigenSolver(op)
eig_solver=LanczosEigenSolver()

# we can set the iteration limit high for poorly conditioned matrices
eig_solver.set_max_iteration_limit(max_iter_eig)
Expand Down
27 changes: 16 additions & 11 deletions src/shogun/mathematics/linalg/eigsolver/DirectEigenSolver.cpp
Expand Up @@ -26,19 +26,12 @@ CDirectEigenSolver::CDirectEigenSolver()
SG_GCDEBUG("%s created (%p)\n", this->get_name(), this)
}

CDirectEigenSolver::CDirectEigenSolver(
CDenseMatrixOperator<float64_t>* linear_operator)
: CEigenSolver((CLinearOperator<float64_t>*)linear_operator)
{
SG_GCDEBUG("%s created (%p)\n", this->get_name(), this)
}

CDirectEigenSolver::~CDirectEigenSolver()
{
SG_GCDEBUG("%s destroyed (%p)\n", this->get_name(), this)
}

void CDirectEigenSolver::compute()
void CDirectEigenSolver::compute(CLinearOperator<float64_t>* linear_operator)
{
if (m_is_computed_min && m_is_computed_max)
{
Expand All @@ -47,16 +40,28 @@ void CDirectEigenSolver::compute()
}

CDenseMatrixOperator<float64_t>* op
=dynamic_cast<CDenseMatrixOperator<float64_t>*>(m_linear_operator);
=dynamic_cast<CDenseMatrixOperator<float64_t>*>(linear_operator);
REQUIRE(op, "Linear operator is not of CDenseMatrixOperator type!\n");

SGMatrix<float64_t> m=op->get_matrix_operator();
SG_REF(linear_operator);
this->compute(op->get_matrix_operator());
SG_UNREF(linear_operator);
}

void CDirectEigenSolver::compute(SGMatrix<float64_t> m)
{
if (m_is_computed_min && m_is_computed_max)
{
SG_DEBUG("Minimum/maximum eigenvalues are already computed, exiting\n");
return;
}

Map<MatrixXd> map_m(m.matrix, m.num_rows, m.num_cols);

// compute the eigenvalues with Eigen3
SelfAdjointEigenSolver<MatrixXd> eig_solver(map_m);
m_min_eigenvalue=eig_solver.eigenvalues()[0];
m_max_eigenvalue=eig_solver.eigenvalues()[op->get_dimension()-1];
m_max_eigenvalue=eig_solver.eigenvalues()[m.num_cols-1];

m_is_computed_min=true;
m_is_computed_max=false;
Expand Down
21 changes: 12 additions & 9 deletions src/shogun/mathematics/linalg/eigsolver/DirectEigenSolver.h
Expand Up @@ -28,22 +28,25 @@ class CDirectEigenSolver : public CEigenSolver
/** default constructor */
CDirectEigenSolver();

/**
* constructor
*
* @param linear_operator self-adjoint dense-matrix linear operator whose
* eigenvalues have to be found
*/
CDirectEigenSolver(CDenseMatrixOperator<float64_t>* linear_operator);

/** destructor */
virtual ~CDirectEigenSolver();

/**
* compute method for computing eigenvalues of a real valued dense matrix
* linear operator
*
* @param linear_operator real valued self-adjoint linear operator
* whose eigenvalues have to be found
*/
virtual void compute(CLinearOperator<float64_t>* linear_operator);

/**
* compute method for computing eigenvalues of a real valued dense matrix
*
* @param m real valued self-adjoint matrix whose eigenvalues have to be
* found
*/
virtual void compute();
void compute(SGMatrix<float64_t> m);

This comment has been minimized.

Copy link
@karlnapf

karlnapf Mar 9, 2014

Member
  • Why move passing the matrix into the compute method? It's not that I disagree, but what's the reason?
  • This really should be under the linear operator interface, and the matrix version should just be convenience wrapper

This comment has been minimized.

Copy link
@vigsterkr

vigsterkr Mar 9, 2014

Author Member

Because what if you want to do compute on a linear operator and a matrix? You would need to create two different eigensolvers, for no particular reason. See the implementation of DirectEigenSolver — it's one implementation with different interfaces... This would be much messier if we fixed in the class's ctor what sort of data structure you want to do the solving on... and I really see no reason why we should limit the re-usability of an eigensolver.

btw: eigen has the same interface

This comment has been minimized.

Copy link
@vigsterkr

vigsterkr Mar 9, 2014

Author Member

This really should be under the linear operator interface, and the matrix version should just be convenience wrapper
sorry didn't get this one...

This comment has been minimized.

Copy link
@karlnapf

karlnapf Mar 9, 2014

Member

Ehm, I did not get you either :) but never mind, it's fine to move.

The point about the matrix i made is: Everything should be written in terms of linear operators, in particular sparse (eigen,***)-solvers, not matrices. The methods which take a matrix then internally can create a linear operator wrapper around the given matrix. We need that since in iterative solvers, one might want to chain multiple matrices to one operator. It is often not even possible to precompute/store this, so things have to be applied one after another. There are also linear operators which do not even correspond to matrices.

In short: The compute(SGMatrix) method should just be a convenience wrapper for linear operators.


/** @return object name */
virtual const char* get_name() const
Expand Down
28 changes: 4 additions & 24 deletions src/shogun/mathematics/linalg/eigsolver/EigenSolver.h
Expand Up @@ -31,31 +31,19 @@ class CEigenSolver : public CSGObject
init();
}

/**
* constructor
*
* @param linear_operator real valued self-adjoint linear operator
* whose eigenvalues have to be found
*/
CEigenSolver(CLinearOperator<float64_t>* linear_operator)
: CSGObject()
{
init();

m_linear_operator=linear_operator;
SG_REF(m_linear_operator);
}
/** destructor */
virtual ~CEigenSolver()
{
SG_UNREF(m_linear_operator);
}

/**
* abstract compute method for computing eigenvalues of a real
* valued linear operator
*
* @param linear_operator real valued self-adjoint linear operator
* whose eigenvalues have to be found
*/
virtual void compute() = 0;
virtual void compute(CLinearOperator<float64_t>* linear_operator) = 0;

/** sets the min eigelvalue of a real valued self-adjoint linear operator */
void set_min_eigenvalue(float64_t min_eigenvalue)
Expand Down Expand Up @@ -95,9 +83,6 @@ class CEigenSolver : public CSGObject
/** max eigenvalue */
float64_t m_max_eigenvalue;

/** the linear solver whose eigenvalues have to be found */
CLinearOperator<float64_t>* m_linear_operator;

/** flag that denotes that the minimum eigenvalue is already computed */
bool m_is_computed_min;

Expand All @@ -110,7 +95,6 @@ class CEigenSolver : public CSGObject
{
m_min_eigenvalue=0.0;
m_max_eigenvalue=0.0;
m_linear_operator=NULL;
m_is_computed_min=false;
m_is_computed_max=false;

Expand All @@ -122,10 +106,6 @@ class CEigenSolver : public CSGObject
"Maximum eigenvalue of a real valued self-adjoint linear operator",
MS_NOT_AVAILABLE);

SG_ADD((CSGObject**)&m_linear_operator, "linear_operator",
"Self-adjoint linear operator",
MS_NOT_AVAILABLE);

SG_ADD(&m_is_computed_min, "is_computed_min",
"Flag denoting that the minimum eigenvalue has already been computed",
MS_NOT_AVAILABLE);
Expand Down
18 changes: 7 additions & 11 deletions src/shogun/mathematics/linalg/eigsolver/LanczosEigenSolver.cpp
Expand Up @@ -31,13 +31,6 @@ CLanczosEigenSolver::CLanczosEigenSolver()
init();
}

CLanczosEigenSolver::CLanczosEigenSolver(
CLinearOperator<float64_t>* linear_operator)
: CEigenSolver(linear_operator)
{
init();
}

void CLanczosEigenSolver::init()
{
m_max_iteration_limit=1000;
Expand All @@ -58,7 +51,7 @@ CLanczosEigenSolver::~CLanczosEigenSolver()
{
}

void CLanczosEigenSolver::compute()
void CLanczosEigenSolver::compute(CLinearOperator<float64_t>* linear_operator)
{
SG_DEBUG("Entering\n");

Expand All @@ -68,10 +61,12 @@ void CLanczosEigenSolver::compute()
return;
}

REQUIRE(m_linear_operator, "Operator is NULL!\n");
REQUIRE(linear_operator, "Operator is NULL!\n");

SG_REF(linear_operator);

// vector v_0
VectorXd v=VectorXd::Zero(m_linear_operator->get_dimension());
VectorXd v=VectorXd::Zero(linear_operator->get_dimension());

// vector v_i, for i=1 this is random valued with norm 1
SGVector<float64_t> v_(v.rows());
Expand Down Expand Up @@ -100,7 +95,7 @@ void CLanczosEigenSolver::compute()
it.get_iter_info().residual_norm);

// apply linear operator to the direction vector
w_=m_linear_operator->apply(v_);
w_=linear_operator->apply(v_);
Map<VectorXd> w_i(w_.vector, w_.vlen);

// compute v^{T}Av, if zero, failure
Expand Down Expand Up @@ -164,6 +159,7 @@ void CLanczosEigenSolver::compute()
SG_WARNING("Some error occured while computing eigenvalues!\n");
}

SG_UNREF(linear_operator);
SG_DEBUG("Leaving\n");
}

Expand Down
13 changes: 4 additions & 9 deletions src/shogun/mathematics/linalg/eigsolver/LanczosEigenSolver.h
Expand Up @@ -29,21 +29,16 @@ class CLanczosEigenSolver : public CEigenSolver
/** default constructor */
CLanczosEigenSolver();

/**
* constructor
*
* @param linear_operator self-adjoint linear operator whose eigenvalues
* are to be found
*/
CLanczosEigenSolver(CLinearOperator<float64_t>* linear_operator);

/** destructor */
virtual ~CLanczosEigenSolver();

/**
* compute method for computing eigenvalues of a real valued linear operator
*
* @param linear_operator real valued self-adjoint linear operator
* whose eigenvalues have to be found
*/
virtual void compute();
virtual void compute(CLinearOperator<float64_t>* linear_operator);

/** @param max_iteration_limit to be set */
void set_max_iteration_limit(int64_t max_iteration_limit)
Expand Down
Expand Up @@ -111,7 +111,7 @@ void CRationalApproximation::set_num_shifts(index_t num_shifts)
void CRationalApproximation::precompute()
{
// compute extremal eigenvalues
m_eigen_solver->compute();
m_eigen_solver->compute(m_linear_operator);
SG_INFO("max_eig=%.15lf\n", m_eigen_solver->get_max_eigenvalue());
SG_INFO("min_eig=%.15lf\n", m_eigen_solver->get_min_eigenvalue());

Expand Down
4 changes: 2 additions & 2 deletions tests/unit/mathematics/linalg/DirectEigenSolver_unittest.cc
Expand Up @@ -28,8 +28,8 @@ TEST(DirectEigenSolver, compute)
CDenseMatrixOperator<float64_t>* A=new CDenseMatrixOperator<float64_t>(m);
SG_REF(A);

CDirectEigenSolver eig_solver(A);
eig_solver.compute();
CDirectEigenSolver eig_solver;
eig_solver.compute(A);

float64_t min_eigval=eig_solver.get_min_eigenvalue();
float64_t max_eigval=eig_solver.get_max_eigenvalue();
Expand Down
16 changes: 8 additions & 8 deletions tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc
Expand Up @@ -44,8 +44,8 @@ TEST(LanczosEigenSolver, compute)
CLinearOperator<float64_t>* A=new CSparseMatrixOperator<float64_t>(mat);
SG_REF(A);

eig_solver=new CLanczosEigenSolver(A);
eig_solver->compute();
eig_solver=new CLanczosEigenSolver();
eig_solver->compute(A);

float64_t lanc_max_eig=eig_solver->get_max_eigenvalue();
float64_t lanc_min_eig=eig_solver->get_min_eigenvalue();
Expand All @@ -57,8 +57,8 @@ TEST(LanczosEigenSolver, compute)
CDenseMatrixOperator<float64_t>* B=new CDenseMatrixOperator<float64_t>(m);
SG_REF(B);

eig_solver=new CDirectEigenSolver(B);
eig_solver->compute();
eig_solver=new CDirectEigenSolver();
eig_solver->compute(B);

float64_t dir_max_eig=eig_solver->get_max_eigenvalue();
float64_t dir_min_eig=eig_solver->get_min_eigenvalue();
Expand Down Expand Up @@ -91,9 +91,9 @@ TEST(LanczosEigenSolver, compute_big_diag_matrix)
}
op->set_diagonal(diag);

CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(op);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
SG_REF(eig_solver);
eig_solver->compute();
eig_solver->compute(op);

// test eigenvalues
Map<VectorXd> diag_map(diag.vector, diag.vlen);
Expand All @@ -118,11 +118,11 @@ TEST(LanczosEigenSolver, set_eigenvalues_externally)
SG_REF(A);
float64_t min_eigenvalue=0.0001;
float64_t max_eigenvalue=100000.0;
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(A);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
eig_solver->set_min_eigenvalue(min_eigenvalue);
eig_solver->set_max_eigenvalue(max_eigenvalue);

eig_solver->compute();
eig_solver->compute(A);
EXPECT_NEAR(eig_solver->get_min_eigenvalue(), min_eigenvalue, 1E-16);
EXPECT_NEAR(eig_solver->get_max_eigenvalue(), max_eigenvalue, 1E-16);

Expand Down
10 changes: 5 additions & 5 deletions tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc
Expand Up @@ -95,7 +95,7 @@ TEST(LogDetEstimator, sample_ratapp_dense)
CDenseMatrixOperator<float64_t>* op=new CDenseMatrixOperator<float64_t>(mat);
SG_REF(op);

CDirectEigenSolver* eig_solver=new CDirectEigenSolver(op);
CDirectEigenSolver* eig_solver=new CDirectEigenSolver();
SG_REF(eig_solver);

CDirectLinearSolverComplex* linear_solver=new CDirectLinearSolverComplex();
Expand Down Expand Up @@ -184,7 +184,7 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler)
CDenseMatrixOperator<float64_t>* opd=new CDenseMatrixOperator<float64_t>(mat);
SG_REF(opd);

CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(op);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
SG_REF(eig_solver);

CDirectLinearSolverComplex* linear_solver=new CDirectLinearSolverComplex();
Expand Down Expand Up @@ -269,7 +269,7 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler_cgm)
CSparseMatrixOperator<float64_t>* op=new CSparseMatrixOperator<float64_t>(sm);
SG_REF(op);

CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(op);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
SG_REF(eig_solver);

CCGMShiftedFamilySolver* linear_solver=new CCGMShiftedFamilySolver();
Expand Down Expand Up @@ -325,7 +325,7 @@ TEST(LogDetEstimator, sample_ratapp_big_diag_matrix)
}
op->set_diagonal(diag);

CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(op);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
SG_REF(eig_solver);

CCGMShiftedFamilySolver* linear_solver=new CCGMShiftedFamilySolver();
Expand Down Expand Up @@ -391,7 +391,7 @@ TEST(LogDetEstimator, sample_ratapp_big_matrix)
CSparseMatrixOperator<float64_t>* op=new CSparseMatrixOperator<float64_t>(sm);
SG_REF(op);

CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver(op);
CLanczosEigenSolver* eig_solver=new CLanczosEigenSolver();
SG_REF(eig_solver);

CCGMShiftedFamilySolver* linear_solver=new CCGMShiftedFamilySolver();
Expand Down

2 comments on commit cb82500

@lambday
Copy link
Member

@lambday lambday commented on cb82500 Mar 9, 2014

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I absolutely love these changes :)

@karlnapf
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Me too! :)

Please sign in to comment.