Merge pull request #3086 from yorkerlin/develop
fixed some python_modular warnings
karlnapf committed Mar 17, 2016
2 parents 079e531 + 88ad808 commit 8e69308
Showing 7 changed files with 34 additions and 33 deletions.
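Editor's note: most of this diff simply turns plain `/* ... */` block comments into `/** ... */` doxygen comments. The second asterisk is what makes doxygen treat the block as documentation; assuming the python_modular warnings come from the Python docstrings generated out of doxygen's output, a member documented with a bare `/*` ends up with no docstring at all. A minimal sketch (`CExample` is a made-up class, not shogun API):

```cpp
class CExample
{
protected:
    /* Plain C comment: doxygen ignores this block, so m_plain is
     * reported as an undocumented member. */
    int m_plain;

    /** Doxygen (Javadoc-style) comment: the extra '*' attaches this
     * text to m_documented, so it survives into generated docstrings. */
    int m_documented;
};
```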
34 changes: 17 additions & 17 deletions src/shogun/machine/gp/KLInferenceMethod.h
@@ -178,7 +178,7 @@ class CKLInferenceMethod: public CInferenceMethod
/** update all matrices except gradients */
virtual void update();

/* set L-BFGS parameters
/** set L-BFGS parameters
* For details please see shogun/optimization/lbfgs/lbfgs.h
* @param m The number of corrections to approximate the inverse hessian matrix.
* Default value is 100.
@@ -423,52 +423,52 @@ class CKLInferenceMethod: public CInferenceMethod
*/
SGVector<float64_t> m_s2;

/* The number of corrections to approximate the inverse hessian matrix.*/
/** The number of corrections to approximate the inverse hessian matrix.*/
int m_m;

/* The maximum number of trials to do line search for each L-BFGS update.*/
/** The maximum number of trials to do line search for each L-BFGS update.*/
int m_max_linesearch;

/* The line search algorithm.*/
/** The line search algorithm.*/
int m_linesearch;

/* The maximum number of iterations for L-BFGS update.*/
/** The maximum number of iterations for L-BFGS update.*/
int m_max_iterations;

/* Delta for convergence test based on the change of function value.*/
/** Delta for convergence test based on the change of function value.*/
float64_t m_delta;

/* Distance for delta-based convergence test.*/
/** Distance for delta-based convergence test.*/
int m_past;

/* Epsilon for convergence test based on the change of gradient.*/
/** Epsilon for convergence test based on the change of gradient.*/
float64_t m_epsilon;

/* The minimum step of the line search.*/
/** The minimum step of the line search.*/
float64_t m_min_step;

/* The maximum step of the line search.*/
/** The maximum step of the line search.*/
float64_t m_max_step;

/* A parameter used in Armijo condition.*/
/** A parameter used in Armijo condition.*/
float64_t m_ftol;

/* A parameter used in curvature condition.*/
/** A parameter used in curvature condition.*/
float64_t m_wolfe;

/* A parameter used in Morethuente linesearch to control the accuracy.*/
/** A parameter used in Morethuente linesearch to control the accuracy.*/
float64_t m_gtol;

/* The machine precision for floating-point values.*/
/** The machine precision for floating-point values.*/
float64_t m_xtol;

/* Coeefficient for the L1 norm of variables.*/
/** Coefficient for the L1 norm of variables.*/
float64_t m_orthantwise_c;

/* Start index for computing L1 norm of the variables.*/
/** Start index for computing L1 norm of the variables.*/
int m_orthantwise_start;

/* End index for computing L1 norm of the variables.*/
/** End index for computing L1 norm of the variables.*/
int m_orthantwise_end;

private:
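For orientation, the `m_*` members above mirror the tunables of the L-BFGS solver bundled under shogun/optimization/lbfgs/. A hedged sketch of how such values are typically forwarded, assuming the bundled header keeps libLBFGS's `lbfgs_parameter_t` fields and `lbfgs_parameter_init()` (the helper itself is hypothetical):

```cpp
#include <shogun/optimization/lbfgs/lbfgs.h>

using namespace shogun; // assuming the bundled header wraps these in namespace shogun

// Hypothetical helper: fill the solver parameters with the defaults
// documented above; field names assume libLBFGS compatibility.
lbfgs_parameter_t make_lbfgs_params()
{
    lbfgs_parameter_t p;
    lbfgs_parameter_init(&p); // start from the library defaults
    p.m = 100;                // corrections for the inverse-Hessian approximation
    p.epsilon = 1e-5;         // convergence test on the change of gradient
    p.past = 0;               // distance for the delta-based convergence test
    p.delta = 1e-5;           // function-value change threshold (used when past > 0)
    return p;
}
```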
2 changes: 1 addition & 1 deletion src/shogun/machine/gp/LaplacianInferenceBase.h
@@ -113,7 +113,7 @@ class CLaplacianInferenceBase: public CInferenceMethod
* \f]
*
* where \f$E_\text{c}\f$ is the matrix defined in the algorithm 3.3 of the GPML textbook for class c
* Note the E matrix is used to store these E_\text{c} matrices, where E=[E_1, E_2, ..., E_C],
* Note the E matrix is used to store these \f$E_\text{c}\f$ matrices, where E=[E_1, E_2, ..., E_C],
* where C is the number of classes and C should be greater than 1.
*/
virtual SGMatrix<float64_t> get_cholesky();
2 changes: 1 addition & 1 deletion src/shogun/machine/gp/MultiLaplacianInferenceMethod.h
@@ -230,7 +230,7 @@ class CMultiLaplacianInferenceMethod: public CLaplacianInferenceBase

/** the helper used to compute gradient of GP for inference
*
* construct the \f$\pi$\f vector defined in the algorithm 3.3 of the GPML textbook
* construct the \f$\pi\f$ vector defined in the algorithm 3.3 of the GPML textbook
* Note that the vector is stored in m_W
*
*/
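The `\f$\pi$\f` → `\f$\pi\f$` fix above is easy to miss: doxygen delimits inline formulas with `\f$ ... \f$` and display formulas with `\f[ ... \f]`, so a scrambled closer like `$\f` leaves the formula unparsed and triggers a warning. For illustration, a correctly delimited comment on a hypothetical accessor (`get_pi()` is not shogun API):

```cpp
/** construct the \f$\pi\f$ vector of algorithm 3.3 of the GPML textbook,
 * whose entries under the softmax likelihood are
 * \f[
 * \pi_{ic} = p(y_i = c \mid f_i)
 * \f]
 */
SGVector<float64_t> get_pi() const;
```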
6 changes: 3 additions & 3 deletions src/shogun/machine/gp/SingleFITCLaplacianBase.h
@@ -236,10 +236,10 @@ class CSingleFITCLaplacianBase: public CSingleSparseInferenceBase
* */
SGVector<float64_t> m_t;

/* B is defined in infFITC.m and infFITC_Laplace.m */
/** B is defined in infFITC.m and infFITC_Laplace.m */
SGMatrix<float64_t> m_B;

/* w=B*al */
/** w=B*al */
SGVector<float64_t> m_w;

/** Rvdd=W
@@ -248,7 +248,7 @@
*/
SGMatrix<float64_t> m_Rvdd;

/* V defined in infFITC.m and infFITC_Laplace.m */
/** V defined in infFITC.m and infFITC_Laplace.m */
SGMatrix<float64_t> m_V;

private:
3 changes: 2 additions & 1 deletion src/shogun/machine/gp/SingleSparseInferenceBase.h
@@ -215,9 +215,10 @@ class CSingleSparseInferenceBase: public CSparseInferenceBase
*/
virtual SGVector<float64_t> get_derivative_wrt_inducing_features(const TParameter* param)=0;

/** whether the kernel supports computing the gradient wrt inducing points or not*/
bool m_fully_sparse;

/* a lock used to parallelly compute derivatives wrt hyperparameters */
/** a lock used to compute derivatives wrt hyperparameters in parallel */
CLock* m_lock;
private:
/* init */
2 changes: 1 addition & 1 deletion src/shogun/machine/gp/SoftMaxLikelihood.h
@@ -287,7 +287,7 @@ class CSoftMaxLikelihood : public CLikelihoodModel

/** the Monte Carlo method sampler
*
* @oaram num_samples number of samples to be generated
* @param num_samples number of samples to be generated
* @param mu posterior mean of a Gaussian distribution
* \f$\mathcal{N}(\mu,\sigma^2)\f$, which is an approximation to the
* posterior marginal \f$p(f_*|X,y,x_*)\f$
18 changes: 9 additions & 9 deletions src/shogun/machine/gp/SparseVGInferenceMethod.h
@@ -238,23 +238,23 @@ class CSparseVGInferenceMethod: public CSingleSparseInferenceBase
/** update gradients */
virtual void compute_gradient();
protected:
/* inv_Lm=inv(Lm) where Lm*Lm'=Kmm */
/** inv_Lm=inv(Lm) where Lm*Lm'=Kmm */
SGMatrix<float64_t> m_inv_Lm;
/* Knm*inv_Lm */
/** Knm*inv_Lm */
SGMatrix<float64_t> m_Knm_inv_Lm;
/* invLa=inv(La) where La*La'=sigma2*eye(m)+inv_Lm*Kmn*Knm*inv_Lm' */
/** invLa=inv(La) where La*La'=sigma2*eye(m)+inv_Lm*Kmn*Knm*inv_Lm' */
SGMatrix<float64_t> m_inv_La;
/* yy=(y-meanfun)'*(y-meanfun) */
/** yy=(y-meanfun)'*(y-meanfun) */
float64_t m_yy;
/* the term used to compute gradient wrt likelihood and marginal likelihood*/
/** the term used to compute gradient wrt likelihood and marginal likelihood*/
float64_t m_f3;
/* square of sigma from Gaussian likelihood*/
/** square of sigma from Gaussian likelihood*/
float64_t m_sigma2;
/* the trace term to compute marginal likelihood*/
/** the trace term to compute marginal likelihood*/
float64_t m_trk;
/* a matrix used to compute gradients wrt kernel (Kmm)*/
/** a matrix used to compute gradients wrt kernel (Kmm)*/
SGMatrix<float64_t> m_Tmm;
/* a matrix used to compute gradients wrt kernel (Knm)*/
/** a matrix used to compute gradients wrt kernel (Knm)*/
SGMatrix<float64_t> m_Tnm;
private:
/** init */
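The comments in this last hunk double as formulas, so a small numeric sketch may help. The following is plain Eigen rather than shogun code, and `sparse_vg_caches` is a made-up function; it just computes the cached quantities literally as the comments define them:

```cpp
#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

// Illustration only: the cached quantities above, spelled out.
void sparse_vg_caches(const MatrixXd& Kmm, const MatrixXd& Knm,
                      double sigma2, const VectorXd& r) // r = y - meanfun
{
    const Eigen::Index m = Kmm.rows();
    MatrixXd Lm = Kmm.llt().matrixL();   // Lm*Lm' = Kmm
    MatrixXd inv_Lm = Lm.inverse();      // inv_Lm = inv(Lm)
    MatrixXd Knm_inv_Lm = Knm * inv_Lm;  // Knm*inv_Lm
    // La*La' = sigma2*eye(m) + inv_Lm*Kmn*Knm*inv_Lm'
    MatrixXd inner = sigma2 * MatrixXd::Identity(m, m)
                   + inv_Lm * Knm.transpose() * Knm * inv_Lm.transpose();
    MatrixXd La = inner.llt().matrixL();
    MatrixXd inv_La = La.inverse();      // invLa = inv(La)
    double yy = r.squaredNorm();         // yy = (y-meanfun)'*(y-meanfun)
    (void)Knm_inv_Lm; (void)inv_La; (void)yy;
}
```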
