Commit

Merge pull request #1062 from votjakovr/develop
Replaced dynamic_cast with obtain_from_generic in FITCInferenceMethod class
karlnapf committed May 8, 2013
2 parents e726ca6 + f926f91 commit 7c302bc
Showing 3 changed files with 91 additions and 17 deletions.
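
The change swaps an unchecked dynamic_cast on m_model for CGaussianLikelihood::obtain_from_generic, which verifies the runtime type and hands back a reference-counted pointer that the caller releases with SG_UNREF. A minimal sketch of that pattern, written here as a hypothetical free function rather than the actual Shogun helper, assuming only CSGObject::get_name() and the SG_REF/SG_SERROR macros:

// Hypothetical sketch (not the Shogun source) of the checked, reference-counted
// cast that obtain_from_generic provides: an explicit error on a type mismatch
// plus an SG_REF, which obliges the caller to release the pointer with SG_UNREF.
#include <cstring>
#include <shogun/regression/gp/GaussianLikelihood.h>

using namespace shogun;

CGaussianLikelihood* obtain_gaussian_likelihood(CLikelihoodModel* model)
{
	ASSERT(model != NULL);

	// compare the runtime class name instead of silently accepting any model
	if (strcmp(model->get_name(), "GaussianLikelihood") != 0)
		SG_SERROR("Provided model is not a CGaussianLikelihood!\n");

	CGaussianLikelihood* lik = (CGaussianLikelihood*)model;
	SG_REF(lik);
	return lik;
}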
2 changes: 1 addition & 1 deletion data
Submodule data updated 0 files
24 changes: 13 additions & 11 deletions src/shogun/regression/gp/FITCInferenceMethod.cpp
@@ -213,9 +213,10 @@ get_marginal_likelihood_derivatives(CMap<TParameter*,
if(update_parameter_hash())
update_all();

-//Get the sigma variable from the likelihood model
-float64_t m_sigma =
-dynamic_cast<CGaussianLikelihood*>(m_model)->get_sigma();
+// get the sigma variable from the Gaussian likelihood model
+CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+float64_t sigma=lik->get_sigma();
+SG_UNREF(lik);

Map<MatrixXd> eigen_ktru(m_ktru.matrix, m_ktru.num_rows, m_ktru.num_cols);

@@ -524,7 +525,7 @@ get_marginal_likelihood_derivatives(CMap<TParameter*,

sum[0] = VectorXd::Ones(eigen_dg.rows()).cwiseQuotient(eigen_dg).sum() - sum[0];

-sum = sum*m_sigma*m_sigma;
+sum = sum*sigma*sigma;
float64_t dKuui = 2.0*m_ind_noise;

MatrixXd R = -dKuui*B;
@@ -552,10 +553,10 @@ get_marginal_likelihood_derivatives(CMap<TParameter*,

sum[0] = sum[0] - Wdg_temp.sum()/2.0;

-SGVector<float64_t> sigma(1);
+SGVector<float64_t> vsigma(1);

-sigma[0] = sum[0];
-gradient.add(param, sigma);
+vsigma[0] = sum[0];
+gradient.add(param, vsigma);
para_dict.add(param, m_model);

return gradient;
@@ -661,9 +662,10 @@ void CFITCInferenceMethod::update_chol()
{
check_members();

-// get the sigma variable from the likelihood model
-float64_t m_sigma =
-dynamic_cast<CGaussianLikelihood*>(m_model)->get_sigma();
+// get the sigma variable from the Gaussian likelihood model
+CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+float64_t sigma=lik->get_sigma();
+SG_UNREF(lik);

// eigen3 representation of covariance matrix of latent features (m_kuu)
// and training features (m_ktru)
@@ -693,7 +695,7 @@ void CFITCInferenceMethod::update_chol()

for (index_t i = 0; i < m_ktrtr.num_cols; i++)
{
-eigen_dg[i]=m_ktrtr(i,i)*m_scale*m_scale+m_sigma*m_sigma-sV.col(i).sum();
+eigen_dg[i]=m_ktrtr(i,i)*m_scale*m_scale+sigma*sigma-sV.col(i).sum();
eigen_idg[i] = 1.0 / eigen_dg[i];
}

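
Both call sites changed in this file, get_marginal_likelihood_derivatives() and update_chol(), follow the same pattern; condensed from the hunks above:

// before: unchecked cast; a non-Gaussian likelihood model would yield a NULL
// pointer and a crash on dereference, and the local is named like a member (m_sigma)
float64_t m_sigma =
	dynamic_cast<CGaussianLikelihood*>(m_model)->get_sigma();

// after: checked, reference-counted access, released right after use
CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
float64_t sigma=lik->get_sigma();
SG_UNREF(lik);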
82 changes: 77 additions & 5 deletions tests/unit/regression/gp/FITCInferenceMethod_unittest.cc
@@ -16,6 +16,7 @@
#include <shogun/regression/gp/FITCInferenceMethod.h>
#include <shogun/regression/gp/ZeroMean.h>
#include <shogun/regression/gp/GaussianLikelihood.h>
+#include <shogun/evaluation/GradientResult.h>
#include <gtest/gtest.h>

using namespace shogun;
@@ -70,11 +71,11 @@ TEST(FITCInferenceMethod,get_cholesky)
// comparison of posterior cholesky with result from GPML package:
// L =
// -0.326180 0.148601 0.405579 -0.683624 0.319057 -0.073608
-// 0.148601 -2.222957 1.480121 0.170280 -0.102392 -0.016981
-// 0.405579 1.480121 -2.887356 1.091245 -0.481484 0.129348
-// -0.683624 0.170280 1.091245 -1.628117 0.779654 -0.016188
-// 0.319057 -0.102392 -0.481484 0.779654 -0.410200 -0.152221
-// -0.073608 -0.016981 0.129348 -0.016188 -0.152221 -0.722832
+// 0.148601 -2.222957 1.480121 0.170280 -0.102392 -0.016981
+// 0.405579 1.480121 -2.887356 1.091245 -0.481484 0.129348
+// -0.683624 0.170280 1.091245 -1.628117 0.779654 -0.016188
+// 0.319057 -0.102392 -0.481484 0.779654 -0.410200 -0.152221
+// -0.073608 -0.016981 0.129348 -0.016188 -0.152221 -0.722832
SGMatrix<float64_t> L=inf->get_cholesky();

EXPECT_NEAR(L(0,0), -0.326180, 1E-5);
@@ -250,4 +251,75 @@ TEST(FITCInferenceMethod,get_negative_marginal_likelihood)
SG_UNREF(inf);
}

+TEST(FITCInferenceMethod,get_marginal_likelihood_derivatives)
+{
+// create some easy regression data with latent features:
+// y approximately equals x^sin(x)
+index_t n=6;
+
+SGMatrix<float64_t> feat_train(1, n);
+SGMatrix<float64_t> lat_feat_train(1, n);
+SGVector<float64_t> lab_train(n);
+
+feat_train[0]=0.81263;
+feat_train[1]=0.99976;
+feat_train[2]=1.17037;
+feat_train[3]=1.51752;
+feat_train[4]=1.57765;
+feat_train[5]=3.89440;
+
+lat_feat_train[0]=0.00000;
+lat_feat_train[1]=0.80000;
+lat_feat_train[2]=1.60000;
+lat_feat_train[3]=2.40000;
+lat_feat_train[4]=3.20000;
+lat_feat_train[5]=4.00000;
+
+lab_train[0]=0.86015;
+lab_train[1]=0.99979;
+lab_train[2]=1.15589;
+lab_train[3]=1.51662;
+lab_train[4]=1.57764;
+lab_train[5]=0.39475;
+
+// shogun representation of features and labels
+CDenseFeatures<float64_t>* features_train=new CDenseFeatures<float64_t>(feat_train);
+CDenseFeatures<float64_t>* latent_features_train=new CDenseFeatures<float64_t>(lat_feat_train);
+CRegressionLabels* labels_train=new CRegressionLabels(lab_train);
+
+// choose Gaussian kernel with sigma = 2 and zero mean function
+CGaussianKernel* kernel=new CGaussianKernel(10, 2);
+CZeroMean* mean=new CZeroMean();
+
+// Gaussian likelihood with sigma = 0.1
+CGaussianLikelihood* likelihood=new CGaussianLikelihood(0.1);
+
+// specify GP regression with FITC inference
+CFITCInferenceMethod* inf=new CFITCInferenceMethod(kernel, features_train,
+mean, labels_train, likelihood, latent_features_train);
+
+CGradientResult* result = new CGradientResult();
+
+result->total_variables=3;
+result->gradient=inf->get_marginal_likelihood_derivatives(result->parameter_dictionary);
+
+float64_t dnlZ_ell=4*(*result->gradient.get_element_ptr(0))[0];
+float64_t dnlZ_sf2=(*result->gradient.get_element_ptr(1))[0];
+float64_t dnlZ_lik=(*result->gradient.get_element_ptr(2))[0];
+
+// comparison of partial derivatives of negative marginal likelihood with
+// result from GPML package:
+// lik = 2.1930
+// cov =
+// -1.67233
+// 0.55979
+EXPECT_NEAR(dnlZ_lik, 2.1930, 1E-4);
+EXPECT_NEAR(dnlZ_ell, -1.67233, 1E-5);
+EXPECT_NEAR(dnlZ_sf2, 0.55979, 1E-5);
+
+// clean up
+SG_UNREF(result);
+SG_UNREF(inf);
+}
+
#endif
