Remove obtain_from_generic (#4224)
syashakash authored and karlnapf committed Apr 9, 2018
1 parent c8c66e6 commit 36ccb7d
Showing 28 changed files with 87 additions and 127 deletions.
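
The change is mechanical throughout: each per-class static helper of the form CFoo::obtain_from_generic(obj) is replaced by the CSGObject::as<CFoo>() member template and, judging by the SG_UNREF calls that disappear alongside it, the old helper took a reference on the casted pointer while as<T>() hands back a borrowed one. A minimal sketch of the idea, assuming as<T>() is essentially a checked dynamic_cast — a hypothetical simplification, not Shogun's exact implementation:

#include <cstdio>
#include <cstdlib>

struct CSGObject
{
	virtual ~CSGObject() {}

	// Hypothetical simplification of CSGObject::as<T>(): a checked downcast
	// that fails loudly on a type mismatch and, unlike the removed
	// obtain_from_generic() helpers, does NOT take a reference on the result.
	template <class T>
	T* as()
	{
		T* casted = dynamic_cast<T*>(this);
		if (!casted)
		{
			std::fprintf(stderr, "cannot cast object to the requested type\n");
			std::abort();
		}
		return casted;
	}
};

struct CLikelihoodModel : CSGObject {};
struct CGaussianLikelihood : CLikelihoodModel
{
	double get_sigma() const { return 0.5; }
};

int main()
{
	CLikelihoodModel* m_model = new CGaussianLikelihood();

	// Before: lik = CGaussianLikelihood::obtain_from_generic(m_model);
	//         ... use lik ...; SG_UNREF(lik);
	// After: one line, and no unref because the pointer is borrowed.
	CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
	std::printf("sigma=%g\n", lik->get_sigma());

	delete m_model;
	return 0;
}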
8 changes: 2 additions & 6 deletions src/shogun/classifier/GaussianProcessClassification.cpp
@@ -120,10 +120,8 @@ CBinaryLabels* CGaussianProcessClassification::apply_binary(
if (m_method->get_inference_type()== INF_FITC_LAPLACE_SINGLE)
{
#ifdef USE_GPL_SHOGUN
-CSingleFITCLaplaceInferenceMethod* fitc_method=
-CSingleFITCLaplaceInferenceMethod::obtain_from_generic(m_method);
+CSingleFITCLaplaceInferenceMethod* fitc_method = m_method->as<CSingleFITCLaplaceInferenceMethod>();
data=fitc_method->get_inducing_features();
-SG_UNREF(fitc_method);
#else
SG_GPL_ONLY
#endif //USE_GPL_SHOGUN
@@ -156,10 +154,8 @@ bool CGaussianProcessClassification::train_machine(CFeatures* data)
if (m_method->get_inference_type()==INF_FITC_LAPLACE_SINGLE)
{
#ifdef USE_GPL_SHOGUN
-CSingleFITCLaplaceInferenceMethod* fitc_method=
-CSingleFITCLaplaceInferenceMethod::obtain_from_generic(m_method);
+CSingleFITCLaplaceInferenceMethod* fitc_method = m_method->as<CSingleFITCLaplaceInferenceMethod>();
fitc_method->set_inducing_features(data);
-SG_UNREF(fitc_method);
#else
SG_ERROR("Single FITC Laplace inference only supported under GPL.\n")
#endif //USE_GPL_SHOGUN
8 changes: 4 additions & 4 deletions src/shogun/evaluation/StructuredAccuracy.cpp
@@ -74,8 +74,8 @@ float64_t CStructuredAccuracy::evaluate_real(CStructuredLabels * predicted,

for (int32_t i = 0 ; i < length ; ++i)
{
-CRealNumber * truth = CRealNumber::obtain_from_generic(ground_truth->get_label(i));
-CRealNumber * pred = CRealNumber::obtain_from_generic(predicted->get_label(i));
+CRealNumber * truth = ground_truth->get_label(i)->as<CRealNumber>();
+CRealNumber * pred = predicted->get_label(i)->as<CRealNumber>();

num_equal += truth->value == pred->value;

@@ -96,8 +96,8 @@ float64_t CStructuredAccuracy::evaluate_sequence(CStructuredLabels * predicted,

for (int32_t i = 0 ; i < length ; ++i)
{
-CSequence * true_seq = CSequence::obtain_from_generic(ground_truth->get_label(i));
-CSequence * pred_seq = CSequence::obtain_from_generic(predicted->get_label(i));
+CSequence * true_seq = ground_truth->get_label(i)->as<CSequence>();
+CSequence * pred_seq = predicted->get_label(i)->as<CSequence>();

SGVector<int32_t> true_seq_data = true_seq->get_data();
SGVector<int32_t> pred_seq_data = pred_seq->get_data();
4 changes: 2 additions & 2 deletions src/shogun/machine/GaussianProcessMachine.cpp
@@ -89,7 +89,7 @@ SGVector<float64_t> CGaussianProcessMachine::get_posterior_means(CFeatures* data

// get kernel and compute kernel matrix: K(feat, data)*scale^2
CKernel* training_kernel=m_method->get_kernel();
-CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
+CKernel* kernel = training_kernel->clone()->as<CKernel>();
SG_UNREF(training_kernel);

kernel->init(feat, data);
@@ -153,7 +153,7 @@ SGVector<float64_t> CGaussianProcessMachine::get_posterior_variances(

// get kernel and compute kernel matrix: K(data, data)*scale^2
CKernel* training_kernel=m_method->get_kernel();
-CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
+CKernel* kernel = training_kernel->clone()->as<CKernel>();
SG_UNREF(training_kernel);
kernel->init(data, data);

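
In CGaussianProcessMachine the cast is chained onto clone(): clone() returns a base CSGObject*, so the fresh copy has to be downcast back to a kernel before it can be re-initialised on the prediction features. A sketch of that chain, reusing the simplified CSGObject from the previous example (this CKernel is a stand-in, not Shogun's real kernel API):

// clone() yields CSGObject*, so as<CKernel>() recovers the concrete type.
// The copy is independent of the original, which is why the training kernel
// obtained from the inference method can be released right after cloning.
struct CKernel : CSGObject
{
	CSGObject* clone() const { return new CKernel(*this); } // hypothetical
	void init() { /* kernel-matrix setup elided */ }
};

void recompute_kernel(CKernel* training_kernel)
{
	CKernel* kernel = training_kernel->clone()->as<CKernel>();
	kernel->init();
	delete kernel; // sketch only; the Shogun code uses SG_UNREF instead
}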
2 changes: 1 addition & 1 deletion src/shogun/machine/LinearLatentMachine.cpp
@@ -40,7 +40,7 @@ CLatentLabels* CLinearLatentMachine::apply_latent(CFeatures* data)
if (m_model == NULL)
SG_ERROR("LatentModel is not set!\n")

-CLatentFeatures* lf = CLatentFeatures::obtain_from_generic(data);
+CLatentFeatures* lf = data->as<CLatentFeatures>();
m_model->set_features(lf);

return apply_latent();
21 changes: 7 additions & 14 deletions src/shogun/machine/gp/ExactInferenceMethod.cpp
@@ -104,9 +104,8 @@ SGVector<float64_t> CExactInferenceMethod::get_diagonal_vector()
update();

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// compute diagonal vector: sW=1/sigma
SGVector<float64_t> result(m_features->get_num_vectors());
@@ -121,9 +120,8 @@ float64_t CExactInferenceMethod::get_negative_log_marginal_likelihood()
update();

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// create eigen representation of alpha and L
Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
@@ -178,9 +176,8 @@ SGMatrix<float64_t> CExactInferenceMethod::get_posterior_covariance()
void CExactInferenceMethod::update_chol()
{
// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

/* check whether to allocate cholesky memory */
if (!m_L.matrix || m_L.num_rows!=m_ktrtr.num_rows)
@@ -198,9 +195,8 @@ void CExactInferenceMethod::update_alpha()
void CExactInferenceMethod::update_alpha()
{
// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// get labels and mean vector and create eigen representation
SGVector<float64_t> y=((CRegressionLabels*) m_labels)->get_labels();
@@ -252,9 +248,8 @@ void CExactInferenceMethod::update_cov()
MatrixXd eigen_V = eigen_L.triangularView<Upper>().adjoint().solve(
eigen_K * std::exp(m_log_scale * 2.0));

-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);
eigen_V = eigen_V/sigma;

// compute covariance matrix of the posterior: Sigma = K - V^T * V
@@ -265,9 +260,8 @@ void CExactInferenceMethod::update_deriv()
void CExactInferenceMethod::update_deriv()
{
// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// create eigen representation of derivative matrix and cholesky
Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
@@ -328,9 +322,8 @@ SGVector<float64_t> CExactInferenceMethod::get_derivative_wrt_likelihood_model(
m_model->get_name(), param->m_name)

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// create eigen representation of the matrix Q
Map<MatrixXd> eigen_Q(m_Q.matrix, m_Q.num_rows, m_Q.num_cols);
9 changes: 3 additions & 6 deletions src/shogun/machine/gp/FITCInferenceMethod.cpp
@@ -112,9 +112,8 @@ SGVector<float64_t> CFITCInferenceMethod::get_diagonal_vector()
update();

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// compute diagonal vector: sW=1/sigma
SGVector<float64_t> result(m_features->get_num_vectors());
@@ -153,9 +152,8 @@ void CFITCInferenceMethod::update_chol()
//time complexits O(m^2*n)

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

// eigen3 representation of covariance matrix of inducing features (m_kuu)
// and training features (m_ktru)
@@ -418,9 +416,8 @@ SGVector<float64_t> CFITCInferenceMethod::get_derivative_wrt_likelihood_model(
Map<MatrixXd> eigen_B(m_B.matrix, m_B.num_rows, m_B.num_cols);

// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);

SGVector<float64_t> result(1);

3 changes: 1 addition & 2 deletions src/shogun/machine/gp/SingleFITCLaplaceInferenceMethod.cpp
@@ -245,9 +245,8 @@ float64_t CSingleFITCLaplaceNewtonOptimizer::minimize()

if (m_obj->m_model->get_model_type()==LT_STUDENTST)
{
-CStudentsTLikelihood* lik=CStudentsTLikelihood::obtain_from_generic(m_obj->m_model);
+CStudentsTLikelihood* lik = m_obj->m_model->as<CStudentsTLikelihood>();
df=lik->get_degrees_freedom();
-SG_UNREF(lik);
}
else
df=1;
3 changes: 1 addition & 2 deletions src/shogun/machine/gp/SingleLaplaceInferenceMethod.cpp
@@ -209,9 +209,8 @@ float64_t CSingleLaplaceNewtonOptimizer::minimize()

if (m_obj->m_model->get_model_type()==LT_STUDENTST)
{
-CStudentsTLikelihood* lik=CStudentsTLikelihood::obtain_from_generic(m_obj->m_model);
+CStudentsTLikelihood* lik = m_obj->m_model->as<CStudentsTLikelihood>();
df=lik->get_degrees_freedom();
-SG_UNREF(lik);
}
else
df=1;
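
Both Newton optimizers guard the downcast behind a runtime type check, so the Student's-t degrees of freedom are only read when the likelihood actually is Student's-t. Continuing the sketch above (the enum and the type argument are stand-ins for Shogun's get_model_type() machinery):

// Guard-then-cast idiom from the two minimize() hunks: df falls back to 1
// for any likelihood that is not Student's-t.
enum ELikelihoodModelType { LT_NONE, LT_STUDENTST };

struct CStudentsTLikelihood : CLikelihoodModel
{
	double get_degrees_freedom() const { return 3.0; }
};

double degrees_of_freedom(CLikelihoodModel* model, ELikelihoodModelType type)
{
	if (type == LT_STUDENTST)
		return model->as<CStudentsTLikelihood>()->get_degrees_freedom();
	return 1.0;
}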
6 changes: 2 additions & 4 deletions src/shogun/machine/gp/VarDTCInferenceMethod.cpp
@@ -168,9 +168,8 @@ void CVarDTCInferenceMethod::update_chol()
void CVarDTCInferenceMethod::update_chol()
{
// get the sigma variable from the Gaussian likelihood model
-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);
m_sigma2=sigma*sigma;

//m-by-m matrix
@@ -266,9 +265,8 @@ void CVarDTCInferenceMethod::update_deriv()
Map<MatrixXd> eigen_Tmm(m_Tmm.matrix, m_Tmm.num_rows, m_Tmm.num_cols);
Map<MatrixXd> eigen_Tnm(m_Tnm.matrix, m_Tnm.num_rows, m_Tnm.num_cols);

-CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
+CGaussianLikelihood* lik = m_model->as<CGaussianLikelihood>();
float64_t sigma=lik->get_sigma();
-SG_UNREF(lik);
m_sigma2=sigma*sigma;

//invLmInvLa = invLm*invLa;
7 changes: 2 additions & 5 deletions src/shogun/regression/GaussianProcessRegression.cpp
@@ -49,10 +49,8 @@ CRegressionLabels* CGaussianProcessRegression::apply_regression(CFeatures* data)
// use inducing features for FITC inference method
if (m_method->get_inference_type()==INF_FITC_REGRESSION)
{
-CFITCInferenceMethod* fitc_method=
-CFITCInferenceMethod::obtain_from_generic(m_method);
+CFITCInferenceMethod* fitc_method = m_method->as<CFITCInferenceMethod>();
feat=fitc_method->get_inducing_features();
-SG_UNREF(fitc_method);
}
else
feat=m_method->get_features();
@@ -84,8 +82,7 @@ bool CGaussianProcessRegression::train_machine(CFeatures* data)
// set inducing features for FITC inference method
if (m_method->get_inference_type()==INF_FITC_REGRESSION)
{
-CFITCInferenceMethod* fitc_method=
-CFITCInferenceMethod::obtain_from_generic(m_method);
+CFITCInferenceMethod* fitc_method = m_method->as<CFITCInferenceMethod>();
fitc_method->set_inducing_features(data);
SG_UNREF(fitc_method);
}
4 changes: 2 additions & 2 deletions src/shogun/structure/FactorGraphDataGenerator.cpp
@@ -545,8 +545,8 @@ float64_t CFactorGraphDataGenerator::test_sosvm(EMAPInferType infer_type)
CStructuredData* y_truth = fg_labels_train->get_label(i);
acc_loss_sgd += model->delta_loss(y_truth, y_pred);

-CFactorGraphObservation* y_t = CFactorGraphObservation::obtain_from_generic(y_truth);
-CFactorGraphObservation* y_p = CFactorGraphObservation::obtain_from_generic(y_pred);
+CFactorGraphObservation* y_t = y_truth->as<CFactorGraphObservation>();
+CFactorGraphObservation* y_p = y_pred->as<CFactorGraphObservation>();

SGVector<int32_t> s_t = y_t->get_data();
SGVector<int32_t> s_p = y_p->get_data();
13 changes: 6 additions & 7 deletions src/shogun/structure/FactorGraphModel.cpp
@@ -228,11 +228,11 @@ void CFactorGraphModel::w_to_fparams(SGVector<float64_t> w)
SGVector< float64_t > CFactorGraphModel::get_joint_feature_vector(int32_t feat_idx, CStructuredData* y)
{
// factor graph instance
-CFactorGraphFeatures* mf = CFactorGraphFeatures::obtain_from_generic(m_features);
+CFactorGraphFeatures* mf = m_features->as<CFactorGraphFeatures>();
CFactorGraph* fg = mf->get_sample(feat_idx);

// ground truth states
-CFactorGraphObservation* fg_states = CFactorGraphObservation::obtain_from_generic(y);
+CFactorGraphObservation* fg_states = y->as<CFactorGraphObservation>();
SGVector<int32_t> states = fg_states->get_data();

// initialize psi
@@ -281,7 +281,7 @@ SGVector< float64_t > CFactorGraphModel::get_joint_feature_vector(int32_t feat_i
CResultSet* CFactorGraphModel::argmax(SGVector<float64_t> w, int32_t feat_idx, bool const training)
{
// factor graph instance
-CFactorGraphFeatures* mf = CFactorGraphFeatures::obtain_from_generic(m_features);
+CFactorGraphFeatures* mf = m_features->as<CFactorGraphFeatures>();
CFactorGraph* fg = mf->get_sample(feat_idx);

// prepare factor graph
@@ -310,8 +310,7 @@ CResultSet* CFactorGraphModel::argmax(SGVector<float64_t> w, int32_t feat_idx, b
ret->psi_computed = true;

// y_truth
-CFactorGraphObservation* y_truth =
-CFactorGraphObservation::obtain_from_generic(m_labels->get_label(feat_idx));
+CFactorGraphObservation* y_truth = m_labels->get_label(feat_idx)->as<CFactorGraphObservation>();

SGVector<int32_t> states_gt = y_truth->get_data();

@@ -375,8 +374,8 @@ CResultSet* CFactorGraphModel::argmax(SGVector<float64_t> w, int32_t feat_idx, b

float64_t CFactorGraphModel::delta_loss(CStructuredData* y1, CStructuredData* y2)
{
-CFactorGraphObservation* y_truth = CFactorGraphObservation::obtain_from_generic(y1);
-CFactorGraphObservation* y_pred = CFactorGraphObservation::obtain_from_generic(y2);
+CFactorGraphObservation* y_truth = y1->as<CFactorGraphObservation>();
+CFactorGraphObservation* y_pred = y2->as<CFactorGraphObservation>();
SGVector<int32_t> s_truth = y_truth->get_data();
SGVector<int32_t> s_pred = y_pred->get_data();

11 changes: 5 additions & 6 deletions src/shogun/structure/HMSVMModel.cpp
@@ -68,7 +68,7 @@ SGVector< float64_t > CHMSVMModel::get_joint_feature_vector(
int32_t D = mf->get_num_features();

// Get the sequence of labels
-CSequence* label_seq = CSequence::obtain_from_generic(y);
+CSequence* label_seq = y->as<CSequence>();

// Initialize psi
SGVector< float64_t > psi(get_dim());
@@ -228,8 +228,7 @@ CResultSet* CHMSVMModel::argmax(
// If argmax used while training, add to E the loss matrix (loss-augmented inference)
if ( training )
{
-CSequence* ytrue =
-CSequence::obtain_from_generic(m_labels->get_label(feat_idx));
+CSequence* ytrue = m_labels->get_label(feat_idx)->as<CSequence>();

REQUIRE(ytrue->get_data().size() == T, "T, the length of the feature "
"x^i (%d) and the length of its corresponding label y^i "
@@ -344,8 +343,8 @@ CResultSet* CHMSVMModel::argmax(

float64_t CHMSVMModel::delta_loss(CStructuredData* y1, CStructuredData* y2)
{
-CSequence* seq1 = CSequence::obtain_from_generic(y1);
-CSequence* seq2 = CSequence::obtain_from_generic(y2);
+CSequence* seq1 = y1->as<CSequence>();
+CSequence* seq2 = y2->as<CSequence>();

// Compute the Hamming loss, number of distinct elements in the sequences
return m_state_model->loss(seq1, seq2);
@@ -448,7 +447,7 @@ bool CHMSVMModel::check_training_setup() const
int32_t state;
for ( int32_t i = 0 ; i < hmsvm_labels->get_num_labels() ; ++i )
{
-seq = CSequence::obtain_from_generic(hmsvm_labels->get_label(i));
+seq = hmsvm_labels->get_label(i)->as<CSequence>();

SGVector<int32_t> seq_data = seq->get_data();
for ( int32_t j = 0 ; j < seq_data.size() ; ++j )
9 changes: 4 additions & 5 deletions src/shogun/structure/HashedMultilabelModel.cpp
@@ -100,7 +100,7 @@ SGSparseVector<float64_t> CHashedMultilabelModel::get_sparse_joint_feature_vecto
SGSparseVector<float64_t> vec = ((CSparseFeatures<float64_t> *)m_features)->
get_sparse_feature_vector(feat_idx);

-CSparseMultilabel * slabel = CSparseMultilabel::obtain_from_generic(y);
+CSparseMultilabel * slabel = y->as<CSparseMultilabel>();
ASSERT(slabel != NULL);
SGVector<int32_t> slabel_data = slabel->get_data();

@@ -131,8 +131,8 @@ float64_t CHashedMultilabelModel::delta_loss(CStructuredData * y1,
float64_t CHashedMultilabelModel::delta_loss(CStructuredData * y1,
CStructuredData * y2)
{
-CSparseMultilabel * y1_slabel = CSparseMultilabel::obtain_from_generic(y1);
-CSparseMultilabel * y2_slabel = CSparseMultilabel::obtain_from_generic(y2);
+CSparseMultilabel * y1_slabel = y1->as<CSparseMultilabel>();
+CSparseMultilabel * y2_slabel = y2->as<CSparseMultilabel>();

ASSERT(y1_slabel != NULL);
ASSERT(y2_slabel != NULL);
@@ -253,8 +253,7 @@ CResultSet * CHashedMultilabelModel::argmax(SGVector<float64_t> w,

float64_t score = 0, total_score = 0;

-CSparseMultilabel * slabel = CSparseMultilabel::obtain_from_generic(
-multi_labs->get_label(feat_idx));
+CSparseMultilabel * slabel = multi_labs->get_label(feat_idx)->as<CSparseMultilabel>();
SGVector<int32_t> slabel_data = slabel->get_data();
SGVector<float64_t> y_truth = CMultilabelSOLabels::to_dense(
slabel, m_num_classes, 1, 0);
6 changes: 3 additions & 3 deletions src/shogun/structure/HierarchicalMultilabelModel.cpp
@@ -134,7 +134,7 @@ SGVector<int32_t> CHierarchicalMultilabelModel::get_label_vector(
SGVector<float64_t> CHierarchicalMultilabelModel::get_joint_feature_vector(
int32_t feat_idx, CStructuredData * y)
{
-CSparseMultilabel * slabel = CSparseMultilabel::obtain_from_generic(y);
+CSparseMultilabel * slabel = y->as<CSparseMultilabel>();
SGVector<int32_t> slabel_data = slabel->get_data();
SGVector<int32_t> label_vector = get_label_vector(slabel_data);

@@ -166,8 +166,8 @@ float64_t CHierarchicalMultilabelModel::delta_loss(CStructuredData * y1,
float64_t CHierarchicalMultilabelModel::delta_loss(CStructuredData * y1,
CStructuredData * y2)
{
-CSparseMultilabel * y1_slabel = CSparseMultilabel::obtain_from_generic(y1);
-CSparseMultilabel * y2_slabel = CSparseMultilabel::obtain_from_generic(y2);
+CSparseMultilabel * y1_slabel = y1->as<CSparseMultilabel>();
+CSparseMultilabel * y2_slabel = y2->as<CSparseMultilabel>();

ASSERT(y1_slabel != NULL);
ASSERT(y2_slabel != NULL);
