Commit afac518

factories are factories

they are always gonna love :)

vigsterkr committed Apr 16, 2019
1 parent c7fec96 commit afac518

Showing 20 changed files with 90 additions and 143 deletions.
4 changes: 2 additions & 2 deletions examples/meta/src/regression/multiple_kernel_learning.sg
@@ -25,8 +25,8 @@ combined_kernel.init(features_train, features_train)
#![create_combined_train]

#![train_mkl]
-SVM binary_svm_solver = as_svm(machine("SVRLight"))
-Machine mkl = machine("MKLRegression", svm=binary_svm_solver, kernel=combined_kernel, labels=labels_train)
+Machine binary_svm_solver = machine("SVRLight")
+Machine mkl = machine("MKLRegression", svm=as_svm(binary_svm_solver), kernel=combined_kernel, labels=labels_train)
mkl.train()
#![train_mkl]

@@ -43,7 +43,8 @@ def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_trai
kernel.append_kernel(sg.kernel("GaussianKernel", log_width=np.log(2)))

# create mkl using libsvm, due to a mem-bug, interleaved is not possible
-svm = sg.machine("MKLClassification", svm=sg.as_svm(sg.machine("LibSVM")),
+libsvm = sg.machine("LibSVM")
+svm = sg.machine("MKLClassification", svm=sg.as_svm(libsvm),
interleaved_optimization=False, kernel=kernel)

# splitting strategy for 5 fold cross-validation (for classification its better
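A note on the two example changes above: the machine() factory now uniformly returns the generic Machine type, and the downcast to an SVM moves to the point of use via as_svm(). A minimal C++ sketch of the same call against the 2019-era <shogun/util/factory.h> API; only machine("SVRLight") and wrap() are taken from this diff, the rest of the scaffolding is illustrative:

    #include <shogun/base/init.h>
    #include <shogun/base/some.h>
    #include <shogun/machine/Machine.h>
    #include <shogun/util/factory.h>

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();
        {
            // machine() is one of the factories declared %newobject below,
            // so the caller owns the returned CMachine*; parking it in a
            // Some<> handle releases it automatically at end of scope.
            auto binary_svm_solver = wrap(machine("SVRLight"));
        }
        exit_shogun();
        return 0;
    }
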
31 changes: 25 additions & 6 deletions src/interfaces/swig/factory.i
@@ -1,3 +1,28 @@
+%newobject shogun::distance(std::string name);
+%newobject shogun::evaluation(const std::string& name);
+%newobject shogun::kernel(const std::string& name);
+%newobject shogun::machine(const std::string& name);
+%newobject shogun::multiclass_strategy(const std::string& name);
+%newobject shogun::ecoc_encoder(const std::string& name);
+%newobject shogun::ecoc_decoder(const std::string& name);
+%newobject shogun::transformer(const std::string& name);
+%newobject shogun::layer(const std::string& name);
+%newobject shogun::splitting_strategy(const std::string& name);
+%newobject shogun::machine_evaluation(const std::string& name);
+%newobject shogun::svm(const std::string& name);
+%newobject shogun::features;
+%newobject shogun::gp_likelihood(const std::string& name);
+%newobject shogun::gp_mean(const std::string& name);
+%newobject shogun::differentiable(const std::string& name);
+%newobject shogun::gp_inference(const std::string& name);
+%newobject shogun::loss(const std::string& name);
+%newobject shogun::string_features(CFile*, EAlphabet alpha = DNA, EPrimitiveType primitive_type = PT_CHAR);
+%newobject shogun::transformer(const std::string&);
+%newobject shogun::csv_file(std::string fname, char rw);
+%newobject shogun::libsvm_file(std::string fname, char rw);
+%newobject shogun::pipeline;
+%newobject shogun::labels;
+
%{
#include <shogun/util/factory.h>
%}
@@ -10,9 +35,3 @@
#endif //SWIGJAVA

%template(labels) shogun::labels<float64_t>;
-
-%newobject shogun::features(CFile*, EPrimitiveType primitive_type);
-%newobject shogun::string_features(CFile*, EAlphabet alpha = DNA, EPrimitiveType primitive_type = PT_CHAR);
-%newobject shogun::transformer(const std::string&);
-%newobject shogun::csv_file(std::string fname, char rw);
-%newobject shogun::libsvm_file(std::string fname, char rw);
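
The block above is the core of the commit: every factory gets a %newobject declaration, which tells SWIG the C++ function returns a freshly allocated object and that the generated wrapper, not the library, is responsible for releasing it. In C++ terms the contract is the one a unique_ptr-returning factory expresses; a toy sketch (Widget and make_widget are hypothetical, not Shogun API):

    #include <memory>
    #include <string>

    struct Widget
    {
        explicit Widget(std::string n) : name(std::move(n)) {}
        std::string name;
    };

    // With %newobject, SWIG treats a raw-pointer factory like this exactly
    // like the unique_ptr version below: the target language takes ownership
    // and eventually runs the destructor, so the object is freed exactly once.
    Widget* make_widget(const std::string& name)
    {
        return new Widget(name);
    }

    std::unique_ptr<Widget> make_widget_cpp(const std::string& name)
    {
        return std::make_unique<Widget>(name);
    }

    int main()
    {
        std::unique_ptr<Widget> w(make_widget("SVRLight")); // caller owns
        auto w2 = make_widget_cpp("SVRLight");
        return 0;
    }
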
2 changes: 0 additions & 2 deletions src/shogun/base/class_list.h
@@ -60,8 +60,6 @@ namespace shogun {
delete_object(object);
throw e;
}
-
-cast->ref();
return cast;
}

3 changes: 1 addition & 2 deletions src/shogun/clustering/KMeansMiniBatch.cpp
@@ -68,7 +68,7 @@ void CKMeansMiniBatch::minibatch_KMeans()

CDenseFeatures<float64_t>* lhs=
distance->get_lhs()->as<CDenseFeatures<float64_t>>();
-CDenseFeatures<float64_t>* rhs_mus=new CDenseFeatures<float64_t>(mus);
+auto rhs_mus=some<CDenseFeatures<float64_t>>(mus);
CFeatures* rhs_cache=distance->replace_rhs(rhs_mus);
int32_t XSize=lhs->get_num_vectors();
int32_t dims=lhs->get_num_features();
@@ -113,7 +113,6 @@ void CKMeansMiniBatch::minibatch_KMeans()
}
SG_UNREF(lhs);
distance->replace_rhs(rhs_cache);
-delete rhs_mus;
}

SGVector<int32_t> CKMeansMiniBatch::mbchoose_rand(int32_t b, int32_t num)
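The change above swaps a naked new/delete pair for Shogun's intrusive smart pointer: some<T>(args) allocates the object and takes a reference, and drops it automatically when the handle goes out of scope, which is what makes the trailing delete rhs_mus; removable. A standalone sketch against the 2019-era <shogun/base/some.h>; the matrix contents are arbitrary:

    #include <shogun/base/init.h>
    #include <shogun/base/some.h>
    #include <shogun/features/DenseFeatures.h>
    #include <shogun/lib/SGMatrix.h>

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();
        {
            SGMatrix<float64_t> mus(2, 3);
            mus.set_const(0.0);

            // news the CDenseFeatures and ref()s it in one step ...
            auto rhs_mus = some<CDenseFeatures<float64_t>>(mus);

            // ... and unref()s it automatically at the end of this scope,
            // so no manual delete (or SG_UNREF) is needed.
        }
        exit_shogun();
        return 0;
    }
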
1 change: 0 additions & 1 deletion src/shogun/converter/ica/FFSep.cpp
@@ -55,7 +55,6 @@ SGNDArray<float64_t> CFFSep::get_covs() const
void CFFSep::fit_dense(CDenseFeatures<float64_t>* features)
{
ASSERT(features);
-SG_REF(features);

auto X = features->get_feature_matrix();

18 changes: 2 additions & 16 deletions src/shogun/converter/ica/ICAConverter.cpp
@@ -72,19 +72,13 @@ void CICAConverter::fit(CFeatures* features)
features->get_feature_type() == F_DREAL,
"ICA converters only work with real features\n");

-SG_REF(features);
-
fit_dense(static_cast<CDenseFeatures<float64_t>*>(features));
-
-SG_UNREF(features);
}

CFeatures* CICAConverter::transform(CFeatures* features, bool inplace)
{
REQUIRE(m_mixing_matrix.matrix, "ICAConverter has not been fitted.\n");

-SG_REF(features);
-
auto X = features->as<CDenseFeatures<float64_t>>()->get_feature_matrix();
if (!inplace)
X = X.clone();
@@ -97,26 +91,18 @@ CFeatures* CICAConverter::transform(CFeatures* features, bool inplace)
// Unmix
EX = C.inverse() * EX;

-auto processed = new CDenseFeatures<float64_t>(X);
-SG_UNREF(features);
-
-return processed;
+return new CDenseFeatures<float64_t>(X);
}

CFeatures* CICAConverter::inverse_transform(CFeatures* features, bool inplace)
{
REQUIRE(m_mixing_matrix.matrix, "ICAConverter has not been fitted.\n");

-SG_REF(features);
-
auto X = features->as<CDenseFeatures<float64_t>>()->get_feature_matrix();
if (!inplace)
X = X.clone();

linalg::matrix_prod(m_mixing_matrix, X, X);

-auto processed = new CDenseFeatures<float64_t>(X);
-SG_UNREF(features);
-
-return processed;
+return new CDenseFeatures<float64_t>(X);
}
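
With the refs gone, fit() no longer pins its argument and transform()/inverse_transform() return the freshly allocated result directly; keeping inputs alive and releasing outputs is now the caller's job. A hedged caller-side sketch under that contract, against the 2019-era API (the random mixed signals are purely illustrative):

    #include <cstdlib>
    #include <shogun/base/init.h>
    #include <shogun/base/some.h>
    #include <shogun/converter/ica/Jade.h>
    #include <shogun/features/DenseFeatures.h>
    #include <shogun/lib/SGMatrix.h>

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();
        {
            // two toy mixed signals, 100 samples each
            SGMatrix<float64_t> m(2, 100);
            for (index_t i = 0; i < m.num_rows * m.num_cols; ++i)
                m.matrix[i] = std::rand() / (float64_t)RAND_MAX;

            auto features = some<CDenseFeatures<float64_t>>(m);
            auto ica = some<CJade>();
            ica->fit(features);

            // transform() returns a fresh object the caller owns, so the
            // idiomatic call site wraps it immediately
            auto unmixed = wrap(ica->transform(features));
        }
        exit_shogun();
        return 0;
    }
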
1 change: 0 additions & 1 deletion src/shogun/converter/ica/Jade.cpp
@@ -43,7 +43,6 @@ SGMatrix<float64_t> CJade::get_cumulant_matrix() const
void CJade::fit_dense(CDenseFeatures<float64_t>* features)
{
ASSERT(features);
-SG_REF(features);

auto X = features->get_feature_matrix();

3 changes: 1 addition & 2 deletions src/shogun/kernel/GaussianKernel.cpp
@@ -79,8 +79,7 @@ void CGaussianKernel::cleanup()
bool CGaussianKernel::init(CFeatures* l, CFeatures* r)
{
cleanup();
-CShiftInvariantKernel::init(l, r);
-return init_normalizer();
+return CShiftInvariantKernel::init(l, r);
}

void CGaussianKernel::set_width(float64_t w)
9 changes: 0 additions & 9 deletions src/shogun/kernel/Kernel.cpp
@@ -90,10 +90,6 @@ void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)

bool CKernel::init(CFeatures* l, CFeatures* r)
{
-/* make sure that features are not deleted if same ones are used */
-SG_REF(l);
-SG_REF(r);
-
//make sure features were indeed supplied
REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
@@ -116,7 +112,6 @@ bool CKernel::init(CFeatures* l, CFeatures* r)
//remove references to previous features
remove_lhs_and_rhs();

-//increase reference counts
SG_REF(l);
if (l==r)
lhs_equals_rhs=true;
@@ -132,10 +127,6 @@
num_lhs=l->get_num_vectors();
num_rhs=r->get_num_vectors();

/* unref "safety" refs from beginning */
SG_UNREF(r);
SG_UNREF(l);

SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
return true;
}
3 changes: 0 additions & 3 deletions src/shogun/machine/GaussianProcessMachine.cpp
@@ -151,8 +151,6 @@ SGVector<float64_t> CGaussianProcessMachine::get_posterior_variances(
else
feat=m_method->get_features();

-SG_REF(data);
-
// get kernel and compute kernel matrix: K(data, data)*scale^2
CKernel* training_kernel=m_method->get_kernel();
CKernel* kernel = training_kernel->clone()->as<CKernel>();
@@ -179,7 +177,6 @@
// cleanup
SG_UNREF(kernel);
SG_UNREF(feat);
-SG_UNREF(data);

// get shogun representation of cholesky and create eigen representation
SGMatrix<float64_t> L=m_method->get_cholesky();
15 changes: 8 additions & 7 deletions src/shogun/machine/Pipeline.cpp
@@ -130,42 +130,43 @@ namespace shogun
{
REQUIRE(m_labels, "No labels given.\n");
}

+auto current_data = wrap(data);
for (auto&& stage : m_stages)
{
if (holds_alternative<CTransformer*>(stage.second))
{
auto transformer = shogun::get<CTransformer*>(stage.second);
transformer->train_require_labels()
-? transformer->fit(data, m_labels)
-: transformer->fit(data);
+? transformer->fit(current_data, m_labels)
+: transformer->fit(current_data);

-data = transformer->transform(data);
+current_data = wrap(transformer->transform(current_data));
}
else
{
auto machine = shogun::get<CMachine*>(stage.second);
if (machine->train_require_labels())
machine->set_labels(m_labels);
-machine->train(data);
+machine->train(current_data);
}
}
return true;
}

CLabels* CPipeline::apply(CFeatures* data)
{
+auto current_data = wrap(data);
for (auto&& stage : m_stages)
{
if (holds_alternative<CTransformer*>(stage.second))
{
auto transformer = shogun::get<CTransformer*>(stage.second);
-data = transformer->transform(data);
+current_data = wrap(transformer->transform(current_data));
}
else
{
auto machine = shogun::get<CMachine*>(stage.second);
-return machine->apply(data);
+return machine->apply(current_data);
}
}

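wrap() is the counterpart of some<>() for pointers that already exist: it puts a raw object under Some<T>'s automatic reference counting, which is why CPipeline can reassign current_data at every stage without the SG_REF/SG_UNREF choreography the old raw data pointer required. A sketch, assuming the 2019-era semantics where the handle releases its reference on reassignment and at scope exit:

    #include <shogun/base/init.h>
    #include <shogun/base/some.h>
    #include <shogun/features/DenseFeatures.h>
    #include <shogun/lib/SGMatrix.h>

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();
        {
            SGMatrix<float64_t> m1(2, 2);
            m1.set_const(1.0);

            // adopt an existing pointer into a managed handle
            auto current_data = wrap(new CDenseFeatures<float64_t>(m1));

            // reassignment drops the old reference and takes the new one,
            // mirroring current_data = wrap(transformer->transform(...))
            // in CPipeline above
            SGMatrix<float64_t> m2(2, 2);
            m2.set_const(2.0);
            current_data = wrap(new CDenseFeatures<float64_t>(m2));
        }
        exit_shogun();
        return 0;
    }
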
4 changes: 1 addition & 3 deletions src/shogun/multiclass/ShareBoost.cpp
@@ -1,7 +1,7 @@
/*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
-* Authors: Chiyuan Zhang, Soeren Sonnenburg, Pan Deng, Bjoern Esser, 
+* Authors: Chiyuan Zhang, Soeren Sonnenburg, Pan Deng, Bjoern Esser,
* Sanuj Sharma
*/

@@ -110,7 +110,6 @@ void CShareBoost::compute_pred()
{
CDenseFeatures<float64_t> *fea = dynamic_cast<CDenseFeatures<float64_t> *>(m_features);
CDenseSubsetFeatures<float64_t> *subset_fea = new CDenseSubsetFeatures<float64_t>(fea, m_activeset);
-SG_REF(subset_fea);
for (int32_t i=0; i < m_multiclass_strategy->get_num_classes(); ++i)
{
CLinearMachine *machine = dynamic_cast<CLinearMachine *>(m_machines->get_element(i));
@@ -120,7 +119,6 @@ void CShareBoost::compute_pred()
SG_UNREF(machine);
SG_UNREF(lab);
}
-SG_UNREF(subset_fea);
}

void CShareBoost::compute_pred(const float64_t *W)
12 changes: 2 additions & 10 deletions src/shogun/preprocessor/DensePreprocessor.cpp
@@ -92,15 +92,11 @@ CFeatures* CDensePreprocessor<ST>::transform(CFeatures* features, bool inplace)
"has to be of C_DENSE (%d) class!\n",
features->get_feature_class(), C_DENSE);

-SG_REF(features);
auto matrix = features->as<CDenseFeatures<ST>>()->get_feature_matrix();
if (!inplace)
matrix = matrix.clone();
auto feat_matrix = apply_to_matrix(matrix);
-auto preprocessed = new CDenseFeatures<ST>(feat_matrix);
-
-SG_UNREF(features);
-return preprocessed;
+return new CDenseFeatures<ST>(feat_matrix);
}

template <class ST>
@@ -113,15 +109,11 @@ CDensePreprocessor<ST>::inverse_transform(CFeatures* features, bool inplace)
"has to be of C_DENSE (%d) class!\n",
features->get_feature_class(), C_DENSE);

-SG_REF(features);
auto matrix = features->as<CDenseFeatures<ST>>()->get_feature_matrix();
if (!inplace)
matrix = matrix.clone();
auto feat_matrix = inverse_apply_to_matrix(matrix);
-auto preprocessed = new CDenseFeatures<ST>(feat_matrix);
-
-SG_UNREF(features);
-return preprocessed;
+return new CDenseFeatures<ST>(feat_matrix);
}

template <class ST>
6 changes: 1 addition & 5 deletions src/shogun/regression/GaussianProcessRegression.cpp
@@ -1,7 +1,7 @@
/*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
-* Authors: Jacob Walker, Roman Votyakov, Sergey Lisitsyn, Soeren Sonnenburg, 
+* Authors: Jacob Walker, Roman Votyakov, Sergey Lisitsyn, Soeren Sonnenburg,
* Heiko Strathmann, Wu Lin
*/

@@ -110,10 +110,8 @@ SGVector<float64_t> CGaussianProcessRegression::get_mean_vector(CFeatures* data)
"regression\n", m_method->get_name(), lik->get_name())
SG_UNREF(lik);

-SG_REF(data);
SGVector<float64_t> mu=get_posterior_means(data);
SGVector<float64_t> s2=get_posterior_variances(data);
-SG_UNREF(data);

// evaluate mean
lik=m_method->get_model();
@@ -133,10 +131,8 @@ SGVector<float64_t> CGaussianProcessRegression::get_variance_vector(
REQUIRE(m_method->supports_regression(), "%s with %s doesn't support "
"regression\n", m_method->get_name(), lik->get_name())

-SG_REF(data);
SGVector<float64_t> mu=get_posterior_means(data);
SGVector<float64_t> s2=get_posterior_variances(data);
-SG_UNREF(data);

// evaluate variance
s2=lik->get_predictive_variances(mu, s2);
10 changes: 5 additions & 5 deletions src/shogun/solver/LDASolver.h
@@ -76,21 +76,21 @@ namespace shogun
CDenseFeatures<T>* features, CMulticlassLabels* labels,
float64_t gamma = 0.0)
{
-SG_REF(features);
-SG_REF(labels);
-
m_features = features;
m_labels = labels;
m_gamma = gamma;

+SG_REF(m_features)
+SG_REF(m_labels)
+
compute_means();
compute_within_cov();
}

~LDASolver()
{
-SG_UNREF(m_features)
-SG_UNREF(m_labels)
+SG_UNREF(m_features);
+SG_UNREF(m_labels);
}

/** @return the vector of classes' mean */
