From 62c94282bdf9c5bf6db5bd1c492f87c544b59673 Mon Sep 17 00:00:00 2001 From: Wuwei Lin Date: Mon, 14 May 2018 22:16:55 +0800 Subject: [PATCH] Cleanup dense preprocessors and rename init to fit --- src/shogun/metric/LMNNImpl.cpp | 2 +- .../preprocessor/HomogeneousKernelMap.cpp | 9 +- .../preprocessor/HomogeneousKernelMap.h | 3 - src/shogun/preprocessor/LogPlusOne.cpp | 13 +- src/shogun/preprocessor/LogPlusOne.h | 3 - src/shogun/preprocessor/NormOne.cpp | 12 +- src/shogun/preprocessor/NormOne.h | 2 - src/shogun/preprocessor/PNorm.cpp | 12 +- src/shogun/preprocessor/PNorm.h | 2 - src/shogun/preprocessor/PruneVarSubMean.cpp | 116 +++++++++--------- src/shogun/preprocessor/PruneVarSubMean.h | 4 +- .../RandomFourierGaussPreproc.cpp | 20 +-- .../preprocessor/RandomFourierGaussPreproc.h | 31 +++-- src/shogun/preprocessor/RescaleFeatures.cpp | 76 ++++++------ src/shogun/preprocessor/RescaleFeatures.h | 59 +++++---- src/shogun/preprocessor/SumOne.cpp | 12 +- src/shogun/preprocessor/SumOne.h | 2 - .../GaussianProcessClassification_unittest.cc | 4 +- .../unit/preprocessor/LogPlusOne_unittest.cc | 2 +- .../MultipleProcessors_unittest.cc | 4 +- .../preprocessor/Preprocessor_unittest.cc | 4 +- .../preprocessor/RescaleFeatures_unittest.cc | 2 +- tests/unit/regression/lars_unittest.cc | 4 +- 23 files changed, 172 insertions(+), 226 deletions(-) diff --git a/src/shogun/metric/LMNNImpl.cpp b/src/shogun/metric/LMNNImpl.cpp index c72267a1533..6270dd20b2e 100644 --- a/src/shogun/metric/LMNNImpl.cpp +++ b/src/shogun/metric/LMNNImpl.cpp @@ -365,7 +365,7 @@ SGMatrix CLMNNImpl::compute_pca_transform(CDenseFeatures* new CDenseFeatures(features->get_feature_matrix().clone()); CPruneVarSubMean* mean_substractor = new CPruneVarSubMean(false); // false to avoid variance normalization - mean_substractor->init(cloned_features); + mean_substractor->fit(cloned_features); mean_substractor->apply_to_feature_matrix(cloned_features); // Obtain the linear transform applying PCA diff --git 
a/src/shogun/preprocessor/HomogeneousKernelMap.cpp b/src/shogun/preprocessor/HomogeneousKernelMap.cpp index 70667a255f7..93589c04003 100644 --- a/src/shogun/preprocessor/HomogeneousKernelMap.cpp +++ b/src/shogun/preprocessor/HomogeneousKernelMap.cpp @@ -43,13 +43,6 @@ CHomogeneousKernelMap::~CHomogeneousKernelMap() { } -bool CHomogeneousKernelMap::init(CFeatures* features) -{ - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) - - return true; -} void CHomogeneousKernelMap::cleanup() { @@ -163,7 +156,7 @@ void CHomogeneousKernelMap::init() SGMatrix CHomogeneousKernelMap::apply_to_feature_matrix (CFeatures* features) { - CDenseFeatures* simple_features = (CDenseFeatures*)features; + auto simple_features = features->as>(); int32_t num_vectors = simple_features->get_num_vectors (); int32_t num_features = simple_features->get_num_features (); diff --git a/src/shogun/preprocessor/HomogeneousKernelMap.h b/src/shogun/preprocessor/HomogeneousKernelMap.h index ee7ee792af4..73b3270be69 100644 --- a/src/shogun/preprocessor/HomogeneousKernelMap.h +++ b/src/shogun/preprocessor/HomogeneousKernelMap.h @@ -69,9 +69,6 @@ namespace shogun /** destructor */ virtual ~CHomogeneousKernelMap(); - /** initialize preprocessor from features */ - virtual bool init(CFeatures* features); - /** cleanup */ virtual void cleanup(); diff --git a/src/shogun/preprocessor/LogPlusOne.cpp b/src/shogun/preprocessor/LogPlusOne.cpp index 91aefbe7263..36e8825dc00 100644 --- a/src/shogun/preprocessor/LogPlusOne.cpp +++ b/src/shogun/preprocessor/LogPlusOne.cpp @@ -21,15 +21,6 @@ CLogPlusOne::~CLogPlusOne() { } -/// initialize preprocessor from features -bool CLogPlusOne::init(CFeatures* features) -{ - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) - - return true; -} - /// clean up allocated memory void CLogPlusOne::cleanup() { @@ -56,8 +47,8 @@ bool CLogPlusOne::save(FILE* f) /// return pointer to feature_matrix, 
i.e. f->get_feature_matrix(); SGMatrix CLogPlusOne::apply_to_feature_matrix(CFeatures* features) { - SGMatrix feature_matrix = - ((CDenseFeatures*)features)->get_feature_matrix(); + auto feature_matrix = + features->as>()->get_feature_matrix(); for (int32_t i=0; i /** destructor */ virtual ~CLogPlusOne(); - /// initialize preprocessor from features - virtual bool init(CFeatures* features); - /// cleanup virtual void cleanup(); /// initialize preprocessor from file diff --git a/src/shogun/preprocessor/NormOne.cpp b/src/shogun/preprocessor/NormOne.cpp index 53134a7ae1e..e8c269701a2 100644 --- a/src/shogun/preprocessor/NormOne.cpp +++ b/src/shogun/preprocessor/NormOne.cpp @@ -22,15 +22,6 @@ CNormOne::~CNormOne() { } -/// initialize preprocessor from features -bool CNormOne::init(CFeatures* features) -{ - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) - - return true; -} - /// clean up allocated memory void CNormOne::cleanup() { @@ -57,7 +48,8 @@ bool CNormOne::save(FILE* f) /// return pointer to feature_matrix, i.e. 
f->get_feature_matrix(); SGMatrix CNormOne::apply_to_feature_matrix(CFeatures* features) { - SGMatrix feature_matrix=((CDenseFeatures*)features)->get_feature_matrix(); + auto feature_matrix = + features->as>()->get_feature_matrix(); for (int32_t i=0; i /** destructor */ virtual ~CNormOne(); - /// initialize preprocessor from features - virtual bool init(CFeatures* features); /// cleanup virtual void cleanup(); /// initialize preprocessor from file diff --git a/src/shogun/preprocessor/PNorm.cpp b/src/shogun/preprocessor/PNorm.cpp index 6fba5cf7308..88af434e4cb 100644 --- a/src/shogun/preprocessor/PNorm.cpp +++ b/src/shogun/preprocessor/PNorm.cpp @@ -35,15 +35,6 @@ CPNorm::~CPNorm () { } -/// initialize preprocessor from features -bool CPNorm::init (CFeatures* features) -{ - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) - - return true; -} - /// clean up allocated memory void CPNorm::cleanup () { @@ -70,7 +61,8 @@ bool CPNorm::save (FILE* f) /// return pointer to feature_matrix, i.e. 
f->get_feature_matrix(); SGMatrix CPNorm::apply_to_feature_matrix (CFeatures* features) { - SGMatrix feature_matrix=((CDenseFeatures*)features)->get_feature_matrix(); + auto feature_matrix = + features->as>()->get_feature_matrix(); for (int32_t i=0; i /** destructor */ virtual ~CPNorm (); - /// initialize preprocessor from features - virtual bool init (CFeatures* features); /// cleanup virtual void cleanup (); /// initialize preprocessor from file diff --git a/src/shogun/preprocessor/PruneVarSubMean.cpp b/src/shogun/preprocessor/PruneVarSubMean.cpp index 91c5dc24d42..f56b525b5fd 100644 --- a/src/shogun/preprocessor/PruneVarSubMean.cpp +++ b/src/shogun/preprocessor/PruneVarSubMean.cpp @@ -26,84 +26,78 @@ CPruneVarSubMean::~CPruneVarSubMean() cleanup(); } -/// initialize preprocessor from features -bool CPruneVarSubMean::init(CFeatures* features) +void CPruneVarSubMean::fit(CFeatures* features) { - if (!m_initialized) - { - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) + if (m_initialized) + cleanup(); - CDenseFeatures* simple_features=(CDenseFeatures*) features; - int32_t num_examples = simple_features->get_num_vectors(); - int32_t num_features = simple_features->get_num_features(); + auto simple_features = features->as>(); + int32_t num_examples = simple_features->get_num_vectors(); + int32_t num_features = simple_features->get_num_features(); - m_mean = SGVector(); - m_idx = SGVector(); - m_std = SGVector();; + m_mean = SGVector(); + m_idx = SGVector(); + m_std = SGVector(); - m_mean.resize_vector(num_features); - float64_t* var=SG_MALLOC(float64_t, num_features); - int32_t i,j; + m_mean.resize_vector(num_features); + float64_t* var = SG_MALLOC(float64_t, num_features); + int32_t i, j; - memset(var, 0, num_features*sizeof(float64_t)); - m_mean.zero(); + memset(var, 0, num_features * sizeof(float64_t)); + m_mean.zero(); - SGMatrix feature_matrix = simple_features->get_feature_matrix(); + auto feature_matrix = 
simple_features->get_feature_matrix(); - // compute mean - for (i=0; i=1e-14) - { - idx_ok[num_ok]=j; - num_ok++ ; - } + if (var[j] >= 1e-14) + { + idx_ok[num_ok] = j; + num_ok++; } + } - SG_INFO("Reducing number of features from %i to %i\n", num_features, num_ok) + SG_INFO("Reducing number of features from %i to %i\n", num_features, num_ok) - m_idx.resize_vector(num_ok); - SGVector new_mean(num_ok); - m_std.resize_vector(num_ok); + m_idx.resize_vector(num_ok); + SGVector new_mean(num_ok); + m_std.resize_vector(num_ok); - for (j=0; j /** destructor */ virtual ~CPruneVarSubMean(); - /// initialize preprocessor from features - virtual bool init(CFeatures* features); + /// Fit preprocessor into features + virtual void fit(CFeatures* features); /// cleanup virtual void cleanup(); diff --git a/src/shogun/preprocessor/RandomFourierGaussPreproc.cpp b/src/shogun/preprocessor/RandomFourierGaussPreproc.cpp index 30fcc9adc57..02c7c548d81 100644 --- a/src/shogun/preprocessor/RandomFourierGaussPreproc.cpp +++ b/src/shogun/preprocessor/RandomFourierGaussPreproc.cpp @@ -329,15 +329,8 @@ void CRandomFourierGaussPreproc::set_randomcoefficients( } -bool CRandomFourierGaussPreproc::init(CFeatures *f) { - if (f->get_feature_class() != get_feature_class()) { - throw ShogunException( - "CRandomFourierGaussPreproc::init (CFeatures *f) requires CDenseFeatures as features\n"); - } - if (f->get_feature_type() != get_feature_type()) { - throw ShogunException( - "CRandomFourierGaussPreproc::init (CFeatures *f) requires CDenseFeatures as features\n"); - } +void CRandomFourierGaussPreproc::fit(CFeatures* f) +{ if (dim_feature_space <= 0) { throw ShogunException( "CRandomFourierGaussPreproc::init (CFeatures *f): dim_feature_space<=0 is not allowed, use void set_dim_feature_space(const int32 dim) before!\n"); @@ -345,20 +338,17 @@ bool CRandomFourierGaussPreproc::init(CFeatures *f) { SG_INFO("calling CRandomFourierGaussPreproc::init(...)\n") int32_t num_features = - ((CDenseFeatures*) 
f)->get_num_features(); + f->as>()->get_num_features(); if (!test_rfinited()) { dim_input_space = num_features; init_randomcoefficients(); ASSERT( test_rfinited()) - return true; } else { dim_input_space = num_features; // does not reinit if dimension is the same to avoid overriding a previous call of set_randomcoefficients(...) - bool inited = init_randomcoefficients(); - return inited; + init_randomcoefficients(); } - } SGVector CRandomFourierGaussPreproc::apply_to_feature_vector(SGVector vector) @@ -381,8 +371,6 @@ SGVector CRandomFourierGaussPreproc::apply_to_feature_vector(SGVector SGMatrix CRandomFourierGaussPreproc::apply_to_feature_matrix(CFeatures* features) { - init(features); - // version for case dim_feature_space < dim_input space with direct transformation on feature matrix ?? int32_t num_vectors = 0; diff --git a/src/shogun/preprocessor/RandomFourierGaussPreproc.h b/src/shogun/preprocessor/RandomFourierGaussPreproc.h index 302f8a1e0bf..1feb0b3c62b 100644 --- a/src/shogun/preprocessor/RandomFourierGaussPreproc.h +++ b/src/shogun/preprocessor/RandomFourierGaussPreproc.h @@ -79,14 +79,18 @@ class CRandomFourierGaussPreproc: public CDensePreprocessor { */ virtual EFeatureClass get_feature_class(); - /** initializer routine + /** fit to features * calls set_dim_input_space(const int32_t dim); with the proper value - * calls init_randomcoefficients(); this call does NOT override a previous call to void set_randomcoefficients(...) IF and ONLY IF - * the dimensions of input AND feature space are equal to the values from the previous call to void set_randomcoefficients(...) - * @param f the features to be processed, must be of type CDenseFeatures - * @return true if new random coefficients were generated, false if old ones from a call to set_randomcoefficients(...) are kept + * calls init_randomcoefficients(); this call does NOT override a previous + * call to void set_randomcoefficients(...) 
IF and ONLY IF + * the dimensions of input AND feature space are equal to the values from + * the previous call to void set_randomcoefficients(...) + * @param f the features to be processed, must be of type + * CDenseFeatures + * @return true if new random coefficients were generated, false if old ones + * from a call to set_randomcoefficients(...) are kept */ - virtual bool init(CFeatures *f); + virtual void fit(CFeatures* f); /** setter for kernel width * @param width kernel width to be set @@ -132,14 +136,21 @@ class CRandomFourierGaussPreproc: public CDensePreprocessor { void set_dim_feature_space(const int32_t dim); /** computes new random coefficients IF test_rfinited() evaluates to false - * test_rfinited() evaluates to TRUE if void set_randomcoefficients(...) hase been called and the values set by set_dim_input_space(...) , set_dim_feature_space(...) and set_kernelwidth(...) are consistent to the call of void set_randomcoefficients(...) + * test_rfinited() evaluates to TRUE if void set_randomcoefficients(...) + * has been called and the values set by set_dim_input_space(...) , + * set_dim_feature_space(...) and set_kernelwidth(...) are consistent to the + * call of void set_randomcoefficients(...) * * throws shogun exception if dim_feature_space <= 0 or dim_input_space <= 0 * - * @return returns true if test_rfinited() evaluates to false and new coefficients are computed - * returns false if test_rfinited() evaluates to true and old random coefficients are kept which were set by a previous call to void set_randomcoefficients(...) + * @return returns true if test_rfinited() evaluates to false and new + * coefficients are computed + * returns false if test_rfinited() evaluates to true and old random + * coefficients are kept which were set by a previous call to void + * set_randomcoefficients(...) 
* - * this function is useful if you want to use apply_to_feature_vector but cannot call before it init(CFeatures *f) + * this function is useful if you want to use apply_to_feature_vector but + * cannot call before it fit(CFeatures *f) * */ bool init_randomcoefficients(); diff --git a/src/shogun/preprocessor/RescaleFeatures.cpp b/src/shogun/preprocessor/RescaleFeatures.cpp index c7311b1eda0..8dcf1d5f5ee 100644 --- a/src/shogun/preprocessor/RescaleFeatures.cpp +++ b/src/shogun/preprocessor/RescaleFeatures.cpp @@ -20,54 +20,49 @@ CRescaleFeatures::~CRescaleFeatures() cleanup(); } -bool CRescaleFeatures::init(CFeatures* features) +void CRescaleFeatures::fit(CFeatures* features) { - if (!m_initialized) - { - ASSERT(features->get_feature_class()==C_DENSE); - ASSERT(features->get_feature_type()==F_DREAL); + if (m_initialized) + cleanup(); + + auto simple_features = features->as>(); + int32_t num_examples = simple_features->get_num_vectors(); + int32_t num_features = simple_features->get_num_features(); + REQUIRE( + num_examples > 1, "number of feature vectors should be at least 2!\n"); - CDenseFeatures* simple_features=(CDenseFeatures*) features; - int32_t num_examples = simple_features->get_num_vectors(); - int32_t num_features = simple_features->get_num_features(); - REQUIRE(num_examples > 1, - "number of feature vectors should be at least 2!\n"); + SG_INFO("Extracting min and range values for each feature\n") - SG_INFO("Extracting min and range values for each feature\n") + m_min = SGVector(num_features); + m_range = SGVector(num_features); + auto feature_matrix = simple_features->get_feature_matrix(); + for (index_t i = 0; i < num_features; i++) + { + SGVector vec = feature_matrix.get_row_vector(i); + float64_t cur_min = vec[0]; + float64_t cur_max = vec[0]; - m_min = SGVector(num_features); - m_range = SGVector(num_features); - SGMatrix feature_matrix=((CDenseFeatures*)features)->get_feature_matrix(); - for (index_t i = 0; i < num_features; i++) + /* find the max 
and min values in one loop */ + for (index_t j = 1; j < vec.vlen; j++) { - SGVector vec = feature_matrix.get_row_vector(i); - float64_t cur_min = vec[0]; - float64_t cur_max = vec[0]; - - /* find the max and min values in one loop */ - for (index_t j = 1; j < vec.vlen; j++) - { - cur_min = CMath::min(vec[j], cur_min); - cur_max = CMath::max(vec[j], cur_max); - } - - /* only rescale if range > 0 */ - if ((cur_max - cur_min) > 0) { - m_min[i] = cur_min; - m_range[i] = 1.0/(cur_max - cur_min); - } - else { - m_min[i] = 0.0; - m_range[i] = 1.0; - } + cur_min = CMath::min(vec[j], cur_min); + cur_max = CMath::max(vec[j], cur_max); } - m_initialized = true; - - return true; + /* only rescale if range > 0 */ + if ((cur_max - cur_min) > 0) + { + m_min[i] = cur_min; + m_range[i] = 1.0 / (cur_max - cur_min); + } + else + { + m_min[i] = 0.0; + m_range[i] = 1.0; + } } - return false; + m_initialized = true; } void CRescaleFeatures::cleanup() @@ -79,7 +74,8 @@ SGMatrix CRescaleFeatures::apply_to_feature_matrix(CFeatures* feature { ASSERT(m_initialized); - SGMatrix feature_matrix=((CDenseFeatures*)features)->get_feature_matrix(); + auto feature_matrix = + features->as>()->get_feature_matrix(); ASSERT(feature_matrix.num_rows == m_min.vlen); for (index_t i = 0; i < feature_matrix.num_cols; i++) diff --git a/src/shogun/preprocessor/RescaleFeatures.h b/src/shogun/preprocessor/RescaleFeatures.h index fc727a988e2..eca25eeb40e 100644 --- a/src/shogun/preprocessor/RescaleFeatures.h +++ b/src/shogun/preprocessor/RescaleFeatures.h @@ -32,36 +32,45 @@ namespace shogun /** dtor */ virtual ~CRescaleFeatures(); - /** - * initialize preprocessor from features - * - * @param features the features to derive the min and max values from. - */ - virtual bool init(CFeatures* features); + /** + * Fit preprocessor into features + * + * @param features the features to derive the min and max values + * from. 
+ */ + virtual void fit(CFeatures* features); - /** - * Cleanup - */ - virtual void cleanup(); + /** + * Cleanup + */ + virtual void cleanup(); - /** - * Apply preproc on a feature matrix - * - * @param features input feature matrix - * @return pointer to feature_matrix, i.e. f->get_feature_matrix(); - */ - virtual SGMatrix apply_to_feature_matrix(CFeatures* features); + /** + * Apply preproc on a feature matrix + * + * @param features input feature matrix + * @return pointer to feature_matrix, i.e. f->get_feature_matrix(); + */ + virtual SGMatrix + apply_to_feature_matrix(CFeatures* features); - /** - * Apply preproc on a single feature vector - */ - virtual SGVector apply_to_feature_vector(SGVector vector); + /** + * Apply preproc on a single feature vector + */ + virtual SGVector + apply_to_feature_vector(SGVector vector); - /** @return object name */ - virtual const char* get_name() const { return "RescaleFeatures"; } + /** @return object name */ + virtual const char* get_name() const + { + return "RescaleFeatures"; + } - /** return a type of preprocessor */ - virtual EPreprocessorType get_type() const { return P_RESCALEFEATURES; } + /** return a type of preprocessor */ + virtual EPreprocessorType get_type() const + { + return P_RESCALEFEATURES; + } private: void register_parameters(); diff --git a/src/shogun/preprocessor/SumOne.cpp b/src/shogun/preprocessor/SumOne.cpp index 557a4d6c9da..9f083ec030d 100644 --- a/src/shogun/preprocessor/SumOne.cpp +++ b/src/shogun/preprocessor/SumOne.cpp @@ -20,15 +20,6 @@ CSumOne::~CSumOne() { } -/// initialize preprocessor from features -bool CSumOne::init(CFeatures* features) -{ - ASSERT(features->get_feature_class()==C_DENSE) - ASSERT(features->get_feature_type()==F_DREAL) - - return true; -} - /// clean up allocated memory void CSumOne::cleanup() { @@ -55,7 +46,8 @@ bool CSumOne::save(FILE* f) /// return pointer to feature_matrix, i.e. 
f->get_feature_matrix(); SGMatrix CSumOne::apply_to_feature_matrix(CFeatures* features) { - SGMatrix feature_matrix=((CDenseFeatures*)features)->get_feature_matrix(); + auto feature_matrix = + features->as>()->get_feature_matrix(); for (int32_t i=0; i /** destructor */ virtual ~CSumOne(); - /// initialize preprocessor from features - virtual bool init(CFeatures* features); /// cleanup virtual void cleanup(); /// initialize preprocessor from file diff --git a/tests/unit/classifier/GaussianProcessClassification_unittest.cc b/tests/unit/classifier/GaussianProcessClassification_unittest.cc index ebe549b27fa..37e200333ec 100644 --- a/tests/unit/classifier/GaussianProcessClassification_unittest.cc +++ b/tests/unit/classifier/GaussianProcessClassification_unittest.cc @@ -593,7 +593,7 @@ TEST_F(GaussianProcessClassification, get_probabilities) TEST_F(GaussianProcessClassification, apply_preprocessor_and_binary) { CRescaleFeatures* preproc=new CRescaleFeatures(); - preproc->init(features_train); + preproc->fit(features_train); features_train->add_preprocessor(preproc); features_train->apply_preprocessor(); @@ -2504,4 +2504,4 @@ TEST_F(GaussianProcessClassificationUsingSingleFITCLaplace, get_probabilities) abs_tolorance = CMath::get_abs_tolerance(0.751048409588992, rel_tolorance); EXPECT_NEAR(probabilities[3], 0.751048409588992, abs_tolorance); } -#endif // USE_GPL_SHOGUN \ No newline at end of file +#endif // USE_GPL_SHOGUN diff --git a/tests/unit/preprocessor/LogPlusOne_unittest.cc b/tests/unit/preprocessor/LogPlusOne_unittest.cc index 97cac68b996..c116c2bcede 100644 --- a/tests/unit/preprocessor/LogPlusOne_unittest.cc +++ b/tests/unit/preprocessor/LogPlusOne_unittest.cc @@ -20,7 +20,7 @@ TEST(LogPlusOne, apply_to_feature_matrix) CDenseFeatures* feats = new CDenseFeatures(m); CLogPlusOne* preproc = new CLogPlusOne(); - preproc->init(feats); + preproc->fit(feats); feats->add_preprocessor(preproc); feats->apply_preprocessor(); diff --git 
a/tests/unit/preprocessor/MultipleProcessors_unittest.cc b/tests/unit/preprocessor/MultipleProcessors_unittest.cc index efb7b83da73..eea768d1fa4 100644 --- a/tests/unit/preprocessor/MultipleProcessors_unittest.cc +++ b/tests/unit/preprocessor/MultipleProcessors_unittest.cc @@ -23,10 +23,10 @@ TEST(MultipleProcessors, apply_to_feature_matrix) CDenseFeatures* feats = new CDenseFeatures(m); CSumOne* sum1 = new CSumOne(); CLogPlusOne* logp1 = new CLogPlusOne(); - sum1->init(feats); + sum1->fit(feats); feats->add_preprocessor(sum1); - logp1->init(feats); + logp1->fit(feats); feats->add_preprocessor(logp1); feats->apply_preprocessor(); diff --git a/tests/unit/preprocessor/Preprocessor_unittest.cc b/tests/unit/preprocessor/Preprocessor_unittest.cc index 1459be0ded1..d2c9c4c0527 100644 --- a/tests/unit/preprocessor/Preprocessor_unittest.cc +++ b/tests/unit/preprocessor/Preprocessor_unittest.cc @@ -50,7 +50,7 @@ TEST(Preprocessor, dense_apply) CDenseFeatures* features=new CDenseFeatures(data); CDensePreprocessor* preproc=new CNormOne(); - preproc->init(features); + preproc->fit(features); CFeatures* preprocessed=preproc->apply(features); @@ -85,7 +85,7 @@ TEST(Preprocessor, string_apply) /* create num_features 2-dimensional vectors */ CStringFeatures* features=new CStringFeatures(strings, ALPHANUM); CStringPreprocessor* preproc=new CSortWordString(); - preproc->init(features); + preproc->fit(features); CFeatures* preprocessed=preproc->apply(features); diff --git a/tests/unit/preprocessor/RescaleFeatures_unittest.cc b/tests/unit/preprocessor/RescaleFeatures_unittest.cc index 116d6abc16f..d9290473667 100644 --- a/tests/unit/preprocessor/RescaleFeatures_unittest.cc +++ b/tests/unit/preprocessor/RescaleFeatures_unittest.cc @@ -23,7 +23,7 @@ TEST(RescaleFeatures, apply_to_feature_matrix) SGMatrix em(ev.vector, num_features, num_vectors, false); CDenseFeatures* feats = new CDenseFeatures(m); CRescaleFeatures* rescaler = new CRescaleFeatures(); - rescaler->init(feats); + 
rescaler->fit(feats); /* find the min and range for each feature among all the vectors */ for (index_t i = 0; i < num_features; i++) diff --git a/tests/unit/regression/lars_unittest.cc b/tests/unit/regression/lars_unittest.cc index db0c45c8b3a..938f74cdf21 100644 --- a/tests/unit/regression/lars_unittest.cc +++ b/tests/unit/regression/lars_unittest.cc @@ -432,9 +432,9 @@ TEST(LeastAngleRegression, ols_equivalence) CPruneVarSubMean* proc1=new CPruneVarSubMean(); CNormOne* proc2=new CNormOne(); - proc1->init(features); + proc1->fit(features); proc1->apply_to_feature_matrix(features); - proc2->init(features); + proc2->fit(features); proc2->apply_to_feature_matrix(features); CRegressionLabels* labels=new CRegressionLabels(lab);