Skip to content

Commit

Permalink
Merge pull request #2686 from lisitsyn/bugfix/clang_warnings
Browse files Browse the repository at this point in the history
Removed some causes of compiler warnings
  • Loading branch information
iglesias committed Jan 30, 2015
2 parents cf48f94 + 4fe4a2e commit e4a7140
Show file tree
Hide file tree
Showing 17 changed files with 34 additions and 31 deletions.
2 changes: 1 addition & 1 deletion examples/undocumented/libshogun/preprocessor_fisherlda.cpp
Expand Up @@ -45,7 +45,7 @@ void test()
// Initiate the FisherLDA class
CFisherLDA* fisherlda=new CFisherLDA(AUTO_FLDA);
SG_REF(fisherlda)
fisherlda->init(features, labels, 1);
fisherlda->fit(features, labels, 1);
SGMatrix<float64_t> y=fisherlda->apply_to_feature_matrix(features);

// display output
Expand Down
Expand Up @@ -79,7 +79,7 @@ int main(int argc, char* argv[])

uint64_t train_time_int = train_time.cur_time_diff();
fprintf(stderr,
"*** total training time: %lum%lus (or %.1f sec), #dim = %d, ||w|| = %f\n",
"*** total training time: %llum%llus (or %.1f sec), #dim = %d, ||w|| = %f\n",
train_time_int / 60, train_time_int % 60, train_time.cur_time_diff(),
w_now.vlen, w_now_norm
);
Expand All @@ -105,7 +105,7 @@ int main(int argc, char* argv[])

test_time.stop();
uint64_t test_time_int = test_time.cur_time_diff();
fprintf(stderr, "*** testing took %lum%lus (or %.1f sec)\n",
fprintf(stderr, "*** testing took %llum%llus (or %.1f sec)\n",
test_time_int / 60, test_time_int % 60, test_time.cur_time_diff());

SG_UNREF(test_features);
Expand Down
Expand Up @@ -18,7 +18,7 @@ def preprocessor_fisherlda_modular (data, labels, method):
sg_labels = MulticlassLabels(labels)

preprocessor=FisherLda(method)
preprocessor.init(sg_features, sg_labels, 1)
preprocessor.fit(sg_features, sg_labels, 1)
yn=preprocessor.apply_to_feature_matrix(sg_features)

return yn
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/machine/gp/SoftMaxLikelihood.cpp
Expand Up @@ -301,7 +301,7 @@ SGVector<float64_t> CSoftMaxLikelihood::predictive_helper(SGVector<float64_t> mu
}

SGVector<float64_t> CSoftMaxLikelihood::get_predictive_log_probabilities(
SGVector<float64_t> mu, SGVector<float64_t> s2, const CLabels *lab) const
SGVector<float64_t> mu, SGVector<float64_t> s2, const CLabels *lab)
{
return predictive_helper(mu, s2, lab, MC_Probability);
}
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/machine/gp/SoftMaxLikelihood.h
Expand Up @@ -161,7 +161,7 @@ class CSoftMaxLikelihood : public CLikelihoodModel
* @return \f$log(p(y_*|X, y, x*))\f$ for each label \f$y_*\f$ (based on 0 and 1 Bernoulli encoding)
*/
virtual SGVector<float64_t> get_predictive_log_probabilities(SGVector<float64_t> mu,
SGVector<float64_t> s2, const CLabels *lab=NULL) const;
SGVector<float64_t> s2, const CLabels *lab=NULL);

/** returns the logarithm of the point-wise likelihood \f$log(p(y_i|f_i))\f$
* for each label \f$y_i\f$, an integer between 1 and C (ie. number of classes).
Expand Down
4 changes: 2 additions & 2 deletions src/shogun/preprocessor/BAHSIC.cpp
Expand Up @@ -35,10 +35,10 @@ using namespace shogun;

CBAHSIC::CBAHSIC() : CKernelDependenceMaximization()
{
init();
initialize();
}

void CBAHSIC::init()
void CBAHSIC::initialize()
{
m_estimator=new CHSIC();
SG_REF(m_estimator);
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/preprocessor/BAHSIC.h
Expand Up @@ -84,7 +84,7 @@ class CBAHSIC : public CKernelDependenceMaximization

private:
/** Register params and initialize with default values */
void init();
void initialize();

};

Expand Down
6 changes: 3 additions & 3 deletions src/shogun/preprocessor/DimensionReductionPreprocessor.cpp
Expand Up @@ -15,7 +15,7 @@ CDimensionReductionPreprocessor::CDimensionReductionPreprocessor()
m_kernel = new CLinearKernel();
m_converter = NULL;

init();
initialize();
}

CDimensionReductionPreprocessor::CDimensionReductionPreprocessor(CEmbeddingConverter* converter)
Expand All @@ -27,7 +27,7 @@ CDimensionReductionPreprocessor::CDimensionReductionPreprocessor(CEmbeddingConve
m_kernel = new CLinearKernel();
m_converter = converter;

init();
initialize();
}

CDimensionReductionPreprocessor::~CDimensionReductionPreprocessor()
Expand Down Expand Up @@ -104,7 +104,7 @@ CKernel* CDimensionReductionPreprocessor::get_kernel() const
return m_kernel;
}

void CDimensionReductionPreprocessor::init()
void CDimensionReductionPreprocessor::initialize()
{
SG_ADD((CSGObject**)&m_converter, "converter",
"embedding converter used to apply to data", MS_AVAILABLE);
Expand Down
4 changes: 2 additions & 2 deletions src/shogun/preprocessor/DimensionReductionPreprocessor.h
Expand Up @@ -107,10 +107,10 @@ class CDimensionReductionPreprocessor: public CDensePreprocessor<float64_t>
*/
CKernel* get_kernel() const;

protected:
private:

/** default init */
void init();
void initialize();

protected:

Expand Down
4 changes: 2 additions & 2 deletions src/shogun/preprocessor/FeatureSelection.cpp
Expand Up @@ -42,11 +42,11 @@ namespace shogun
template <class ST>
CFeatureSelection<ST>::CFeatureSelection() : CPreprocessor()
{
init();
initialize();
}

template <class ST>
void CFeatureSelection<ST>::init()
void CFeatureSelection<ST>::initialize()
{
SG_ADD(&m_target_dim, "target_dim", "target dimension",
MS_NOT_AVAILABLE);
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/preprocessor/FeatureSelection.h
Expand Up @@ -320,7 +320,7 @@ template <class ST> class CFeatureSelection : public CPreprocessor

private:
/** Register params and initialize with default values */
void init();
void initialize();

};

Expand Down
6 changes: 3 additions & 3 deletions src/shogun/preprocessor/FisherLDA.cpp
Expand Up @@ -52,12 +52,12 @@ using namespace shogun;
CFisherLDA::CFisherLDA (EFLDAMethod method, float64_t thresh):
CDimensionReductionPreprocessor()
{
init();
initialize();
m_method=method;
m_threshold=thresh;
}

void CFisherLDA::init()
void CFisherLDA::initialize()
{
m_method=AUTO_FLDA;
m_threshold=0.01;
Expand All @@ -77,7 +77,7 @@ CFisherLDA::~CFisherLDA()
{
}

bool CFisherLDA::init (CFeatures *features, CLabels *labels, int32_t num_dimensions)
bool CFisherLDA::fit(CFeatures *features, CLabels *labels, int32_t num_dimensions)
{
REQUIRE(features, "Features are not provided!\n")

Expand Down
9 changes: 6 additions & 3 deletions src/shogun/preprocessor/FisherLDA.h
Expand Up @@ -104,13 +104,13 @@ class CFisherLDA: public CDimensionReductionPreprocessor
/** destructor */
virtual ~CFisherLDA();

/** initialize preprocessor from features and corresponding labels
/** fits fisher lda transformation using features and corresponding labels
* @param features using which the transformation matrix will be formed
* @param labels of the given features which will be used here to find
* the transformation matrix unlike PCA where it is not needed.
* @param dimensions number of dimensions to retain
*/
virtual bool init(CFeatures* features, CLabels* labels, int32_t num_dimensions=0);
virtual bool fit(CFeatures* features, CLabels* labels, int32_t num_dimensions=0);

/** cleanup */
virtual void cleanup();
Expand Down Expand Up @@ -145,10 +145,13 @@ class CFisherLDA: public CDimensionReductionPreprocessor

/** @return a type of preprocessor */
virtual EPreprocessorType get_type() const {return P_FISHERLDA;}

private:

void initialize();

protected:

void init();

/** transformation matrix */
SGMatrix<float64_t> m_transformation_matrix;
Expand Down
4 changes: 2 additions & 2 deletions src/shogun/preprocessor/KernelDependenceMaximization.cpp
Expand Up @@ -37,10 +37,10 @@ using namespace shogun;
CKernelDependenceMaximization::CKernelDependenceMaximization()
: CDependenceMaximization()
{
init();
initialize();
}

void CKernelDependenceMaximization::init()
void CKernelDependenceMaximization::initialize()
{
SG_ADD((CSGObject**)&m_kernel_features, "kernel_features",
"the kernel to be used for features", MS_NOT_AVAILABLE);
Expand Down
2 changes: 1 addition & 1 deletion src/shogun/preprocessor/KernelDependenceMaximization.h
Expand Up @@ -97,7 +97,7 @@ class CKernelDependenceMaximization : public CDependenceMaximization

private:
/** Register params and initialize with default values */
void init();
void initialize();

};

Expand Down
4 changes: 2 additions & 2 deletions src/shogun/regression/LeastAngleRegression.cpp
Expand Up @@ -58,14 +58,14 @@ static void plane_rot(float64_t x0, float64_t x1,
}
}

static void find_max_abs(const vector<float64_t> &vec, const vector<bool> &ignore,
static void find_max_abs(const vector<float64_t> &vec, const vector<bool> &ignore_mask,
int32_t &imax, float64_t& vmax)
{
imax = -1;
vmax = -1;
for (uint32_t i=0; i < vec.size(); ++i)
{
if (ignore[i])
if (ignore_mask[i])
continue;

if (CMath::abs(vec[i]) > vmax)
Expand Down
6 changes: 3 additions & 3 deletions tests/unit/preprocessor/FisherLDA_unittest.cc
Expand Up @@ -136,7 +136,7 @@ TEST_F(FLDATest, CANVAR_FLDA_Unit_test)
// comparing outputs against BRMLtoolbox MATLAB "CannonVar.m" implementation
// http://web4.cs.ucl.ac.uk/staff/D.Barber/pmwiki/pmwiki.php?n=Brml.Software
CFisherLDA fisherlda(CANVAR_FLDA);
fisherlda.init(dense_feat, labels, 1);
fisherlda.fit(dense_feat, labels, 1);
SGMatrix<float64_t> y=fisherlda.apply_to_feature_matrix(dense_feat);

float64_t epsilon=0.00000000001;
Expand Down Expand Up @@ -174,7 +174,7 @@ TEST_F(FLDATest, CLASSIC_FLDA_Unit_test)
SG_REF(labels);

CFisherLDA fisherlda(CLASSIC_FLDA);
fisherlda.init(dense_feat, labels, 1);
fisherlda.fit(dense_feat, labels, 1);
SGMatrix<float64_t> y=fisherlda.apply_to_feature_matrix(dense_feat);

float64_t epsilon=0.00000000001;
Expand Down Expand Up @@ -259,7 +259,7 @@ TEST(FLDATesti, CANVAR_FLDA_for_D_greater_than_N )
SG_REF(dense_feat);

CFisherLDA fisherlda(CANVAR_FLDA);
fisherlda.init(dense_feat, labels, 1);
fisherlda.fit(dense_feat, labels, 1);
SGMatrix<float64_t> transformy=fisherlda.get_transformation_matrix();

// comparing eigenvectors from the transformation_matrix with that from the
Expand Down

0 comments on commit e4a7140

Please sign in to comment.