Replace CMath::sqrt with std::sqrt (shogun-toolbox#4218)
grig-guz authored and ktiefe committed Jul 26, 2019
1 parent 6a18f8c commit dfb2027
Showing 87 changed files with 574 additions and 500 deletions.
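
Every hunk below applies the same mechanical substitution: calls to shogun's CMath::sqrt wrapper are rewritten to use std::sqrt from <cmath>. A minimal standalone sketch of the pattern (illustrative only; the variable name and value are not taken from the diff):

#include <cmath>   // provides std::sqrt, used directly instead of CMath::sqrt
#include <cstdio>

int main()
{
    double rbf_width = 2.0;
    // before: float64_t w = CMath::sqrt(rbf_width / 2.0);
    double w = std::sqrt(rbf_width / 2.0); // after: call std::sqrt directly
    std::printf("sqrt(%g) = %g\n", rbf_width / 2.0, w);
    return 0;
}
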
2 changes: 1 addition & 1 deletion examples/undocumented/libshogun/mathematics_lapack.cpp
@@ -37,7 +37,7 @@ void test_ev()
SGMatrix<float64_t>::display_matrix(A.matrix, A.num_rows, A.num_cols, "A");
SGVector<float64_t>::display_vector(ev.vector, ev.vlen, "eigenvalues");

-float64_t sqrt22=CMath::sqrt(2.0)/2.0;
+float64_t sqrt22 = std::sqrt(2.0) / 2.0;
float64_t eps=10E-16;

/* check for correct eigenvectors */
@@ -191,8 +191,9 @@ int main()

// ************************************************************
// set parameters of the preprocessor
-// ******************************** !!!!!!!!!!!!!!!!! CMath::sqrt(rbf_width/2.0)
-rfgauss->set_kernelwidth( CMath::sqrt(rbf_width/2.0) );
+// ******************************** !!!!!!!!!!!!!!!!!
+// std::sqrt(rbf_width/2.0)
+rfgauss->set_kernelwidth(std::sqrt(rbf_width / 2.0));
rfgauss->set_dim_input_space(dims);
rfgauss->set_dim_feature_space(randomfourier_featurespace_dim);

4 changes: 2 additions & 2 deletions src/interfaces/swig/Mathematics.i
@@ -41,15 +41,15 @@ namespace shogun
#ifdef USE_FLOAT32
%rename(normal_random_float32) CMath::normal_random(float32_t,float32_t);
%rename(random_float32) CMath::random(float32_t,float32_t);
-%rename(sqrt_float32) CMath::sqrt(float32_t);
+%rename(sqrt_float32) std::sqrt(float32_t);
#endif

#ifdef USE_FLOAT64
%rename(normal_random_float64) CMath::normal_random(float64_t,float64_t);
%rename(pow_float64_int32) CMath::pow(float64_t,int32_t);
%rename(pow_float64_float64) CMath::pow(float64_t,float64_t);
%rename(random_float64) CMath::random(float64_t,float64_t);
-%rename(sqrt_float64) CMath::sqrt(float64_t);
+%rename(sqrt_float64) std::sqrt(float64_t);
}
#endif

21 changes: 11 additions & 10 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -621,7 +621,7 @@ float64_t CMKL::compute_optimal_betas_elasticnet(
{
if (sumw[p] >= 0.0 && old_beta[p] >= 0.0 )
{
-beta[p] = CMath::sqrt(sumw[p]*old_beta[p]*old_beta[p]);
+beta[p] = std::sqrt(sumw[p] * old_beta[p] * old_beta[p]);
Z += beta[p];
}
else
@@ -640,7 +640,7 @@ float64_t CMKL::compute_optimal_betas_elasticnet(
preR = 0.0;
for( p=0; p<num_kernels; ++p )
preR += CMath::pow( beta_local[p] - beta[p], 2.0 );
-const float64_t R = CMath::sqrt( preR ) * epsRegul;
+const float64_t R = std::sqrt(preR) * epsRegul;
if( !( R >= 0 ) )
{
SG_PRINT("MKL-direct: p = %.3f\n", 1.0 )
Expand All @@ -654,7 +654,7 @@ float64_t CMKL::compute_optimal_betas_elasticnet(
}
SG_PRINT("MKL-direct: preR = %e\n", preR )
SG_PRINT("MKL-direct: preR/p = %e\n", preR )
-SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", CMath::sqrt(preR) )
+SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", std::sqrt(preR))
SG_PRINT("MKL-direct: R = %e\n", R )
SG_ERROR("Assertion R >= 0 failed!\n" )
}
@@ -701,7 +701,7 @@ void CMKL::elasticnet_dual(float64_t *ff, float64_t *gg, float64_t *hh,
float64_t gam = 1.0-lambda;
for (int32_t i=0; i<len;i++)
{
-if (nm[i]>=CMath::sqrt(2*del*gam))
+if (nm[i] >= std::sqrt(2 * del * gam))
I.push_back(i);
}
int32_t M=I.size();
@@ -713,9 +713,10 @@
{
float64_t nmit = nm[*it];

-*ff += del*gam*CMath::pow(nmit/CMath::sqrt(2*del*gam)-1,2)/lambda;
-*gg += CMath::sqrt(gam/(2*del))*nmit;
-*hh += -0.5*CMath::sqrt(gam/(2*CMath::pow(del,3)))*nmit;
+*ff += del * gam * CMath::pow(nmit / std::sqrt(2 * del * gam) - 1, 2) /
+    lambda;
+*gg += std::sqrt(gam / (2 * del)) * nmit;
+*hh += -0.5 * std::sqrt(gam / (2 * CMath::pow(del, 3))) * nmit;
}
}

@@ -758,7 +759,7 @@ float64_t CMKL::compute_elasticnet_dual_objective()
SG_UNREF(kn);
}
// initial delta
-del = del/CMath::sqrt(2*(1-ent_lambda));
+del = del / std::sqrt(2 * (1 - ent_lambda));

// Newton's method to optimize delta
k=0;
@@ -876,7 +877,7 @@ float64_t CMKL::compute_optimal_betas_directly(
for( p=0; p<num_kernels; ++p )
preR += CMath::sq( old_beta[p] - beta[p]);

-const float64_t R = CMath::sqrt( preR / mkl_norm ) * epsRegul;
+const float64_t R = std::sqrt(preR / mkl_norm) * epsRegul;
if( !( R >= 0 ) )
{
SG_PRINT("MKL-direct: p = %.3f\n", mkl_norm )
Expand All @@ -890,7 +891,7 @@ float64_t CMKL::compute_optimal_betas_directly(
}
SG_PRINT("MKL-direct: preR = %e\n", preR )
SG_PRINT("MKL-direct: preR/p = %e\n", preR/mkl_norm )
-SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", CMath::sqrt(preR/mkl_norm) )
+SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", std::sqrt(preR / mkl_norm))
SG_PRINT("MKL-direct: R = %e\n", R )
SG_ERROR("Assertion R >= 0 failed!\n" )
}
4 changes: 2 additions & 2 deletions src/shogun/classifier/mkl/MKLMulticlassGradient.cpp
@@ -404,7 +404,7 @@ finalbeta=oldweights;
for( int32_t p=0; p<num_kernels; ++p )
preR += CMath::pow( oldweights[p] - finalbeta[p], 2.0 );

-const float64_t R = CMath::sqrt( preR / pnorm ) * epsRegul;
+const float64_t R = std::sqrt(preR / pnorm) * epsRegul;
if( !( R >= 0 ) )
{
SG_PRINT("MKL-direct: p = %.3f\n", pnorm )
Expand All @@ -418,7 +418,7 @@ finalbeta=oldweights;
}
SG_PRINT("MKL-direct: preR = %e\n", preR )
SG_PRINT("MKL-direct: preR/p = %e\n", preR/pnorm )
-SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", CMath::sqrt(preR/pnorm) )
+SG_PRINT("MKL-direct: sqrt(preR/p) = %e\n", std::sqrt(preR / pnorm))
SG_PRINT("MKL-direct: R = %e\n", R )
SG_ERROR("Assertion R >= 0 failed!\n" )
}
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/WDSVMOcas.h
@@ -206,7 +206,7 @@ class CWDSVMOcas : public CMachine
for (int32_t i=0; i<degree; i++)
normalization_const+=(string_length-i)*wd_weights[i]*wd_weights[i];

-normalization_const=CMath::sqrt(normalization_const);
+normalization_const = std::sqrt(normalization_const);
SG_DEBUG("normalization_const:%f\n", normalization_const)
}

5 changes: 3 additions & 2 deletions src/shogun/clustering/GMM.cpp
@@ -397,8 +397,9 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min
float64_t alpha1=coefficients.vector[1]/(coefficients.vector[1]+coefficients.vector[2]);
float64_t alpha2=coefficients.vector[2]/(coefficients.vector[1]+coefficients.vector[2]);

-float64_t noise_mag=SGVector<float64_t>::twonorm(components[0]->get_mean().vector, dim_n)*0.1/
-    CMath::sqrt((float64_t)dim_n);
+float64_t noise_mag =
+    SGVector<float64_t>::twonorm(components[0]->get_mean().vector, dim_n) *
+    0.1 / std::sqrt((float64_t)dim_n);

SGVector<float64_t> mean(dim_n);
linalg::add(components[1]->get_mean(), components[2]->get_mean(), mean, alpha1, alpha2);
2 changes: 1 addition & 1 deletion src/shogun/clustering/KMeansBase.cpp
@@ -126,7 +126,7 @@ void CKMeansBase::compute_cluster_variances()
}
}

-R.vector[i]=(0.7*CMath::sqrt(rmin1)+0.3*CMath::sqrt(rmin2));
+R.vector[i] = (0.7 * std::sqrt(rmin1) + 0.3 * std::sqrt(rmin2));
}
}

2 changes: 1 addition & 1 deletion src/shogun/converter/HashedDocConverter.cpp
@@ -169,7 +169,7 @@ SGSparseVector<float64_t> CHashedDocConverter::apply(SGVector<char> document)
/** Normalizing vector */
if (should_normalize)
{
-float64_t norm_const = CMath::sqrt((float64_t) document.size());
+float64_t norm_const = std::sqrt((float64_t)document.size());
for (index_t i=0; i<sparse_doc_rep.num_feat_entries; i++)
sparse_doc_rep.features[i].entry /= norm_const;
}
2 changes: 1 addition & 1 deletion src/shogun/converter/ica/FastICA.cpp
@@ -105,7 +105,7 @@ CFeatures* CFastICA::apply(CFeatures* features)
// see Hyvarinen (13.6) p.267 Here WX is white and data
// in X has been projected onto a subspace by PCA
WX = K * SPX;
-WX *= CMath::sqrt((float64_t)p);
+WX *= std::sqrt((float64_t)p);
}
else
{
2 changes: 1 addition & 1 deletion src/shogun/distance/AttenuatedEuclideanDistance.cpp
@@ -59,7 +59,7 @@ float64_t CAttenuatedEuclideanDistance::compute(int32_t idx_a, int32_t idx_b)
if (disable_sqrt)
return result;

-return CMath::sqrt(result);
+return std::sqrt(result);
}

void CAttenuatedEuclideanDistance::init()
2 changes: 1 addition & 1 deletion src/shogun/distance/CustomDistance.h
@@ -167,7 +167,7 @@ class CCustomDistance: public CDistance
ASSERT(dm)
ASSERT(len>0)

-int64_t cols = (int64_t) floor(-0.5 + CMath::sqrt(0.25+2*len));
+int64_t cols = (int64_t)floor(-0.5 + std::sqrt(0.25 + 2 * len));

int64_t int32_max=2147483647;

3 changes: 2 additions & 1 deletion src/shogun/distance/Distance.h
@@ -173,7 +173,8 @@ class CDistance : public CSGObject
int32_t i_start;

if (symmetric)
-i_start=(int32_t) CMath::floor(n-CMath::sqrt(CMath::sq((float64_t) n)-offs));
+i_start = (int32_t)CMath::floor(
+    n - std::sqrt(CMath::sq((float64_t)n) - offs));
else
i_start=(int32_t) (offs/int64_t(n));

4 changes: 2 additions & 2 deletions src/shogun/distance/EuclideanDistance.cpp
@@ -73,7 +73,7 @@ float64_t CEuclideanDistance::compute(int32_t idx_a, int32_t idx_b)
result=m_lhs_squared_norms[idx_a]+m_rhs_squared_norms[idx_b]-2*result;
if (disable_sqrt)
return result;
-return CMath::sqrt(result);
+return std::sqrt(result);
}

void CEuclideanDistance::precompute_lhs()
@@ -169,7 +169,7 @@ float64_t CEuclideanDistance::distance_upper_bounded(int32_t idx_a, int32_t idx_
}

if (!disable_sqrt)
-result=CMath::sqrt(result);
+result = std::sqrt(result);

return result;
}
2 changes: 1 addition & 1 deletion src/shogun/distance/GeodesicMetric.cpp
@@ -72,7 +72,7 @@ float64_t CGeodesicMetric::compute(int32_t idx_a, int32_t idx_b)
if(s==0 || nx==0 || ny==0)
return 0;

-d/=CMath::sqrt(nx*ny);
+d /= std::sqrt(nx * ny);

// can only happen due to numerical problems
if (CMath::abs(d)>1.0)
2 changes: 1 addition & 1 deletion src/shogun/distance/MahalanobisDistance.cpp
@@ -102,7 +102,7 @@ float64_t CMahalanobisDistance::compute(int32_t idx_a, int32_t idx_b)
if (disable_sqrt)
return result;
else
-return CMath::sqrt(result);
+return std::sqrt(result);
}

void CMahalanobisDistance::init()
2 changes: 1 addition & 1 deletion src/shogun/distance/SparseEuclideanDistance.cpp
@@ -67,7 +67,7 @@ float64_t CSparseEuclideanDistance::compute(int32_t idx_a, int32_t idx_b)
(CSparseFeatures<float64_t>*) lhs, sq_lhs, idx_a,
(CSparseFeatures<float64_t>*) rhs, sq_rhs, idx_b);

-return CMath::sqrt(result);
+return std::sqrt(result);
}

void CSparseEuclideanDistance::init()
4 changes: 2 additions & 2 deletions src/shogun/distributions/Gaussian.cpp
@@ -391,12 +391,12 @@ SGVector<float64_t> CGaussian::sample()
case FULL:
case DIAG:
for (int32_t i = 0; i < m_mean.vlen; i++)
-r_matrix(i, i) = CMath::sqrt(m_d.vector[i]);
+r_matrix(i, i) = std::sqrt(m_d.vector[i]);

break;
case SPHERICAL:
for (int32_t i = 0; i < m_mean.vlen; i++)
-r_matrix(i, i) = CMath::sqrt(m_d.vector[0]);
+r_matrix(i, i) = std::sqrt(m_d.vector[0]);

break;
}
2 changes: 1 addition & 1 deletion src/shogun/distributions/PositionalPWM.cpp
@@ -83,7 +83,7 @@ float64_t CPositionalPWM::get_log_likelihood_example(int32_t num_example)
float64_t CPositionalPWM::get_log_likelihood_window(uint8_t* window, int32_t len, float64_t pos)
{
ASSERT(m_pwm.num_cols == len)
-float64_t score = std::log(1 / (m_sigma * CMath::sqrt(2 * M_PI))) -
+float64_t score = std::log(1 / (m_sigma * std::sqrt(2 * M_PI))) -
CMath::sq(pos - m_mean) / (2 * CMath::sq(m_sigma));

for (int32_t i=0; i<m_pwm.num_cols; i++)
4 changes: 3 additions & 1 deletion src/shogun/evaluation/ContingencyTableEvaluation.h
@@ -161,7 +161,9 @@ class CContingencyTableEvaluation: public CBinaryClassEvaluation
if (!m_computed)
SG_ERROR("Uninitialized, please call evaluate first")

-return (m_TP*m_TN-m_FP*m_FN)/CMath::sqrt((m_TP+m_FP)*(m_TP+m_FN)*(m_TN+m_FP)*(m_TN+m_FN));
+return (m_TP * m_TN - m_FP * m_FN) / std::sqrt(
+    (m_TP + m_FP) * (m_TP + m_FN) *
+    (m_TN + m_FP) * (m_TN + m_FN));
};

/** recall
2 changes: 1 addition & 1 deletion src/shogun/evaluation/MeanSquaredLogError.cpp
@@ -37,5 +37,5 @@ float64_t CMeanSquaredLogError::evaluate(CLabels* predicted, CLabels* ground_tru
msle+=CMath::sq(a-b);
}
msle /= length;
-return CMath::sqrt(msle);
+return std::sqrt(msle);
}
6 changes: 3 additions & 3 deletions src/shogun/features/BinnedDotFeatures.cpp
@@ -109,7 +109,7 @@ float64_t CBinnedDotFeatures::dot(int32_t vec_idx1, CDotFeatures* df, int32_t ve
((CBinnedDotFeatures*) df)->m_features->free_feature_vector(vec2, vec_idx2);

if (m_fill && m_norm_one && sum1!=0 && sum2!=0)
-result/=CMath::sqrt(sum1*sum2);
+result /= std::sqrt(sum1 * sum2);

return result;

@@ -154,7 +154,7 @@ float64_t CBinnedDotFeatures::dense_dot(int32_t vec_idx1, const float64_t* vec2,
m_features->free_feature_vector(vec1, vec_idx1);

if (m_fill && m_norm_one && sum!=0)
-result/=CMath::sqrt(sum);
+result /= std::sqrt(sum);

return result;
}
@@ -182,7 +182,7 @@ void CBinnedDotFeatures::add_to_dense_vec(float64_t alpha, int32_t vec_idx1, flo
if (alpha_correction==0.0)
return;

-alpha/=CMath::sqrt(alpha_correction);
+alpha /= std::sqrt(alpha_correction);
}

for (int32_t i=0; i<m_bins.num_cols; i++)
4 changes: 2 additions & 2 deletions src/shogun/features/DataGenerator.cpp
@@ -1,7 +1,7 @@
/*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
-* Authors: Heiko Strathmann, Viktor Gal, Weijie Lin, Sergey Lisitsyn,
+* Authors: Heiko Strathmann, Viktor Gal, Weijie Lin, Sergey Lisitsyn,
* Björn Esser, Soeren Sonnenburg, Evangelos Anagnostopoulos
*/

@@ -32,7 +32,7 @@ SGMatrix<float64_t> CDataGenerator::generate_checkboard_data(int32_t num_classes
{
int32_t points_per_class = num_points / num_classes;

-int32_t grid_size = (int32_t)std::ceil(CMath::sqrt((float64_t)num_classes));
+int32_t grid_size = (int32_t)std::ceil(std::sqrt((float64_t)num_classes));
float64_t cell_size = (float64_t ) 1 / grid_size;
SGVector<float64_t> grid_idx(dim);
for (index_t i=0; i<dim; i++)
2 changes: 1 addition & 1 deletion src/shogun/features/ExplicitSpecFeatures.cpp
@@ -127,7 +127,7 @@ void CExplicitSpecFeatures::obtain_kmer_spectrum(CStringFeatures<uint16_t>* str)
for (int32_t j=0; j<spec_size; j++)
n+=CMath::sq(k_spectrum[i][j]);

-n=CMath::sqrt(n);
+n = std::sqrt(n);

for (int32_t j=0; j<spec_size; j++)
k_spectrum[i][j]/=n;
6 changes: 3 additions & 3 deletions src/shogun/features/ImplicitWeightedSpecFeatures.cpp
@@ -50,7 +50,7 @@ void CImplicitWeightedSpecFeatures::compute_normalization_const()
float64_t* factors=SG_MALLOC(float64_t, num_strings);

for (int32_t i=0; i<num_strings; i++)
-factors[i]=1.0/CMath::sqrt(dot(i, this, i));
+factors[i] = 1.0 / std::sqrt(dot(i, this, i));

normalization_factors=factors;
//CMath::display_vector(normalization_factors, num_strings, "n");
@@ -72,7 +72,7 @@ bool CImplicitWeightedSpecFeatures::set_wd_weights()
sum+=spec_weights[i];
}
for (i=0; i<degree; i++)
-spec_weights[i]=CMath::sqrt(spec_weights[i]/sum);
+spec_weights[i] = std::sqrt(spec_weights[i] / sum);

return spec_weights!=NULL;
}
@@ -84,7 +84,7 @@ bool CImplicitWeightedSpecFeatures::set_weights(float64_t* w, int32_t d)
SG_FREE(spec_weights);
spec_weights=SG_MALLOC(float64_t, degree);
for (int32_t i=0; i<degree; i++)
-spec_weights[i]=CMath::sqrt(w[i]);
+spec_weights[i] = std::sqrt(w[i]);
return true;
}

2 changes: 1 addition & 1 deletion src/shogun/features/PolyFeatures.cpp
@@ -203,7 +203,7 @@ void CPolyFeatures::store_normalization_values()
m_normalization_values=SG_MALLOC(float32_t, num_vec);
for (int i=0; i<num_vec; i++)
{
-float64_t tmp = CMath::sqrt(dot(i, this,i));
+float64_t tmp = std::sqrt(dot(i, this, i));
if (tmp==0)
// trap division by zero
m_normalization_values[i]=1;
