drop CMath log #4189

Merged
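This PR mechanically replaces Shogun's CMath::log wrapper with std::log from <cmath> across the distributions, mathematics, and neuralnets modules, reflowing the touched expressions to clang-format style along the way. A minimal before/after sketch of the pattern (standalone snippet, not code from this diff; CMath::log is assumed to be a thin wrapper over the C library's log):

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    double w = 0.25;
    // old style: CMath::log(w), Shogun's static wrapper
    // new style: std::log(w), the overload-aware standard function
    std::printf("log(%g) = %g\n", w, std::log(w));
    return 0;
}
```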
3 changes: 2 additions & 1 deletion src/shogun/distributions/EMMixtureModel.cpp
@@ -52,7 +52,8 @@ float64_t CEMMixtureModel::expectation_step()
 	for (int32_t j=0;j<data.alpha.num_cols;j++)
 	{
 		CDistribution* jth_component=data.components->get_element(j)->as<CDistribution>();
-		alpha_ij[j]=CMath::log(data.weights[j])+jth_component->get_log_likelihood_example(i);
+		alpha_ij[j] = std::log(data.weights[j]) +
+		              jth_component->get_log_likelihood_example(i);
 		SG_UNREF(jth_component);
 	};

12 changes: 6 additions & 6 deletions src/shogun/distributions/Gaussian.cpp
@@ -43,17 +43,17 @@ CGaussian::CGaussian(

 void CGaussian::init()
 {
-	m_constant=CMath::log(2*M_PI)*m_mean.vlen;
+	m_constant = std::log(2 * M_PI) * m_mean.vlen;
 	switch (m_cov_type)
 	{
 		case FULL:
 		case DIAG:
 			for (const auto& v: m_d)
-				m_constant+=CMath::log(v);
-			break;
-		case SPHERICAL:
-			m_constant+=m_mean.vlen*CMath::log(m_d.vector[0]);
-			break;
+				m_constant += std::log(v);
+			break;
+		case SPHERICAL:
+			m_constant += m_mean.vlen * std::log(m_d.vector[0]);
+			break;
 	}
 }

3 changes: 1 addition & 2 deletions src/shogun/distributions/Histogram.cpp
@@ -128,7 +128,7 @@ float64_t CHistogram::get_log_derivative(int32_t num_param, int32_t num_example)
 	free_feature_vector(vector, num_example, free_vec);

 	if (num_occurences>0)
-		deriv+=CMath::log((float64_t) num_occurences)-hist[num_param];
+		deriv += std::log((float64_t)num_occurences) - hist[num_param];
 	else
 		deriv=-CMath::INFTY;

@@ -157,4 +157,3 @@ SGVector<float64_t> CHistogram::get_histogram()
 {
 	return SGVector<float64_t>(hist,get_num_model_parameters(),false);
 }
-
3 changes: 2 additions & 1 deletion src/shogun/distributions/KernelDensity.h
@@ -142,7 +142,8 @@ public :
 		{
 			case K_GAUSSIAN:
 			{
-				return -0.5*dim* CMath::log(2*CMath::PI)-dim*CMath::log(width);
+				return -0.5 * dim * std::log(2 * CMath::PI) -
+				       dim * std::log(width);
 				break;
 			}
 			default:
8 changes: 5 additions & 3 deletions src/shogun/distributions/MixtureModel.cpp
@@ -105,7 +105,7 @@ float64_t CMixtureModel::get_log_model_parameter(int32_t num_param)
 	REQUIRE(num_param==1,"number of parameters in mixture model is 1"
 		" (i.e. number of components). num_components should be 1. %d supplied\n",num_param)

-	return CMath::log(static_cast<float64_t>(get_num_components()));
+	return std::log(static_cast<float64_t>(get_num_components()));
 }

 float64_t CMixtureModel::get_log_derivative(int32_t num_param, int32_t num_example)
@@ -124,7 +124,9 @@ float64_t CMixtureModel::get_log_likelihood_example(int32_t num_example)
 	for (int32_t i=0;i<m_components->get_num_elements();i++)
 	{
 		CDistribution* ith_comp=m_components->get_element(i)->as<CDistribution>();
-		log_likelihood_component[i]=ith_comp->get_log_likelihood_example(num_example)+CMath::log(m_weights[i]);
+		log_likelihood_component[i] =
+			ith_comp->get_log_likelihood_example(num_example) +
+			std::log(m_weights[i]);

 		SG_UNREF(ith_comp);
 	}
@@ -214,4 +216,4 @@ void CMixtureModel::init()
 	SG_ADD(&m_weights,"m_weights","weights of components",MS_NOT_AVAILABLE);
 	SG_ADD(&m_conv_tol,"m_conv_tol","convergence tolerance",MS_NOT_AVAILABLE);
 	SG_ADD(&m_max_iters,"m_max_iters","max number of iterations",MS_NOT_AVAILABLE);
-}
+}
8 changes: 4 additions & 4 deletions src/shogun/distributions/PositionalPWM.cpp
@@ -45,9 +45,9 @@ float64_t CPositionalPWM::get_log_model_parameter(int32_t num_param)
 		return m_pwm[num_param];
 	}
 	else if (num_param<m_pwm.num_rows*m_pwm.num_cols+1)
-		return CMath::log(m_sigma);
+		return std::log(m_sigma);
 	else
-		return CMath::log(m_mean);
+		return std::log(m_mean);
 }

 float64_t CPositionalPWM::get_log_derivative(int32_t num_param, int32_t num_example)
@@ -83,8 +83,8 @@ float64_t CPositionalPWM::get_log_likelihood_example(int32_t num_example)
 float64_t CPositionalPWM::get_log_likelihood_window(uint8_t* window, int32_t len, float64_t pos)
 {
 	ASSERT(m_pwm.num_cols == len)
-	float64_t score = CMath::log(1/(m_sigma*CMath::sqrt(2*M_PI))) -
-		CMath::sq(pos-m_mean)/(2*CMath::sq(m_sigma));
+	float64_t score = std::log(1 / (m_sigma * CMath::sqrt(2 * M_PI))) -
+	                  CMath::sq(pos - m_mean) / (2 * CMath::sq(m_sigma));

 	for (int32_t i=0; i<m_pwm.num_cols; i++)
 		score+=m_pwm[m_pwm.num_rows*i+window[i]];
3 changes: 1 addition & 2 deletions src/shogun/distributions/classical/GaussianDistribution.cpp
@@ -114,7 +114,7 @@ SGVector<float64_t> CGaussianDistribution::log_pdf_multiple(SGMatrix<float64_t>
 	/* for easier to read code */
 	index_t num_samples=samples.num_cols;

-	float64_t const_part=-0.5 * m_dimension * CMath::log(2 * CMath::PI);
+	float64_t const_part = -0.5 * m_dimension * std::log(2 * CMath::PI);

 	/* determinant is product of diagonal elements of triangular matrix */
 	float64_t log_det_part=0;
@@ -168,4 +168,3 @@ void CGaussianDistribution::init()
 	SG_ADD(&m_L, "L", "Lower factor of covariance matrix, "
 		"depending on the factorization type.", MS_NOT_AVAILABLE);
 }
-
4 changes: 2 additions & 2 deletions src/shogun/distributions/classical/GaussianDistribution.h
@@ -124,8 +124,8 @@ class CGaussianDistribution: public CProbabilityDistribution
 	static float64_t univariate_log_pdf(float64_t sample, float64_t mu = 0.0, float64_t sigma2 = 1.0)
 	{
 		REQUIRE(sigma2 > 0, "Variance should be positive\n");
-		return -0.5 * (CMath::pow(sample - mu, 2) / sigma2
-			+ CMath::log(2.0 * CMath::PI) + CMath::log(sigma2));
+		return -0.5 * (CMath::pow(sample - mu, 2) / sigma2 +
+		               std::log(2.0 * CMath::PI) + std::log(sigma2));
 	}
 private:

13 changes: 7 additions & 6 deletions src/shogun/mathematics/Math.h
@@ -1,10 +1,10 @@
 /*
  * This software is distributed under BSD 3-clause license (see LICENSE file).
  *
- * Authors: Soeren Sonnenburg, Heiko Strathmann, Viktor Gal, Fernando Iglesias, 
- *          Sergey Lisitsyn, Sanuj Sharma, Soumyajit De, Shashwat Lal Das, 
- *          Thoralf Klein, Wu Lin, Chiyuan Zhang, Harshit Syal, Evan Shelhamer, 
- *          Philippe Tillet, Björn Esser, Yuyu Zhang, Abhinav Agarwalla, 
+ * Authors: Soeren Sonnenburg, Heiko Strathmann, Viktor Gal, Fernando Iglesias,
+ *          Sergey Lisitsyn, Sanuj Sharma, Soumyajit De, Shashwat Lal Das,
+ *          Thoralf Klein, Wu Lin, Chiyuan Zhang, Harshit Syal, Evan Shelhamer,
+ *          Philippe Tillet, Björn Esser, Yuyu Zhang, Abhinav Agarwalla,
  *          Saurabh Goyal
  */

@@ -896,7 +896,8 @@ class CMath : public CSGObject
 	} while ((rand_s == 0) || (rand_s >= 1));

 	// the meat & potatos, and then the mean & standard deviation shifting...
-	ret = static_cast<float32_t>(rand_u*CMath::sqrt(-2.0*CMath::log(rand_s)/rand_s));
+	ret = static_cast<float32_t>(
+		rand_u * CMath::sqrt(-2.0 * std::log(rand_s) / rand_s));
 	ret = std_dev*ret + mean;
 	return ret;
 }
@@ -2107,7 +2108,7 @@ void CMath::qsort_backward_index(T1* output, T2* index, int32_t size)
 template <class T>
 void CMath::nmin(float64_t* output, T* index, int32_t size, int32_t n)
 {
-	if (6*n*size<13*size*CMath::log(size))
+	if (6 * n * size < 13 * size * std::log(size))
 		for (int32_t i=0; i<n; i++)
 			min(&output[i], &index[i], size-i);
 	else
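The middle Math.h hunk sits in what appears to be a polar-method (Marsaglia) Gaussian sampler: it rejects points outside the unit disk and scales by sqrt(-2 log(s) / s). A self-contained sketch of that method, using std::log as the PR now does (function name and RNG choice are illustrative, not Shogun's):

```cpp
#include <cmath>
#include <random>

// Marsaglia polar method: draw (u, v) uniform on [-1, 1]^2, accept when
// s = u^2 + v^2 lies in (0, 1); then u * sqrt(-2 log(s) / s) is N(0, 1).
double polar_gaussian(std::mt19937& gen)
{
    std::uniform_real_distribution<double> unit(-1.0, 1.0);
    double u, v, s;
    do
    {
        u = unit(gen);
        v = unit(gen);
        s = u * u + v * v;
    } while (s == 0.0 || s >= 1.0);
    return u * std::sqrt(-2.0 * std::log(s) / s);
}
```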
8 changes: 4 additions & 4 deletions src/shogun/mathematics/Random.cpp
@@ -1,7 +1,7 @@
 /*
  * This software is distributed under BSD 3-clause license (see LICENSE file).
  *
- * Authors: Viktor Gal, Björn Esser, Thoralf Klein, Heiko Strathmann, 
+ * Authors: Viktor Gal, Björn Esser, Thoralf Klein, Heiko Strathmann,
  *          Soeren Sonnenburg
  */
 #ifdef _WIN32
@@ -309,8 +309,8 @@ float64_t CRandom::sample_tail() const
 	float64_t m_R_reciprocal = 1.0 / m_R;
 	do
 	{
-		x = -CMath::log(random_half_open()) * m_R_reciprocal;
-		y = -CMath::log(random_half_open());
+		x = -std::log(random_half_open()) * m_R_reciprocal;
+		y = -std::log(random_half_open());
 	} while(y+y < x*x);
 	return m_R + x;
 }
@@ -327,7 +327,7 @@ float64_t CRandom::GaussianPdfDenormInv(float64_t y) const
 	// y=0 so it doesn't matter. Remember that a Gaussian effectively has a tail going
 	// off into x == infinity, hence asking what is x when y=0 is an invalid question
 	// in the context of this class.
-	return CMath::sqrt(-2.0 * CMath::log(y));
+	return CMath::sqrt(-2.0 * std::log(y));
 }

 void CRandom::reinit(uint32_t seed)
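sample_tail above looks like Marsaglia's tail algorithm for a ziggurat Gaussian sampler: two exponential draws, x = -log(U1)/R and y = -log(U2), are accepted once 2y >= x^2, yielding a normal variate conditioned to exceed the ziggurat edge R. A standalone sketch under those assumptions (random_half_open is replaced by an illustrative (0, 1] draw so std::log never sees zero):

```cpp
#include <cmath>
#include <random>

// Sample a standard normal conditioned on being greater than R.
double sample_tail(std::mt19937& gen, double R)
{
    std::uniform_real_distribution<double> unit(0.0, 1.0); // [0, 1)
    double x, y;
    do
    {
        // 1.0 - unit(gen) lies in (0, 1], keeping log's argument positive
        x = -std::log(1.0 - unit(gen)) / R;
        y = -std::log(1.0 - unit(gen));
    } while (y + y < x * x);
    return R + x;
}
```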
29 changes: 14 additions & 15 deletions src/shogun/mathematics/Statistics.cpp
@@ -1,8 +1,8 @@
 /*
  * This software is distributed under BSD 3-clause license (see LICENSE file).
  *
- * Authors: Heiko Strathmann, Soumyajit De, Soeren Sonnenburg, Sanuj Sharma, 
- *          Viktor Gal, Roman Votyakov, Wu Lin, Evgeniy Andreev, Weijie Lin, 
+ * Authors: Heiko Strathmann, Soumyajit De, Soeren Sonnenburg, Sanuj Sharma,
+ *          Viktor Gal, Roman Votyakov, Wu Lin, Evgeniy Andreev, Weijie Lin,
  *          Björn Esser, Sergey Lisitsyn
 */

@@ -453,10 +453,10 @@ float64_t CStatistics::lnormal_cdf(float64_t x)
 		return -2.0*f-log_of_2;
 	}
 	else if (x<ERFC_CASE2)
-		return CMath::log(erfc8_weighted_sum(x))-log_of_2-x*x*0.5;
+		return std::log(erfc8_weighted_sum(x)) - log_of_2 - x * x * 0.5;

 	//id3 = ~id2 & ~id1; lp(id3) = log(erfc(-z(id3)/sqrt(2))/2);
-	return CMath::log(normal_cdf(x));
+	return std::log(normal_cdf(x));
 }

 float64_t CStatistics::erfc8_weighted_sum(float64_t x)
@@ -849,16 +849,16 @@ CStatistics::SigmoidParamters CStatistics::fit_sigmoid(
 	/* initial Point and Initial Fun Value */
 	/* result parameters of sigmoid */
 	float64_t a = 0;
-	float64_t b = CMath::log((prior0 + 1.0) / (prior1 + 1.0));
+	float64_t b = std::log((prior0 + 1.0) / (prior1 + 1.0));
 	float64_t fval = 0.0;

 	for (index_t i = 0; i < length; ++i)
 	{
 		float64_t fApB = scores[i] * a + b;
 		if (fApB >= 0)
-			fval += t[i] * fApB + CMath::log(1 + CMath::exp(-fApB));
+			fval += t[i] * fApB + std::log(1 + CMath::exp(-fApB));
 		else
-			fval += (t[i] - 1) * fApB + CMath::log(1 + CMath::exp(fApB));
+			fval += (t[i] - 1) * fApB + std::log(1 + CMath::exp(fApB));
 	}

 	index_t it;
@@ -924,10 +924,9 @@ CStatistics::SigmoidParamters CStatistics::fit_sigmoid(
 		{
 			float64_t fApB = scores[i] * newA + newB;
 			if (fApB >= 0)
-				newf += t[i] * fApB + CMath::log(1 + CMath::exp(-fApB));
+				newf += t[i] * fApB + std::log(1 + CMath::exp(-fApB));
 			else
-				newf +=
-					(t[i] - 1) * fApB + CMath::log(1 + CMath::exp(fApB));
+				newf += (t[i] - 1) * fApB + std::log(1 + CMath::exp(fApB));
 		}

 		/* Check sufficient decrease */
@@ -1018,16 +1017,16 @@ CStatistics::SigmoidParamters CStatistics::fit_sigmoid(SGVector<float64_t> score
 	/* initial Point and Initial Fun Value */
 	/* result parameters of sigmoid */
 	float64_t a=0;
-	float64_t b=CMath::log((prior0+1.0)/(prior1+1.0));
+	float64_t b = std::log((prior0 + 1.0) / (prior1 + 1.0));
 	float64_t fval=0.0;

 	for (index_t i=0; i<length; ++i)
 	{
 		float64_t fApB=scores[i]*a+b;
 		if (fApB>=0)
-			fval+=t[i]*fApB+CMath::log(1+CMath::exp(-fApB));
+			fval += t[i] * fApB + std::log(1 + CMath::exp(-fApB));
 		else
-			fval+=(t[i]-1)*fApB+CMath::log(1+CMath::exp(fApB));
+			fval += (t[i] - 1) * fApB + std::log(1 + CMath::exp(fApB));
 	}

 	index_t it;
@@ -1093,9 +1092,9 @@ CStatistics::SigmoidParamters CStatistics::fit_sigmoid(SGVector<float64_t> score
 		{
 			float64_t fApB=scores[i]*newA+newB;
 			if (fApB>=0)
-				newf+=t[i]*fApB+CMath::log(1+CMath::exp(-fApB));
+				newf += t[i] * fApB + std::log(1 + CMath::exp(-fApB));
 			else
-				newf+=(t[i]-1)*fApB+CMath::log(1+CMath::exp(fApB));
+				newf += (t[i] - 1) * fApB + std::log(1 + CMath::exp(fApB));
 		}

 		/* Check sufficient decrease */
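The fit_sigmoid branches above all compute log(1 + exp(±fApB)) in a way that avoids overflow: when fApB >= 0 the exponent is negated, so exp never blows up. The same identity in isolation, using std::log1p as a slight refinement over the diff's std::log(1 + ...) (the helper name is illustrative, not Shogun API):

```cpp
#include <cmath>

// Numerically stable log(1 + exp(x)): for x >= 0, rewrite as
// x + log(1 + exp(-x)) so the exponential argument is never positive.
double log1pexp(double x)
{
    return x >= 0.0 ? x + std::log1p(std::exp(-x))
                    : std::log1p(std::exp(x));
}
```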
3 changes: 1 addition & 2 deletions src/shogun/neuralnets/DeepBeliefNetwork.cpp
@@ -425,7 +425,7 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params,
 		for (int32_t i=0; i<m_layer_sizes->element(0); i++)
 			sum += CMath::exp(Out(i,j)-max);

-		float64_t normalizer = CMath::log(sum);
+		float64_t normalizer = std::log(sum);
 		for (int32_t k=0; k<m_layer_sizes->element(0); k++)
 			Out(k,j) = CMath::exp(Out(k,j)-max-normalizer);
 	}
@@ -641,4 +641,3 @@ void CDeepBeliefNetwork::init()

 	SG_ADD(&m_sigma, "m_sigma", "Initialization Sigma", MS_NOT_AVAILABLE);
 }
-
6 changes: 3 additions & 3 deletions src/shogun/neuralnets/NeuralSoftmaxLayer.cpp
@@ -64,7 +64,7 @@ void CNeuralSoftmaxLayer::compute_activations(SGVector<float64_t> parameters,
 		{
 			sum += CMath::exp(m_activations[i+j*m_num_neurons]-max);
 		}
-		float64_t normalizer = CMath::log(sum);
+		float64_t normalizer = std::log(sum);
 		for (int32_t k=0; k<m_num_neurons; k++)
 		{
 			m_activations[k+j*m_num_neurons] =
@@ -93,9 +93,9 @@ float64_t CNeuralSoftmaxLayer::compute_error(SGMatrix<float64_t> targets)
 	{
 		// to prevent taking the log of a zero
 		if (m_activations[i]==0)
-			sum += targets[i]*CMath::log(1e-50);
+			sum += targets[i] * std::log(1e-50);
 		else
-			sum += targets[i]*CMath::log(m_activations[i]);
+			sum += targets[i] * std::log(m_activations[i]);
 	}
 	return -1*sum/m_batch_size;
 }
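compute_activations above (like down_step and mean_visible in the neighboring files) normalizes each column with the log-sum-exp trick: subtract the column max before exponentiating, then subtract log(sum) in log space instead of dividing. A free-standing sketch of that normalization (vector-based, not Shogun's SGMatrix layout):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Numerically stable softmax: exp(z_k - max - log(sum_i exp(z_i - max))).
std::vector<double> stable_softmax(const std::vector<double>& z)
{
    const double max = *std::max_element(z.begin(), z.end());
    double sum = 0.0;
    for (double zi : z)
        sum += std::exp(zi - max);
    const double normalizer = std::log(sum);
    std::vector<double> out(z.size());
    for (std::size_t k = 0; k < z.size(); ++k)
        out[k] = std::exp(z[k] - max - normalizer);
    return out;
}
```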
7 changes: 3 additions & 4 deletions src/shogun/neuralnets/RBM.cpp
@@ -296,7 +296,7 @@ float64_t CRBM::free_energy(SGMatrix< float64_t > visible, SGMatrix< float64_t >
 	float64_t wv_term = 0;
 	for (int32_t i=0; i<m_num_hidden; i++)
 		for (int32_t j=0; j<m_batch_size; j++)
-			wv_term += CMath::log(1.0+CMath::exp(wv_buffer(i,j)));
+			wv_term += std::log(1.0 + CMath::exp(wv_buffer(i, j)));

 	float64_t F = -1.0*(bv_term+wv_term)/m_batch_size;

@@ -444,7 +444,7 @@ float64_t CRBM::pseudo_likelihood(SGMatrix< float64_t > visible,
 	for (int32_t j=0; j<m_batch_size; j++)
 		visible(indices[j],j) = 1.0-visible(indices[j],j);

-	return m_num_visible*CMath::log(1.0/(1+CMath::exp(f1-f2)));
+	return m_num_visible * std::log(1.0 / (1 + CMath::exp(f1 - f2)));
 }

 void CRBM::mean_hidden(SGMatrix< float64_t > visible, SGMatrix< float64_t > result)
@@ -506,7 +506,7 @@ void CRBM::mean_visible(SGMatrix< float64_t > hidden, SGMatrix< float64_t > resu
 		for (int32_t i=0; i<m_visible_group_sizes->element(k); i++)
 			sum += CMath::exp(result(i+offset,j)-max);

-		float64_t normalizer = CMath::log(sum);
+		float64_t normalizer = std::log(sum);

 		for (int32_t i=0; i<m_visible_group_sizes->element(k); i++)
 			result(i+offset,j) =
@@ -662,4 +662,3 @@ void CRBM::init()
 		"Number of Parameters", MS_NOT_AVAILABLE);
 	SG_ADD(&m_params, "params", "Parameters", MS_NOT_AVAILABLE);
 }
-