STYLE: Prefer double forward slashes for within-method body comments
Prefer double forward slashes for within-method body comment blocks
following the ITK SWG coding style guidelines.

Use a single space between the double forward slashes and the
comment text.

Follow-up to be6b447.
jhlegarreta authored and dzenanz committed Feb 6, 2023
1 parent 1f81ef6 commit 1a296b5
Showing 2 changed files with 83 additions and 102 deletions.
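To illustrate the rule, a minimal sketch of the change in a hypothetical method body (the function names are illustrative, not part of this commit):

// Before: a C-style block comment inside the method body.
void
ExampleBefore()
{
  /**
   * Prepare the output.
   */
}

// After: double forward slashes, with a single space before the text.
void
ExampleAfter()
{
  // Prepare the output.
}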
15 changes: 5 additions & 10 deletions Modules/Core/SpatialObjects/include/itkTubeSpatialObject.hxx
@@ -358,8 +358,7 @@ TubeSpatialObject<TDimension, TTubePointType>::ComputeTangentsAndNormals()
l = l + t[i] * t[i];
}
l = std::sqrt(l);
// if the adjacent points correspond, use the current point and one
// forward point
// If the adjacent points correspond, use the current point and one forward point
if (Math::AlmostEquals(l, 0.0) || std::isnan(l))
{
const PointType & x2 = this->GetPoint(it2)->GetPositionInObjectSpace();
@@ -370,8 +369,7 @@ TubeSpatialObject<TDimension, TTubePointType>::ComputeTangentsAndNormals()
l = l + t[i] * t[i];
}
l = std::sqrt(l);
// if the forward point and the current point correspond, then
// RemoveDuplicatePointsInObjectSpace was not called.
// If the forward point and the current point correspond, then RemoveDuplicatePointsInObjectSpace was not called.
if (Math::AlmostEquals(l, 0.0) || std::isnan(l))
{
std::cerr << "TubeSpatialObject::ComputeTangentAndNormals() : "
@@ -432,8 +430,7 @@ TubeSpatialObject<TDimension, TTubePointType>::ComputeTangentsAndNormals()
CovariantVectorType n1;
if (TDimension == 2)
{
// The normal to the tangent in 2D is the orthogonal direction to the
// tangent.
// The normal to the tangent in 2D is the orthogonal direction to the tangent.
n1[0] = t[1];
n1[1] = -t[0];
if (it1 != 0)
@@ -453,8 +450,7 @@ TubeSpatialObject<TDimension, TTubePointType>::ComputeTangentsAndNormals()
}
else if (TDimension == 3)
{
// The normal to the tangent in 3D is the cross product of adjacent
// tangent directions.
// The normal to the tangent in 3D is the cross product of adjacent tangent directions.
n1[0] = t[1] * t2[2] - t[2] * t2[1];
n1[1] = t[2] * t2[0] - t[0] * t2[2];
n1[2] = t[0] * t2[1] - t[1] * t2[0];
@@ -497,8 +493,7 @@ TubeSpatialObject<TDimension, TTubePointType>::ComputeTangentsAndNormals()
n1 /= l_n1;
}

// The second normal is the cross product of the tangent and the
// first normal
// The second normal is the cross product of the tangent and the first normal
CovariantVectorType n2;
n2[0] = t[1] * n1[2] - t[2] * n1[1];
n2[1] = t[2] * n1[0] - t[0] * n1[2];
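For readers following the math in the hunks above, here is a self-contained sketch of the normal construction those comments describe; the helper names are illustrative, not part of the ITK API.

#include <array>

// 2D: the normal is the tangent rotated by 90 degrees.
std::array<double, 2>
Normal2D(const std::array<double, 2> & t)
{
  return { t[1], -t[0] };
}

// 3D: the first normal n1 is the cross product of adjacent tangents
// (t x t2), and the second normal n2 is the cross product of the
// tangent and the first normal (t x n1).
std::array<double, 3>
Cross(const std::array<double, 3> & a, const std::array<double, 3> & b)
{
  return { a[1] * b[2] - a[2] * b[1],
           a[2] * b[0] - a[0] * b[2],
           a[0] * b[1] - a[1] * b[0] };
}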
170 changes: 78 additions & 92 deletions Modules/Numerics/Optimizers/src/itkSPSAOptimizer.cxx
@@ -90,11 +90,9 @@ SPSAOptimizer::PrintSelf(std::ostream & os, Indent indent) const
SPSAOptimizer::MeasureType
SPSAOptimizer::GetValue(const ParametersType & parameters) const
{
/**
* This method just calls the Superclass' implementation,
* but is necessary because GetValue() is also declared
* in this class.
*/
// This method just calls the Superclass' implementation,
// but is necessary because GetValue() is also declared
// in this class.
return this->Superclass::GetValue(parameters);
}

@@ -105,11 +103,9 @@ SPSAOptimizer::GetValue(const ParametersType & parameters) const
SPSAOptimizer::MeasureType
SPSAOptimizer::GetValue() const
{
/**
* The SPSA does not compute the cost function value at
* the current position during the optimization, so calculate
* it on request:
*/
// The SPSA does not compute the cost function value at
// the current position during the optimization, so calculate
// it on request
return this->GetValue(this->GetCurrentPosition());
}

@@ -126,7 +122,7 @@ SPSAOptimizer::StartOptimization()
itkExceptionMacro(<< "No objective function defined! ");
}

/** The number of parameters: */
// The number of parameters
const unsigned int spaceDimension = m_CostFunction->GetNumberOfParameters();
if (spaceDimension != this->GetInitialPosition().GetSize())
{
@@ -172,7 +168,7 @@ SPSAOptimizer::ResumeOptimization()
break;
}

/** Check convergence */
// Check convergence
if ((m_StateOfConvergence < m_Tolerance) && (m_CurrentIteration >= m_MinimumNumberOfIterations))
{
m_StopCondition = StopConditionSPSAOptimizerEnum::BelowTolerance;
@@ -202,7 +198,7 @@ SPSAOptimizer::AdvanceOneStep()
{
itkDebugMacro("AdvanceOneStep");

/** Maximize of Minimize the function? */
// Maximize or minimize the function?
double direction;
if (this->m_Maximize)
{
@@ -213,17 +209,15 @@ SPSAOptimizer::AdvanceOneStep()
direction = -1.0;
}

/** The number of parameters: */
// The number of parameters
const unsigned int spaceDimension = m_CostFunction->GetNumberOfParameters();

/** Instantiate the newPosition vector and get the current
* parameters */
// Instantiate the newPosition vector and get the current parameters
ParametersType newPosition(spaceDimension);
const ParametersType & currentPosition = this->GetCurrentPosition();

/** Compute the gradient as an average of q estimates, where
* q = m_NumberOfPerturbations
*/
// Compute the gradient as an average of q estimates, where
// q = m_NumberOfPerturbations
try
{
this->ComputeGradient(currentPosition, m_Gradient);
@@ -238,21 +232,19 @@ SPSAOptimizer::AdvanceOneStep()
throw;
}

/** Compute the gain a_k */
// Compute the gain a_k
const double ak = this->Compute_a(m_CurrentIteration);
/** And save it for users that are interested */
// And save it for users that are interested
m_LearningRate = ak;

/**
* Compute the new parameters.
*/
// Compute the new parameters
newPosition = currentPosition + (direction * ak) * m_Gradient;
this->SetCurrentPosition(newPosition);

/** Compute the GradientMagnitude (for checking convergence) */
// Compute the GradientMagnitude (for checking convergence)
m_GradientMagnitude = m_Gradient.magnitude();

/** Update the state of convergence: */
// Update the state of convergence
m_StateOfConvergence += ak * m_GradientMagnitude;
} // end AdvanceOneStep

@@ -306,14 +298,12 @@ SPSAOptimizer::GenerateDelta(const unsigned int spaceDimension)
const ScalesType & invScales = this->GetInverseScales();
for (unsigned int j = 0; j < spaceDimension; ++j)
{
/** Generate randomly -1 or 1. */
// Generate randomly -1 or 1
m_Delta[j] = 2 * Math::Round<int>(this->m_Generator->GetUniformVariate(0.0f, 1.0f)) - 1;

/**
* Take scales into account. The perturbation of a parameter that has a
* large range (and thus is assigned a small scaler) should be higher than
* the perturbation of a parameter that has a small range.
*/
// Take scales into account. The perturbation of a parameter that has a
// large range (and thus is assigned a small scale) should be larger than
// the perturbation of a parameter that has a small range.
m_Delta[j] *= invScales[j];
}
} // end GenerateDelta
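A minimal sketch of the ±1 draw used in GenerateDelta() above, assuming a uniform generator on [0, 1); the helper name is illustrative.

#include <cmath>
#include <random>

// Draw -1 or +1 with equal probability and apply the inverse scale,
// so a parameter with a small scale (large range) gets a larger
// perturbation.
double
ScaledDelta(std::mt19937 & rng, double inverseScale)
{
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  const int sign = 2 * static_cast<int>(std::lround(uniform(rng))) - 1;
  return sign * inverseScale;
}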
@@ -326,93 +316,90 @@ SPSAOptimizer::ComputeGradient(const ParametersType & parameters, DerivativeType
{
const unsigned int spaceDimension = parameters.GetSize();

/** Compute c_k */
// Compute c_k
const double ck = this->Compute_c(m_CurrentIteration);

/** Instantiate the vectors thetaplus, thetamin,
* set the gradient to the correct size, and get the scales.
*/
// Instantiate the vectors thetaplus, thetamin,
// set the gradient to the correct size, and get the scales.
ParametersType thetaplus(spaceDimension);
ParametersType thetamin(spaceDimension);

gradient = DerivativeType(spaceDimension);
gradient.Fill(0.0);
const ScalesType & scales = this->GetScales();

/** Compute the gradient as an average of q estimates, where
* q = m_NumberOfPerturbations
*/
// Compute the gradient as an average of q estimates, where
// q = m_NumberOfPerturbations
for (SizeValueType perturbation = 1; perturbation <= this->GetNumberOfPerturbations(); ++perturbation)
{
/** Generate a (scaled) perturbation vector m_Delta */
// Generate a (scaled) perturbation vector m_Delta
this->GenerateDelta(spaceDimension);

/** Create thetaplus and thetamin */
// Create thetaplus and thetamin
for (unsigned int j = 0; j < spaceDimension; ++j)
{
thetaplus[j] = parameters[j] + ck * m_Delta[j];
thetamin[j] = parameters[j] - ck * m_Delta[j];
}

/** Compute the cost function value at thetaplus */
// Compute the cost function value at thetaplus
const double valueplus = this->GetValue(thetaplus);

/** Compute the cost function value at thetamin */
// Compute the cost function value at thetamin
const double valuemin = this->GetValue(thetamin);

/** Compute the contribution to the gradient g_k */
// Compute the contribution to the gradient g_k
const double valuediff = (valueplus - valuemin) / (2 * ck);
for (unsigned int j = 0; j < spaceDimension; ++j)
{
// remember to divide the gradient by the NumberOfPerturbations!
gradient[j] += valuediff / m_Delta[j];
}
} // end for ++perturbation
}

/** Apply scaling (see below) and divide by the NumberOfPerturbations */
// Apply scaling (see below) and divide by the NumberOfPerturbations
for (unsigned int j = 0; j < spaceDimension; ++j)
{
gradient[j] /= (itk::Math::sqr(scales[j]) * static_cast<double>(m_NumberOfPerturbations));
}
/**
* Scaling was still needed, because the gradient
* should point along the direction of the applied
* perturbation.
*
* Recall that we scaled the perturbation vector by dividing each
* element j by scales[j]:
* delta'[j] = delta[j] / scales[j]
* = (+ or -) 1 / scales[j]
*
* Consider the case of NumberOfPerturbations=1.
* If we would not do any scaling the gradient would
* be computed as:
* grad[j] = valuediff / delta'[j]
* = valuediff / ( delta[j] / scales[j] )
* = scales[j] * valuediff / delta[j]
* = (+ or -) valuediff * scales[j]
*
* This is wrong, because it gives a vector that points
* in a different direction than the perturbation. Besides,
* it would give an opposite effect as expected from the scaling.
* For rigid registration for example, we choose the scaler for
* the rotation 1 and for the translation 1/1000 (see
* Examples/Registration/ImageRegistration5.cxx), because
* we want the optimizer to adjust the translation in bigger steps.
* In the formula above, the grad[translation] would then be SMALLER
* than grad[rotation], so the optimizer would adjust the translation
* in smaller steps.
*
* To make the gradient point along the perturbation direction we
* have to divide it by the square of the scales, to return the scaling
* parameter to the denominator where it belongs:
* grad[j] = (+ or -) valuediff * scales[j] / scales[j]^2
* = (+ or -) valuediff / scales[j]
* which is correct. Now the optimizer will take a step
* in the direction of the perturbation (or the opposite
* of course, if valuediff is negative).
*
*/
//
// Scaling was still needed, because the gradient
// should point along the direction of the applied
// perturbation.
//
// Recall that we scaled the perturbation vector by dividing each
// element j by scales[j]:
//   delta'[j] = delta[j] / scales[j]
//             = (+ or -) 1 / scales[j]
//
// Consider the case of NumberOfPerturbations = 1.
// If we did not do any scaling, the gradient would
// be computed as:
//   grad[j] = valuediff / delta'[j]
//           = valuediff / ( delta[j] / scales[j] )
//           = scales[j] * valuediff / delta[j]
//           = (+ or -) valuediff * scales[j]
//
// This is wrong, because it gives a vector that points
// in a different direction than the perturbation. Besides,
// it would give the opposite of the effect expected from the scaling.
// For rigid registration, for example, we choose a scale of 1 for
// the rotation and 1/1000 for the translation (see
// Examples/Registration/ImageRegistration5.cxx), because
// we want the optimizer to adjust the translation in bigger steps.
// In the formula above, grad[translation] would then be SMALLER
// than grad[rotation], so the optimizer would adjust the translation
// in smaller steps.
//
// To make the gradient point along the perturbation direction, we
// have to divide it by the square of the scales, to return the scaling
// parameter to the denominator where it belongs:
//   grad[j] = (+ or -) valuediff * scales[j] / scales[j]^2
//           = (+ or -) valuediff / scales[j]
// which is correct. Now the optimizer will take a step
// in the direction of the perturbation (or the opposite,
// of course, if valuediff is negative).
//
} // end ComputeGradient
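To make the scaling argument above concrete, here is a self-contained sketch of one SPSA gradient estimate (q = 1); the function mirrors the logic of ComputeGradient() rather than reproducing the ITK implementation.

#include <cstddef>
#include <functional>
#include <vector>

// Central-difference SPSA estimate: perturb by +/- ck * delta (delta
// already divided by scales[j]), take the finite difference, then divide
// by scales[j]^2 so the estimate points along the perturbation direction.
std::vector<double>
SpsaGradient(const std::function<double(const std::vector<double> &)> & f,
             const std::vector<double> & theta,
             const std::vector<double> & delta,
             const std::vector<double> & scales,
             double ck)
{
  const std::size_t n = theta.size();
  std::vector<double> thetaPlus(n);
  std::vector<double> thetaMin(n);
  for (std::size_t j = 0; j < n; ++j)
  {
    thetaPlus[j] = theta[j] + ck * delta[j];
    thetaMin[j] = theta[j] - ck * delta[j];
  }
  const double valuediff = (f(thetaPlus) - f(thetaMin)) / (2 * ck);
  std::vector<double> gradient(n);
  for (std::size_t j = 0; j < n; ++j)
  {
    gradient[j] = valuediff / delta[j] / (scales[j] * scales[j]);
  }
  return gradient;
}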

/**
@@ -421,25 +408,25 @@
void
SPSAOptimizer::GuessParameters(SizeValueType numberOfGradientEstimates, double initialStepSize)
{
/** Guess A */
// Guess A
this->SetA(static_cast<double>(this->GetMaximumNumberOfIterations()) / 10.0);

if (!m_CostFunction)
{
itkExceptionMacro(<< "No objective function defined! ");
}

/** The number of parameters: */
// The number of parameters
const unsigned int spaceDimension = m_CostFunction->GetNumberOfParameters();

/** Check if the initial position has the correct number of parameters */
// Check if the initial position has the correct number of parameters
const ParametersType & initialPosition = this->GetInitialPosition();
if (spaceDimension != initialPosition.GetSize())
{
itkExceptionMacro(<< "Number of parameters not correct!");
}

/** Estimate the maximum absolute element of the initial gradient */
// Estimate the maximum absolute element of the initial gradient
DerivativeType averageAbsoluteGradient(spaceDimension);
averageAbsoluteGradient.Fill(0.0);
m_CurrentIteration = 0;
Expand All @@ -450,11 +437,10 @@ SPSAOptimizer::GuessParameters(SizeValueType numberOfGradientEstimates, double i
{
averageAbsoluteGradient[j] += itk::Math::abs(m_Gradient[j]);
}
} // end for ++n
}
averageAbsoluteGradient /= static_cast<double>(numberOfGradientEstimates);

/** Set a in order to make the first steps approximately have an
initialStepSize */
// Set a so that the first steps approximately have the requested initialStepSize
this->SetSa(initialStepSize * std::pow(m_A + 1.0, m_Alpha) / averageAbsoluteGradient.max_value());
} // end GuessParameters
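The Sa guess follows from the usual SPSA gain sequence a_k = a / (A + k + 1)^alpha (an assumption about Compute_a(), which this diff does not show): at k = 0 the step on the largest gradient component is a * max|g| / (A + 1)^alpha, and solving for a at the requested initialStepSize gives the expression used above.

#include <cmath>

// Solve a_0 * maxAbsGradient = initialStepSize for the gain numerator a,
// with a_0 = a / (A + 1)^alpha; mirrors the SetSa() call above.
double
GuessSa(double initialStepSize, double A, double alpha, double maxAbsGradient)
{
  return initialStepSize * std::pow(A + 1.0, alpha) / maxAbsGradient;
}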

