Fixed bug in magical steps

- now produces a continuous decrease in the objective function (see the sketch below)
- early tests show that the steps are effective, but not yet efficient, for solving small problems
Commit 83cb293bde179ce4948bed719703e7f2a735fdc6 (1 parent: a01a23b), committed by Andrew Lambe on May 16, 2012.
Showing 9 additions and 5 deletions in 1 changed file: nlpy/optimize/solvers/sbmin.py (+9 −5).
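
From the diff below, the fix appears to change the bookkeeping order around the magical step in two ways: the gradient is now evaluated at the newly accepted point before nlp.magical_step is called (previously the call saw the gradient left over from the earlier iterate), and the objective and gradient are re-evaluated afterwards only when a magical step has actually moved the point. A minimal sketch of that order, using a hypothetical state object in place of the solver instance (an illustration, not the SBMIN code itself):

    # Sketch only: mirrors the update order introduced by this commit.
    # `state` stands in for the solver object; `nlp` provides the obj/grad/
    # magical_step methods that appear in the diff below.
    def accept_trial_point(nlp, state, x_trial, f_trial, magic_steps_cons):
        state.x = x_trial
        state.f = f_trial              # trial objective is already known
        state.g = nlp.grad(state.x)    # gradient at the accepted point
        if magic_steps_cons:
            # the magical step is computed from the up-to-date gradient
            m_step = nlp.magical_step(state.x, state.g)
            state.x += m_step
            state.true_step += m_step
            state.f = nlp.obj(state.x)   # the step moved x, so the stored
            state.g = nlp.grad(state.x)  # objective and gradient are refreshed
        return state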
@@ -217,14 +217,16 @@ def Solve(self, **kwargs):
                 # Trust-region step is successful
                 self.TR.UpdateRadius(rho, stepnorm)
                 self.x = x_trial
+                self.f = f_trial
+                self.g = nlp.grad(self.x)
                 if self.magic_steps_cons:
                     m_step = nlp.magical_step(self.x, self.g)
                     self.x += m_step
                     self.true_step += m_step
+                    self.f = nlp.obj(self.x)
+                    self.g = nlp.grad(self.x)
-                self.f = nlp.obj(self.x)
-                self.g = nlp.grad(self.x)
                 self.pgnorm = np.max(np.abs( \
                                 self.projected_gradient(self.x,self.g)))
                 step_status = 'Acc'
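
For readability, here is roughly the block that results once this first hunk is applied, restated without diff markers and with comments on what changed; the exact indentation in sbmin.py may differ:

    # Trust-region step is successful
    self.TR.UpdateRadius(rho, stepnorm)
    self.x = x_trial
    self.f = f_trial               # added: reuse the already-computed trial objective
    self.g = nlp.grad(self.x)      # added: gradient at the accepted point, so the
                                   # magical step below no longer uses a stale gradient
    if self.magic_steps_cons:
        m_step = nlp.magical_step(self.x, self.g)
        self.x += m_step
        self.true_step += m_step
        self.f = nlp.obj(self.x)   # moved inside the branch: re-evaluate only when
        self.g = nlp.grad(self.x)  # a magical step has actually changed the point
    self.pgnorm = np.max(np.abs(self.projected_gradient(self.x, self.g)))
    step_status = 'Acc'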
@@ -251,17 +253,19 @@ def Solve(self, **kwargs):
                     step_status = 'N-Y Rej'
                 else:
                     # Backtrack succeeded, update the current point
-                    self.x = x_trial
                     self.true_step *= alpha
+                    self.x = x_trial
+                    self.f = f_trial
+                    self.g = nlp.grad(self.x)
                     # Magical steps can also apply if backtracking succeeds
                     if self.magic_steps_cons:
                         m_step = nlp.magical_step(self.x, self.g)
                         self.x += m_step
                         self.true_step += m_step
+                        self.f = nlp.obj(self.x)
+                        self.g = nlp.grad(self.x)
-                    self.f = f_trial
-                    self.g = nlp.grad(self.x)
                     self.pgnorm = np.max(np.abs( \
                                     self.projected_gradient(self.x,self.g)))
                     step_status = 'N-Y Acc'
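
The second hunk gives the same treatment to the branch where the backtracking line search succeeds. Restated without diff markers and annotated (again, indentation approximate):

    # Backtrack succeeded, update the current point
    self.true_step *= alpha
    self.x = x_trial               # moved: grouped with the other state updates
    self.f = f_trial               # added: objective at the accepted trial point
    self.g = nlp.grad(self.x)      # added: gradient at the accepted trial point
    # Magical steps can also apply if backtracking succeeds
    if self.magic_steps_cons:
        m_step = nlp.magical_step(self.x, self.g)
        self.x += m_step
        self.true_step += m_step
        self.f = nlp.obj(self.x)   # previously self.f kept f_trial even after the
        self.g = nlp.grad(self.x)  # magical step moved x; both are now refreshed
    self.pgnorm = np.max(np.abs(self.projected_gradient(self.x, self.g)))
    step_status = 'N-Y Acc'

In both branches, keeping the re-evaluation inside the magical-step block avoids an extra objective evaluation on iterations where no magical step is taken.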
