lambe/nlpy forked from optimizers/nlpy

Fix bugs in the augmented Lagrangian solvers: add an L-BFGS variant of the augmented Lagrangian problem and framework (auglag2.py), correct the multiplier/penalty update and iteration accounting, and simplify the projected-gradient step-length computation in the bound-constrained QP solver (bqp.py).

1 parent 4bb2f54 commit 1ff92275000313cc7e13fb4377ba85aa0bb7aba8 syarra committed Apr 27, 2012
Showing with 149 additions and 34 deletions.
1. +49 −14 nlpy/optimize/solvers/auglag2.py
2. +15 −20 nlpy/optimize/solvers/bqp.py
3. +85 −0 tests/SBMIN/resultsSBMIN.txt
63 nlpy/optimize/solvers/auglag2.py
 @@ -71,7 +71,6 @@ def __init__(self, nlp, **kwargs): self.Lvar = self.nlp.Lvar self.Uvar = self.nlp.Uvar self.x0 = self.nlp.x0 - # end def # Evaluate augmented Lagrangian function @@ -152,6 +151,39 @@ def hess(self, x, z=None, **kwargs): # end class +class AugmentedLagrangianLbfgs(AugmentedLagrangian): + ''' + ''' + + def __init__(self, nlp, **kwargs): + AugmentedLagrangian.__init__(self, nlp, **kwargs) + self.Hessapp = LBFGS(self.n,**kwargs) + + def hprod(self, x, z, v, **kwargs): + ''' + Compute the Hessian-vector product of the Hessian of the augmented + Lagrangian with arbitrary vector v. + ''' + w = self.Hessapp.matvec(v) + return w + + def hess(self, x, z=None, **kwargs): + return SimpleLinearOperator(self.n, self.n, symmetric=True, + matvec= lambda u: self.hprod(x,z,u)) + + def hupdate(self, new_s=None, new_y=None): + if new_s is not None and new_y is not None: + self.Hessapp.store(new_s,new_y) + return + + def hrestart(self): + self.Hessapp.restart() + return + +# end class + + + class AugmentedLagrangianFramework(object): ''' @@ -169,7 +201,7 @@ class AugmentedLagrangianFramework(object): Springer, 2006, pp 519--523 ''' - def __init__(self, nlp, innerSolver, rho_init=10., tau=0.1, **kwargs): + def __init__(self, nlp, innerSolver, **kwargs): ''' Initialize augmented Lagrangian method and options. 
@@ -185,11 +217,11 @@ def __init__(self, nlp, innerSolver, rho_init=10., tau=0.1, **kwargs): self.dphi0 = None self.dphi0norm = None self.alprob.pi0 = self.pi0 - self.alprob.rho = numpy.array(rho_init) - self.alprob.rho_init = numpy.array(rho_init) + self.alprob.rho = kwargs.get('rho_init',numpy.array(10.)) + self.alprob.rho_init = kwargs.get('rho_init',numpy.array(10.)) self.rho = self.alprob.rho self.pi = self.alprob.pi - self.tau = tau + self.tau =kwargs.get('tau', 0.1) self.omega = None self.eta = None self.omega_init = kwargs.get('omega_init',.1) # rho_init**-1 @@ -221,7 +253,7 @@ def UpdateMultipliersOrPenaltyParameters(self, consnorm, convals): if consnorm <= numpy.maximum(self.eta, self.eta_opt): # No change in rho, update multipliers, tighten tolerances - self.pi += self.rho*convals + self.pi -= self.rho*convals else: # Increase rho, reset tolerances based on new rho self.rho /= self.tau @@ -261,7 +293,7 @@ def solve(self, **kwargs): self.iter = 0 self.inner_fail_count = 0 self.pg0 = Pmax - + self.niter_total = 0 # Convergence check converged = Pmax <= self.omega_opt and max_cons <= self.eta_opt @@ -279,17 +311,19 @@ def solve(self, **kwargs): print '\n' # Perform bound-constrained minimization - tr = TR(eta1=0.25, eta2=0.75, gamma1=0.0625, gamma2=2) + #tr = TR(eta1=0.25, eta2=0.75, gamma1=0.0625, gamma2=2) + tr = TR(eta1=1.0e-4, eta2=.99, gamma1=.3, gamma2=2.5) SBMIN = self.innerSolver(self.alprob, tr, TRSolver, reltol=self.omega, x0=self.x, - maxiter = 250, + maxiter = 100, verbose=self.sbmin_verbose, magic_steps=self.magic_steps) SBMIN.Solve(rho_pen=self.rho,slack_index=original_n) self.x = SBMIN.x self.f = self.alprob.nlp.obj(self.x[:original_n]) + self.niter_total += SBMIN.iter dphi = self.alprob.grad(self.x) Pdphi = self.alprob.project_gradient(self.x,dphi) @@ -298,6 +332,7 @@ def solve(self, **kwargs): if self.alprob.nlp.m == 0: max_cons_new = 0 + self.iter = SBMIN.iter else: max_cons_new = numpy.max(numpy.abs(convals_new)) @@ -334,7 +369,7 @@ def 
solve(self, **kwargs): self.inner_fail_count = 0 else: self.inner_fail_count += 1 - if self.inner_fail_count == 10: + if self.inner_fail_count == 10 and self.printlevel >= 1: print '\n Current point could not be improved, exiting ... \n' break # end if @@ -374,9 +409,9 @@ def solve(self, **kwargs): # end def # end class -class AugmentedLagrangianLBFGSFramework(object): - - def __init__(self, nlp, innerSolver): - AugmentedLagrangian.__init__(self, innerSolver, rho_init=10., tau=0.1, **kwargs) +class AugmentedLagrangianLbfgsFramework(AugmentedLagrangianFramework): + def __init__(self,nlp, innerSolver, **kwargs): + AugmentedLagrangianFramework.__init__(self, nlp, innerSolver, **kwargs) + self.alprob = AugmentedLagrangianLbfgs(nlp)
35 nlpy/optimize/solvers/bqp.py
 @@ -25,7 +25,6 @@ __docformat__ = 'restructuredtext' - class SufficientDecreaseCG(TruncatedCG): """ An implementation of the conjugate-gradient algorithm with @@ -185,9 +184,10 @@ def projected_linesearch(self, x, g, d, qval, step=1.0): # Perform projected Armijo linesearch. while not finished: + #print 'proj linsearch, x:', x,' d:', d xTrial = self.project(x + step * d) qTrial = qp.obj(xTrial) - #print 'x= ', x + #print 'proj linsearch, xtrial= ', xTrial #print 'xTrial=', xTrial, ' qTrial=', qTrial slope = np.dot(g, xTrial-x) #print ' step=', step, ', slope=', slope @@ -196,7 +196,7 @@ def projected_linesearch(self, x, g, d, qval, step=1.0): finished = True else: step /= 3 - + #print 'proj linsearch, finished:', finished, ' step:', step return (xTrial, qTrial) @@ -235,28 +235,17 @@ def projected_gradient(self, x0, g=None, active_set=None, qval=None, **kwargs): sufficient_decrease = False best_decrease = 0 iter = 0 + #print 'H:', FormEntireMatrix(self.qp.n, self.qp.n, self.H) while not settled_down and not sufficient_decrease and \ iter < maxiter: iter += 1 qOld = qval - + #print 'pg iter:', iter, ' x:', x, 'g:', g # TODO: Use appropriate initial steplength. - n = self.qp.n - fixed_vars = np.concatenate((lower,upper)) - free_vars = np.setdiff1d(np.arange(n, dtype=np.int), fixed_vars) - - ZHZ = ReducedHessian(self.H, free_vars) - Zg = g[free_vars] - - if np.dot(Zg,ZHZ*Zg) <=0 : - step = 10. - else: - step = np.linalg.norm(Zg)**2 / np.dot(Zg,ZHZ*Zg) - #print 'Hessian DP step : ', step - #step = 10. - (x, qval) = self.projected_linesearch(x, g, -g, qval, step=step) + (x, qval) = self.projected_linesearch(x, g, -g, qval) +# print 'x:', x # Check decrease in objective. 
decrease = qOld - qval @@ -269,7 +258,6 @@ def projected_gradient(self, x0, g=None, active_set=None, qval=None, **kwargs): if identical(lower,lowerTrial) and identical(upper,upperTrial): settled_down = True lower, upper = lowerTrial, upperTrial - #print ' qval=', qval, 'lower=', lower, ', upper=', upper #print ' settled=', repr(settled_down), ', decrease=', repr(sufficient_decrease) @@ -289,6 +277,9 @@ def solve(self, **kwargs): lower, upper = self.get_active_set(x) iter = 0 + #print 'x0:', x + #print 'qp.Lvar', qp.Lvar, 'qp.Uvar', qp.Uvar + # Compute stopping tolerance. g = qp.grad(x) gNorm = np.linalg.norm(g) @@ -310,13 +301,17 @@ def solve(self, **kwargs): continue # Projected-gradient phase: determine next working set. + + #print 'x=', x, ' g=', g (x, (lower,upper)) = self.projected_gradient(x, g=g, active_set=(lower,upper)) g = qp.grad(x) qval = qp.obj(x) pg = self.pgrad(x, g=g, active_set=(lower,upper)) pgNorm = np.linalg.norm(pg) #print 'Main loop with iter=%d and pgNorm=%g' % (iter, pgNorm) + #print 'x=', x, ' g=', g, ' qval=', qval + if pgNorm <= stoptol: exitOptimal = True continue @@ -360,7 +355,7 @@ def solve(self, **kwargs): g = qp.grad(x) pg = self.pgrad(x, g=g, active_set=(lower,upper)) pgNorm = np.linalg.norm(pg) - +# print 'x:',x if pgNorm <= stoptol: exitOptimal = True continue
85 tests/SBMIN/resultsSBMIN.txt
 @@ -11,3 +11,88 @@ hs004 2 4 2.66666667e+00 'opt' hs045 5 0 2.00000000e+00 'opt' hs005 2 5 -1.91322295e+00 'opt' + +Cuter Test Problems + +Name Dim Iter Objective Opt +-------------------------------------------- +allinitu 4 14 5.74438491e+00 'opt' +arglina 100 3 1.00000000e+02 'opt' +arglinb 10 1 4.63414634e+00 'opt' +arglinc 8 1 6.13513514e+00 'opt' +bard 3 8 8.21487731e-03 'opt' +beale 2 10 9.34960637e-18 'opt' +biggs6 6 100 8.03227059e-07 'opt' +box3 3 7 8.44803006e-12 'opt' +brkmcc 2 2 1.69042679e-01 'opt' +brownal 10 6 7.98497885e-11 'opt' +brownbs 2 10 0.00000000e+00 'opt' +brownden 4 8 8.58222016e+04 'opt' +burkehan +chnrosnb 50 69 1.48175648e-15 'opt' +ckoehelb +cliff 2 27 1.99786614e-01 'opt' +cube 2 44 3.42915043e-15 'opt' +deconvu 51 118 7.35629473e-07 'opt' +denschna 2 5 2.21425291e-12 'opt' +denschnb 2 6 1.46939384e-12 'opt' +denschnc 2 10 2.13815379e-20 'opt' +denschnd 3 32 1.53187512e-08 'opt' +denschne 3 17 1.33830691e-11 'opt' +denschnf 2 6 1.04838731e-21 'opt' +dixon3dq 10 3 4.08349823e-11 'opt' +djtl 2 36 -8.95154472e+03 'opt' +engval2 3 21 4.52320150e-23 'opt' +errinros 50 67 3.99041539e+01 'opt' +expfit 2 11 2.40510594e-01 'opt' +extrosnb 10 0 0.00000000e+00 'opt' +fletcbv2 100 14 -5.14006786e-01 'opt' +fletchcr 100 11 4.11284729e-13 'opt' +genhumps 5 151 3.85576899e-02 'itr' +growthls 3 91 1.22209485e+00 'itr' +gulf 3 26 4.83084005e-16 'opt' +hairy 2 61 1.93902729e+02 'itr' +hatfldd 3 23 6.61511619e-08 'opt' +hatflde 3 30 4.43440071e-07 'opt' +heart6ls 6 181 4.17259439e-01 'itr' +heart8ls 8 135 3.74045510e-13 'opt' +hilberta 10 3 1.73722139e-09 'opt' +hilbertb 50 2 2.50521776e-19 'opt' +himmelbb 2 11 9.83237594e-15 'opt' +himmelbf 4 8 3.18571749e+02 'opt' +himmelbg 2 6 1.25552220e-13 'opt' +himmelbh 2 5 -1.00000000e+00 'opt' +humps 2 61 2.40399323e+04 'itr' +jensmp 2 9 1.24362182e+02 'opt' +kowosb 4 11 3.07505610e-04 'opt' +loghairy 2 61 6.54254374e+00 'itr' +mancino 100 5 3.60114493e-21 'opt' +maratosb 2 11 -1.00000006e+00 'opt' +mexhat 2 3 
-4.01000000e-02 'opt' +meyer3 3 91 6.21276541e+04 'itr' +minsurf 36 12 1.00000000e+00 'opt' +nonmsqrt 9 196 7.51805719e-01 'opt' +osbornea 5 151 8.79026294e-01 'itr' +osborneb 11 18 4.01377363e-02 'opt' +palmer1c 8 3 9.75979913e-02 'opt' +palmer1d 7 3 6.52682594e-01 'opt' +palmer2c 8 3 1.44213912e-02 'opt' +palmer3c 8 3 1.95376385e-02 'opt' +palmer4c 8 3 5.03106958e-02 'opt' +palmer5c 6 2 2.12808667e+00 'opt' +palmer6c 8 2 1.63874216e-02 'opt' +palmer7c 8 2 6.01985672e-01 'opt' +palmer8c 8 2 1.59768063e-01 'opt' +penalty2 100 19 9.70960840e+04 'opt' +pfit1ls 3 91 1.43647367e-04 'itr' +pfit2ls 3 91 1.05698762e-04 'itr' +pfit3ls 3 65 1.29726658e-16 'opt' +pfit4ls 3 91 1.74645812e-03 'itr' +rosenbr 2 31 4.14799022e-17 'opt' +sineval 2 60 5.14830500e-29 'opt' +sisser 2 14 1.77427121e-08 'opt' +tointqor 50 3 1.17547222e+03 'opt' +vardim 100 25 2.04758709e-28 'opt' +watson 31 12 6.17445994e-07 'opt' +yfitu 3 60 6.66972064e-13 'opt' +zangwil2 2 3 -1.82000000e+01 'opt'