Major iteration counting #37

Closed
wants to merge 5 commits
33 changes: 4 additions & 29 deletions postprocessing/OptView.py
@@ -223,11 +223,8 @@ def OptimizationHistory(self):
pass

# Check to see if there is proper saved info about iter type
if 'iu0' in db['0'].keys():
if db[db['last']]['iu0'] > 0:
self.storedIters = True
else:
self.storedIters = False
if 'isMajor' in db['0'].keys():
self.storedIters = True
else:
self.storedIters = False

@@ -271,11 +268,10 @@ def DetermineMajorIterations(self, db, OpenMDAO):
# actual major iteration. In particular, one has funcs
# and the next has funcsSens, but they're both part of the
# same major iteration.
if any('funcs' == s for s in db[key].keys()):

if 'funcs' in db[key].keys():
# If this iteration has 'funcs' within it, but it's not
# flagged as major, then it's a minor iteration.
if i == 0:
if i == 0 or db[key]['isMajor']:
self.iter_type[i] = 1
else:
self.iter_type[i] = 2
@@ -295,27 +291,6 @@ def DetermineMajorIterations(self, db, OpenMDAO):
self.iter_type[i] = 0 # this is not a real iteration,
# just the sensitivity evaluation

min_list = []
# Loop over each optimization iteration
for i, iter_type in enumerate(self.iter_type):

if iter_type == 0:
continue

key = '%d' % i

# # If the proper history is stored coming out of
# # pyoptsparse, use that for filtering major iterations.
# if self.storedIters:
# if db[key]['iu0'] != db[prev_key]['iu0']:
# min_array = np.array(min_list)
# argmin = np.argmin(min_array[:, 1])
# major_key = min_array[argmin, 0]
# self.iter_type[int(major_key)] = 1
# min_list = []
# min_list.append([int(key), db[key]['funcs'][self.obj_key]])
# prev_key = i

else: # this is if it's OpenMDAO
for i, iter_type in enumerate(self.iter_type):
key = '{}|{}'.format(self.solver_name, i+1) # OpenMDAO uses 1-indexing
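For reference, a minimal sketch of how a standalone script could classify iterations from a pyoptsparse history file using the new 'isMajor' flag, mirroring the OptView logic above; the import path and the file name 'opt_hist.hst' are assumptions.

from pyoptsparse.sqlitedict.sqlitedict import SqliteDict

db = SqliteDict('opt_hist.hst')
nkeys = int(db['last']) + 1
iter_type = []
for i in range(nkeys):
    key = '%d' % i
    if key not in db:
        iter_type.append(0)
        continue
    point = db[key]
    if 'funcs' in point:
        # 1 = major iteration, 2 = minor iteration
        iter_type.append(1 if (i == 0 or point.get('isMajor', False)) else 2)
    else:
        # gradient-only entry: just the sensitivity evaluation, not a real iteration
        iter_type.append(0)
db.close()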
41 changes: 38 additions & 3 deletions pyoptsparse/pyOpt_history.py
@@ -21,6 +21,7 @@
# External Python modules
# =============================================================================
import os
import numpy as np
from .pyOpt_error import Error
from .sqlitedict.sqlitedict import SqliteDict
# =============================================================================
@@ -42,7 +43,7 @@ class History(object):
closed

flag : str
String speciying the mode. Similar to what was used in
String specifying the mode. Similar to what was used in
shelve. 'n' for a new database and 'r' to read an existing one.
"""
def __init__(self, fileName, temp=False, flag='n'):
@@ -80,17 +81,24 @@ def write(self, callCounter, data):

# String key to database on disk
key = '%d'% callCounter

self.db[key] = data
# if the point exists, we merely update with new data
if self.pointExists(callCounter):
oldData = self.read(callCounter)
oldData.update(data)
self.db[key] = oldData
else:
self.db[key] = data
self.db['last'] = key
self.db.sync()
self.keys = list(self.db.keys())
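A small sketch of the merge-on-update behavior above; the file name 'merge_demo.hst' and the stored values are placeholders. A second write for the same callCounter only adds or overwrites the supplied keys rather than replacing the whole entry, so the data written later by _snstop does not clobber the original funcs data.

from collections import OrderedDict
from pyoptsparse.pyOpt_history import History

hist = History('merge_demo.hst')  # placeholder file name, new database
hist.write(7, {'xuser': OrderedDict([('x', [0.0, 1.0])]), 'funcs': {'obj': 2.0}, 'isMajor': False})
hist.write(7, {'isMajor': True, 'nMajor': 3})  # merged into the existing entry for callCounter 7
assert hist.read(7)['isMajor'] and 'funcs' in hist.read(7)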

def writeData(self, key, data):
"""
Write arbitrary key:data value to db
"""
self.db[key] = data
self.db.commit()
self.keys = list(self.db.keys())

def pointExists(self, callCounter):
"""
@@ -120,6 +128,33 @@ def readData(self, key):
except KeyError:
return None

def getCallCounter(self,x):
"""
Returns the callCounter corresponding to the function evaluation at 'x'.
Returns None if the point does not match any previous evaluation.
"""
last = int(self.db['last'])
callCounter = None
for i in range(last,0,-1):
key = '%d'% i
xuser = self.deProcessX(self.db[key]['xuser'])
if np.isclose(x,xuser).all() and 'funcs' in self.db[key].keys():
callCounter = i
break
return callCounter
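A short usage sketch for getCallCounter; the history file name and the design point are placeholders, and the point must be given in user (unscaled) space, as done in _snstop.

import numpy as np
from pyoptsparse.pyOpt_history import History

hist = History('opt_hist.hst', flag='r')  # placeholder file name, read mode
xuser = np.array([0.5, 1.5, -2.0])        # placeholder design point
counter = hist.getCallCounter(xuser)
if counter is not None:
    funcs = hist.read(counter)['funcs']   # data stored at that evaluation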

def deProcessX(self,xuser):
"""
This is a much simpler version of pyOpt_history.deProcessX, without error checking.
We traverse the OrderedDict and stack all the DVs into a single numpy array,
preserving the order so that we get the correct x vector.
"""
x_list = []
for key in xuser.keys():
x_list.append(xuser[key])
x_array = np.hstack(x_list)
return x_array
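For example (a toy sketch with made-up DV group names), deProcessX simply turns the per-group xuser dictionary into one flat vector in group order:

import numpy as np
from collections import OrderedDict

xuser = OrderedDict([('twist', np.array([1.0, 2.0])), ('alpha', np.array([5.0]))])
x_array = np.hstack([xuser[key] for key in xuser.keys()])  # -> array([1., 2., 5.])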

def __del__(self):
try:
self.db.close()
10 changes: 6 additions & 4 deletions pyoptsparse/pyOpt_optimizer.py
@@ -27,6 +27,7 @@
from .pyOpt_optimization import INFINITY
from .pyOpt_utils import convertToDense, convertToCOO, extractRows, \
mapToCSC, scaleRows, IDATA
from collections import OrderedDict
eps = numpy.finfo(1.0).eps

# =============================================================================
@@ -88,7 +89,8 @@ def __init__(self, name=None, category=None, defOptions=None,

# Create object to pass information about major iterations.
# Only relevant for SNOPT.
self.iu0 = 0
self.isMajor = False
self.iterDict = {}

# Store the jacobian conversion maps
self._jac_map_csr_to_csc = None
@@ -511,13 +513,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
hist['fail'] = masterFail

# Save information about major iteration counting (only matters for SNOPT).
hist['iu0'] = self.iu0
hist['isMajor'] = False # this will be updated in _snstop if it is major

# Add constraint and variable bounds at beginning of optimization.
# This info is used for visualization using OptView.
if self.callCounter == 0 and self.optProb.comm.rank == 0:
conInfo = {}
varInfo = {}
conInfo = OrderedDict()
varInfo = OrderedDict()

# Cycle through constraints adding the bounds
for key in self.optProb.constraints.keys():
77 changes: 71 additions & 6 deletions pyoptsparse/pySNOPT/pySNOPT.py
@@ -314,9 +314,7 @@ def __call__(self, optProb, sens=None, sensStep=None, sensMode=None,

# Store the starting time if the keyword timeLimit is given:
self.timeLimit = timeLimit
self.startTime = None
if self.timeLimit is not None:
self.startTime = time.time()
self.startTime = time.time()

if len(optProb.constraints) == 0:
# If the user *actually* has an unconstrained problem,
@@ -493,8 +491,8 @@ def __call__(self, optProb, sens=None, sensStep=None, sensMode=None,

# The snopt c interface
timeA = time.time()
snopt.snoptc(start, nnCon, nnObj, nnJac, iObj, ObjAdd, ProbNm,
self._userfg_wrap, Acol, indA, locA, bl, bu,
snopt.snkerc(start, nnCon, nnObj, nnJac, iObj, ObjAdd, ProbNm,
self._userfg_wrap, snopt.snlog, snopt.snlog2, snopt.sqlog, self._snstop, Acol, indA, locA, bl, bu,
Names, hs, xs, pi, rc, inform, mincw, miniw, minrw,
nS, ninf, sinf, ff, cu, iu, ru, cw, iw, rw)
optTime = time.time()-timeA
@@ -546,7 +544,7 @@ def _userfg_wrap(self, mode, nnJac, x, fobj, gobj, fcon, gcon,
which will take care of everything else.
"""
fail = False
self.iu0 = iu[0]
self.isMajor = False
if mode == 0 or mode == 2:
fobj, fcon, fail = self._masterFunc(x, ['fobj', 'fcon'])
if not fail:
@@ -573,6 +571,73 @@ def _userfg_wrap(self, mode, nnJac, x, fobj, gobj, fcon, gcon,

return mode, fobj, gobj, fcon, gcon

def _getHessian(self,iw,rw):
"""
This function retrieves the approximate Hessian from the SNOPT workspace arrays.
Call it, for example, from the _snstop routine or after SNOPT has finished, where the iw and rw arrays are available.
Currently only the full-memory Hessian mode is implemented; do not use this in the limited-memory case.

The full-memory (FM) Hessian in SNOPT is stored via its Cholesky factor U,
with H = U'U, where U has been flattened to 1D.
"""
lvlHes = iw[72-1] # 0,1,2 => LM, FM, Exact Hessian
if lvlHes != 1:
Error('Limited-memory Hessian not supported!')
lU = iw[391-1]-1 # U(lenU), BFGS Hessian H = U'U
lenU = iw[392-1]
Uvec = rw[lU:lU+lenU]
# nnH = int((-1+numpy.sqrt(1+8*lenU))/2) # can we access nnH from iw?
nnH = iw[24-1]
Umat = numpy.zeros((nnH,nnH))
Umat[numpy.triu_indices(nnH)] = Uvec
H = numpy.matmul(Umat.T,Umat)
return H
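The reconstruction above can be checked in isolation with a small standalone sketch (pure numpy, not SNOPT-specific): build an upper-triangular factor U, flatten its upper triangle the same way the code unpacks rw, and verify H = U'U.

import numpy

nnH = 3
U = numpy.triu(numpy.random.rand(nnH, nnH) + nnH * numpy.eye(nnH))  # upper-triangular factor
Uvec = U[numpy.triu_indices(nnH)]            # row-wise flattened storage, as assumed for rw
Umat = numpy.zeros((nnH, nnH))
Umat[numpy.triu_indices(nnH)] = Uvec         # undo the flattening
H = numpy.matmul(Umat.T, Umat)               # BFGS Hessian H = U'U
assert numpy.allclose(H, numpy.matmul(U.T, U))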

def _getPenaltyParam(self,iw,rw):
"""
Retrieves the full penalty parameter vector from the work arrays.
"""
nnCon = iw[23-1]
lxPen = iw[304-1]-1
xPen = rw[lxPen:lxPen+nnCon]
return xPen

def _snstop(self,ktcond,mjrprtlvl,minimize,n,nncon,nnobj,ns,itn,nmajor,nminor,nswap,condzhz,iobj,scaleobj,objadd,fobj,fmerit,penparm,step,primalinf,dualinf,maxvi,maxvirel,hs,locj,indj,jcol,scales,bl,bu,fx,fcon,gcon,gobj,ycon,pi,rc,rg,x,cu,iu,ru,cw,iw,rw):
"""
This routine is called at every major iteration in SNOPT, after solving the QP but before the line search.
Currently we use it just to determine the correct major iteration counting,
and to save some extra parameters in the history file if needed.

Returning with iabort != 0 will terminate SNOPT immediately.
"""
self.isMajor = True
H = self._getHessian(iw,rw)
xPen = self._getPenaltyParam(iw,rw)
iterDict = {
'isMajor' : True,
'time' : time.time() - self.startTime,
'nMajor' : nmajor,
'nMinor' : nminor,
'merit' : fmerit,
'feasibility' : primalinf,
'optimality' : dualinf,
'penalty' : xPen,
'pi' : pi,
'Hessian' : H,
}
if self.storeHistory:
currX = x[:n] # only the first n components are x, the rest are the slacks
if nmajor == 0:
callCounter = 0
else:
xScaled = self.optProb.invXScale * currX + self.optProb.xOffset
callCounter = self.hist.getCallCounter(xScaled)
if callCounter is not None:
self.hist.write(callCounter, iterDict)
iabort = 0
return iabort
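With these changes, the extra data stored by _snstop can be recovered from the history file after the run. A minimal sketch, assuming the optimization was run with storeHistory pointing to 'opt_hist.hst' (placeholder name):

from pyoptsparse.pyOpt_history import History

hist = History('opt_hist.hst', flag='r')
last = int(hist.readData('last'))
for i in range(last + 1):
    if not hist.pointExists(i):
        continue
    data = hist.read(i)
    if data.get('isMajor', False):
        # keys written by _snstop via hist.write(callCounter, iterDict)
        print(i, data['nMajor'], data['merit'], data['feasibility'], data['optimality'])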


def _set_snopt_options(self, iPrint, iSumm, cw, iw, rw):
"""
Set all the options into SNOPT that have been assigned