Merge pull request #1059 from Kenneth-T-Moore/ken2
Place an upper bound of 1.0 on the slope parameter in ArmijoGoldstein, and fix the tests that violated it.
swryan committed Sep 27, 2019
2 parents 56dfde3 + 5c8a84b commit 67689d5
Showing 3 changed files with 70 additions and 30 deletions.
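
The 'c' option bounded by this change plays the role of the sufficient-decrease coefficient in the classical Armijo condition, f(x_k + alpha * p_k) <= f(x_k) + c * alpha * grad_f(x_k)^T p_k, which is only meaningful for 0 < c < 1; the tests touched below had been setting c to 100.0 or 10000.0 purely to force a few line-search iterations. A minimal, illustrative sketch of how the bounded option behaves after the change (not part of this diff; it assumes OpenMDAO validates option bounds at assignment time):

import openmdao.api as om

ls = om.ArmijoGoldsteinLS(bound_enforcement='vector')
ls.options['c'] = 0.1        # accepted: within the declared range [0.0, 1.0]

try:
    ls.options['c'] = 100.0  # the old test value, now out of range
except ValueError as err:
    print(err)               # expected to report that the value exceeds the upper bound of 1.0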
43 changes: 22 additions & 21 deletions openmdao/recorders/tests/test_sqlite_recorder.py
@@ -16,6 +16,7 @@
from openmdao.test_suite.components.sellar import SellarDerivatives, SellarDerivativesGrouped, \
SellarProblem, SellarStateConnection, SellarProblemWithArrays
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.solvers.linesearch.tests.test_backtracking import ImplCompTwoStates

from openmdao.recorders.tests.sqlite_recorder_test_utils import assertMetadataRecorded, \
assertDriverIterDataRecorded, assertSystemIterDataRecorded, assertSolverIterDataRecorded, \
@@ -832,19 +833,24 @@ def test_record_solver(self):
assertSolverIterDataRecorded(self, expected_data, self.eps, prefix='run_again')

def test_record_line_search_armijo_goldstein(self):
prob = SellarProblem()
prob.setup()
prob = om.Problem()
prob.model.add_subsystem('px', om.IndepVarComp('x', 1.0))
prob.model.add_subsystem('comp', ImplCompTwoStates())
prob.model.connect('px.x', 'comp.x')

model = prob.model
model.linear_solver = om.ScipyKrylov()
prob.model.nonlinear_solver = om.NewtonSolver()
prob.model.nonlinear_solver.options['maxiter'] = 10
prob.model.linear_solver = om.ScipyKrylov()

nl = model.nonlinear_solver = om.NewtonSolver()
nl.options['solve_subsystems'] = True
nl.options['max_sub_solves'] = 4

ls = nl.linesearch = om.ArmijoGoldsteinLS(bound_enforcement='vector')
ls.options['c'] = 100.0 # This is bogus, but it ensures that we get a few LS iterations.
ls = prob.model.nonlinear_solver.linesearch = om.ArmijoGoldsteinLS(bound_enforcement='vector')
ls.add_recorder(self.recorder)
ls.options['c'] = .1

prob.setup()

prob['px.x'] = 2.0
prob['comp.y'] = 0.
prob['comp.z'] = 1.6

t0, t1 = run_driver(prob)

@@ -858,18 +864,14 @@
'ArmijoGoldsteinLS', (2,)
]

expected_abs_error = 5.6736837450444e-12
expected_rel_error = 0.0047475363051265665
expected_abs_error = 3.2882366094914777
expected_rel_error = 0.9999999999999998

expected_solver_output = {
"con_cmp1.con1": [-22.42830237],
"d1.y1": [25.58830237],
"con_cmp2.con2": [-11.941511849],
"pz.z": [5.0, 2.0],
"obj_cmp.obj": [28.58830816516],
"d2.y2": [12.058488150],
"px.x": [1.0]
}
"comp.z": [1.5],
"comp.y": [1.75],
"px.x": [2.0],
}

expected_solver_residuals = None

@@ -937,7 +939,6 @@ def test_record_pop_bug(self):
nl.options['max_sub_solves'] = 4

ls = nl.linesearch = om.ArmijoGoldsteinLS(bound_enforcement='vector')
ls.options['c'] = 100.0 # This is bogus, but it ensures that we get a few LS iterations.
model.add_recorder(self.recorder)

try:
6 changes: 3 additions & 3 deletions openmdao/solvers/linesearch/backtracking.py
@@ -263,9 +263,9 @@ def _declare_options(self):
super(ArmijoGoldsteinLS, self)._declare_options()
opt = self.options
opt['maxiter'] = 5
opt.declare('c', default=0.1, lower=0.0, desc="Slope parameter for line of sufficient "
"decrease. The larger the step, the more decrease is required to terminate the "
"line search.")
opt.declare('c', default=0.1, lower=0.0, upper=1.0, desc="Slope parameter for line of "
"sufficient decrease. The larger the step, the more decrease is required to "
"terminate the line search.")
opt.declare('rho', default=0.5, lower=0.0, upper=1.0, desc="Contraction factor.")
opt.declare('alpha', default=1.0, lower=0.0, desc="Initial line search step.")
opt.declare('retry_on_analysis_error', default=True,
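
The lower/upper arguments in the declaration above are checked by OpenMDAO's options dictionary when a value is assigned, so an out-of-range setting should fail at that point rather than propagating into the solver. An illustrative sketch of the same declare-and-validate pattern in isolation (not part of this diff; the import path is assumed from OpenMDAO 2.x):

from openmdao.utils.options_dictionary import OptionsDictionary

opts = OptionsDictionary()
opts.declare('c', default=0.1, lower=0.0, upper=1.0,
             desc="Slope parameter for line of sufficient decrease.")

opts['c'] = 0.5     # accepted: inside [0.0, 1.0]
# opts['c'] = 2.0   # expected to raise a ValueError for exceeding the upper bound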
51 changes: 45 additions & 6 deletions openmdao/solvers/linesearch/tests/test_backtracking.py
@@ -13,6 +13,7 @@
from openmdao.test_suite.components.double_sellar import DoubleSellar
from openmdao.test_suite.components.implicit_newton_linesearch \
import ImplCompTwoStates, ImplCompTwoStatesArrays
from openmdao.test_suite.components.sellar import SellarDis1, SellarDis2withDerivatives
from openmdao.utils.assert_utils import assert_rel_error


@@ -285,7 +286,7 @@ def test_deep_analysis_error_iprint(self):
ls.options['maxiter'] = 5
ls.options['alpha'] = 10.0
ls.options['retry_on_analysis_error'] = True
ls.options['c'] = 10000.0
ls.options['c'] = 1.0

top.setup()
top.set_solver_print(level=2)
@@ -334,7 +335,7 @@ def test_read_only_bug(self):
ls.options['maxiter'] = 5
ls.options['alpha'] = 10.0
ls.options['retry_on_analysis_error'] = True
ls.options['c'] = 10000.0
ls.options['c'] = 1.0

top.setup()
top.set_solver_print(level=2)
@@ -543,6 +544,46 @@ def test_undeclared_options(self):
"has not been declared.\"")


class SellarDis1withDerivativesMod(SellarDis1):
# Version of Sellar discipline 1 with a slightly incorrect x derivative.
# This will still solve, but will require some backtracking at times.

def _do_declares(self):
self.declare_partials(of='*', wrt='*')

def compute_partials(self, inputs, partials):
partials['y1', 'y2'] = -0.2
partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0]])
partials['y1', 'x'] = 1.5


class SubSellarMod(om.Group):

def __init__(self, units=None, scaling=None, **kwargs):
super(SubSellarMod, self).__init__(**kwargs)

self.add_subsystem('d1', SellarDis1withDerivativesMod(units=units, scaling=scaling),
promotes=['x', 'z', 'y1', 'y2'])
self.add_subsystem('d2', SellarDis2withDerivatives(units=units, scaling=scaling),
promotes=['z', 'y1', 'y2'])


class DoubleSellarMod(om.Group):

def __init__(self, units=None, scaling=None, **kwargs):
super(DoubleSellarMod, self).__init__(**kwargs)

self.add_subsystem('g1', SubSellarMod(units=units, scaling=scaling))
self.add_subsystem('g2', SubSellarMod(units=units, scaling=scaling))

self.connect('g1.y2', 'g2.x')
self.connect('g2.y2', 'g1.x')

# Converge the outer loop with Newton.
self.nonlinear_solver = om.NewtonSolver()
self.linear_solver = om.DirectSolver()


class TestArmijoGoldsteinLSArrayBounds(unittest.TestCase):

def setUp(self):
@@ -635,8 +676,9 @@ def test_linesearch_wall_bound_enforcement_scalar(self):
self.assertTrue(2.4 <= top['comp.z'][ind] <= self.ub[ind])

def test_with_subsolves(self):

prob = om.Problem()
model = prob.model = DoubleSellar()
model = prob.model = DoubleSellarMod()

g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver()
@@ -655,9 +697,6 @@ def test_with_subsolves(self):
model.nonlinear_solver.options['max_sub_solves'] = 4
ls = model.nonlinear_solver.linesearch = om.ArmijoGoldsteinLS(bound_enforcement='vector')

# This is pretty bogus, but it ensures that we get a few LS iterations.
ls.options['c'] = 100.0 # FIXME c should be 0 <= c <= 1

prob.set_solver_print(level=0)

prob.setup()
