
Commit

Merge pull request #413 from Kenneth-T-Moore/rob
Allow DesVar (actually IndepVarComp unknown) to be specified as an objective.
naylor-b committed Dec 1, 2015
2 parents b8bca68 + 3ff0812 commit ccfc52b
Showing 3 changed files with 197 additions and 4 deletions.
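What the change enables, condensed from the scalar test added in this commit (test_param_as_obj_scaler_explicit) into a minimal sketch; the `openmdao.api` import path is an assumption and may differ across 1.x versions:

```python
from openmdao.api import Problem, Group, IndepVarComp, ExecComp

prob = Problem()
root = prob.root = Group()
root.add('comp', ExecComp('y = 3.0*x'))
root.add('p', IndepVarComp('x', 3.0))
root.connect('p.x', 'comp.x')

# The design variable 'p.x' (an IndepVarComp unknown) is also the objective.
prob.driver.add_desvar('p.x', 1.0)
prob.driver.add_objective('p.x')
prob.driver.add_constraint('comp.y', lower=-100.0)

prob.setup(check=False)
prob.driver._problem = prob  # same cheat the tests use so the Driver can give derivs
prob.run()

# d(p.x)/d(p.x) now comes back as 1.0 in 'fwd', 'rev', and 'fd' modes.
J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fwd', return_format='array')
print(J[0][0])  # 1.0
```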
openmdao/core/problem.py (5 changes: 4 additions & 1 deletion)
@@ -1161,6 +1161,8 @@ def _calc_gradient_fd(self, indep_list, unknown_list, return_format,
         cn_scale = {}

         abs_params = []
+        fd_unknowns = [var for var in unknown_list if var not in indep_list]
+        pass_unknowns = [var for var in unknown_list if var in indep_list]
         for name in indep_list:

             if name in unknowns:
@@ -1174,7 +1176,8 @@ def _calc_gradient_fd(self, indep_list, unknown_list, return_format,
                 abs_params.append(name)

         Jfd = root.fd_jacobian(params, unknowns, root.resids, total_derivs=True,
-                               fd_params=abs_params, fd_unknowns=unknown_list,
+                               fd_params=abs_params, fd_unknowns=fd_unknowns,
+                               pass_unknowns=pass_unknowns,
                                poi_indices=self._poi_indices,
                                qoi_indices=self._qoi_indices)
openmdao/core/system.py (23 changes: 21 additions & 2 deletions)
@@ -306,7 +306,7 @@ def _set_vars_as_remote(self):
                meta['remote'] = True

    def fd_jacobian(self, params, unknowns, resids, total_derivs=False,
-                    fd_params=None, fd_unknowns=None,
+                    fd_params=None, fd_unknowns=None, pass_unknowns=(),
                    poi_indices=None, qoi_indices=None):
        """Finite difference across all unknowns in this system w.r.t. all
        incoming params.
@@ -336,6 +336,11 @@ def fd_jacobian(self, params, unknowns, resids, total_derivs=False,
            calculated. This is used by problem to limit the derivatives that
            are taken.
+        pass_unknowns : list of strings, optional
+            List of outputs that are also finite difference inputs. OpenMDAO
+            supports specifying a design variable (or slice of one) as an objective,
+            so gradients of these are also required.
        poi_indices: dict of list of integers, optional
            This is a dict that contains the index values for each parameter of
            interest, so that we only finite difference those indices.
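For context, the caller in problem.py (first file above) splits the requested unknowns so that outputs which are themselves independent variables bypass the finite difference. A standalone sketch of that split, with 'p.x' and 'comp.y' as illustrative variable names:

```python
# 'p.x' is a design variable that is also requested as an objective;
# 'comp.y' is an ordinary output.
indep_list = ['p.x']
unknown_list = ['p.x', 'comp.y']

# Outputs that are not independent variables are finite differenced as usual ...
fd_unknowns = [var for var in unknown_list if var not in indep_list]
# ... while outputs that ARE independent variables are passed through and get
# identity entries filled in (see the jac-filling code in the hunk below).
pass_unknowns = [var for var in unknown_list if var in indep_list]

print(fd_unknowns)    # ['comp.y']
print(pass_unknowns)  # ['p.x']
```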
@@ -414,6 +419,7 @@ def fd_jacobian(self, params, unknowns, resids, total_derivs=False,
                inputs = params

            target_input = inputs._dat[p_name].val
+            param_src = None

            mydict = {}
            # since p_name is a promoted name, it could refer to multiple
@@ -437,7 +443,7 @@ def fd_jacobian(self, params, unknowns, resids, total_derivs=False,
                p_idxs = range(p_size)

            # Size our Outputs
-            for u_name in fd_unknowns:
+            for u_name in chain(fd_unknowns, pass_unknowns):
                if qoi_indices and u_name in qoi_indices:
                    u_size = len(qoi_indices[u_name])
                else:
@@ -525,6 +531,19 @@ def fd_jacobian(self, params, unknowns, resids, total_derivs=False,
                        fd_cols[(u_name, p_name, col)] = \
                            jac[u_name, p_name][:, col]

+                    # When an unknown is a parameter, it isn't calculated, so
+                    # we manually fill in identity by placing a 1 wherever it
+                    # is needed.
+                    for u_name in pass_unknowns:
+                        if u_name == param_src:
+                            if qoi_indices and u_name in qoi_indices:
+                                q_idxs = qoi_indices[u_name]
+                                if idx in q_idxs:
+                                    row = qoi_indices[u_name].index(idx)
+                                    jac[u_name, p_name][row][col] = 1.0
+                            else:
+                                jac[u_name, p_name] = np.array([[1.0]])
+
                    # Restore old residual
                    resultvec.vec[:] = cache1
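The net effect, when both the design variable and the objective are index slices of the same IndepVarComp output, is a selection matrix: a 1.0 wherever an objective index coincides with a design-variable index. A small sketch that reproduces the Jbase arrays asserted in the new tests below:

```python
import numpy as np

desvar_idxs = [1, 2, 3, 4, 5, 6, 7, 8]  # indices passed to add_desvar in the tests
obj_idxs = [5, 6, 7]                    # indices passed to add_objective in the tests

J = np.zeros((len(obj_idxs), len(desvar_idxs)))
for row, i in enumerate(obj_idxs):
    if i in desvar_idxs:
        J[row, desvar_idxs.index(i)] = 1.0

print(J)  # a single 1.0 per row, in columns 4, 5, and 6, matching Jbase below
```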

openmdao/drivers/test/test_driver_param_indices.py (173 changes: 172 additions & 1 deletion)
@@ -1,4 +1,4 @@
-""" Testing optimizer ScipyOptimize."""
+""" Testing desvar indices, param indices, and associated derivatives."""

 import os

@@ -312,5 +312,176 @@ def test_poi_index_w_irrelevant_var(self):
         assert_rel_error(self, J['con2.c']['p2.x'], -3.0, 1e-3)


+class TestMiscParamIndices(unittest.TestCase):
+
+    def test_param_as_obj_scaler_explicit(self):
+
+        prob = Problem()
+        root = prob.root = Group()
+        root.add('comp', ExecComp('y = 3.0*x'))
+        root.add('p', IndepVarComp('x', 3.0))
+        root.connect('p.x', 'comp.x')
+
+        prob.driver.add_desvar('p.x', 1.0)
+        prob.driver.add_objective('p.x')
+        prob.driver.add_constraint('comp.y', lower=-100.0)
+
+        prob.setup(check=False)
+
+        # Cheat to make Driver give derivs
+        prob.driver._problem = prob
+
+        prob.run()
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fwd', return_format='dict')
+        self.assertEqual(J['p.x']['p.x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fwd', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='rev', return_format='dict')
+        self.assertEqual(J['p.x']['p.x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='rev', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fd', return_format='dict')
+        self.assertEqual(J['p.x']['p.x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fd', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+    def test_param_as_obj_scaler_implicit(self):
+
+        prob = Problem()
+        root = prob.root = Group()
+        root.add('comp', ExecComp('y = 3.0*x'), promotes=['x', 'y'])
+        root.add('p', IndepVarComp('x', 3.0), promotes=['x'])
+
+        prob.driver.add_desvar('x', 1.0)
+        prob.driver.add_objective('x')
+        prob.driver.add_constraint('y', lower=-100.0)
+
+        prob.setup(check=False)
+
+        # Cheat to make Driver give derivs
+        prob.driver._problem = prob
+
+        prob.run()
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fwd', return_format='dict')
+        self.assertEqual(J['x']['x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fwd', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='rev', return_format='dict')
+        self.assertEqual(J['x']['x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='rev', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fd', return_format='dict')
+        self.assertEqual(J['x']['x'][0][0], 1.0)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fd', return_format='array')
+        self.assertEqual(J[0][0], 1.0)
+
+    def test_param_as_obj_1darray_explicit(self):
+
+        prob = Problem()
+        root = prob.root = Group()
+        root.add('comp', ExecComp('y = 3.0*x', x=np.zeros((10, )), y=np.zeros((10, )) ))
+        root.add('p', IndepVarComp('x', np.zeros((10, )) ))
+        root.connect('p.x', 'comp.x')
+
+        prob.driver.add_desvar('p.x', np.ones((8, )), indices=[1, 2, 3, 4, 5, 6, 7, 8])
+        prob.driver.add_objective('p.x', indices=[5, 6, 7])
+        prob.driver.add_constraint('comp.y', lower=-100.0)
+
+        prob.setup(check=False)
+
+        # Cheat to make Driver give derivs
+        prob.driver._problem = prob
+
+        prob.run()
+
+        Jbase = np.zeros((3, 8))
+        Jbase[0, 4] = 1.0
+        Jbase[1, 5] = 1.0
+        Jbase[2, 6] = 1.0
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fwd', return_format='dict')
+        diff = np.linalg.norm(J['p.x']['p.x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fwd', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='rev', return_format='dict')
+        diff = np.linalg.norm(J['p.x']['p.x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='rev', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fd', return_format='dict')
+        diff = np.linalg.norm(J['p.x']['p.x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['p.x'], ['p.x'], mode='fd', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+    def test_param_as_obj_1darray_implicit(self):
+
+        prob = Problem()
+        root = prob.root = Group()
+        root.add('comp', ExecComp('y = 3.0*x', x=np.zeros((10, )), y=np.zeros((10, )) ),
+                 promotes=['x', 'y'])
+        root.add('p', IndepVarComp('x', np.zeros((10, )) ), promotes=['x'])
+
+        prob.driver.add_desvar('x', np.ones((8, )), indices=[1, 2, 3, 4, 5, 6, 7, 8])
+        prob.driver.add_objective('x', indices=[5, 6, 7])
+        prob.driver.add_constraint('y', lower=-100.0)
+
+        prob.setup(check=False)
+
+        # Cheat to make Driver give derivs
+        prob.driver._problem = prob
+
+        prob.run()
+
+        Jbase = np.zeros((3, 8))
+        Jbase[0, 4] = 1.0
+        Jbase[1, 5] = 1.0
+        Jbase[2, 6] = 1.0
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fwd', return_format='dict')
+        diff = np.linalg.norm(J['x']['x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fwd', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='rev', return_format='dict')
+        diff = np.linalg.norm(J['x']['x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='rev', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fd', return_format='dict')
+        diff = np.linalg.norm(J['x']['x'] - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
+        J = prob.driver.calc_gradient(['x'], ['x'], mode='fd', return_format='array')
+        diff = np.linalg.norm(J - Jbase)
+        assert_rel_error(self, diff, 0.0, 1.0e-9)
+
 if __name__ == "__main__":
     unittest.main()
