Skip to content

Commit

Permalink
Merge ea19043 into 7ce6e68
Browse files Browse the repository at this point in the history
  • Loading branch information
DKilkenny committed Nov 13, 2019
2 parents 7ce6e68 + ea19043 commit 1a66b0d
Show file tree
Hide file tree
Showing 4 changed files with 78 additions and 11 deletions.
30 changes: 30 additions & 0 deletions openmdao/core/tests/test_coloring.py
Expand Up @@ -27,6 +27,7 @@
from openmdao.utils.mpi import MPI
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.test_suite.tot_jac_builder import TotJacBuilder
from openmdao.utils.general_utils import run_driver

import openmdao.test_suite

Expand Down Expand Up @@ -155,6 +156,7 @@ def run_opt(driver_class, mode, assemble_type=None, color_info=None, derivs=True
p.driver.declare_coloring(tol=1e-15)
del options['dynamic_total_coloring']

p.driver.options['debug_print'] = ['totals']
p.driver.options.update(options)

p.model.add_design_var('x')
Expand Down Expand Up @@ -345,6 +347,34 @@ def test_dynamic_total_coloring_pyoptsparse_slsqp_auto(self):
rep = repr(p_color.driver._coloring_info['coloring'])
self.assertEqual(rep.replace('L', ''), 'Coloring (direction: fwd, ncolors: 5, shape: (22, 21)')

@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_print_options_total_with_coloring_fwd(self):
    """Verify debug_print totals output when fwd-mode dynamic total coloring is active."""
    # baseline run without coloring, then the colored run we actually inspect
    p = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT', print_results=False)
    p_color = run_opt(pyOptSparseDriver, 'fwd', optimizer='SNOPT',
                      print_results=False, dynamic_total_coloring=True)

    failed, output = run_driver(p_color)
    self.assertFalse(failed, "Optimization failed.")

    # debug_print should report mode, the variable being solved, and timing info
    expected_fragments = (
        'In mode: fwd, Solving variable: indeps.y',
        'Sub Indices: [1 3 5 7 9]',
        'Elapsed Time:',
    )
    for fragment in expected_fragments:
        self.assertTrue(fragment in output)

@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
def test_print_options_total_with_coloring_rev(self):
    """Verify debug_print totals output when rev-mode dynamic total coloring is active."""
    # first, run w/o coloring
    p = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False)
    p_color = run_opt(pyOptSparseDriver, 'rev', optimizer='SNOPT', print_results=False,
                      dynamic_total_coloring=True)

    failed, output = run_driver(p_color)

    self.assertFalse(failed, "Optimization failed.")

    # debug_print should report mode, the variable being solved, and timing info
    self.assertTrue('In mode: rev, Solving variable: r_con.g' in output)
    self.assertTrue('Sub Indices: [2 0]' in output)
    self.assertTrue('Elapsed Time:' in output)

@use_tempdirs
@unittest.skipUnless(OPTIMIZER == 'SNOPT', "This test requires SNOPT.")
Expand Down
44 changes: 42 additions & 2 deletions openmdao/core/total_jac.py
Expand Up @@ -1197,6 +1197,40 @@ def par_deriv_matmat_jac_setter(self, inds, mode):
for matmat_idxs in inds:
self.matmat_jac_setter(matmat_idxs, mode)

def local_indices(self, inds):
    """
    Map global simul-coloring jacobian indices to a variable name and local indices.

    In 'fwd' mode the coloring's column variables are used; otherwise its row
    variables are used.

    Parameters
    ----------
    inds : int or list of int
        Global index, or list of global indices, into the concatenated
        variable vector.  For a list, the name is taken from the first entry.

    Returns
    -------
    tuple
        (variable name at the (first) index, local index/indices within that
        variable as a numpy scalar or array).
    """
    # hoist the repeated deep lookup; it was previously evaluated four times
    coloring = self.idx_iter_dict[self.mode]['@simul_coloring'][0]['coloring']

    if self.mode == 'fwd':
        var_info = zip(coloring._col_vars, coloring._col_var_sizes)
    else:
        var_info = zip(coloring._row_vars, coloring._row_var_sizes)

    # build flat, aligned arrays of variable names and their local positions
    names = []
    local_indices = []
    for name, size in var_info:
        names.append(np.repeat(name, size))
        local_indices.append(np.arange(size))

    names_array = np.concatenate(names)
    # keep this a numpy int array so fancy indexing below yields an array
    # that formats as e.g. '[2 0]' in the debug_print output
    local_positions_array = np.concatenate(local_indices)

    if isinstance(inds, list):
        idx_name = names_array[inds[0]]
    else:
        idx_name = names_array[inds]
    idx = local_positions_array[inds]

    return idx_name, idx

def compute_totals(self):
"""
Compute derivatives of desired quantities with respect to desired inputs.
Expand Down Expand Up @@ -1242,8 +1276,14 @@ def compute_totals(self):
varlist = '(' + ', '.join([name for name in par_deriv[key]]) + ')'
print('Solving color:', key, varlist)
else:
print('Solving variable: {0} \nIn mode: {1} \n'
'Sub Indices: {2}'.format(key, mode, inds))
if key == '@simul_coloring':
local_inds = self.local_indices(inds)
print('In mode: {0}, Solving variable: {1}\n'
'Sub Indices: {2}'.format(
mode, local_inds[0], local_inds[1]))
else:
print('In mode: {0}, Solving variable: {1}\n'
'Sub Indices: {2}'.format(mode, key, inds))

sys.stdout.flush()
t0 = time.time()
Expand Down
8 changes: 3 additions & 5 deletions openmdao/drivers/tests/test_pyoptsparse_driver.py
Expand Up @@ -1373,11 +1373,9 @@ def test_debug_print_option_totals(self):
self.assertFalse(failed, "Optimization failed, info = " +
str(prob.driver.pyopt_solution.optInform))

self.assertTrue('Solving variable: comp.f_xy' in output)
self.assertTrue('In mode: rev' in output)
self.assertTrue('In mode: rev, Solving variable: comp.f_xy' in output)
self.assertTrue('Sub Indices: 0' in output)
self.assertTrue('Elapsed Time:' in output)
self.assertTrue('Solving variable: con.c' in output)

prob = om.Problem()
model = prob.model
Expand Down Expand Up @@ -1409,8 +1407,8 @@ def test_debug_print_option_totals(self):
self.assertFalse(failed, "Optimization failed, info = " +
str(prob.driver.pyopt_solution.optInform))

self.assertTrue('Solving variable: p1.x' in output)
self.assertTrue('Solving variable: p2.y' in output)
self.assertTrue('In mode: fwd, Solving variable: p1.x' in output)
self.assertTrue('In mode: fwd, Solving variable: p2.y' in output)

def test_debug_print_option(self):

Expand Down
7 changes: 3 additions & 4 deletions openmdao/drivers/tests/test_scipy_optimizer.py
Expand Up @@ -1206,8 +1206,7 @@ def test_debug_print_option_totals(self):

self.assertFalse(failed, "Optimization failed.")

self.assertTrue('Solving variable: comp.f_xy' in output)
self.assertTrue('In mode: rev ' in output)
self.assertTrue('In mode: rev, Solving variable: comp.f_xy' in output)
self.assertTrue('Sub Indices: 0' in output)
self.assertTrue('Elapsed Time:' in output)
self.assertTrue('Solving variable: con.c' in output)
Expand Down Expand Up @@ -1240,8 +1239,8 @@ def test_debug_print_option_totals(self):

self.assertFalse(failed, "Optimization failed.")

self.assertTrue('Solving variable: p1.x' in output)
self.assertTrue('Solving variable: p2.y' in output)
self.assertTrue('In mode: fwd, Solving variable: p1.x' in output)
self.assertTrue('In mode: fwd, Solving variable: p2.y' in output)

def test_debug_print_option(self):

Expand Down

0 comments on commit 1a66b0d

Please sign in to comment.