
sync #121

Merged on Dec 1, 2020 (34 commits)
Commits
93d667f
First draft
DKilkenny Nov 9, 2020
8c7d9c4
reshaping
DKilkenny Nov 10, 2020
25fb085
added get_remote checks
DKilkenny Nov 12, 2020
f0c8d3b
gathering and casting the correct values
DKilkenny Nov 13, 2020
fbebc6c
issues with J_final
DKilkenny Nov 16, 2020
bf526a7
Broken
DKilkenny Nov 16, 2020
84c6e42
progress
naylor-b Nov 17, 2020
1bd3f40
fixes
naylor-b Nov 17, 2020
6c942c7
test added
DKilkenny Nov 17, 2020
64f1fbe
Fixed test
DKilkenny Nov 17, 2020
d6d4907
Latest, broken
DKilkenny Nov 19, 2020
73800f1
tests passing
naylor-b Nov 19, 2020
296d91e
interim
naylor-b Nov 19, 2020
8e41162
moved test and fixed issue with auto assignment of src_indices to non…
naylor-b Nov 19, 2020
de7337d
Fixed deprecation warning when user passed dataframe to discrete input
DKilkenny Nov 20, 2020
49f98ff
test fix
DKilkenny Nov 20, 2020
613e35a
Re-added dict attr check
DKilkenny Nov 20, 2020
aa7d27e
interim - issue with different jacs in rev mode (may be allowed)
naylor-b Nov 23, 2020
9fc84d2
Added complex check to make_serializable
DKilkenny Nov 23, 2020
963e594
rearranged mpi test and added fwd test
naylor-b Nov 23, 2020
524f791
Stopped memory leak by changing key type in Group._scope_cache
tadkollar Nov 23, 2020
5bdeaf8
Merge pull request #1781 from naylor-b/mpi_debug
swryan Nov 23, 2020
b628c04
Switched to using pathname as the _scope_cache key
tadkollar Nov 23, 2020
96588d5
Merge pull request #1780 from DKilkenny/pandas_serial
swryan Nov 24, 2020
25a6b49
Added a test for the setup memory leak
tadkollar Nov 24, 2020
3f84803
Switched to using System.msginfo as key since pathname can be none/empty
tadkollar Nov 24, 2020
53df179
Check for excl_sub is None
tadkollar Nov 24, 2020
8a6fa2a
pep8 fix; raised memleak limit slightly to allow for different systems
tadkollar Nov 24, 2020
e51b513
pep8 fix; raised memleak limit slightly to allow for different systems
tadkollar Nov 24, 2020
7517ec5
Merge pull request #1782 from tadkollar/memleak1746
swryan Nov 24, 2020
b2efbb2
tests passing
naylor-b Nov 25, 2020
72580de
merged out
naylor-b Nov 25, 2020
764abdd
cleanup
naylor-b Nov 25, 2020
74e4583
Merge pull request #1783 from naylor-b/danny_distrib
swryan Dec 1, 2020
15 changes: 11 additions & 4 deletions openmdao/core/component.py
@@ -869,6 +869,7 @@ def _update_dist_src_indices(self, abs_in2out, all_abs2meta, all_abs2idx, all_si
added_src_inds = set()
for i, iname in enumerate(self._var_allprocs_abs2meta['input']):
if iname in abs2meta_in and abs2meta_in[iname]['src_indices'] is None:
meta_in = abs2meta_in[iname]
src = abs_in2out[iname]
out_i = all_abs2idx[src]
nzs = np.nonzero(sizes_out[:, out_i])[0]
@@ -886,13 +887,19 @@
raise RuntimeError(f"{self.msginfo}: Can't determine src_indices "
f"automatically for input '{iname}'. They must be "
"supplied manually.")
simple_warning(f"{self.msginfo}: Component is distributed but input '{iname}' was "
"added without src_indices. Setting src_indices to "
f"range({offset}, {end}).")
abs2meta_in[iname]['src_indices'] = np.arange(offset, end, dtype=INT_DTYPE)

inds = np.arange(offset, end, dtype=INT_DTYPE)
if meta_in['shape'] != inds.shape:
inds = inds.reshape(meta_in['shape'])
meta_in['src_indices'] = inds
meta_in['flat_src_indices'] = True
all_abs2meta_in[iname]['has_src_indices'] = True
added_src_inds.add(iname)

simple_warning(f"{self.msginfo}: Component is distributed but input '{iname}' was "
"added without src_indices. Setting src_indices to "
f"np.arange({offset}, {end}, dtype=int).reshape({inds.shape}).")

return added_src_inds

def _approx_partials(self, of, wrt, method='fd', **kwargs):
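
The change above can be summarized with a small standalone sketch (illustrative only, not the OpenMDAO internals): when a distributed component's input is added without src_indices, a flat index range covering this rank's slice of the source is generated, reshaped to the input's declared shape if needed, and flagged with flat_src_indices=True.

import numpy as np

def auto_src_indices(offset, end, input_shape):
    # Sketch of the new behavior; `offset` and `end` stand in for this rank's
    # slice of the distributed source variable (illustrative names).
    inds = np.arange(offset, end, dtype=int)
    if inds.shape != input_shape:
        # keep e.g. a (3, 2) input paired with (3, 2)-shaped indices
        inds = inds.reshape(input_shape)
    return inds  # stored as src_indices along with flat_src_indices=True

# rank 0 owning entries 0..5 of the source, with a (3, 2) input:
print(auto_src_indices(0, 6, (3, 2)))
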
2 changes: 1 addition & 1 deletion openmdao/core/driver.py
@@ -67,7 +67,7 @@ class Driver(object):
_designvars_discrete : list
List of design variables that are discrete.
_dist_driver_vars : dict
Dict of constraints that are distributed outputs. Key is rank, values are
Dict of constraints that are distributed outputs. Key is abs variable name, values are
(local indices, local sizes).
_cons : dict
Contains all constraint info.
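
For illustration only, a hypothetical entry in that dict (names and numbers invented) pairs the absolute variable name with this rank's local indices and the per-rank sizes:

import numpy as np

# hypothetical: this rank owns the first 3 entries of 'dp.x'; ranks hold 3 and 2
dist_driver_vars = {'dp.x': (np.arange(3), [3, 2])}
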
20 changes: 16 additions & 4 deletions openmdao/core/group.py
@@ -329,8 +329,17 @@ def _get_scope(self, excl_sub=None):
(set, set)
Sets of output and input variables.
"""
if excl_sub is None:
cache_key = None
else:
cache_key = excl_sub.pathname

try:
return self._scope_cache[excl_sub]
io_vars = self._scope_cache[cache_key]

# Make sure they're the same subsystem instance before returning
if io_vars[2] is excl_sub:
return (io_vars[:2])
except KeyError:
pass

@@ -356,7 +365,10 @@ def _get_scope(self, excl_sub=None):
scope_in.add(abs_in)
scope_in = frozenset(scope_in)

self._scope_cache[excl_sub] = (scope_out, scope_in)
# Use the pathname as the dict key instead of the object itself. When
# the object is used as the key, memory leaks result from multiple
# calls to setup().
self._scope_cache[cache_key] = (scope_out, scope_in, excl_sub)
return scope_out, scope_in

def _compute_root_scale_factors(self):
@@ -2070,8 +2082,8 @@ def _setup_connections(self):
fail = True
else:
for d in range(source_dimensions):
if all_abs_out['distributed'] is True or \
allprocs_abs2meta_in[abs_in]['distributed'] is True:
if all_abs_out['distributed'] or \
allprocs_abs2meta_in[abs_in]['distributed']:
d_size = out_shape[d] * self.comm.size
else:
d_size = out_shape[d]
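
A condensed sketch of the caching pattern introduced above (illustrative, with a stub standing in for the real scope computation): keying on the pathname string means a subsystem re-created by a later setup() overwrites the old cache entry instead of accumulating beside it, while storing the instance in the value lets a stale hit be detected and recomputed.

class ScopeCacheSketch:
    """Illustrative only; mirrors the pattern now used in Group._get_scope."""

    def __init__(self):
        self._scope_cache = {}

    def _compute_scope(self, excl_sub):
        return frozenset(), frozenset()  # stub for the real scope computation

    def get_scope(self, excl_sub=None):
        cache_key = None if excl_sub is None else excl_sub.pathname
        try:
            scope_out, scope_in, cached_sub = self._scope_cache[cache_key]
            if cached_sub is excl_sub:  # same instance, safe to reuse
                return scope_out, scope_in
        except KeyError:
            pass
        scope_out, scope_in = self._compute_scope(excl_sub)
        # String key: a re-created subsystem replaces the stale entry rather
        # than adding a new one, which is what leaked memory before.
        self._scope_cache[cache_key] = (scope_out, scope_in, excl_sub)
        return scope_out, scope_in
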
10 changes: 6 additions & 4 deletions openmdao/core/problem.py
@@ -1152,8 +1152,7 @@ def check_partials(self, out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes
for inp in in_list:
inp_abs = rel_name2abs_name(comp, inp)
if mode == 'fwd':
directional = inp in local_opts and \
local_opts[inp]['directional'] is True
directional = inp in local_opts and local_opts[inp]['directional']
else:
directional = c_name in mfree_directions

@@ -1573,7 +1572,7 @@ def check_totals(self, of=None, wrt=None, out_stream=_DEFAULT_OUT_STREAM, compac
return data['']

def compute_totals(self, of=None, wrt=None, return_format='flat_dict', debug_print=False,
driver_scaling=False, use_abs_names=False):
driver_scaling=False, use_abs_names=False, get_remote=True):
"""
Compute derivatives of desired quantities with respect to desired inputs.

@@ -1597,6 +1596,8 @@ def compute_totals(self, of=None, wrt=None, return_format='flat_dict', debug_pri
add_constraint were called on the model. Default is False, which is unscaled.
use_abs_names : bool
Set to True when passing in absolute names to skip some translation steps.
get_remote : bool
If True, the default, the full distributed total jacobian will be retrieved.

Returns
-------
@@ -1625,7 +1626,8 @@ def compute_totals(self, of=None, wrt=None, return_format='flat_dict', debug_pri
return total_info.compute_totals_approx(initialize=True)
else:
total_info = _TotalJacInfo(self, of, wrt, use_abs_names, return_format,
debug_print=debug_print, driver_scaling=driver_scaling)
debug_print=debug_print, driver_scaling=driver_scaling,
get_remote=get_remote)
return total_info.compute_totals()

def set_solver_print(self, level=2, depth=1e99, type_='all'):
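
As a usage sketch (assuming a Problem named `prob` with a distributed model already set up, e.g. the DistribParaboloid cases in the tests below), the new keyword controls whether the total Jacobian is gathered across processes:

# default: full total Jacobian, gathered and identical on every process
J_full = prob.compute_totals(of=['y'], wrt=['x'])

# get_remote=False: each process receives only its local portion, skipping
# the gather of remote/distributed pieces
J_local = prob.compute_totals(of=['y'], wrt=['x'], get_remote=False)
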
211 changes: 209 additions & 2 deletions openmdao/core/tests/test_check_derivs.py
@@ -17,7 +17,7 @@
SellarDis2withDerivatives
from openmdao.test_suite.components.simple_comps import DoubleArrayComp
from openmdao.test_suite.components.array_comp import ArrayComp
from openmdao.test_suite.groups.parallel_groups import FanInSubbedIDVC
from openmdao.test_suite.groups.parallel_groups import FanInSubbedIDVC, Diamond
from openmdao.utils.assert_utils import assert_near_equal, assert_warning, assert_check_partials
from openmdao.utils.mpi import MPI

@@ -118,7 +118,6 @@ def compute_partials(self, inputs, partials):
J['y', 'x1'] = np.array([4.0])
J['y', 'x2'] = np.array([40])


class TestProblemCheckPartials(unittest.TestCase):

def test_incorrect_jacobian(self):
Expand Down Expand Up @@ -2287,6 +2286,214 @@ def test_set_method_and_step_bug(self):
assert_check_partials(J, atol=1e-5, rtol=1e-5)


class DistribParaboloid(om.ExplicitComponent):

def setup(self):
self.options['distributed'] = True

if self.comm.rank == 0:
ndvs = 3
else:
ndvs = 2

self.add_input('w', val=1.) # this will connect to a non-distributed IVC
self.add_input('x', shape=ndvs) # this will connect to a distributed IVC

self.add_output('y', shape=2) # all-gathered output, duplicated on all procs
self.add_output('z', shape=ndvs) # distributed output
self.declare_partials('y', 'x')
self.declare_partials('y', 'w')
self.declare_partials('z', 'x')

def compute(self, inputs, outputs):
x = inputs['x']
local_y = np.sum((x-5)**2)
y_g = np.zeros(self.comm.size)
self.comm.Allgather(local_y, y_g)
val = np.sum(y_g) + (inputs['w']-10)**2
outputs['y'] = np.array([val, val*3.])
outputs['z'] = x**2

def compute_partials(self, inputs, J):
x = inputs['x']
J['y', 'x'] = np.array([2*(x-5), 6*(x-5)])
J['y', 'w'] = np.array([2*(inputs['w']-10), 6*(inputs['w']-10)])
J['z', 'x'] = np.diag(2*x)


class DistribParaboloid2D(om.ExplicitComponent):

def setup(self):

comm = self.comm
rank = comm.rank

if rank == 0:
vshape = (3,2)
else:
vshape = (2,2)

self.options['distributed'] = True

self.add_input('w', val=1., src_indices=np.array([1])) # this will connect to a non-distributed IVC
self.add_input('x', shape=vshape) # this will connect to a distributed IVC

self.add_output('y') # all-gathered output, duplicated on all procs
self.add_output('z', shape=vshape) # distributed output
self.declare_partials('y', 'x')
self.declare_partials('y', 'w')
self.declare_partials('z', 'x')

def compute(self, inputs, outputs):
x = inputs['x']
local_y = np.sum((x-5)**2)
y_g = np.zeros(self.comm.size)
self.comm.Allgather(local_y, y_g)
outputs['y'] = np.sum(y_g) + (inputs['w']-10)**2
outputs['z'] = x**2

def compute_partials(self, inputs, J):
x = inputs['x'].flatten()
J['y', 'x'] = 2*(x-5)
J['y', 'w'] = 2*(inputs['w']-10)
J['z', 'x'] = np.diag(2*x)


@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class TestProblemComputeTotalsGetRemoteFalse(unittest.TestCase):

N_PROCS = 2

def _do_compute_totals(self, mode):
comm = MPI.COMM_WORLD

p = om.Problem()
d_ivc = p.model.add_subsystem('distrib_ivc',
om.IndepVarComp(distributed=True),
promotes=['*'])
if comm.rank == 0:
ndvs = 3
else:
ndvs = 2
d_ivc.add_output('x', 2*np.ones(ndvs))

ivc = p.model.add_subsystem('ivc',
om.IndepVarComp(distributed=False),
promotes=['*'])
ivc.add_output('w', 2.0)
p.model.add_subsystem('dp', DistribParaboloid(), promotes=['*'])

p.model.add_design_var('x', lower=-100, upper=100)
p.model.add_objective('y')

p.setup(mode=mode)
p.run_model()

dv_vals = p.driver.get_design_var_values(get_remote=False)

# Compute totals and check the length of the gradient array on each proc
objcongrad = p.compute_totals(get_remote=False)

# Check the values of the gradient array
assert_near_equal(objcongrad[('dp.y', 'distrib_ivc.x')][0], -6.0*np.ones(ndvs))
assert_near_equal(objcongrad[('dp.y', 'distrib_ivc.x')][1], -18.0*np.ones(ndvs))

def test_distrib_compute_totals_fwd(self):
self._do_compute_totals('fwd')

def test_distrib_compute_totals_rev(self):
self._do_compute_totals('rev')

def _do_compute_totals_2D(self, mode):
# this test has some non-flat variables
comm = MPI.COMM_WORLD

p = om.Problem()
d_ivc = p.model.add_subsystem('distrib_ivc',
om.IndepVarComp(distributed=True),
promotes=['*'])
if comm.rank == 0:
ndvs = 6
two_d = (3,2)
else:
ndvs = 4
two_d = (2,2)

d_ivc.add_output('x', 2*np.ones(two_d))

ivc = p.model.add_subsystem('ivc',
om.IndepVarComp(distributed=False),
promotes=['*'])
ivc.add_output('w', 2.0)
p.model.add_subsystem('dp', DistribParaboloid2D(), promotes=['*'])

p.model.add_design_var('x', lower=-100, upper=100)
p.model.add_objective('y')

p.setup(mode=mode)
p.run_model()

dv_vals = p.driver.get_design_var_values(get_remote=False)

# Compute totals and check the length of the gradient array on each proc
objcongrad = p.compute_totals(get_remote=False)

# Check the values of the gradient array
assert_near_equal(objcongrad[('dp.y', 'distrib_ivc.x')][0], -6.0*np.ones(ndvs))

def test_distrib_compute_totals_2D_fwd(self):
self._do_compute_totals_2D('fwd')

def test_distrib_compute_totals_2D_rev(self):
self._do_compute_totals_2D('rev')

def _remotevar_compute_totals(self, mode):
indep_list = ['iv.x']
unknown_list = [
'c1.y1',
'c1.y2',
'sub.c2.y1',
'sub.c3.y1',
'c4.y1',
'c4.y2',
]

full_expected = {
('c1.y1', 'iv.x'): [[8.]],
('c1.y2', 'iv.x'): [[3.]],
('sub.c2.y1', 'iv.x'): [[4.]],
('sub.c3.y1', 'iv.x'): [[10.5]],
('c4.y1', 'iv.x'): [[25.]],
('c4.y2', 'iv.x'): [[-40.5]],
}

prob = om.Problem()
prob.model = Diamond()

prob.setup(mode=mode)
prob.set_solver_print(level=0)
prob.run_model()

assert_near_equal(prob['c4.y1'], 46.0, 1e-6)
assert_near_equal(prob['c4.y2'], -93.0, 1e-6)

J = prob.compute_totals(of=unknown_list, wrt=indep_list)
for key, val in full_expected.items():
assert_near_equal(J[key], val, 1e-6)

reduced_expected = {key: v for key, v in full_expected.items() if key[0] in prob.model._var_abs2meta['output']}

J = prob.compute_totals(of=unknown_list, wrt=indep_list, get_remote=False)
for key, val in reduced_expected.items():
assert_near_equal(J[key], val, 1e-6)
self.assertEqual(len(J), len(reduced_expected))

def test_remotevar_compute_totals_fwd(self):
self._remotevar_compute_totals('fwd')

def test_remotevar_compute_totals_rev(self):
self._remotevar_compute_totals('rev')

class TestProblemCheckTotals(unittest.TestCase):

def test_cs(self):
31 changes: 30 additions & 1 deletion openmdao/core/tests/test_discrete.py
@@ -12,7 +12,7 @@
from openmdao.visualization.n2_viewer.n2_viewer import _get_viewer_data
from openmdao.test_suite.components.sellar import StateConnection, \
SellarDis1withDerivatives, SellarDis2withDerivatives
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.assert_utils import assert_near_equal, assert_no_warning
from openmdao.utils.general_utils import remove_whitespace
from openmdao.utils.logger_utils import TestLogger

@@ -676,6 +676,35 @@ def compute(self, inputs, outputs):
"compute() takes 3 positional arguments but 5 were given")
self.assertEqual(str(cm.exception), msg)

def test_discrete_input_dataframe(self):
class OMDataFrame:
def __dict__(self):
pass

class ModCompEx2(ModCompEx):
def setup(self):
super().setup()
self.add_discrete_input('test', OMDataFrame())

prob = om.Problem()
model = prob.model

indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])
indep.add_discrete_output('x', 11)
model.add_subsystem('comp', ModCompEx2(3), promotes=['*'])

rec = om.SqliteRecorder('test')
prob.driver.add_recorder(rec)
prob.add_recorder(rec)

prob.setup()
msg = ("DeprecationWarning: The truth value of an empty array is ambiguous. Returning"
"False, but in future this will result in an error. Use `array.size > 0` to check "
"that an array is not empty.")

with assert_no_warning(DeprecationWarning, msg):
prob.run_model()

class SolverDiscreteTestCase(unittest.TestCase):
def _setup_model(self, solver_class):
prob = om.Problem()
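
The test above guards against truth-testing a dataframe-like discrete value during recording, which raises NumPy's ambiguous-truth-value DeprecationWarning. A minimal sketch of the kind of guard involved (illustrative; the actual fix lives in OpenMDAO's serialization path, e.g. make_serializable and the recorder's dict-attribute check):

import numpy as np

def has_value(val):
    # Avoid `if val:` on array-like objects; truth-testing an empty array
    # raises NumPy's DeprecationWarning (and a multi-element array raises
    # ValueError), so check size or identity explicitly instead.
    if isinstance(val, np.ndarray):
        return val.size > 0
    return val is not None
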