Commit b60f684

fixed conflict in problem.py

naylor-b committed Feb 23, 2024
2 parents 5ea6dd9 + 56f4bb7
Showing 33 changed files with 618 additions and 326 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/openmdao_test_workflow.yml
@@ -75,8 +75,8 @@ jobs:
# test oldest supported versions
- NAME: Ubuntu Oldest
OS: ubuntu-latest
-   PY: '3.7'
-   NUMPY: '1.21'
+   PY: '3.8'
+   NUMPY: '1.22'
SCIPY: '1.7'
OPENMPI: '4.0'
MPI4PY: '3.0'
2 changes: 1 addition & 1 deletion README.md
@@ -73,7 +73,7 @@ generator. You can install everything needed for development by running:

## OpenMDAO Versions

- **OpenMDAO 3.x.y** represents the current, supported version. It requires Python 3.7
+ **OpenMDAO 3.x.y** represents the current, supported version. It requires Python 3.8
or later and is maintained [here][4]. To upgrade to the latest release, run:

pip install --upgrade openmdao
4 changes: 0 additions & 4 deletions openmdao/core/component.py
@@ -201,10 +201,6 @@ def _setup_procs(self, pathname, comm, mode, prob_meta):
self._has_distrib_vars = self._has_distrib_outputs = False

for meta in self._static_var_rel2meta.values():
- # variable isn't distributed if we're only running on 1 proc
- if nprocs == 1 and 'distributed' in meta and meta['distributed']:
-     meta['distributed'] = False
-
# reset shape if any dynamic shape parameters are set in case this is a resetup
# NOTE: this is necessary because we allow variables to be added in __init__.
if 'shape_by_conn' in meta and (meta['shape_by_conn'] or
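Note: the deleted block above silently downgraded `distributed=True` variables to serial whenever the component happened to run on a single process; with it removed, the declared metadata survives setup regardless of comm size, presumably in support of the distributed-connection test added in `test_distribcomp.py` below. A minimal sketch of a component whose declaration is now preserved as-is (standard OpenMDAO API; the component itself is illustrative):

```python
import openmdao.api as om

class DistOutComp(om.ExplicitComponent):
    """Illustrative component: its 'distributed' metadata is no longer
    reset to False during setup, even under a 1-process comm."""

    def setup(self):
        # stays distributed=True whether we run on 1 proc or many
        self.add_output('y', shape=3, distributed=True)

    def compute(self, inputs, outputs):
        outputs['y'] = 1.0
```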
22 changes: 5 additions & 17 deletions openmdao/core/problem.py
@@ -936,19 +936,6 @@ def setup(self, check=False, logger=None, mode='auto', force_alloc_complex=False
model = self.model
comm = self.comm

- if sys.version_info.minor < 8:
-     sv = sys.version_info
-     msg = f'OpenMDAO support for Python version {sv.major}.{sv.minor} will end soon.'
-     try:
-         from IPython import get_ipython
-         ip = get_ipython()
-         if ip is None or ip.config is None or 'IPKernelApp' not in ip.config:
-             warn_deprecation(msg)
-     except ImportError:
-         warn_deprecation(msg)
-     except AttributeError:
-         warn_deprecation(msg)
-
if not isinstance(self.model, Group):
raise TypeError("The model for this Problem is of type "
f"'{self.model.__class__.__name__}'. "
@@ -1214,11 +1201,12 @@ def check_partials(out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes
excludes = [excludes] if isinstance(excludes, str) else excludes

comps = []
- under_CI = env_truthy('OPENMDAO_CHECK_ALL_PARTIALS')
+
+ # OPENMDAO_CHECK_ALL_PARTIALS overrides _no_check_partials (used for testing)
+ force_check_partials = env_truthy('OPENMDAO_CHECK_ALL_PARTIALS')

for comp in model.system_iter(typ=Component, include_self=True):
-     # if we're under CI, do all of the partials, ignoring _no_check_partials
-     if comp._no_check_partials and not under_CI:
+     if comp._no_check_partials and not force_check_partials:
continue

# skip any Component with no outputs
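For reference, `env_truthy` tests whether an environment variable is set to a truthy value, which lets a test runner force partials checks even on components flagged with `_no_check_partials` (e.g. `OPENMDAO_CHECK_ALL_PARTIALS=1 python -m pytest`). A rough sketch of the assumed semantics (the exact set of falsey spellings is an assumption; the real helper lives in OpenMDAO's utilities):

```python
import os

def env_truthy(env_var):
    # Assumed behavior: unset or a conventional "false" spelling is falsey;
    # anything else ('1', 'true', 'yes', ...) is truthy.
    return os.environ.get(env_var, '0').strip().lower() not in ('', '0', 'false', 'no', 'off')
```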
@@ -1405,7 +1393,7 @@ def check_partials(out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes
# Matrix Vector Product
self._metadata['checking'] = True
try:
-     comp._apply_linear(None, mode)
+     comp.run_apply_linear(mode)
finally:
self._metadata['checking'] = False

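Note: the change above swaps the private `comp._apply_linear(None, mode)` call for the public `comp.run_apply_linear(mode)`; the explicit `jac` placeholder argument disappears, presumably because the public wrapper handles that bookkeeping internally before delegating to the private method.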
40 changes: 27 additions & 13 deletions openmdao/core/system.py
@@ -2128,13 +2128,28 @@ def _setup_driver_units(self, abs2meta=None):
"were specified."
raise RuntimeError(msg.format(self.msginfo, name, var_units, units))

- factor, offset = unit_conversion(var_units, units)
- base_adder, base_scaler = determine_adder_scaler(None, None,
-                                                  meta['adder'],
-                                                  meta['scaler'])
-
- meta['total_adder'] = offset + base_adder / factor
- meta['total_scaler'] = base_scaler * factor
+ # Derivation of the total scaler and total adder for design variables:
+ # Given a design variable with base value y,
+ # first apply the desired unit conversion:
+ #     y_in_desired_units = unit_scaler * (y + unit_adder)
+ # then apply the user-declared scaling:
+ #     y_opt = declared_scaler * (y_in_desired_units + declared_adder)
+ # Thus
+ #     y_opt = declared_scaler * (unit_scaler * (y + unit_adder) + declared_adder)
+ # and, collecting terms:
+ #     y_opt = [declared_scaler * unit_scaler]
+ #             * (y + unit_adder + declared_adder / unit_scaler)
+ # So the total_scaler and total_adder for the optimizer are:
+ #     total_scaler = declared_scaler * unit_scaler
+ #     total_adder = unit_adder + declared_adder / unit_scaler
+
+ unit_scaler, unit_adder = unit_conversion(var_units, units)
+ declared_adder, declared_scaler = determine_adder_scaler(None, None,
+                                                          meta['adder'],
+                                                          meta['scaler'])
+
+ meta['total_adder'] = unit_adder + declared_adder / unit_scaler
+ meta['total_scaler'] = declared_scaler * unit_scaler

if meta['total_scaler'] is not None:
has_scaling = True
@@ -2172,13 +2187,12 @@ def _setup_driver_units(self, abs2meta=None):
raise RuntimeError(msg.format(self.msginfo, type_dict[meta['type']],
name, src_units, units))

- factor, offset = unit_conversion(src_units, units)
- base_adder, base_scaler = determine_adder_scaler(None, None,
-                                                  meta['adder'],
-                                                  meta['scaler'])
+ unit_scaler, unit_adder = unit_conversion(src_units, units)
+ declared_adder, declared_scaler = \
+     determine_adder_scaler(None, None, meta['adder'], meta['scaler'])

- meta['total_scaler'] = base_scaler * factor
- meta['total_adder'] = offset + base_adder / factor
+ meta['total_scaler'] = declared_scaler * unit_scaler
+ meta['total_adder'] = unit_adder + declared_adder / unit_scaler

if meta['total_scaler'] is not None:
has_scaling = True
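The collected form in the new comment can be sanity-checked numerically. A small sketch with arbitrary values, assuming `unit_conversion` returns the `(scaler, adder)` pair applied as `converted = scaler * (value + adder)`, which is the convention the comment states:

```python
# degC -> degF written in scaler/adder form: F = 1.8 * (C + 32/1.8)
unit_scaler, unit_adder = 1.8, 32.0 / 1.8
declared_scaler, declared_adder = 2.0, -10.0   # arbitrary user-declared scaling
y = 25.0                                       # design variable value in degC

# two-step application: unit conversion, then declared scaling
step_by_step = declared_scaler * (unit_scaler * (y + unit_adder) + declared_adder)

# collapsed form from the derivation above
total_scaler = declared_scaler * unit_scaler
total_adder = unit_adder + declared_adder / unit_scaler
collapsed = total_scaler * (y + total_adder)

assert abs(step_by_step - collapsed) < 1e-9   # both give 134.0
```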
54 changes: 28 additions & 26 deletions openmdao/core/tests/test_check_partials.py
@@ -1983,17 +1983,14 @@ def create_problem(force_alloc_complex=False):
prob.setup(force_alloc_complex=force_alloc_complex)
return prob, parab

expected_check_partials_error = f"Problem .*: Checking partials with respect " \
"to variable '{var}' in component " \
"'{comp.pathname}' using the same " \
"method and options as are used to compute the " \
"component's derivatives " \
"will not provide any relevant information on the " \
"accuracy\.\n" \
expected_check_partials_error = "Problem {prob._name}: Checking partials " \
"with respect to variable '{var}' in component '{comp.pathname}' using the " \
"same method and options as are used to compute the component's derivatives " \
"will not provide any relevant information on the accuracy.\n" \
"To correct this, change the options to do the \n" \
"check_partials using either:\n" \
" - arguments to Problem\.check_partials. \n" \
" - arguments to Component\.set_check_partial_options"
" - arguments to Problem.check_partials. \n" \
" - arguments to Component.set_check_partial_options"

# Scenario 1:
# Compute partials: exact
@@ -2008,9 +2005,10 @@ def create_problem(force_alloc_complex=False):
# Expected result: Error
prob, parab = create_problem()
parab.declare_partials(of='*', wrt='*', method='fd')
- expected_error_msg = expected_check_partials_error.format(var='x', comp=self.parab)
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning, expected_error_msg):
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_partials(method='fd')
+ self.assertEqual(str(cm.exception),
+                  expected_check_partials_error.format(prob=prob, var='x', comp=parab))

# Scenario 3:
# Compute partials: fd, with default options
@@ -2052,9 +2050,10 @@ def create_problem(force_alloc_complex=False):
# Expected result: Error since using fd to check fd. All options the same
prob, parab = create_problem()
parab.declare_partials(of='*', wrt='*', method='fd')
- expected_error_msg = expected_check_partials_error.format(var='x', comp=parab)
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning, expected_error_msg):
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_partials(method='cs')
+ self.assertEqual(str(cm.exception),
+                  expected_check_partials_error.format(prob=prob, var='x', comp=parab))

# Scenario 7:
# Compute partials: fd, with default options
@@ -2074,9 +2073,10 @@
prob, parab = create_problem()
parab.declare_partials(of='*', wrt='*', method='fd')
parab.set_check_partial_options('*')
- expected_error_msg = expected_check_partials_error.format(var='x', comp=parab)
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning, expected_error_msg):
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_partials()
+ self.assertEqual(str(cm.exception),
+                  expected_check_partials_error.format(prob=prob, var='x', comp=parab))

# Scenario 9:
# Compute partials: fd, with default options
@@ -2132,9 +2132,10 @@ def create_problem(force_alloc_complex=False):
prob, parab = create_problem(force_alloc_complex=True)
parab.declare_partials(of='*', wrt='*', method='cs')
parab.set_check_partial_options('*', method='cs')
- expected_error_msg = expected_check_partials_error.format(var='x', comp=parab)
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning, expected_error_msg):
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_partials()
+ self.assertEqual(str(cm.exception),
+                  expected_check_partials_error.format(prob=prob, var='x', comp=parab))

# Scenario 15:
# Compute partials: cs, with default options
@@ -2148,12 +2149,11 @@ def create_problem(force_alloc_complex=False):
prob.check_partials()

# Now do similar checks for check_totals when approximations are used
- expected_check_totals_error_msg = "Problem .*: Checking totals using the same " \
-     "method and options as are used to compute the " \
-     "totals will not provide any relevant information on the " \
-     "accuracy\.\n" \
+ expected_check_totals_error_msg = "Problem {prob._name}: Checking totals using the " \
+     "same method and options as are used to compute the totals will not provide " \
+     "any relevant information on the accuracy.\n" \
      "To correct this, change the options to do the " \
-     "check_totals or on the call to approx_totals for the model\."
+     "check_totals or on the call to approx_totals for the model."

# Scenario 16:
# Compute totals: no approx on totals
@@ -2172,9 +2172,10 @@
prob.model.approx_totals()
prob.setup()
prob.run_model()
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning,
-                             expected_check_totals_error_msg) :
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_totals()
+ self.assertEqual(str(cm.exception),
+                  expected_check_totals_error_msg.format(prob=prob))

# Scenario 18:
# Compute totals: approx on totals using defaults
@@ -2225,9 +2226,10 @@ def create_problem(force_alloc_complex=False):
prob.model.approx_totals(method='cs')
prob.setup()
prob.run_model()
- with self.assertRaisesRegex(OMInvalidCheckDerivativesOptionsWarning,
-                             expected_check_totals_error_msg):
+ with self.assertRaises(OMInvalidCheckDerivativesOptionsWarning) as cm:
      prob.check_totals(method='cs')
+ self.assertEqual(str(cm.exception),
+                  expected_check_totals_error_msg.format(prob=prob))

# Scenario 22:
# Compute totals: fd, the default
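These tests replace `assertRaisesRegex`, which forced regex escapes like `\.` into the expected strings and quietly matched on fragments, with `assertRaises` plus an exact `assertEqual` on the formatted message. A minimal standalone sketch of the pattern (stdlib only; the exception and message are illustrative):

```python
import unittest

class MessagePatternDemo(unittest.TestCase):
    def test_exact_exception_message(self):
        with self.assertRaises(ValueError) as cm:
            raise ValueError("arguments to Problem.check_partials.")
        # exact comparison: no regex escaping of '.' required
        self.assertEqual(str(cm.exception), "arguments to Problem.check_partials.")
```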
49 changes: 49 additions & 0 deletions openmdao/core/tests/test_distribcomp.py
@@ -1091,6 +1091,55 @@ def compute(self, inputs, outputs):
assert_near_equal(p.get_val('C1.y', get_remote=False), 6. if p.model.C1.comm.rank == 0 else 14.)


@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class TestDistribCheckMPI(unittest.TestCase):
N_PROCS = 2

def test_distrib_conn_check(self):
class Serial2Distributed(om.ExplicitComponent):
def setup(self):
self.add_input("serial_in", shape=3)

if self.comm.rank == 0:
self.add_output("dist_out", shape=3, distributed=True)
else:
self.add_output("dist_out", shape=0, distributed=True)

def compute(self, inputs, outputs):
if self.comm.rank == 0:
outputs["dist_out"] = inputs["serial_in"]

class DistributedSum(om.ExplicitComponent):
def setup(self):
self.add_output("sum", shape=1)

def compute(self, inputs, outputs):
outputs["sum"] = self.comm.bcast(sum(inputs["dist_in"]), root=0)

class SumGroup(om.Group):
def setup(self):
self.add_subsystem("s2d", Serial2Distributed(), promotes_inputs=[("serial_in", "in")])
self.add_subsystem("sum", DistributedSum(), promotes_outputs=["sum"])
self.sum.add_input("dist_in", shape_by_conn=True, distributed=True)
self.connect("s2d.dist_out", "sum.dist_in")

prob = om.Problem()
model = prob.model

model.add_subsystem("ivc", om.IndepVarComp("x", [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]))
parallel = model.add_subsystem('parallel', om.ParallelGroup())
parallel.add_subsystem('sum1', SumGroup())
parallel.add_subsystem('sum2', SumGroup())

model.connect("ivc.x", "parallel.sum1.in", src_indices=om.slicer[:3])
model.connect("ivc.x", "parallel.sum2.in", src_indices=om.slicer[3:])

prob.setup()
prob.run_model()

assert_near_equal(prob.get_val("parallel.sum1.sum", get_remote=True), 3.0)
assert_near_equal(prob.get_val("parallel.sum2.sum", get_remote=True), 12.0)

if __name__ == '__main__':
from openmdao.utils.mpi import mpirun_tests
mpirun_tests()
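Presumably, following OpenMDAO's MPI test conventions, the `N_PROCS = 2` class attribute tells the `mpirun_tests()` harness to run this case on two processes; run standalone, it would be launched as something like `mpirun -n 2 python test_distribcomp.py`.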
