"""Define the Problem class and a FakeComm class for non-MPI users."""
import sys
import pprint
import os
import logging
import weakref
from collections import defaultdict, namedtuple, OrderedDict
from fnmatch import fnmatchcase
from itertools import product
from io import StringIO
import numpy as np
import scipy.sparse as sparse
from openmdao.core.component import Component
from openmdao.core.driver import Driver, record_iteration
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.group import Group, System
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.total_jac import _TotalJacInfo
from openmdao.core.constants import _DEFAULT_OUT_STREAM, _UNDEFINED, INT_DTYPE
from openmdao.approximation_schemes.complex_step import ComplexStep
from openmdao.approximation_schemes.finite_difference import FiniteDifference
from openmdao.solvers.solver import SolverInfo
from openmdao.error_checking.check_config import _default_checks, _all_checks
from openmdao.recorders.recording_iteration_stack import _RecIteration
from openmdao.recorders.recording_manager import RecordingManager, record_viewer_data, \
record_system_options
from openmdao.utils.record_util import create_local_meta
from openmdao.utils.general_utils import ContainsAll, pad_name, simple_warning, warn_deprecation, \
_is_slicer_op
from openmdao.utils.mpi import FakeComm
from openmdao.utils.mpi import MPI
from openmdao.utils.name_maps import prom_name2abs_name, name2abs_names
from openmdao.utils.options_dictionary import OptionsDictionary
from openmdao.utils.units import convert_units
from openmdao.utils import coloring as coloring_mod
from openmdao.core.constants import _SetupStatus
from openmdao.utils.name_maps import abs_key2rel_key
from openmdao.vectors.vector import _full_slice
from openmdao.vectors.default_vector import DefaultVector
from openmdao.utils.logger_utils import get_logger, TestLogger
import openmdao.utils.coloring as coloring_mod
from openmdao.utils.hooks import _setup_hooks
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
from openmdao.utils.name_maps import rel_key2abs_key, rel_name2abs_name
ErrorTuple = namedtuple('ErrorTuple', ['forward', 'reverse', 'forward_reverse'])
MagnitudeTuple = namedtuple('MagnitudeTuple', ['forward', 'reverse', 'fd'])
_contains_all = ContainsAll()
CITATION = """@article{openmdao_2019,
Author={Justin S. Gray and John T. Hwang and Joaquim R. R. A.
Martins and Kenneth T. Moore and Bret A. Naylor},
Title="{OpenMDAO: An Open-Source Framework for Multidisciplinary
Design, Analysis, and Optimization}",
Journal="{Structural and Multidisciplinary Optimization}",
Year={2019},
Publisher={Springer},
pdf={http://openmdao.org/pubs/openmdao_overview_2019.pdf},
note= {In Press}
}"""
class Problem(object):
"""
Top-level container for the systems and drivers.
Attributes
----------
model : <System>
Pointer to the top-level <System> object (root node in the tree).
comm : MPI.Comm or <FakeComm>
The global communicator.
driver : <Driver>
Slot for the driver. The default driver is `Driver`, which just runs
the model once.
_mode : 'fwd' or 'rev'
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint).
_orig_mode : 'fwd', 'rev', or 'auto'
Derivatives calculation mode assigned by the user. If set to 'auto', _mode will be
automatically assigned to 'fwd' or 'rev' based on relative sizes of design variables vs.
responses.
_initial_condition_cache : dict
Any initial conditions that are set at the problem level via setitem are cached here
until they can be processed.
cite : str
Listing of relevant citations that should be referenced when
publishing work that uses this class.
options : <OptionsDictionary>
Dictionary with general options for the problem.
recording_options : <OptionsDictionary>
Dictionary with problem recording options.
_rec_mgr : <RecordingManager>
Object that manages all recorders added to this problem.
_check : bool
If True, call check_config at the end of final_setup.
_filtered_vars_to_record : dict
Dictionary of lists of design vars, constraints, etc. to record.
_logger : object or None
Object for logging config checks if _check is True.
_name : str
Problem name.
_system_options_recorded : bool
A flag to indicate whether the system options for all the systems have been recorded
_metadata : dict
Problem level metadata.
_run_counter : int
The number of times run_driver or run_model has been called.
"""
def __init__(self, model=None, driver=None, comm=None, name=None, **options):
"""
Initialize attributes.
Parameters
----------
model : <System> or None
The top-level <System>. If not specified, an empty <Group> will be created.
driver : <Driver> or None
The driver for the problem. If not specified, a simple "Run Once" driver will be used.
comm : MPI.Comm or <FakeComm> or None
The global communicator.
name : str
Problem name. Can be used to specify a Problem instance when multiple Problems
exist.
**options : named args
All remaining named args are converted to options.
"""
self.cite = CITATION
self._name = name
if comm is None:
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
comm = FakeComm()
if model is None:
self.model = Group()
elif isinstance(model, System):
self.model = model
else:
raise TypeError(self.msginfo +
": The value provided for 'model' is not a valid System.")
if driver is None:
self.driver = Driver()
elif isinstance(driver, Driver):
self.driver = driver
else:
raise TypeError(self.msginfo +
": The value provided for 'driver' is not a valid Driver.")
self.comm = comm
self._mode = None # mode is assigned in setup()
self._initial_condition_cache = {}
self._metadata = None
self._run_counter = -1
self._system_options_recorded = False
self._rec_mgr = RecordingManager()
# General options
self.options = OptionsDictionary(parent_name=type(self).__name__)
self.options.declare('coloring_dir', types=str,
default=os.path.join(os.getcwd(), 'coloring_files'),
desc='Directory containing coloring files (if any) for this Problem.')
self.options.update(options)
# Case recording options
self.recording_options = OptionsDictionary(parent_name=type(self).__name__)
self.recording_options.declare('record_desvars', types=bool, default=True,
desc='Set to True to record design variables at the '
'problem level')
self.recording_options.declare('record_objectives', types=bool, default=True,
desc='Set to True to record objectives at the problem level')
self.recording_options.declare('record_constraints', types=bool, default=True,
desc='Set to True to record constraints at the '
'problem level')
self.recording_options.declare('record_responses', types=bool, default=False,
desc='Set True to record constraints and objectives at the '
'problem level.')
self.recording_options.declare('record_inputs', types=bool, default=False,
desc='Set True to record inputs at the '
'problem level.')
self.recording_options.declare('record_outputs', types=bool, default=True,
desc='Set True to record outputs at the '
'problem level.')
self.recording_options.declare('record_residuals', types=bool, default=False,
desc='Set True to record residuals at the '
'problem level.')
self.recording_options.declare('record_derivatives', types=bool, default=False,
desc='Set to True to record derivatives for the problem '
'level')
self.recording_options.declare('record_abs_error', types=bool, default=True,
desc='Set to True to record absolute error of '
'model nonlinear solver')
self.recording_options.declare('record_rel_error', types=bool, default=True,
desc='Set to True to record relative error of model \
nonlinear solver')
self.recording_options.declare('includes', types=list, default=['*'],
desc='Patterns for variables to include in recording. \
Uses fnmatch wildcards')
self.recording_options.declare('excludes', types=list, default=[],
desc='Patterns for vars to exclude in recording '
'(processed post-includes). Uses fnmatch wildcards')
_setup_hooks(self)
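    # A minimal construction sketch (kept as a comment so it has no effect on this
    # module), illustrating the constructor arguments and options documented above.
    # The 'comp', 'x', and 'y' names are illustrative assumptions, not part of this file.
    #
    #     import openmdao.api as om
    #
    #     prob = om.Problem(name='demo', coloring_dir='./my_coloring_files')
    #     prob.model.add_subsystem('comp', om.ExecComp('y = 2.0*x'), promotes=['x', 'y'])
    #     prob.recording_options['record_inputs'] = True
    #     prob.setup()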
def _get_var_abs_name(self, name):
if name in self.model._var_allprocs_abs2meta:
return name
elif name in self.model._var_allprocs_prom2abs_list['output']:
return self.model._var_allprocs_prom2abs_list['output'][name][0]
elif name in self.model._var_allprocs_prom2abs_list['input']:
abs_names = self.model._var_allprocs_prom2abs_list['input'][name]
if len(abs_names) == 1:
return abs_names[0]
else:
raise KeyError("{}: Using promoted name `{}' is ambiguous and matches unconnected "
"inputs %s. Use absolute name to disambiguate.".format(self.msginfo,
name,
abs_names))
raise KeyError('{}: Variable "{}" not found.'.format(self.msginfo, name))
@property
def msginfo(self):
"""
Return info to prepend to messages.
Returns
-------
str
Info to prepend to messages.
"""
if self._name is None:
return type(self).__name__
return '{} {}'.format(type(self).__name__, self._name)
def _get_inst_id(self):
return self._name
def is_local(self, name):
"""
Return True if the named variable or system is local to the current process.
Parameters
----------
name : str
Name of a variable or system.
Returns
-------
bool
True if the named system or variable is local to this process.
"""
if self._metadata is None:
raise RuntimeError("{}: is_local('{}') was called before setup() "
"completed.".format(self.msginfo, name))
try:
abs_name = self._get_var_abs_name(name)
except KeyError:
sub = self.model._get_subsystem(name)
return sub is not None and sub._is_local
# variable exists, but may be remote
return abs_name in self.model._var_abs2meta
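    # Rough sketch of is_local() under MPI (comment only): given an existing Problem
    # instance 'prob', each rank can check whether a subsystem or variable was allocated
    # locally before touching it. The name 'par.comp1' is an assumed example.
    #
    #     prob.setup()
    #     prob.final_setup()
    #     if prob.is_local('par.comp1'):
    #         print(prob.get_val('par.comp1.y'))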
def _get_cached_val(self, name, get_remote=False):
# We have set and cached already
if name in self._initial_condition_cache:
return self._initial_condition_cache[name]
# Vector not setup, so we need to pull values from saved metadata request.
else:
proms = self.model._var_allprocs_prom2abs_list
meta = self.model._var_abs2meta
try:
conns = self.model._conn_abs_in2out
except AttributeError:
conns = {}
abs_names = name2abs_names(self.model, name)
if not abs_names:
raise KeyError('{}: Variable "{}" not found.'.format(self.model.msginfo, name))
abs_name = abs_names[0]
remote_vars = self._metadata['remote_vars']
if abs_name in meta:
if abs_name in conns:
val = meta[conns[abs_name]]['value']
else:
val = meta[abs_name]['value']
if get_remote and abs_name in remote_vars:
owner = remote_vars[abs_name]
if self.model.comm.rank == owner:
self.model.comm.bcast(val, root=owner)
else:
val = self.model.comm.bcast(None, root=owner)
if val is not _UNDEFINED:
# Need to cache the "get" in case the user calls in-place numpy operations.
self._initial_condition_cache[name] = val
return val
@property
def _recording_iter(self):
return self._metadata['recording_iter']
def __getitem__(self, name):
"""
Get an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
Returns
-------
float or ndarray or any python object
the requested output/input variable.
"""
return self.get_val(name, get_remote=None)
def get_val(self, name, units=None, indices=None, get_remote=False):
"""
Get an output/input variable.
        This function is used if you want to specify display units for the returned value.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
units : str, optional
Units to convert to before return.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to return.
get_remote : bool or None
If True, retrieve the value even if it is on a remote process. Note that if the
variable is remote on ANY process, this function must be called on EVERY process
in the Problem's MPI communicator.
If False, only retrieve the value if it is on the current process, or only the part
of the value that's on the current process for a distributed variable.
If None and the variable is remote or distributed, a RuntimeError will be raised.
Returns
-------
object
The value of the requested output/input variable.
"""
if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:
val = self._get_cached_val(name, get_remote=get_remote)
if val is not _UNDEFINED:
if indices is not None:
val = val[indices]
if units is not None:
val = self.model.convert2units(name, val, units)
else:
val = self.model.get_val(name, units=units, indices=indices, get_remote=get_remote,
from_src=True)
if val is _UNDEFINED:
if get_remote:
raise KeyError('{}: Variable name "{}" not found.'.format(self.msginfo, name))
else:
raise RuntimeError(f"{self.model.msginfo}: Variable '{name}' is not local to "
f"rank {self.comm.rank}. You can retrieve values from "
"other processes using `get_val(<name>, get_remote=True)`.")
return val
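    # A hedged usage sketch for get_val() (comment only), given an existing Problem
    # instance 'prob'. The variable name and units are assumptions for illustration;
    # 'cm' would have to be compatible with the units declared on the variable.
    #
    #     y = prob.get_val('comp.y')                       # local value
    #     y_cm = prob.get_val('comp.y', units='cm')        # converted copy
    #     y0 = prob.get_val('comp.y', indices=[0])         # sliced copy
    #     y_all = prob.get_val('comp.y', get_remote=True)  # collective call under MPI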
def __setitem__(self, name, value):
"""
Set an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
value : float or ndarray or any python object
value to set this variable to.
"""
self.set_val(name, value)
def set_val(self, name, value, units=None, indices=None):
"""
Set an output/input variable.
        This function is used if you want to set a value using a different unit.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
value : float or ndarray or list
Value to set this variable to.
units : str, optional
Units that value is defined in.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to set to specified value.
"""
model = self.model
if self._metadata is not None:
conns = model._conn_global_abs_in2out
else:
raise RuntimeError(f"{self.msginfo}: '{name}' Cannot call set_val before setup.")
all_meta = model._var_allprocs_abs2meta
n_proms = 0 # if nonzero, name given was promoted input name w/o a matching prom output
try:
ginputs = model._group_inputs
except AttributeError:
ginputs = {} # could happen if top level system is not a Group
abs_names = name2abs_names(model, name)
if abs_names:
n_proms = len(abs_names) # for output this will never be > 1
if n_proms > 1 and name in ginputs:
abs_name = ginputs[name][0].get('use_tgt', abs_names[0])
else:
abs_name = abs_names[0]
else:
raise KeyError(f'{model.msginfo}: Variable "{name}" not found.')
if abs_name in conns:
src = conns[abs_name]
if abs_name not in model._var_allprocs_discrete['input']:
value = np.asarray(value)
tmeta = all_meta[abs_name]
tunits = tmeta['units']
sunits = all_meta[src]['units']
if abs_name in model._var_abs2meta:
tlocmeta = model._var_abs2meta[abs_name]
else:
tlocmeta = None
gunits = ginputs[name][0].get('units') if name in ginputs else None
if n_proms > 1: # promoted input name was used
if gunits is None:
tunit_list = [all_meta[n]['units'] for n in abs_names]
tu0 = tunit_list[0]
for tu in tunit_list:
if tu != tu0:
model._show_ambiguity_msg(name, ('units',), abs_names)
if units is None:
# avoids double unit conversion
if self._metadata['setup_status'] > _SetupStatus.POST_SETUP:
ivalue = value
if sunits is not None:
if gunits is not None and gunits != tunits:
value = model.convert_from_units(src, value, gunits)
else:
value = model.convert_from_units(src, value, tunits)
else:
if gunits is None:
ivalue = model.convert_from_units(abs_name, value, units)
else:
ivalue = model.convert_units(name, value, units, gunits)
if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:
value = ivalue
else:
value = model.convert_from_units(src, value, units)
else:
src = abs_name
if units is not None:
value = model.convert_from_units(abs_name, value, units)
# Caching only needed if vectors aren't allocated yet.
if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:
if indices is not None:
self._get_cached_val(name)
try:
if _is_slicer_op(indices):
self._initial_condition_cache[name] = value[indices]
else:
self._initial_condition_cache[name][indices] = value
except IndexError:
self._initial_condition_cache[name][indices] = value
except Exception as err:
raise RuntimeError(f"Failed to set value of '{name}': {str(err)}.")
else:
self._initial_condition_cache[name] = value
else:
myrank = model.comm.rank
if indices is None:
indices = _full_slice
if model._outputs._contains_abs(abs_name):
model._outputs.set_var(abs_name, value, indices)
elif abs_name in conns: # input name given. Set value into output
if model._outputs._contains_abs(src): # src is local
if (model._outputs._abs_get_val(src).size == 0 and
src.rsplit('.', 1)[0] == '_auto_ivc' and all_meta[src]['distributed']):
pass # special case, auto_ivc dist var with 0 local size
elif tmeta['has_src_indices'] and n_proms < 2:
if tlocmeta: # target is local
src_indices = tlocmeta['src_indices']
if tmeta['distributed']:
ssizes = model._var_sizes['nonlinear']['output']
sidx = model._var_allprocs_abs2idx['nonlinear'][src]
ssize = ssizes[myrank, sidx]
start = np.sum(ssizes[:myrank, sidx])
end = start + ssize
if np.any(src_indices < start) or np.any(src_indices >= end):
raise RuntimeError(f"{model.msginfo}: Can't set {name}: "
"src_indices refer "
"to out-of-process array entries.")
if start > 0:
src_indices = src_indices - start
model._outputs.set_var(src, value, src_indices[indices])
else:
raise RuntimeError(f"{model.msginfo}: Can't set {abs_name}: remote"
" connected inputs with src_indices currently not"
" supported.")
else:
value = np.asarray(value)
model._outputs.set_var(src, value, indices)
elif src in model._discrete_outputs:
model._discrete_outputs[src] = value
# also set the input
# TODO: maybe remove this if inputs are removed from case recording
if n_proms < 2:
if model._inputs._contains_abs(abs_name):
model._inputs.set_var(abs_name, ivalue, indices)
elif abs_name in model._discrete_inputs:
model._discrete_inputs[abs_name] = value
else:
# must be a remote var. so, just do nothing on this proc. We can't get here
# unless abs_name is found in connections, so the variable must exist.
if abs_name in model._var_allprocs_abs2meta:
print(f"Variable '{name}' is remote on rank {self.comm.rank}. "
"Local assignment ignored.")
elif abs_name in model._discrete_outputs:
model._discrete_outputs[abs_name] = value
elif model._inputs._contains_abs(abs_name): # could happen if model is a component
model._inputs.set_var(abs_name, value, indices)
elif abs_name in model._discrete_inputs: # could happen if model is a component
model._discrete_inputs[abs_name] = value
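    # A hedged usage sketch for set_val() (comment only), given an existing Problem
    # instance 'prob'. The names and units are illustrative assumptions; the units must
    # be compatible with those declared on the variable being set.
    #
    #     prob.set_val('comp.x', 3.0)
    #     prob.set_val('comp.x', 300.0, units='cm')   # converted before being stored
    #     prob.set_val('comp.x', 5.0, indices=[0])    # set only part of an array
    #     prob['comp.x'] = 3.0                        # __setitem__ shorthand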
def _set_initial_conditions(self):
"""
Set all initial conditions that have been saved in cache after setup.
"""
for name, value in self._initial_condition_cache.items():
self.set_val(name, value)
# Clean up cache
self._initial_condition_cache = OrderedDict()
def run_model(self, case_prefix=None, reset_iter_counts=True):
"""
Run the model by calling the root system's solve_nonlinear.
Parameters
----------
case_prefix : str or None
Prefix to prepend to coordinates when recording.
reset_iter_counts : bool
If True and model has been run previously, reset all iteration counters.
"""
if self._mode is None:
raise RuntimeError(self.msginfo +
": The `setup` method must be called before `run_model`.")
if case_prefix:
if not isinstance(case_prefix, str):
raise TypeError(self.msginfo + ": The 'case_prefix' argument should be a string.")
self._recording_iter.prefix = case_prefix
else:
self._recording_iter.prefix = None
if self.model.iter_count > 0 and reset_iter_counts:
self.driver.iter_count = 0
self.model._reset_iter_counts()
self._run_counter += 1
self.final_setup()
self.model._clear_iprint()
self.model.run_solve_nonlinear()
def run_driver(self, case_prefix=None, reset_iter_counts=True):
"""
Run the driver on the model.
Parameters
----------
case_prefix : str or None
Prefix to prepend to coordinates when recording.
reset_iter_counts : bool
If True and model has been run previously, reset all iteration counters.
Returns
-------
boolean
            Failure flag; True if failed to converge, False if successful.
"""
if self._mode is None:
raise RuntimeError(self.msginfo +
": The `setup` method must be called before `run_driver`.")
if case_prefix:
if not isinstance(case_prefix, str):
raise TypeError(self.msginfo + ": The 'case_prefix' argument should be a string.")
self._recording_iter.prefix = case_prefix
else:
self._recording_iter.prefix = None
if self.model.iter_count > 0 and reset_iter_counts:
self.driver.iter_count = 0
self.model._reset_iter_counts()
self._run_counter += 1
self.final_setup()
self.model._clear_iprint()
return self.driver.run()
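    # A rough sketch contrasting run_model() and run_driver() (comment only). The driver
    # choice and variable names are assumptions; any real model would define its own.
    #
    #     prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')
    #     prob.model.add_design_var('x', lower=-10.0, upper=10.0)
    #     prob.model.add_objective('y')
    #     prob.setup()
    #     prob.run_model()                              # single evaluation of the model
    #     failed = prob.run_driver(case_prefix='opt1')  # full driver (optimization) run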
def compute_jacvec_product(self, of, wrt, mode, seed):
"""
Given a seed and 'of' and 'wrt' variables, compute the total jacobian vector product.
Parameters
----------
of : list of str
Variables whose derivatives will be computed.
wrt : list of str
Derivatives will be computed with respect to these variables.
mode : str
Derivative direction ('fwd' or 'rev').
seed : dict or list
Either a dict keyed by 'wrt' varnames (fwd) or 'of' varnames (rev), containing
dresidual (fwd) or doutput (rev) values, OR a list of dresidual or doutput
values that matches the corresponding 'wrt' (fwd) or 'of' (rev) varname list.
Returns
-------
dict
The total jacobian vector product, keyed by variable name.
"""
if mode == 'fwd':
if len(wrt) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'wrt' list must be the same length in fwd mode.")
lnames, rnames = of, wrt
lkind, rkind = 'output', 'residual'
else: # rev
if len(of) != len(seed):
raise RuntimeError(self.msginfo +
": seed and 'of' list must be the same length in rev mode.")
lnames, rnames = wrt, of
lkind, rkind = 'residual', 'output'
rvec = self.model._vectors[rkind]['linear']
lvec = self.model._vectors[lkind]['linear']
rvec.set_val(0.)
conns = self.model._conn_global_abs_in2out
# set seed values into dresids (fwd) or doutputs (rev)
# seed may have keys that are inputs and must be converted into auto_ivcs
try:
seed[rnames[0]]
except (IndexError, TypeError):
for i, name in enumerate(rnames):
if name in conns:
rvec[conns[name]] = seed[i]
else:
rvec[name] = seed[i]
else:
for name in rnames:
if name in conns:
rvec[conns[name]] = seed[name]
else:
rvec[name] = seed[name]
# We apply a -1 here because the derivative of the output is minus the derivative of
# the residual in openmdao.
rvec._data *= -1.
self.model.run_solve_linear(['linear'], mode)
if mode == 'fwd':
return {n: lvec[n].copy() for n in lnames}
else:
# may need to convert some lnames to auto_ivc names
return {n: lvec[conns[n] if n in conns else n].copy() for n in lnames}
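    # A rough sketch of compute_jacvec_product() (comment only), assuming a model with a
    # scalar design variable 'x' and response 'y' that has already been set up and run
    # with derivatives enabled; the seed is keyed by 'wrt' names in fwd mode and by 'of'
    # names in rev mode.
    #
    #     import numpy as np
    #
    #     jvp = prob.compute_jacvec_product(of=['y'], wrt=['x'], mode='fwd',
    #                                       seed={'x': np.ones(1)})
    #     vjp = prob.compute_jacvec_product(of=['y'], wrt=['x'], mode='rev',
    #                                       seed={'y': np.ones(1)})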
def _setup_recording(self):
"""
Set up case recording.
"""
self._filtered_vars_to_record = self.driver._get_vars_to_record(self.recording_options)
self._rec_mgr.startup(self)
def add_recorder(self, recorder):
"""
Add a recorder to the problem.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
self._rec_mgr.append(recorder)
def cleanup(self):
"""
Clean up resources prior to exit.
"""
# shut down all recorders
self._rec_mgr.shutdown()
# clean up driver and model resources
self.driver.cleanup()
for system in self.model.system_iter(include_self=True, recurse=True):
system.cleanup()
def record(self, case_name):
"""
Record the variables at the Problem level.
Must be called after `final_setup` has been called. This can either
happen automatically through `run_driver` or `run_model`, or it can be
called manually.
Parameters
----------
case_name : str
Name used to identify this Problem case.
"""
if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
raise RuntimeError(f"{self.msginfo}: Problem.record() cannot be called before "
"`Problem.run_model()`, `Problem.run_driver()`, or "
"`Problem.final_setup()`.")
else:
record_iteration(self, self, case_name)
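    # A minimal recording sketch (comment only): attach a recorder, run, then record a
    # named Problem-level case. The filename and case name are assumptions.
    #
    #     prob.add_recorder(om.SqliteRecorder('cases.sql'))
    #     prob.setup()
    #     prob.run_driver()
    #     prob.record('final_state')
    #     prob.cleanup()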
def record_iteration(self, case_name):
"""
Record the variables at the Problem level.
Parameters
----------
case_name : str
Name used to identify this Problem case.
"""
warn_deprecation("'Problem.record_iteration' has been deprecated. "
"Use 'Problem.record' instead.")
record_iteration(self, self, case_name)
def _get_recorder_metadata(self, case_name):
"""
Return metadata from the latest iteration for use in the recorder.
Parameters
----------
case_name : str
Name of current case.
Returns
-------
dict
Metadata dictionary for the recorder.
"""
return create_local_meta(case_name)
def setup(self, check=False, logger=None, mode='auto', force_alloc_complex=False,
distributed_vector_class=PETScVector, local_vector_class=DefaultVector,
derivatives=True):
"""
Set up the model hierarchy.
When `setup` is called, the model hierarchy is assembled, the processors are allocated
(for MPI), and variables and connections are all assigned. This method traverses down
the model hierarchy to call `setup` on each subsystem, and then traverses up the model
hierarchy to call `configure` on each subsystem.
Parameters
----------
check : boolean
            Whether to run config checks after setup is complete.
logger : object
Object for logging config checks if check is True.
mode : string
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint). Default is 'auto', which will pick 'fwd' or 'rev' based on
the direction resulting in the smallest number of linear solves required to
compute derivatives.
force_alloc_complex : bool
Force allocation of imaginary part in nonlinear vectors. OpenMDAO can generally
detect when you need to do this, but in some cases (e.g., complex step is used
after a reconfiguration) you may need to set this to True.
distributed_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in interprocess communication.
local_vector_class : type
Reference to the <Vector> class or factory function used to instantiate vectors
and associated transfers involved in intraprocess communication.
derivatives : bool
If True, perform any memory allocations necessary for derivative computation.
Returns
-------
self : <Problem>
            This enables the user to instantiate and set up the Problem in one line.
"""
model = self.model
comm = self.comm
# PETScVector is required for MPI
if comm.size > 1:
if PETScVector is None:
raise ValueError(self.msginfo +
": Attempting to run in parallel under MPI but PETScVector "
"could not be imported.")
elif distributed_vector_class is not PETScVector:
raise ValueError("%s: The `distributed_vector_class` argument must be "
"`PETScVector` when running in parallel under MPI but '%s' was "
"specified." % (self.msginfo, distributed_vector_class.__name__))
if mode not in ['fwd', 'rev', 'auto']:
msg = "%s: Unsupported mode: '%s'. Use either 'fwd' or 'rev'." % (self.msginfo, mode)
raise ValueError(msg)
self._mode = self._orig_mode = mode
model_comm = self.driver._setup_comm(comm)
# this metadata will be shared by all Systems/Solvers in the system tree
self._metadata = {
'coloring_dir': self.options['coloring_dir'], # directory for coloring files
'recording_iter': _RecIteration(), # manager of recorder iterations
'local_vector_class': local_vector_class,
'distributed_vector_class': distributed_vector_class,
'solver_info': SolverInfo(),
'use_derivatives': derivatives,
'force_alloc_complex': force_alloc_complex,
'remote_vars': {}, # vars that are remote somewhere. does not include distrib vars
'prom2abs': {'input': {}, 'output': {}}, # includes ALL promotes including buried ones
'static_mode': False, # used to determine where various 'static'
# and 'dynamic' data structures are stored.
# Dynamic ones are added during System
# setup/configure. They are wiped out and re-created during
# each Problem setup. Static ones are added outside of
# Problem setup and they are never wiped out or re-created.
'config_info': None, # used during config to determine if additional updates required
'parallel_groups': [], # list of pathnames of parallel groups in this model (all procs)
'setup_status': _SetupStatus.PRE_SETUP,
'vec_names': None, # names of all nonlinear and linear vectors
'lin_vec_names': None, # names of linear vectors
'model_ref': weakref.ref(model) # ref to the model (needed to get out-of-scope
# src data for inputs)
}
model._setup(model_comm, mode, self._metadata)
# set static mode back to True in all systems in this Problem
self._metadata['static_mode'] = True
# Cache all args for final setup.
self._check = check
self._logger = logger
self._metadata['setup_status'] = _SetupStatus.POST_SETUP
return self
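    # A hedged sketch of common setup() calls (comment only). The choices shown are
    # illustrative; 'auto' mode is the default and usually sufficient.
    #
    #     prob.setup()                              # auto fwd/rev selection
    #     prob.setup(mode='rev')                    # force adjoint derivatives
    #     prob.setup(force_alloc_complex=True)      # allow complex-step checks later
    #     prob.setup(check=True)                    # run the default config checks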
def final_setup(self):
"""
Perform final setup phase on problem in preparation for run.
This is the second phase of setup, and is done automatically at the start of `run_driver`
and `run_model`. At the beginning of final_setup, we have a model hierarchy with defined
variables, solvers, case_recorders, and derivative settings. During this phase, the vectors
        are created and populated, the drivers and solvers are initialized, the recorders are
started, and the rest of the framework is prepared for execution.
"""
driver = self.driver
response_size, desvar_size = driver._update_voi_meta(self.model)
# update mode if it's been set to 'auto'
if self._orig_mode == 'auto':
mode = 'rev' if response_size < desvar_size else 'fwd'
self._mode = mode
else:
mode = self._orig_mode
if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
self.model._final_setup(self.comm)
driver._setup_driver(self)
info = driver._coloring_info
coloring = info['coloring']
if coloring is None and info['static'] is not None:
coloring = driver._get_static_coloring()
if coloring and coloring_mod._use_total_sparsity:
# if we're using simultaneous total derivatives then our effective size is less
# than the full size
if coloring._fwd and coloring._rev:
pass # we're doing both!
elif mode == 'fwd' and coloring._fwd:
desvar_size = coloring.total_solves()
elif mode == 'rev' and coloring._rev:
response_size = coloring.total_solves()
if ((mode == 'fwd' and desvar_size > response_size) or
(mode == 'rev' and response_size > desvar_size)):
simple_warning("Inefficient choice of derivative mode. You chose '%s' for a "
"problem with %d design variables and %d response variables "
"(objectives and nonlinear constraints)." %
(mode, desvar_size, response_size), RuntimeWarning)
if self._metadata['setup_status'] == _SetupStatus.PRE_SETUP and \
hasattr(self.model, '_order_set') and self.model._order_set:
raise RuntimeError("%s: Cannot call set_order without calling "
"setup after" % (self.msginfo))
# we only want to set up recording once, after problem setup
if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:
driver._setup_recording()
self._setup_recording()
record_viewer_data(self)
record_system_options(self)
if self._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
self._metadata['setup_status'] = _SetupStatus.POST_FINAL_SETUP
self._set_initial_conditions()
if self._check:
if self._check is True:
checks = _default_checks
else:
checks = self._check
if self.comm.rank == 0:
logger = self._logger
else:
logger = TestLogger()
self.check_config(logger, checks=checks)
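    # A rough sketch of calling final_setup() explicitly (comment only), e.g. to force
    # vector allocation before running. Ordinarily run_model()/run_driver() call it for
    # you; the variable name 'x' is an assumption.
    #
    #     prob.setup()
    #     prob.set_val('x', 4.0)    # cached until final_setup allocates the vectors
    #     prob.final_setup()
    #     prob.run_model()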
def check_partials(self, out_stream=_DEFAULT_OUT_STREAM, includes=None, excludes=None,
compact_print=False, abs_err_tol=1e-6, rel_err_tol=1e-6,
method='fd', step=None, form='forward', step_calc='abs',
force_dense=True, show_only_incorrect=False):
"""
Check partial derivatives comprehensively for all components in your model.
Parameters
----------
out_stream : file-like object
Where to send human readable output. By default it goes to stdout.
Set to None to suppress.
includes : None or list_like
List of glob patterns for pathnames to include in the check. Default is None, which
includes all components in the model.
excludes : None or list_like
List of glob patterns for pathnames to exclude from the check. Default is None, which
excludes nothing.
compact_print : bool
Set to True to just print the essentials, one line per input-output pair.
abs_err_tol : float
            Threshold value for absolute error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Default is 1.0E-6.
rel_err_tol : float
            Threshold value for relative error. Errors above this value will have a '*' displayed
next to them in output, making them easy to search for. Note at times there may be a
significant relative error due to a minor absolute error. Default is 1.0E-6.