-
Notifications
You must be signed in to change notification settings - Fork 240
/
multifi_meta_model_unstructured_comp.py
333 lines (287 loc) · 13.6 KB
/
multifi_meta_model_unstructured_comp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
"""Define the MultiFiMetaModel class."""
from itertools import chain
import numpy as np
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.utils.array_utils import shape_to_len
def _get_name_fi(name, fi_index):
"""
Generate variable name taking into account fidelity level.
Parameters
----------
name : str
base name
fi_index : int
fidelity level
Returns
-------
str
variable name
"""
if fi_index > 0:
return "%s_fi%d" % (name, fi_index + 1)
else:
return name
class MultiFiMetaModelUnStructuredComp(MetaModelUnStructuredComp):
    """
    Generalize MetaModel to be able to train surrogates with multi-fidelity training inputs.

    For a given number of levels of fidelity **nfi** (given at initialization)
    the corresponding training input variables *train_[invar]_fi[2..nfi]* and
    *train_[outvar]_fi[2..nfi]* are automatically created
    besides the given *train_[invar]* and *train_[outvar]* variables.
    Note the index starts at 2, the index 1 is omitted considering
    the simple name *var* is equivalent to *var_fi1* which is intended
    to be the data of highest fidelity.

    The surrogate models are trained with a list of (m samples, n dim)
    ndarrays built from the various training input data. By convention,
    the fidelities are intended to be ordered from highest to lowest fidelity.
    Obviously for a given level of fidelity corresponding lists
    *train_[var]_fi[n]* have to be of the same size.

    Thus given the initialization::

    >>> mm = MultiFiMetaModelUnStructuredComp(nfi=2)
    >>> mm.add_input('x1', 0.)
    >>> mm.add_input('x2', 0.)
    >>> mm.add_output('y1', 0.)
    >>> mm.add_output('y2', 0.)

    the following supplementary training input variables
    ``train_x1_fi2`` and ``train_x2_fi2`` are created together with the classic
    ones ``train_x1`` and ``train_x2`` and the output variables ``train_y1_fi2``
    and ``train_y2_fi2`` are created as well.

    The embedded surrogate for y1 will be trained with a couple (X, Y).

    Where X is the list [X_fi1, X_fi2] where X_fi1 is an (m1, 2) ndarray
    filled with the m1 samples [x1 value, x2 value], X_fi2 is an (m2, 2) ndarray
    filled with the m2 samples [x1_fi2 value, x2_fi2 value].

    Where Y is a list [Y1_fi1, Y1_fi2] where Y1_fi1 is a (m1, 1) ndarray of
    y1 values and Y1_fi2 a (m2, 1) ndarray of y1_fi2 values.

    .. note:: when *nfi* == 1 a :class:`MultiFiMetaModelUnStructuredComp` object behaves as
       a :class:`MetaModelUnStructured` object.

    Parameters
    ----------
    **kwargs : dict of keyword arguments
        Keyword arguments that will be mapped into the Component options.

    Attributes
    ----------
    _input_sizes : list
        Stores the size of the inputs at each level.
    _static_input_sizes : list
        Stores the size of the inputs at each level for inputs added outside of setup.
    _nfi : int
        Number of levels of fidelity.
    _training_input : dict
        Training data for inputs.
    """

    def __init__(self, **kwargs):
        """
        Initialize all attributes.
        """
        super().__init__(**kwargs)

        nfi = self._nfi = self.options['nfi']

        # Generalize MetaModelUnStructured training inputs to a list of training
        # inputs, one entry per fidelity level (index 0 = highest fidelity).
        self._training_input = nfi * [np.empty(0)]
        self._input_sizes = nfi * [0]
        self._static_input_sizes = nfi * [0]

        self._no_check_partials = True

    def initialize(self):
        """
        Declare options.
        """
        super().initialize()

        self.options.declare('nfi', types=int, default=1, lower=1,
                             desc='Number of levels of fidelity.')

    def _setup_procs(self, pathname, comm, mode, prob_meta):
        """
        Execute first phase of the setup process.

        Distribute processors, assign pathnames, and call setup on the component.

        Parameters
        ----------
        pathname : str
            Global name of the system, including the path.
        comm : MPI.Comm or <FakeComm>
            MPI communicator object.
        mode : str
            Derivatives calculation mode, 'fwd' for forward, and 'rev' for
            reverse (adjoint).
        prob_meta : dict
            Problem level options.
        """
        # Reset to the statically-declared sizes; sizes for inputs added during
        # setup() are re-accumulated by add_input each time setup runs.
        self._input_sizes = list(self._static_input_sizes)

        super()._setup_procs(pathname, comm, mode, prob_meta)

    def add_input(self, name, val=1.0, shape=None, src_indices=None, flat_src_indices=None,
                  units=None, desc=''):
        """
        Add an input variable to the component.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray or Iterable
            The initial value of the variable being added in user-defined units.
            Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if src_indices not provided and
            val is not an array. Default is None.
        src_indices : int or list of ints or tuple of ints or int ndarray or Iterable or None
            The global indices of the source variable to transfer data from.
            If val is given as an array_like object, the shapes of val and
            src_indices must match. A value of None implies this input depends
            on all entries of source. Default is None.
        flat_src_indices : bool
            If True, each entry of src_indices is assumed to be an index into the
            flattened source. Otherwise each entry must be a tuple or list of size equal
            to the number of dimensions of the source.
        units : str or None
            Units in which this input variable will be provided to the component
            during execution. Default is None, which means it is unitless.
        desc : str
            Description of the variable.
        """
        metadata = super().add_input(name, val, shape=shape, src_indices=src_indices,
                                     flat_src_indices=flat_src_indices, units=units,
                                     desc=desc)

        # Size of a single sample of this input; for vectorized components the
        # value holds vec_size copies, so measure just the first one.
        if self.options['vec_size'] > 1:
            input_size = metadata['val'][0].size
        else:
            input_size = metadata['val'].size

        if self._static_mode:
            self._static_input_sizes[0] += input_size
        else:
            self._input_sizes[0] += input_size

        # Add train_<invar>_fi<n> options for the lower-fidelity levels
        # (level 0 already gets its train_<invar> option from the base class).
        for fi in range(self._nfi):
            if fi > 0:
                train_name = 'train_' + _get_name_fi(name, fi)
                self.options.declare(
                    train_name, default=None, desc='Training data for %s' % train_name)

                if self._static_mode:
                    self._static_input_sizes[fi] += input_size
                else:
                    self._input_sizes[fi] += input_size

    def add_output(self, name, val=1.0, surrogate=None, shape=None, units=None, res_units=None,
                   desc='', lower=None, upper=None, ref=1.0, ref0=0.0, res_ref=1.0, tags=None,
                   shape_by_conn=False, copy_shape=None, distributed=None):
        """
        Add an output variable to the component.

        Parameters
        ----------
        name : str
            Name of the variable in this component's namespace.
        val : float or list or tuple or ndarray
            The initial value of the variable being added in user-defined units. Default is 1.0.
        surrogate : SurrogateModel
            Surrogate model to use.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        res_units : str or None
            Units in which the residuals of this output will be given to the user when requested.
            Default is None, which means it has no units.
        desc : str
            Description of the variable.
        lower : float or list or tuple or ndarray or Iterable or None
            Lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
            consistent with the shape arg (if given), or (3) an array_like matching the shape of
            val, if val is array_like. A value of None means this output has no lower bound.
            Default is None.
        upper : float or list or tuple or ndarray or Iterable or None
            Upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
            consistent with the shape arg (if given), or (3) an array_like matching the shape of
            val, if val is array_like. A value of None means this output has no upper bound.
            Default is None.
        ref : float
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 1. Default is 1.
        ref0 : float
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 0. Default is 0.
        res_ref : float
            Scaling parameter. The value in the user-defined res_units of this output's residual
            when the scaled value is 1. Default is 1.
        tags : str or list of strs or set of strs
            User defined tags that can be used to filter what gets listed when calling
            list_inputs and list_outputs.
        shape_by_conn : bool
            If True, shape this output to match its connected input(s).
        copy_shape : str or None
            If a str, that str is the name of a variable. Shape this output to match that of
            the named variable.
        distributed : bool
            If True, this variable is a distributed variable, so it can have different sizes/values
            across MPI processes.
        """
        super().add_output(name, val, shape=shape,
                           units=units, res_units=res_units,
                           desc=desc, lower=lower,
                           upper=upper, ref=ref,
                           ref0=ref0, res_ref=res_ref,
                           surrogate=surrogate, tags=tags,
                           shape_by_conn=shape_by_conn,
                           copy_shape=copy_shape,
                           distributed=distributed)

        # One (initially empty) training array per fidelity level.
        self._training_output[name] = self._nfi * [np.empty(0)]

        # Add train_<outvar>_fi<n> options for the lower-fidelity levels
        # (level 0 already gets its train_<outvar> option from the base class).
        for fi in range(self._nfi):
            if fi > 0:
                train_name = 'train_' + _get_name_fi(name, fi)
                self.options.declare(
                    train_name, default=None, desc='Training data for %s' % train_name)

    def _train(self):
        """
        Override MetaModelUnStructured _train method to take into account multi-fidelity input data.

        Builds, per fidelity level, an (m, n) input array and per-output (m, 1)
        arrays from the train_* options, then calls train_multifi on each
        output's surrogate.
        """
        if self._nfi == 1:
            # shortcut: fall back to base class behaviour immediately
            super()._train()
            return

        # Validate that, within each fidelity level, every variable supplies
        # the same number of training points.
        num_sample = self._nfi * [None]
        for name_root, _ in chain(self._surrogate_input_names, self._surrogate_output_names):
            for fi in range(self._nfi):
                name = _get_name_fi(name_root, fi)
                val = self.options['train_' + name]
                if num_sample[fi] is None:
                    num_sample[fi] = len(val)
                elif len(val) != num_sample[fi]:
                    msg = f"{self.msginfo}: Each variable must have the same number " \
                          f"of training points. Expected {num_sample[fi]} but found {len(val)} " \
                          f"points for '{name}'."
                    raise RuntimeError(msg)

        inputs = [np.zeros((num_sample[fi], self._input_sizes[fi]))
                  for fi in range(self._nfi)]

        # add training data for each input
        idx = self._nfi * [0]
        for name_root, sz in self._surrogate_input_names:
            for fi in range(self._nfi):
                name = _get_name_fi(name_root, fi)
                val = self.options['train_' + name]

                if isinstance(val[0], float):
                    inputs[fi][:, idx[fi]] = val
                    idx[fi] += 1
                else:
                    # NOTE(review): idx[fi] is not advanced by sz here, so a
                    # second array-valued input would overwrite these columns —
                    # preserved as-is; confirm against upstream before changing.
                    for row_idx, v in enumerate(val):
                        v = np.asarray(v)
                        inputs[fi][row_idx, idx[fi]:idx[fi] + sz] = v.flat

        # add training data for each output
        outputs = self._nfi * [None]
        for name_root, shape in self._surrogate_output_names:
            output_size = shape_to_len(shape)

            for fi in range(self._nfi):
                name_fi = _get_name_fi(name_root, fi)
                outputs[fi] = np.zeros((num_sample[fi], output_size))

                val = self.options['train_' + name_fi]

                if isinstance(val[0], float):
                    outputs[fi][:, 0] = val
                else:
                    for row_idx, v in enumerate(val):
                        v = np.asarray(v)
                        outputs[fi][row_idx, :] = v.flat

            # BUGFIX: key by name_root (the output's base name); the previous
            # code used the stale loop variable `name` left over from the input
            # loop above, storing training data under the wrong key.
            self._training_output[name_root] = []
            self._training_output[name_root].extend(outputs)

            surrogate = self._metadata(name_root).get('surrogate')
            if surrogate is None:
                msg = f"{self.msginfo}: No surrogate specified for output '{name_root}'"
                raise RuntimeError(msg)
            else:
                surrogate.train_multifi(inputs, self._training_output[name_root])

        self._training_input = inputs
        self.train = False