/
differential_evolution_driver.py
594 lines (482 loc) · 21.8 KB
/
differential_evolution_driver.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
"""
Driver for a differential evolution genetic algorithm.
TODO: add better references than: https://en.wikipedia.org/wiki/Differential_evolution
Most of this driver (except execute_ga) is based on SimpleGA, so the following may still apply:
The following reference is only for the penalty function:
Smith, A. E., Coit, D. W. (1995) Penalty functions. In: Handbook of Evolutionary Computation, 97(1).
The following reference is only for weighted sum multi-objective optimization:
Sobieszczanski-Sobieski, J., Morris, A. J., van Tooren, M. J. L. (2015)
Multidisciplinary Design Optimization Supported by Knowledge Based Engineering.
John Wiley & Sons, Ltd.
"""
import os
import copy
import numpy as np
import openmdao
from openmdao.core.driver import Driver, RecordingDebugging
from openmdao.utils.concurrent import concurrent_eval
from openmdao.utils.mpi import MPI
from openmdao.core.analysis_error import AnalysisError
class DifferentialEvolutionDriver(Driver):
"""
Driver for a differential evolution genetic algorithm.
This algorithm requires that inputs are floating point numbers.
Attributes
----------
_concurrent_pop_size : int
Number of points to run concurrently when model is a parallel one.
_concurrent_color : int
Color of current rank when running a parallel model.
_desvar_idx : dict
Keeps track of the indices for each desvar, since DifferentialEvolution sees an array of
design variables.
_ga : <DifferentialEvolution>
Main genetic algorithm lies here.
_randomstate : np.random.RandomState, int
Random state (or seed-number) which controls the seed and random draws.
"""
def __init__(self, **kwargs):
    """
    Initialize the DifferentialEvolutionDriver driver.

    Parameters
    ----------
    **kwargs : dict of keyword arguments
        Keyword arguments that will be mapped into the Driver options.
    """
    super(DifferentialEvolutionDriver, self).__init__(**kwargs)

    # Capabilities this driver provides...
    for feature in ('inequality_constraints', 'equality_constraints', 'multiple_objectives'):
        self.supports[feature] = True

    # ...and the ones it does not provide (yet).
    for feature in ('integer_design_vars', 'two_sided_constraints', 'linear_constraints',
                    'simultaneous_derivatives', 'active_set'):
        self.supports[feature] = False

    self.supports._read_only = True

    self._desvar_idx = {}
    self._ga = None

    # A seed can be injected through the environment for predictability during testing.
    seed = os.environ.get('DifferentialEvolutionDriver_seed')
    self._randomstate = int(seed) if seed is not None else None

    # Support for parallel models.
    self._concurrent_pop_size = 0
    self._concurrent_color = 0
def _declare_options(self):
    """
    Declare options before kwargs are processed in the init method.
    """
    self.options.declare('max_gen', default=100,
                         desc='Number of generations before termination.')
    self.options.declare('pop_size', default=0,
                         desc='Number of points in the GA. Set to 0 and it will be computed '
                              'as 20 times the total number of inputs.')
    self.options.declare('run_parallel', types=bool, default=False,
                         desc='Set to True to execute the points in a generation in parallel.')
    self.options.declare('procs_per_model', default=1, lower=1,
                         desc='Number of processors to give each model under MPI.')
    self.options.declare('penalty_parameter', default=10., lower=0.,
                         desc='Penalty function parameter.')
    self.options.declare('penalty_exponent', default=1.,
                         desc='Penalty function exponent.')
    self.options.declare('Pc', default=0.9, lower=0., upper=1.,
                         desc='Crossover probability.')
    self.options.declare('F', default=0.9, lower=0., upper=1., allow_none=True,
                         desc='Differential rate.')
    # Fix: the concatenated description strings were missing separator spaces,
    # producing "optimization.Weights" and "namesof the objectives" in help text.
    self.options.declare('multi_obj_weights', default={}, types=dict,
                         desc='Weights of objectives for multi-objective optimization. '
                              'Weights are specified as a dictionary with the absolute names '
                              'of the objectives. The same weights for all objectives are '
                              'assumed, if not given.')
    self.options.declare('multi_obj_exponent', default=1., lower=0.,
                         desc='Multi-objective weighting exponent.')
def _setup_driver(self, problem):
    """
    Prepare the driver for execution.

    This is the final thing to run during setup.

    Parameters
    ----------
    problem : <Problem>
        Pointer to the containing problem.
    """
    super(DifferentialEvolutionDriver, self)._setup_driver(problem)

    # Decide which communicator (if any) the GA uses when evaluating a generation.
    if self._concurrent_pop_size > 0:
        # Parallel model: evaluate groups of points concurrently.
        comm = problem.comm
        model_mpi = (self._concurrent_pop_size, self._concurrent_color)
    elif self.options['run_parallel']:
        # Serial model, but generations evaluated in parallel.
        comm = problem.comm
        model_mpi = None
    else:
        # Fully serial execution.
        comm = None
        model_mpi = None

    self._ga = DifferentialEvolution(self.objective_callback, comm=comm, model_mpi=model_mpi)
def _setup_comm(self, comm):
    """
    Perform any driver-specific setup of communicators for the model.

    Here, we generate the model communicators.

    Parameters
    ----------
    comm : MPI.Comm or <FakeComm> or None
        The communicator for the Problem.

    Returns
    -------
    MPI.Comm or <FakeComm> or None
        The communicator for the Problem model.
    """
    procs_per_model = self.options['procs_per_model']

    # Guard clause: without MPI-parallel execution the model just gets the
    # Problem communicator back unchanged.
    if not (MPI and self.options['run_parallel']):
        self._concurrent_pop_size = 0
        self._concurrent_color = 0
        return comm

    full_size = comm.size
    size = full_size // procs_per_model
    if full_size != size * procs_per_model:
        raise RuntimeError("The total number of processors is not evenly divisible by the "
                           "specified number of processors per model.\n Provide a "
                           "number of processors that is a multiple of %d, or "
                           "specify a number of processors per model that divides "
                           "into %d." % (procs_per_model, full_size))

    color = comm.rank % size
    model_comm = comm.Split(color)

    # Everything we need to figure out which case to run.
    self._concurrent_pop_size = size
    self._concurrent_color = color

    return model_comm
def _get_name(self):
    """
    Return the name identifying this driver (used e.g. by case recorders).

    Returns
    -------
    str
        Name of current Driver.
    """
    return "DifferentialEvolution"
def run(self):
    """
    Execute the genetic algorithm.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False if successful.
    """
    model = self._problem().model
    ga = self._ga

    pop_size = self.options['pop_size']
    max_gen = self.options['max_gen']
    F = self.options['F']
    Pc = self.options['Pc']

    self._check_for_missing_objective()

    # Size design variables: record the flat-array slice for each desvar.
    desvars = self._designvars
    desvar_vals = self.get_design_var_values()

    count = 0
    for name, meta in desvars.items():
        size = meta['size']
        self._desvar_idx[name] = (count, count + size)
        count += size

    lower_bound = np.empty((count, ))
    upper_bound = np.empty((count, ))
    x0 = np.empty(count)

    # Figure out bounds vectors and initial design vars.
    for name, meta in desvars.items():
        i, j = self._desvar_idx[name]
        lower_bound[i:j] = meta['lower']
        upper_bound[i:j] = meta['upper']
        x0[i:j] = desvar_vals[name]

    # Fix: removed unused local `abs2prom = model._var_abs2prom['output']`.

    # Automatic population size: 20 points per design variable.
    if pop_size == 0:
        pop_size = 20 * count

    desvar_new, obj, nfit = ga.execute_ga(x0, lower_bound, upper_bound,
                                          pop_size, max_gen,
                                          self._randomstate, F, Pc)

    # Pull optimal parameters back into framework and re-run, so that
    # framework is left in the right final state.
    for name in desvars:
        i, j = self._desvar_idx[name]
        val = desvar_new[i:j]
        self.set_design_var(name, val)

    with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
        model.run_solve_nonlinear()
        rec.abs = 0.0
        rec.rel = 0.0
    self.iter_count += 1

    return False
def objective_callback(self, x, icase):
    r"""
    Evaluate problem objective at the requested point.

    In case of multi-objective optimization, a simple weighted sum method is used:

    .. math::

       f = (\sum_{k=1}^{N_f} w_k \cdot f_k)^a

    where :math:`N_f` is the number of objectives and :math:`a>0` is an exponential
    weight. Choosing :math:`a=1` is equivalent to the conventional weighted sum method.

    The weights given in the options are normalized, so:

    .. math::

        \sum_{k=1}^{N_f} w_k = 1

    If one of the objectives :math:`f_k` is not a scalar, its elements will have the same
    weights, and it will be normed with length of the vector.

    Takes into account constraints with a penalty function.

    All constraints are converted to the form of :math:`g_i(x) \leq 0` for
    inequality constraints and :math:`h_i(x) = 0` for equality constraints.
    The constraint vector for inequality constraints is the following:

    .. math::

       g = [g_1, g_2 \dots g_N], g_i \in R^{N_{g_i}}

       h = [h_1, h_2 \dots h_N], h_i \in R^{N_{h_i}}

    The number of all constraints:

    .. math::

       N_g = \sum_{i=1}^N N_{g_i}, N_h = \sum_{i=1}^N N_{h_i}

    The fitness function is constructed with the penalty parameter :math:`p`
    and the exponent :math:`\kappa`:

    .. math::

       \Phi(x) = f(x) + p \cdot \sum_{k=1}^{N^g}(\delta_k \cdot g_k)^{\kappa}
       + p \cdot \sum_{k=1}^{N^h}|h_k|^{\kappa}

    where :math:`\delta_k = 0` if :math:`g_k` is satisfied, 1 otherwise

    .. note::

        The values of :math:`\kappa` and :math:`p` can be defined as driver options.

    Parameters
    ----------
    x : ndarray
        Value of design variables.
    icase : int
        Case number, used for identification when run in parallel.

    Returns
    -------
    float
        Objective value.
    bool
        Success flag, True if successful.
    int
        Case number, used for identification when run in parallel.
    """
    model = self._problem().model
    success = 1

    objs = self.get_objective_values()
    nr_objectives = len(objs)

    # Single objective, if there is only one objective, which has only one element.
    if nr_objectives > 1:
        is_single_objective = False
    else:
        # Fix: iterate the objective *values*. The original iterated objs.items(),
        # so len(obj) was the length of a (name, value) tuple - always 2 - and
        # single-objective problems were incorrectly routed to the weighted-sum path.
        for obj in objs.values():
            is_single_objective = len(obj) == 1
            break

    obj_exponent = self.options['multi_obj_exponent']
    if self.options['multi_obj_weights']:  # not empty
        obj_weights = self.options['multi_obj_weights']
    else:
        # Same weight for all objectives, if not specified.
        obj_weights = {name: 1. for name in objs.keys()}
    sum_weights = sum(obj_weights.values())

    # Scatter the flat GA point back into the framework's design variables.
    for name in self._designvars:
        i, j = self._desvar_idx[name]
        self.set_design_var(name, x[i:j])

    # a very large number, but smaller than the result of nan_to_num in Numpy
    almost_inf = openmdao.INF_BOUND

    # Execute the model
    with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
        self.iter_count += 1
        try:
            model.run_solve_nonlinear()

        # Tell the optimizer that this is a bad point.
        except AnalysisError:
            model._clear_iprint()
            success = 0

        obj_values = self.get_objective_values()
        if is_single_objective:  # Single objective optimization
            for i in obj_values.values():
                obj = i  # First and only key in the dict
        else:  # Multi-objective optimization with weighted sums
            weighted_objectives = np.array([])
            for name, val in obj_values.items():
                # element-wise multiplication with scalar
                # takes the average, if an objective is a vector
                try:
                    weighted_obj = val * obj_weights[name] / val.size
                except KeyError:
                    msg = ('Name "{}" in "multi_obj_weights" option '
                           'is not an absolute name of an objective.')
                    raise KeyError(msg.format(name))
                weighted_objectives = np.hstack((weighted_objectives, weighted_obj))

            obj = sum(weighted_objectives / sum_weights)**obj_exponent

        # Parameters of the penalty method.
        penalty = self.options['penalty_parameter']
        exponent = self.options['penalty_exponent']

        if penalty == 0:
            fun = obj
        else:
            constraint_violations = np.array([])
            for name, val in self.get_constraint_values().items():
                con = self._cons[name]
                # The unused bound fields will be either None or a very large number.
                if (con['lower'] is not None) and np.any(con['lower'] > -almost_inf):
                    diff = val - con['lower']
                    violation = np.array([0. if d >= 0 else abs(d) for d in diff])
                elif (con['upper'] is not None) and np.any(con['upper'] < almost_inf):
                    diff = val - con['upper']
                    violation = np.array([0. if d <= 0 else abs(d) for d in diff])
                elif (con['equals'] is not None) and np.any(np.abs(con['equals']) < almost_inf):
                    diff = val - con['equals']
                    violation = np.absolute(diff)
                else:
                    # Fix: no active bound on this constraint. Previously a stale
                    # `violation` from an earlier loop iteration was re-appended
                    # (or NameError if this was the first constraint).
                    continue
                constraint_violations = np.hstack((constraint_violations, violation))
            fun = obj + penalty * sum(np.power(constraint_violations, exponent))

        # Record after getting obj to assure they have
        # been gathered in MPI.
        rec.abs = 0.0
        rec.rel = 0.0

    return fun, success, icase
class DifferentialEvolution(object):
"""
Differential Evolution Genetic Algorithm.
TODO: add better references than: https://en.wikipedia.org/wiki/Differential_evolution
Attributes
----------
comm : MPI communicator or None
The MPI communicator that will be used for objective evaluation in each generation.
lchrom : int
Chromosome length.
model_mpi : None or tuple
If the model in objfun is also parallel, then this will contain a tuple with the
total number of population points to evaluate concurrently, and the color of the point
to evaluate on this rank.
npop : int
Population size.
objfun : function
Objective function callback.
"""
def __init__(self, objfun, comm=None, model_mpi=None):
"""
Initialize genetic algorithm object.
Parameters
----------
objfun : function
Objective callback function.
comm : MPI communicator or None
The MPI communicator that will be used objective evaluation for each generation.
model_mpi : None or tuple
If the model in objfun is also parallel, then this will contain a tuple with the the
total number of population points to evaluate concurrently, and the color of the point
to evaluate on this rank.
"""
self.objfun = objfun
self.comm = comm
self.lchrom = 0
self.npop = 0
self.model_mpi = model_mpi
def execute_ga(self, x0, vlb, vub, pop_size, max_gen, random_state, F=0.5, Pc=0.5):
"""
Perform the genetic algorithm.
Parameters
----------
x0 : ndarray
Initial design values
vlb : ndarray
Lower bounds array.
vub : ndarray
Upper bounds array.
pop_size : int
Number of points in the population.
max_gen : int
Number of generations to run the GA.
random_state : np.random.RandomState, int
Random state (or seed-number) which controls the seed and random draws.
F : float
Differential rate
Pc : float
Crossover rate
Returns
-------
ndarray
Best design point
float
Objective value at best design point.
int
Number of successful function evaluations.
"""
comm = self.comm
xopt = copy.deepcopy(vlb)
fopt = np.inf
self.lchrom = len(x0)
if np.mod(pop_size, 2) == 1:
pop_size += 1
self.npop = int(pop_size)
# use different seeds in different MPI processes
seed = random_state + self.comm.Get_rank() if self.comm else 0
rng = np.random.default_rng(seed)
# create random initial population, scaled to bounds
population = rng.random([self.npop, self.lchrom]) * (vub - vlb) + vlb # scale to bounds
fitness = np.ones(self.npop) * np.inf # initialize fitness to infinitely bad
# Main Loop
nfit = 0
for generation in range(max_gen + 1):
# Evaluate fitness of points in this generation
if comm is not None: # Parallel
# Since GA is random, ranks generate different new populations, so just take one
# and use it on all.
population = comm.bcast(population, root=0)
cases = [((item, ii), None) for ii, item in enumerate(population)]
# Pad the cases with some dummy cases to make the cases divisible amongst the procs.
# TODO: Add a load balancing option to this driver.
extra = len(cases) % comm.size
if extra > 0:
for j in range(comm.size - extra):
cases.append(cases[-1])
results = concurrent_eval(self.objfun, cases, comm,
allgather=True, model_mpi=self.model_mpi)
fitness[:] = np.inf
for result in results:
returns, traceback = result
if returns:
val, success, ii = returns
if success:
fitness[ii] = val
nfit += 1
else:
# Print the traceback if it fails
print('A case failed:')
print(traceback)
else: # Serial
for ii in range(self.npop):
fitness[ii], success, _ = self.objfun(population[ii], 0)
nfit += 1
# Find best performing point in this generation.
min_fit = np.min(fitness)
min_index = np.argmin(fitness)
min_gen = population[min_index]
# Update overall best.
if min_fit < fopt:
fopt = min_fit
xopt = min_gen
if generation == max_gen: # finished
break
# Selection: new generation members replace parents, if better (implied elitism)
if generation == 0:
parentPop = copy.deepcopy(population)
parentFitness = copy.deepcopy(fitness)
else:
for ii in range(self.npop):
if fitness[ii] < parentFitness[ii]: # if child is better, else parent unchanged
parentPop[ii] = population[ii]
parentFitness[ii] = fitness[ii]
# Evolve new generation.
population = np.zeros(np.shape(parentPop))
fitness = np.ones(self.npop) * np.inf
for ii in range(self.npop):
# randomly select 3 different population members other than the current choice
a, b, c = ii, ii, ii
while a == ii:
a = rng.integers(0, self.npop)
while b == ii or b == a:
b = rng.integers(0, self.npop)
while c == ii or c == a or c == b:
c = rng.integers(0, self.npop)
# randomly select chromosome index for forced crossover
r = rng.integers(0, self.lchrom)
# crossover and mutation
population[ii] = parentPop[ii] # start the same as parent
# clip mutant so that it cannot be outside the bounds
mutant = np.clip(parentPop[a] + F * (parentPop[b] - parentPop[c]), vlb, vub)
# sometimes replace parent's feature with mutant's
rr = rng.random(self.lchrom)
idx = np.where(rr < Pc)
population[ii][idx] = mutant[idx]
population[ii][r] = mutant[r] # always replace at least one with mutant's
return xopt, fopt, nfit