import numpy as np  # engine for numerical computing
from scipy.stats import cauchy  # for sampling from the (heavy-tailed) Cauchy distribution

from pypop7.optimizers.core.optimizer import Optimizer
from pypop7.optimizers.pso.pso import PSO  # abstract class of all particle swarm optimizer (PSO) classes


class CCPSO2(PSO):
"""Cooperative Coevolving Particle Swarm Optimizer (CCPSO2).
.. note:: `CCPSO2` employs the popular `cooperative coevolution <https://tinyurl.com/57wzdrhm>`_ framework to
extend PSO for large-scale black-box optimization (LSBBO) with *random grouping/partitioning*. However, it
may suffer from **performance degradation** on *non-separable* functions (particularly ill-conditioned),
owing to its axis-parallel decomposition strategy (see the classical **coordinate descent** from the
mathematical programming community for detailed mathematical explanation).
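
    As a rough sketch of the random grouping idea (with hypothetical variable names that only mirror the internal
    `self._indices`, `self._s`, and `self._k`, not part of this class's API), all dimension indices are randomly
    permuted and then split into `k` groups of (at most) `s` dimensions each:

    .. code-block:: python

       import numpy as np

       ndim, s = 10, 2  # e.g., 10 dimensions with a group size of 2
       k = int(np.ceil(ndim/s))  # number of groups (each optimized by one swarm)
       indices = np.arange(ndim)  # indices of all dimensions
       np.random.default_rng(2022).shuffle(indices)  # random permutation of all dimensions
       groups = [indices[j*s:(j + 1)*s] for j in range(k)]  # k groups of (at most) s dimensions each
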
    Parameters
    ----------
    problem : dict
              problem arguments with the following common settings (`keys`):
                * 'fitness_function' - objective function to be **minimized** (`func`),
                * 'ndim_problem' - number of dimensions (`int`),
                * 'upper_boundary' - upper boundary of search range (`array_like`),
                * 'lower_boundary' - lower boundary of search range (`array_like`).
    options : dict
              optimizer options with the following common settings (`keys`):
                * 'max_function_evaluations' - maximum of function evaluations (`int`, default: `np.inf`),
                * 'max_runtime' - maximal runtime to be allowed (`float`, default: `np.inf`),
                * 'seed_rng' - seed for random number generation needed to be *explicitly* set (`int`);
              and with the following particular settings (`keys`):
                * 'n_individuals' - swarm (population) size, aka number of particles (`int`, default: `30`),
                * 'p' - probability of using the Cauchy sampling distribution (`float`, default: `0.5`; see the
                  sketch after this list),
                * 'group_sizes' - a pool of candidate group sizes for random grouping, i.e., numbers of
                  dimensions per group (`list`, default: `[2, 5, 10, 50, 100, 250]`).
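
    The role of `p` in the position update can be sketched as follows (a simplified, one-dimensional illustration
    of the hybrid sampling rule used by `iterate`; the names `p_x` and `n_x` only mirror this class's internal
    variables):

    .. code-block:: python

       import numpy as np
       from scipy.stats import cauchy

       rng = np.random.default_rng(2022)
       p = 0.5  # probability of Cauchy (vs. Gaussian) sampling
       p_x, n_x = 1.0, 2.0  # personally and neighborly best positions (one dimension)
       scale = abs(p_x - n_x)  # per-dimension step scale
       if rng.random() <= p:  # heavy-tailed sampling around the personally best position
           x = p_x + scale*cauchy.rvs(random_state=rng)
       else:  # Gaussian sampling around the neighborly best position
           x = n_x + scale*rng.standard_normal()
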
    Examples
    --------
    Use the optimizer to minimize the well-known test function
    `Rosenbrock <https://en.wikipedia.org/wiki/Rosenbrock_function>`_:

.. code-block:: python
:linenos:
>>> import numpy
>>> from pypop7.benchmarks.base_functions import rosenbrock # function to be minimized
>>> from pypop7.optimizers.pso.ccpso2 import CCPSO2
>>> problem = {'fitness_function': rosenbrock, # define problem arguments
... 'ndim_problem': 500,
... 'lower_boundary': -5*numpy.ones((500,)),
... 'upper_boundary': 5*numpy.ones((500,))}
>>> options = {'max_function_evaluations': 1000000, # set optimizer options
... 'seed_rng': 2022}
>>> ccpso2 = CCPSO2(problem, options) # initialize the optimizer class
>>> results = ccpso2.optimize() # run the optimization process
>>> # return the number of function evaluations and best-so-far fitness
>>> print(f"CCPSO2: {results['n_function_evaluations']}, {results['best_so_far_y']}")
CCPSO2: 1000000, 1150.0205163111475
    To check the coding correctness of this optimizer, please refer to the `code-based repeatability report
    <https://tinyurl.com/bdfywpyx>`_ for more details.

    Attributes
    ----------
    group_sizes : `list`
                  a pool of candidate group sizes for random grouping (numbers of dimensions per group).
    n_individuals : `int`
                    swarm (population) size, aka number of particles.
    p : `float`
        probability of using the Cauchy sampling distribution.

    References
    ----------
    Li, X. and Yao, X., 2012.
    Cooperatively coevolving particle swarms for large scale optimization.
    IEEE Transactions on Evolutionary Computation, 16(2), pp.210-224.
    https://ieeexplore.ieee.org/document/5910380/

    Potter, M.A. and De Jong, K.A., 1994, October.
    A cooperative coevolutionary approach to function optimization.
    In International Conference on Parallel Problem Solving from Nature (pp. 249-257).
    Springer, Berlin, Heidelberg.
    https://link.springer.com/chapter/10.1007/3-540-58484-6_269
"""
def __init__(self, problem, options):
PSO.__init__(self, problem, options)
self.n_individuals = options.get('n_individuals', 30) # swarm (population) size, aka number of particles
assert self.n_individuals > 0
self.p = options.get('p', 0.5) # probability of using Cauchy sampling distribution
assert 0.0 <= self.p <= 1.0
self.group_sizes = options.get('group_sizes', [2, 5, 10, 50, 100, 250])
assert np.all(np.array(self.group_sizes) <= self.ndim_problem)
        self._indices = np.arange(self.ndim_problem)  # indices of all dimensions
        self._s = self.rng_optimization.choice(self.group_sizes)  # number of dimensions optimized by each swarm
        self._k = int(np.ceil(self.ndim_problem/self._s))  # number of swarms (the last may get fewer dimensions)
        assert self._k > 0
        self._improved = True  # whether the best-so-far fitness improved in the last generation (if not, regroup)
        self._best_so_far_y = self.best_so_far_y  # cached best-so-far fitness for the improvement test
def initialize(self, args=None):
x = self.rng_initialization.uniform(self.initial_lower_boundary, self.initial_upper_boundary,
size=(self.n_individuals, self.ndim_problem)) # positions
y = np.empty((self._k, self.n_individuals)) # fitness for all individuals of all swarms
p_x, p_y = np.copy(x), np.copy(y) # personally best positions and fitness
n_x = np.copy(x) # neighborly best positions
for i in range(self.n_individuals):
if self._check_terminations():
return x, y, p_x, p_y, n_x
            y[:, i] = self._evaluate_fitness(x[i], args)  # all k swarms share the fitness of the full vector
        p_y = np.copy(y)  # personally best fitness starts as a copy of the initial fitness
        return x, y, p_x, p_y, n_x
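
    # lbest (ring) topology: each particle's neighborhood on the ring is {left neighbor, itself,
    # right neighbor}; for swarm j, the neighborly best at the given dimension indices is taken
    # from the particle with the lowest personally best fitness among these three.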
def _ring_topology(self, p_x=None, p_y=None, j=None, i=None, indices=None):
left, right = i - 1, i + 1
if i == 0:
left = self.n_individuals - 1
elif i == self.n_individuals - 1:
right = 0
ring = [left, i, right]
return p_x[ring[int(np.argmin(p_y[j, ring]))], indices]
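
    # One generation of CCPSO2: (1) if the best-so-far fitness did not improve in the previous
    # generation, draw a new group size s from `group_sizes`, re-partition the shuffled dimension
    # indices into k groups, and re-evaluate all particles under the new grouping; (2) evaluate
    # each particle of each swarm via a context vector and update the personally best positions;
    # (3) update the neighborly best positions via the ring topology; (4) resample each position
    # from a Cauchy distribution around the personally best position (with probability p) or from
    # a Gaussian distribution around the neighborly best position, then clip to the boundaries.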
def iterate(self, x=None, y=None, p_x=None, p_y=None, n_x=None, args=None, fitness=None):
if self._improved is False:
candidates = self.rng_optimization.choice(self.group_sizes, size=(2,), replace=False)
if self._s == candidates[0]: # to ensure that a different value is chosen
self._s = candidates[1]
else:
self._s = candidates[0]
self._k = int(np.ceil(self.ndim_problem/self._s))
self.rng_optimization.shuffle(self._indices) # random permutation
y = np.empty((self._k, self.n_individuals)) # fitness for all individuals of all swarms
for j in range(self._k): # for each swarm
for i in range(self.n_individuals): # for each individual
if self._check_terminations():
return x, y, p_x, p_y, n_x
                    cv = np.copy(self.best_so_far_x)  # context vector (Potter & De Jong, 1994)
                    indices = self._indices[np.arange(j*self._s, min(self.ndim_problem, (j + 1)*self._s))]
                    cv[indices] = x[i, indices]  # plug this particle's subspace candidate into the context vector
                    y[j, i] = self._evaluate_fitness(cv, args)  # fitness of the full (context) vector
if self.saving_fitness:
fitness.extend(y.flatten())
p_y = np.copy(y)
        self._improved, self._best_so_far_y = False, self.best_so_far_y  # reset the improvement flag
for j in range(self._k): # for each swarm
for i in range(self.n_individuals): # for each individual
if self._check_terminations():
return x, y, p_x, p_y, n_x
cv = np.copy(self.best_so_far_x) # context vector
indices = self._indices[np.arange(j*self._s, min(self.ndim_problem, (j + 1)*self._s))]
cv[indices] = x[i, indices]
y[j, i] = self._evaluate_fitness(cv, args)
if y[j, i] < p_y[j, i]:
p_x[i, indices], p_y[j, i] = cv[indices], y[j, i]
if y[j, i] < self._best_so_far_y:
self._improved, self._best_so_far_y = True, y[j, i]
for i in range(self.n_individuals):
indices = self._indices[np.arange(j*self._s, min(self.ndim_problem, (j + 1)*self._s))]
n_x[i, indices] = self._ring_topology(p_x, p_y, j, i, indices)
for j in range(self._k): # for each swarm
for i in range(self.n_individuals): # for each individual
indices = self._indices[np.arange(j*self._s, min(self.ndim_problem, (j + 1)*self._s))]
                std = np.abs(p_x[i, indices] - n_x[i, indices])  # per-dimension sampling scale |p - n|
for jj, ii in enumerate(indices):
if self.rng_optimization.random() <= self.p: # sampling from Cauchy distribution
x[i, ii] = p_x[i, ii] + std[jj]*cauchy.rvs(random_state=self.rng_optimization)
else: # sampling from Gaussian distribution
x[i, ii] = n_x[i, ii] + std[jj]*self.rng_optimization.standard_normal()
x[i, indices] = np.clip(x[i, indices], self.lower_boundary[indices], self.upper_boundary[indices])
self._n_generations += 1
return x, y, p_x, p_y, n_x
def optimize(self, fitness_function=None, args=None):
fitness = Optimizer.optimize(self, fitness_function)
x, y, p_x, p_y, n_x = self.initialize(args)
self._print_verbose_info(fitness, y[0])
while not self.termination_signal:
x, y, p_x, p_y, n_x = self.iterate(x, y, p_x, p_y, n_x, args, fitness)
self._print_verbose_info(fitness, y.flatten())
return self._collect(fitness)