-
Notifications
You must be signed in to change notification settings - Fork 59
/
_xnes.py
285 lines (221 loc) · 8.82 KB
/
_xnes.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
from __future__ import annotations
import math
import numpy as np
from typing import cast
from typing import Optional
# Floor substituted for negative eigenvalues before taking sqrt in should_stop().
_EPS = 1e-8
# Magnitude cap asserted on the initial mean and on every told parameter vector.
_MEAN_MAX = 1e32
# NOTE(review): _SIGMA_MAX is not referenced anywhere in this file —
# presumably kept for parity with sibling optimizers; confirm before removing.
_SIGMA_MAX = 1e32
class XNES:
    """xNES stochastic optimizer class with ask-and-tell interface.

    Example:

    .. code::

       import numpy as np
       from cmaes import XNES

       def quadratic(x1, x2):
           return (x1 - 3) ** 2 + (10 * (x2 + 2)) ** 2

       optimizer = XNES(mean=np.zeros(2), sigma=1.3)

       for generation in range(50):
           solutions = []
           for _ in range(optimizer.population_size):
               # Ask a parameter
               x = optimizer.ask()
               value = quadratic(x[0], x[1])
               solutions.append((x, value))
               print(f"#{generation} {value} (x1={x[0]}, x2 = {x[1]})")

           # Tell evaluation values.
           optimizer.tell(solutions)

    Args:

        mean:
            Initial mean vector of multi-variate gaussian distributions.

        sigma:
            Initial standard deviation of covariance matrix.

        bounds:
            Lower and upper domain boundaries for each parameter (optional).

        n_max_resampling:
            A maximum number of resampling parameters (default: 100).
            If all sampled parameters are infeasible, the last sampled one
            will be clipped with lower and upper bounds.

        seed:
            A seed number (optional).

        population_size:
            A population size (optional).
    """

    # Paper: https://dl.acm.org/doi/10.1145/1830483.1830557

    def __init__(
        self,
        mean: np.ndarray,
        sigma: float,
        bounds: Optional[np.ndarray] = None,
        n_max_resampling: int = 100,
        seed: Optional[int] = None,
        population_size: Optional[int] = None,
    ):
        assert sigma > 0, "sigma must be non-zero positive value"

        assert np.all(
            np.abs(mean) < _MEAN_MAX
        ), f"Abs of all elements of mean vector must be less than {_MEAN_MAX}"

        n_dim = len(mean)
        assert n_dim > 1, "The dimension of mean must be larger than 1"

        if population_size is None:
            # Default popsize grows logarithmically with the dimension.
            population_size = 4 + math.floor(3 * math.log(n_dim))
        assert population_size > 0, "popsize must be non-zero positive value."

        # Rank-based utility weights: log-decreasing, clipped at zero, then
        # shifted so the weights sum to zero (top ranks positive, rest negative).
        w_hat = np.log(population_size / 2 + 1) - np.log(
            np.arange(1, population_size + 1)
        )
        w_hat[np.where(w_hat < 0)] = 0
        weights = w_hat / sum(w_hat) - (1.0 / population_size)

        self._n_dim = n_dim
        self._popsize = population_size

        # weights
        self._weights = weights

        # learning rate
        self._eta_mean = 1.0
        self._eta_sigma = (3 / 5) * (3 + math.log(n_dim)) / (n_dim * math.sqrt(n_dim))
        self._eta_B = self._eta_sigma

        # distribution parameter: x ~ N(mean, sigma^2 * B B^T)
        self._mean = mean.copy()
        self._sigma = sigma
        self._B = np.eye(n_dim)

        # bounds contains low and high of each parameter.
        assert bounds is None or _is_valid_bounds(bounds, mean), "invalid bounds"
        self._bounds = bounds
        self._n_max_resampling = n_max_resampling

        self._g = 0
        self._rng = np.random.RandomState(seed)

        # Termination criteria
        self._tolx = 1e-12 * sigma
        self._tolxup = 1e4
        self._tolfun = 1e-12
        self._tolconditioncov = 1e14

        # Ring buffer holding (best, worst) objective values of the last
        # `_funhist_term` generations, consumed by the tolfun stop check.
        self._funhist_term = 10 + math.ceil(30 * n_dim / population_size)
        self._funhist_values = np.empty(self._funhist_term * 2)

    @property
    def dim(self) -> int:
        """A number of dimensions"""
        return self._n_dim

    @property
    def population_size(self) -> int:
        """A population size"""
        return self._popsize

    @property
    def generation(self) -> int:
        """Generation number which is monotonically incremented
        when multi-variate gaussian distribution is updated."""
        return self._g

    def reseed_rng(self, seed: int) -> None:
        """Reseed the internal random number generator."""
        self._rng.seed(seed)

    def set_bounds(self, bounds: Optional[np.ndarray]) -> None:
        """Update boundary constraints"""
        assert bounds is None or _is_valid_bounds(bounds, self._mean), "invalid bounds"
        self._bounds = bounds

    def ask(self) -> np.ndarray:
        """Sample a parameter.

        Resamples up to ``n_max_resampling`` times to obtain a feasible point;
        if all draws are infeasible, one final draw is clipped to the bounds.
        """
        for i in range(self._n_max_resampling):
            x = self._sample_solution()
            if self._is_feasible(x):
                return x
        x = self._sample_solution()
        x = self._repair_infeasible_params(x)
        return x

    def _sample_solution(self) -> np.ndarray:
        z = self._rng.randn(self._n_dim)  # ~ N(0, I)
        x = self._mean + self._sigma * self._B.dot(z)  # ~ N(m, σ^2 B B^T)
        return x

    def _is_feasible(self, param: np.ndarray) -> bool:
        if self._bounds is None:
            return True
        return cast(
            bool,
            np.all(param >= self._bounds[:, 0]) and np.all(param <= self._bounds[:, 1]),
        )  # Cast bool_ to bool.

    def _repair_infeasible_params(self, param: np.ndarray) -> np.ndarray:
        if self._bounds is None:
            return param

        # clip with lower and upper bound.
        param = np.where(param < self._bounds[:, 0], self._bounds[:, 0], param)
        param = np.where(param > self._bounds[:, 1], self._bounds[:, 1], param)
        return param

    def tell(self, solutions: list[tuple[np.ndarray, float]]) -> None:
        """Tell evaluation values and update the search distribution.

        ``solutions`` is sorted in place by objective value (ascending).
        """
        assert len(solutions) == self._popsize, "Must tell popsize-length solutions."
        for s in solutions:
            assert np.all(
                np.abs(s[0]) < _MEAN_MAX
            ), f"Abs of all param values must be less than {_MEAN_MAX} to avoid overflow errors"

        self._g += 1
        solutions.sort(key=lambda s: s[1])

        # Stores 'best' and 'worst' values of the
        # last 'self._funhist_term' generations.
        funhist_idx = 2 * (self.generation % self._funhist_term)
        self._funhist_values[funhist_idx] = solutions[0][1]
        self._funhist_values[funhist_idx + 1] = solutions[-1][1]

        # Map each sample back to the local coordinate z = (σB)^{-1}(x - m).
        # The inverse does not depend on the solution, so invert σB once
        # instead of once per solution (was recomputed inside the loop).
        sigma_B_inv = np.linalg.inv(self._sigma * self._B)
        z_k = np.array([sigma_B_inv.dot(s[0] - self._mean) for s in solutions])

        # natural gradient estimation in local coordinate
        G_delta = np.sum(
            [self._weights[i] * z_k[i, :] for i in range(self.population_size)], axis=0
        )
        G_M = np.sum(
            [
                self._weights[i]
                * (np.outer(z_k[i, :], z_k[i, :]) - np.eye(self._n_dim))
                for i in range(self.population_size)
            ],
            axis=0,
        )
        # Split G_M into a scale part (trace) and a shape part (traceless).
        G_sigma = G_M.trace() / self._n_dim
        G_B = G_M - G_sigma * np.eye(self._n_dim)

        # parameter update
        self._mean += self._eta_mean * self._sigma * np.dot(self._B, G_delta)
        self._sigma *= math.exp((self._eta_sigma / 2.0) * G_sigma)
        self._B = self._B.dot(_expm((self._eta_B / 2.0) * G_B))

    def should_stop(self) -> bool:
        """Return True when any termination criterion is satisfied."""
        # Reconstruct the (symmetrized) covariance shape A = B B^T and its
        # eigen-decomposition; E holds the standard deviations per axis.
        A = self._B.dot(self._B.T)
        A = (A + A.T) / 2
        E2, V = np.linalg.eigh(A)
        E = np.sqrt(np.where(E2 < 0, _EPS, E2))
        diagA = np.diag(A)

        # Stop if the range of function values of the recent generation is below tolfun.
        if (
            self.generation > self._funhist_term
            and np.max(self._funhist_values) - np.min(self._funhist_values)
            < self._tolfun
        ):
            return True

        # Stop if detecting divergent behavior.
        if self._sigma * np.max(E) > self._tolxup:
            return True

        # No effect coordinates: stop if adding 0.2-standard deviations
        # in any single coordinate does not change m.
        if np.any(self._mean == self._mean + (0.2 * self._sigma * np.sqrt(diagA))):
            return True

        # No effect axis: stop if adding 0.1-standard deviation vector in
        # any principal axis direction of C does not change m. "pycma" check
        # axis one by one at each generation.
        i = self.generation % self.dim
        if np.all(self._mean == self._mean + (0.1 * self._sigma * E[i] * V[:, i])):
            return True

        # Stop if the condition number of the covariance matrix exceeds 1e14.
        condition_cov = np.max(E) / np.min(E)
        if condition_cov > self._tolconditioncov:
            return True

        return False
def _is_valid_bounds(bounds: Optional[np.ndarray], mean: np.ndarray) -> bool:
if bounds is None:
return True
if (mean.size, 2) != bounds.shape:
return False
if not np.all(bounds[:, 0] <= mean):
return False
if not np.all(mean <= bounds[:, 1]):
return False
return True
def _expm(mat: np.ndarray) -> np.ndarray:
D, U = np.linalg.eigh(mat)
expD = np.exp(D)
return U @ np.diag(expD) @ U.T