Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MAINT: differential_evolution improve init_population_lhs comments closes #5101. #5132

Merged
merged 3 commits into from Oct 13, 2015
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
101 changes: 61 additions & 40 deletions scipy/optimize/_differentialevolution.py
Expand Up @@ -323,8 +323,8 @@ def __init__(self, func, bounds, args=(),
self.polish = polish
self.tol = tol

#Mutation constant should be in [0, 2). If specified as a sequence
#then dithering is performed.
# Mutation constant should be in [0, 2). If specified as a sequence
# then dithering is performed.
self.scale = mutation
if (not np.all(np.isfinite(mutation)) or
np.any(np.array(mutation) >= 2) or
Expand Down Expand Up @@ -364,13 +364,17 @@ def __init__(self, func, bounds, args=(),
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])

parameter_count = np.size(self.limits, 1)
self.parameter_count = np.size(self.limits, 1)

self.random_number_generator = _make_random_gen(seed)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

While I appreciate well-commented code, I think this is overdoing it.


#default initialization is a latin hypercube design, but there
#are other population initializations possible.
self.population = np.zeros((popsize * parameter_count,
parameter_count))
# default population initialization is a latin hypercube design, but
# there are other population initializations possible.
self.num_population_members = popsize * self.parameter_count

self.population_shape = (self.num_population_members,
self.parameter_count)

if init == 'latinhypercube':
self.init_population_lhs()
elif init == 'random':
Expand All @@ -379,43 +383,49 @@ def __init__(self, func, bounds, args=(),
raise ValueError("The population initialization method must be one"
"of 'latinhypercube' or 'random'")

self.population_energies = np.ones(
popsize * parameter_count) * np.inf
self.population_energies = (np.ones(self.num_population_members)
* np.inf)

self.disp = disp

def init_population_lhs(self):
    """
    Initializes the population with Latin Hypercube Sampling.
    Latin Hypercube Sampling ensures that each parameter is uniformly
    sampled over its range.
    """
    rng = self.random_number_generator

    # Each parameter range needs to be sampled uniformly. The scaled
    # parameter range ([0, 1)) needs to be split into
    # `self.num_population_members` segments, each of which has the
    # following size:
    segsize = 1.0 / self.num_population_members

    # Within each segment we sample from a uniform random distribution.
    # We need to do this sampling for each parameter.
    samples = (segsize * rng.random_sample(self.population_shape)
    # Offset each segment to cover the entire parameter range [0, 1)
               + np.linspace(0., 1., self.num_population_members,
                             endpoint=False)[:, np.newaxis])

    # Create an array for the population of candidate solutions.
    self.population = np.zeros_like(samples)

    # Initialize the population of candidate solutions by independently
    # permuting the rows of each column; this breaks the correlation
    # between parameters that the ordered segments would otherwise have.
    for j in range(self.parameter_count):
        order = rng.permutation(range(self.num_population_members))
        self.population[:, j] = samples[order, j]

def init_population_random(self):
    """
    Initialises the population at random. This type of initialization
    can possess clustering; Latin Hypercube sampling is generally better.
    """
    rng = self.random_number_generator
    # One uniform [0, 1) draw per (member, parameter) entry.
    self.population = rng.random_sample(self.population_shape)

@property
def x(self):
Expand Down Expand Up @@ -483,23 +493,34 @@ def solve(self):
if self.dither is not None:
self.scale = self.random_number_generator.rand(
) * (self.dither[1] - self.dither[0]) + self.dither[0]
for candidate in range(np.size(self.population, 0)):

for candidate in range(self.num_population_members):
if nfev > self.maxfun:
warning_flag = True
status_message = _status_message['maxfev']
break

# create a trial solution
trial = self._mutate(candidate)

# ensuring that it's in the range [0, 1)
self._ensure_constraint(trial)

# scale from [0, 1) to the actual parameter value
parameters = self._scale_parameters(trial)

# determine the energy of the objective function
energy = self.func(parameters, *self.args)
nfev += 1

# if the energy of the trial candidate is lower than the
# original population member then replace it
if energy < self.population_energies[candidate]:
self.population[candidate] = trial
self.population_energies[candidate] = energy

# if the trial candidate also has a lower energy than the
# best solution then replace that as well
if energy < self.population_energies[0]:
self.population_energies[0] = energy
self.population[0] = trial
Expand Down Expand Up @@ -584,9 +605,10 @@ def _mutate(self, candidate):
create a trial vector based on a mutation strategy
"""
trial = np.copy(self.population[candidate])
parameter_count = np.size(trial, 0)

fill_point = self.random_number_generator.randint(0, parameter_count)
rng = self.random_number_generator

fill_point = rng.randint(0, self.parameter_count)

if (self.strategy == 'randtobest1exp'
or self.strategy == 'randtobest1bin'):
Expand All @@ -596,7 +618,7 @@ def _mutate(self, candidate):
bprime = self.mutation_func(self._select_samples(candidate, 5))

if self.strategy in self._binomial:
crossovers = self.random_number_generator.rand(parameter_count)
crossovers = rng.rand(self.parameter_count)
crossovers = crossovers < self.cross_over_probability
# the last one is always from the bprime vector for binomial
# If you fill in modulo with a loop you have to set the last one to
Expand All @@ -608,12 +630,11 @@ def _mutate(self, candidate):

elif self.strategy in self._exponential:
i = 0
while (i < parameter_count and
self.random_number_generator.rand() <
self.cross_over_probability):
while (i < self.parameter_count and
rng.rand() < self.cross_over_probability):

trial[fill_point] = bprime[fill_point]
fill_point = (fill_point + 1) % parameter_count
fill_point = (fill_point + 1) % self.parameter_count
i += 1

return trial
Expand Down Expand Up @@ -651,8 +672,8 @@ def _best2(self, samples):
"""
r0, r1, r2, r3 = samples[:4]
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1]
- self.population[r2] - self.population[r3]))
(self.population[r0] + self.population[r1]
- self.population[r2] - self.population[r3]))

return bprime

Expand All @@ -662,17 +683,17 @@ def _rand2(self, samples):
"""
r0, r1, r2, r3, r4 = samples
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))

return bprime

def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(np.size(self.population, 0)),
obtain random integers from range(self.num_population_members),
without replacement. You can't have the original candidate either.
"""
idxs = list(range(np.size(self.population, 0)))
idxs = list(range(self.num_population_members))
idxs.remove(candidate)
self.random_number_generator.shuffle(idxs)
idxs = idxs[:number_samples]
Expand Down