Merge pull request #117 from jakobj/maint/local-search-per-individual
Change local search to accept single individual not list of individuals
mschmidt87 committed May 22, 2020
2 parents f25563e + 348c094 commit 5e6b057
Showing 3 changed files with 27 additions and 27 deletions.
7 changes: 4 additions & 3 deletions gp/ea/mu_plus_lambda.py
@@ -24,7 +24,7 @@ def __init__(
         tournament_size: int,
         *,
         n_processes: int = 1,
-        local_search: Callable[[List[Individual]], None] = lambda combined: None
+        local_search: Callable[[Individual], None] = lambda combined: None
     ):
         """Init function

@@ -39,7 +39,7 @@
         n_processes : int, optional
             Number of parallel processes to be used. If greater than 1,
             parallel evaluation of the objective is supported. Defaults to 1.
-        local_search : Callable[[List[gp.Individua]], None], optional
+        local_search : Callable[[Individual], None], optional
            Called before each fitness evaluation with a joint list of
            offsprings and parents to optimize numeric leaf values of
            the graph. Defaults to identity function.
@@ -106,7 +106,8 @@ def step(self, pop: Population, objective: Callable[[Individual], Individual]) -
         # population instead of the other way around
         combined = offsprings + pop.parents

-        self.local_search(combined)
+        for ind in combined:
+            self.local_search(ind)

         combined = self._compute_fitness(combined, objective)

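With this change, MuPlusLambda.step applies local_search to each individual in the combined list of offsprings and parents, instead of handing it the whole list at once. Below is a minimal sketch of a callable that satisfies the new Callable[[Individual], None] contract; the attribute parameter_names_to_values is taken from the test diff further down, while the clamping itself is purely illustrative:

    def clamp_leaf_values(ind):
        # Per-individual local search: clamp every numeric leaf value into [-1, 1].
        for name, value in ind.parameter_names_to_values.items():
            ind.parameter_names_to_values[name] = max(-1.0, min(1.0, value))

Such a function can be passed directly as the local_search argument; step then calls it once per offspring and parent before fitness evaluation.
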
41 changes: 20 additions & 21 deletions gp/local_search/gradient_based.py
@@ -8,28 +8,28 @@
 except ModuleNotFoundError:
     torch_available = False

-from typing import Callable, List, Optional
+from typing import Callable, Optional


 from ..individual import Individual  # noqa: F401


 def gradient_based(
-    individuals: List[Individual],
+    individual: Individual,
     objective: Callable[[torch.nn.Module], torch.Tensor],
     lr: float,
     gradient_steps: int,
     optimizer: Optional[Optimizer] = None,
     clip_value: Optional[float] = None,
 ) -> None:
-    """Perform a local search for numeric leaf values for the list of
-    individuals based on gradient information obtained via automatic
+    """Perform a local search for numeric leaf values for an individual
+    based on gradient information obtained via automatic
     differentiation.

     Parameters
     ----------
-    individuals : List
-        List of individuals for which to perform local search.
+    individual : Individual
+        Individual for which to perform local search.
     objective : Callable
         Objective function that is called with a differentiable graph
         and returns a differentiable loss.
@@ -55,23 +55,22 @@ def gradient_based(
     if clip_value is None:
         clip_value = 0.1 * 1.0 / lr

-    for ind in individuals:
-        f = ind.to_torch()
+    f = individual.to_torch()

-        if len(list(f.parameters())) > 0:
-            optimizer = optimizer_class(f.parameters(), lr=lr)
+    if len(list(f.parameters())) > 0:
+        optimizer = optimizer_class(f.parameters(), lr=lr)

-            for i in range(gradient_steps):
-                loss = objective(f)
-                if not torch.isfinite(loss):
-                    continue
+        for i in range(gradient_steps):
+            loss = objective(f)
+            if not torch.isfinite(loss):
+                continue

-                f.zero_grad()
-                loss.backward()
-                if clip_value is not np.inf:
-                    torch.nn.utils.clip_grad.clip_grad_value_(f.parameters(), clip_value)
-                optimizer.step()
+            f.zero_grad()
+            loss.backward()
+            if clip_value is not np.inf:
+                torch.nn.utils.clip_grad.clip_grad_value_(f.parameters(), clip_value)
+            optimizer.step()

-            assert all(torch.isfinite(t) for t in f.parameters())
+        assert all(torch.isfinite(t) for t in f.parameters())

-        ind.update_parameters_from_torch_class(f)
+    individual.update_parameters_from_torch_class(f)
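
Because gradient_based now operates on a single Individual but still takes the objective and optimizer settings as arguments, it has to be bound to the one-argument signature the EA expects before it can serve as local_search. A sketch using functools.partial; the inner torch objective, including its input shape, is an assumption for illustration and not part of the library:

    import functools

    import torch

    import gp


    def torch_objective(f):
        # Illustrative differentiable loss on the compiled graph; the real
        # objective and input shape depend on the task at hand.
        x = torch.ones(1, 1)
        return torch.sum((f(x) - 1.0) ** 2)


    # Bind everything except the individual, yielding Callable[[Individual], None].
    local_search = functools.partial(
        gp.local_search.gradient_based,
        objective=torch_objective,
        lr=0.05,
        gradient_steps=1,
    )

local_search(ind) then matches exactly what MuPlusLambda.step calls in its per-individual loop.
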
6 changes: 3 additions & 3 deletions test/test_local_search.py
@@ -20,15 +20,15 @@ def objective(f):

     # test increase parameter value if too small
     ind.parameter_names_to_values["<p1>"] = 0.9
-    gp.local_search.gradient_based([ind], objective, 0.05, 1)
+    gp.local_search.gradient_based(ind, objective, 0.05, 1)
     assert ind.parameter_names_to_values["<p1>"] == pytest.approx(0.91)

     # test decrease parameter value if too large
     ind.parameter_names_to_values["<p1>"] = 1.1
-    gp.local_search.gradient_based([ind], objective, 0.05, 1)
+    gp.local_search.gradient_based(ind, objective, 0.05, 1)
     assert ind.parameter_names_to_values["<p1>"] == pytest.approx(1.09)

     # test no change of parameter value if at optimum
     ind.parameter_names_to_values["<p1>"] = 1.0
-    gp.local_search.gradient_based([ind], objective, 0.05, 1)
+    gp.local_search.gradient_based(ind, objective, 0.05, 1)
     assert ind.parameter_names_to_values["<p1>"] == pytest.approx(1.0)
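
The expected values in these assertions are consistent with a loss of (p - 1)**2 on the single parameter <p1> and one plain SGD step with lr=0.05; this is an inference from the asserted numbers, not something taken from the test file itself:

    def sgd_step(p, lr=0.05):
        # One SGD step on loss(p) = (p - 1) ** 2, whose gradient is 2 * (p - 1).
        return p - lr * 2.0 * (p - 1.0)

    assert abs(sgd_step(0.9) - 0.91) < 1e-9  # too small -> nudged up
    assert abs(sgd_step(1.1) - 1.09) < 1e-9  # too large -> nudged down
    assert sgd_step(1.0) == 1.0              # at the optimum -> unchanged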
