
Commit

Fix examples
jakobj committed Apr 7, 2021
1 parent f593665 commit fc4426b
Showing 12 changed files with 41 additions and 42 deletions.
2 changes: 1 addition & 1 deletion examples/example_caching.py
@@ -46,7 +46,7 @@ def inner_objective(ind):
     expr = ind.to_sympy()
     loss = []
     for x0 in np.linspace(-2.0, 2.0, 100):
-        y = float(expr[0].subs({"x_0": x0}).evalf())
+        y = float(expr.subs({"x_0": x0}).evalf())
         loss.append((f_target(x0) - y) ** 2)

     time.sleep(0.25)  # emulate long fitness evaluation
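
Note on the change above: to_sympy() apparently now returns a single sympy expression rather than a one-element list, so the [0] index is dropped. A minimal, self-contained sketch of the evaluation pattern, with a hand-written expression standing in for ind.to_sympy() (the expression itself is only illustrative):

import sympy

x_0 = sympy.Symbol("x_0")
expr = x_0 ** 2 + 1.0  # stand-in for ind.to_sympy()

# substitute a concrete input and evaluate numerically, as the example now does
y = float(expr.subs({"x_0": 2.0}).evalf())
print(y)  # 5.0
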
2 changes: 1 addition & 1 deletion examples/example_differential_evo_regression.py
@@ -176,7 +176,7 @@ def recording_callback(pop):
 ax_function.set_xlabel(r"$x$")


-print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}")
+print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}")

 history_fitness = np.array(history["fitness_parents"])
 ax_fitness.plot(np.max(history_fitness, axis=1), label="Champion")
6 changes: 3 additions & 3 deletions examples/example_evo_regression.py
@@ -84,7 +84,7 @@ def objective(individual, target_function, seed):
                "ignore", message="invalid value encountered in double_scalars"
            )
            try:
-                y[i] = f_graph(x_i)[0]
+                y[i] = f_graph(x_i[0], x_i[1])
            except ZeroDivisionError:
                individual.fitness = -np.inf
                return individual
@@ -123,7 +123,7 @@ def evolution(f_target):
     Individual
         Individual with the highest fitness in the last generation
     """
-    population_params = {"n_parents": 10, "seed": 8188211}
+    population_params = {"n_parents": 10, "seed": 818821}

     genome_params = {
         "n_inputs": 2,
@@ -191,7 +191,7 @@ def recording_callback(pop):
 x_0_range = np.linspace(-5.0, 5.0, 20)
 x_1_range = np.ones_like(x_0_range) * 2.0
 # fix x_1 such than 1d plot makes sense
-y = [f_graph([x_0, x_1_range[0]]) for x_0 in x_0_range]
+y = [f_graph(x_0, x_1_range[0]) for x_0 in x_0_range]
 y_target = target_function(np.hstack([x_0_range.reshape(-1, 1), x_1_range.reshape(-1, 1)]))

 ax_function.plot(x_0_range, y_target, lw=2, alpha=0.5, label="Target")
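
Note on the changes above: the compiled graph callable is apparently now called with one scalar argument per input (f_graph(x_i[0], x_i[1])) instead of a single list. A minimal sketch of sweeping the first input while holding the second fixed, with a plain Python function standing in for f_graph (its body is illustrative only):

import numpy as np

def f_graph(x_0, x_1):  # stand-in for individual.to_func() with two inputs
    return x_0 ** 2 + x_1

x_0_range = np.linspace(-5.0, 5.0, 20)
x_1_fixed = 2.0  # keep the second input constant so a 1d plot makes sense
y = [f_graph(x_0, x_1_fixed) for x_0 in x_0_range]
print(len(y))  # 20
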
2 changes: 1 addition & 1 deletion examples/example_fec_caching.py
@@ -61,7 +61,7 @@ def inner_objective(ind):

     loss = []
     for x_0 in np.linspace(-2.0, 2.0, 100):
-        y = f([x_0])
+        y = f(x_0)
         loss.append((f_target(x_0) - y) ** 2)

     time.sleep(0.25)  # emulate long fitness evaluation
14 changes: 7 additions & 7 deletions examples/example_hurdles.py
@@ -44,7 +44,7 @@


 def f_target(x):
-    return x[0] ** 2 + 1.0
+    return x ** 2 + 1.0


 # %%
@@ -73,8 +73,8 @@ def objective_one(individual):
         # the callable returned from `to_func` accepts and returns
         # lists; accordingly we need to pack the argument and unpack
         # the return value
-        y = f([x])[0]
-        loss += (f_target([x]) - y) ** 2
+        y = f(x)
+        loss += (f_target(x) - y) ** 2

     individual.fitness = -loss / n_function_evaluations

@@ -97,8 +97,8 @@ def objective_two(individual):
         # the callable returned from `to_func` accepts and returns
         # lists; accordingly we need to pack the argument and unpack
         # the return value
-        y = f([x])[0]
-        loss += (f_target([x]) - y) ** 2
+        y = f(x)
+        loss += (f_target(x) - y) ** 2

     individual.fitness = -loss / n_function_evaluations

@@ -184,8 +184,8 @@ def recording_callback(pop):

 f = pop.champion.to_func()
 x = np.linspace(-5.0, 5.0, 20)
-y = [f([x_i]) for x_i in x]
-y_target = [f_target([x_i]) for x_i in x]
+y = [f(x_i) for x_i in x]
+y_target = [f_target(x_i) for x_i in x]

 ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target")
 ax_function.plot(x, y, "x", label="Champion")
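
Note on the changes above: both f_target and the callable returned by to_func are now called with a plain scalar, so the list packing and unpacking (f([x])[0]) is gone. A self-contained sketch of the loss loop with an ordinary function standing in for the evolved callable (its body, the sample range, and the number of evaluations are illustrative):

import numpy as np

def f_target(x):
    return x ** 2 + 1.0

def f(x):  # stand-in for individual.to_func()
    return 1.1 * x ** 2 + 0.9

n_function_evaluations = 1000
loss = 0.0
for x in np.random.uniform(-4.0, 4.0, n_function_evaluations):
    y = f(x)  # scalar in, scalar out
    loss += (f_target(x) - y) ** 2

fitness = -loss / n_function_evaluations
print(fitness)
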
8 changes: 4 additions & 4 deletions examples/example_local_search_evolution_strategies.py
@@ -37,7 +37,7 @@


 def f_target(x):
-    return np.e * x[:, 0] ** 2 + 1.0 + np.pi
+    return np.e * x ** 2 + 1.0 + np.pi


 # %%
@@ -61,9 +61,9 @@ def inner_objective(ind, seed):
     f = ind.to_numpy()
     rng = np.random.RandomState(seed)
     batch_size = 500
-    x = rng.uniform(-5, 5, size=(batch_size, 1))
+    x = rng.uniform(-5, 5, size=batch_size)
     y = f(x)
-    return -np.mean((f_target(x) - y[:, 0]) ** 2)
+    return -np.mean((f_target(x) - y) ** 2)


 def objective(individual, seed):
@@ -165,7 +165,7 @@ def recording_callback(pop):
 ax_function.set_xlabel(r"$x$")


-print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}")
+print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}")

 history_fitness = np.array(history["fitness_parents"])
 ax_fitness.plot(np.max(history_fitness, axis=1), label="Champion")
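
Note on the changes above: this is the vectorized counterpart of the same fix; the callable from to_numpy is apparently evaluated on a 1-D batch and returns a 1-D array, so the (batch_size, 1) shape and the [:, 0] indexing are dropped. A small self-contained sketch with a plain numpy function in place of ind.to_numpy() (its body and the seed are illustrative):

import numpy as np

def f_target(x):
    return np.e * x ** 2 + 1.0 + np.pi

def f(x):  # stand-in for ind.to_numpy(); operates elementwise on a 1-D array
    return np.e * x ** 2 + 3.0

rng = np.random.RandomState(1234)
batch_size = 500
x = rng.uniform(-5, 5, size=batch_size)  # shape (500,), no trailing axis
y = f(x)  # shape (500,)
print(-np.mean((f_target(x) - y) ** 2))
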
10 changes: 5 additions & 5 deletions examples/example_minimal.py
@@ -31,7 +31,7 @@


 def f_target(x):
-    return x[0] ** 2 + 1.0
+    return x ** 2 + 1.0


 # %%
@@ -56,8 +56,8 @@ def objective(individual):
         # the callable returned from `to_func` accepts and returns
         # lists; accordingly we need to pack the argument and unpack
         # the return value
-        y = f([x])[0]
-        loss += (f_target([x]) - y) ** 2
+        y = f(x)
+        loss += (f_target(x) - y) ** 2

     individual.fitness = -loss / n_function_evaluations

@@ -128,8 +128,8 @@ def recording_callback(pop):

 f = pop.champion.to_func()
 x = np.linspace(-5.0, 5.0, 20)
-y = [f([x_i]) for x_i in x]
-y_target = [f_target([x_i]) for x_i in x]
+y = [f(x_i) for x_i in x]
+y_target = [f_target(x_i) for x_i in x]

 ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target")
 ax_function.plot(x, y, "x", label="Champion")
10 changes: 5 additions & 5 deletions examples/example_mountain_car.py
@@ -83,8 +83,8 @@ def inner_objective(f, seed, n_runs_per_individual, n_total_steps, *, render):
             if render:
                 env.render()

-            continuous_action = f(observation)
-            observation, reward, done, _ = env.step(continuous_action)
+            continuous_action = f(*observation)
+            observation, reward, done, _ = env.step([continuous_action])
             cum_reward_this_episode += reward

             if done:
@@ -231,8 +231,8 @@ def evaluate_champion(ind):
     cum_reward_this_episode = 0
     while len(cum_reward_all_episodes) < 100:

-        continuous_action = f(observation)
-        observation, reward, done, _ = env.step(continuous_action)
+        continuous_action = f(*observation)
+        observation, reward, done, _ = env.step([continuous_action])
         cum_reward_this_episode += reward

         if done:
@@ -299,7 +299,7 @@ def f(x):
 print("evolution ended")

 max_fitness = history["fitness_champion"][-1]
-best_expr = history["expr_champion"][-1][0]
+best_expr = history["expr_champion"][-1]
 best_expr_str = str(best_expr).replace("x_0", "x").replace("x_1", "dx/dt")
 print(f'solution with highest fitness: "{best_expr_str}" (fitness: {max_fitness:.05f})')

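
Note on the changes above: the evolved policy now takes the two observation components as separate scalar arguments (hence f(*observation)), and the single continuous action is wrapped in a list before being passed to env.step. A sketch of this calling convention with stand-in objects, so it runs without gym installed (the policy, the dummy environment, and all values are illustrative; the 4-tuple return of step only mirrors what the diff unpacks):

import numpy as np

def f(x_0, x_1):  # stand-in for the evolved policy: (position, velocity) -> force
    return 2.0 * x_1 - 0.1 * x_0

class DummyEnv:  # stand-in for the MountainCar environment used in the example
    def step(self, action):
        assert isinstance(action, list) and len(action) == 1
        observation = np.array([-0.5, 0.0])
        return observation, -0.1, False, {}

env = DummyEnv()
observation = np.array([-0.5, 0.01])
continuous_action = f(*observation)  # unpack the observation into scalar arguments
observation, reward, done, _ = env.step([continuous_action])
print(reward)
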
10 changes: 5 additions & 5 deletions examples/example_multi_genome.py
@@ -56,9 +56,9 @@ def objective(individual):
     # Note that f is now a list of functions because individual is an instance
     # of `InvidividualMultiGenome`
     f = individual.to_numpy()
-    x = np.random.uniform(-4, 4, (n_function_evaluations, 1))
-    y = np.piecewise(x, [x[:, 0] < 0, x[:, 0] >= 0], f)[:, 0]
-    loss = np.sum((f_target(x[:, 0]) - y) ** 2)
+    x = np.random.uniform(-4, 4, n_function_evaluations)
+    y = np.piecewise(x, [x < 0, x >= 0], f)
+    loss = np.sum((f_target(x) - y) ** 2)
     individual.fitness = -loss / n_function_evaluations
     return individual

@@ -131,8 +131,8 @@ def recording_callback(pop):
 f = pop.champion.to_numpy()
 x = np.linspace(-5.0, 5.0, 20)[:, np.newaxis]

-y = np.piecewise(x, [x[:, 0] < 0, x[:, 0] >= 0], f)[:, 0]
-y_target = f_target(x[:, 0])
+y = np.piecewise(x, [x < 0, x >= 0], f)
+y_target = f_target(x)

 ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target")
 ax_function.plot(x, y, "x", label="Champion")
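
Note on the changes above: to_numpy on a multi-genome individual returns a list of vectorized functions, and np.piecewise applies them to the negative and non-negative parts of a 1-D input, so the trailing axis and the [:, 0] indexing are no longer needed. A self-contained sketch with two hand-written functions standing in for that list (their bodies are illustrative):

import numpy as np

# stand-ins for the list of callables returned by individual.to_numpy()
f = [lambda x: -x, lambda x: x ** 2]

x = np.random.uniform(-4, 4, 1000)  # 1-D batch, no trailing axis
y = np.piecewise(x, [x < 0, x >= 0], f)  # first callable on x < 0, second on x >= 0
print(x.shape, y.shape)  # (1000,) (1000,)
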
2 changes: 1 addition & 1 deletion examples/example_parametrized_nodes.py
@@ -158,7 +158,7 @@ def recording_callback(pop):
 # After the evolutionary search has ended, we print the expression
 # with the highest fitness and plot the progression of the search.

-print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}")
+print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}")

 print("Best performing expression per generation (for fitness increase > 0.5):")
 old_fitness = -np.inf
11 changes: 5 additions & 6 deletions examples/example_piecewise_target_function.py
@@ -14,7 +14,7 @@
 Options:
   -h --help
-  --max-generations=<N>  Maximum number of generations [default: 5000]
+  --max-generations=<N>  Maximum number of generations [default: 2000]
 """

 import functools
@@ -61,7 +61,7 @@ def objective(individual, rng):
     n_function_evaluations = 1000

     f = individual.to_numpy()
-    x = rng.uniform(-5, 5, size=(n_function_evaluations, 1))
+    x = rng.uniform(-5, 5, size=n_function_evaluations)
     y = f(x)

     loss = np.mean((f_target(x) - y) ** 2)
@@ -125,19 +125,18 @@ def recording_callback(pop):
 # %%
 # After the evolutionary search has ended, we print the expression
 # with the highest fitness and plot the search progression and target and evolved functions.
-print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}")
+print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}")

 fig = plt.figure(1)
 plt.plot(history["fitness_champion"])
 plt.ylim(1.1 * min(history["fitness_champion"]), 5)
 plt.xlabel("Generation")
 plt.ylabel("Loss (Fitness)")
 plt.legend(["Champion loss per generation"])
-plt.title({pop.champion.to_sympy()[0]})
+plt.title({pop.champion.to_sympy()})
 fig.savefig("example_piecewise_fitness_history.pdf")

 x = np.arange(-5, 5, 0.01)
-x.reshape(x.size, 1)
 champion_numpy = pop.champion.to_numpy()

 fig = plt.figure(2)
@@ -148,7 +147,7 @@ def recording_callback(pop):
 plt.title("Target function")
 plt.legend(["target"])
 plt.subplot(122)
-plt.plot(x, champion_numpy(x.reshape(x.size, 1)), "r")
+plt.plot(x, champion_numpy(x), "r")
 plt.xlabel("x")
 plt.ylabel("y")
 plt.title("Evolved function")
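
Note on the deleted line above: the standalone x.reshape(x.size, 1) never modified x, because reshape returns a new array rather than changing its argument in place; with the evolved callable now accepting 1-D input, the reshape can simply be dropped. A tiny illustration:

import numpy as np

x = np.arange(-5, 5, 0.01)
x.reshape(x.size, 1)  # returns a reshaped array that is immediately discarded
print(x.shape)  # (1000,) -- x itself is unchanged
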
6 changes: 3 additions & 3 deletions examples/example_reorder.py
@@ -36,7 +36,7 @@


 def f_target(x):
-    return x[0] ** 2 + 1.0
+    return x ** 2 + 1.0


 # %%
@@ -60,8 +60,8 @@ def objective(individual):
         # the callable returned from `to_func` accepts and returns
         # lists; accordingly we need to pack the argument and unpack
         # the return value
-        y = f([x])[0]
-        loss += (f_target([x]) - y) ** 2
+        y = f(x)
+        loss += (f_target(x) - y) ** 2

     individual.fitness = -loss / n_function_evaluations

