From f72523bc3c8aaff9c50a32fd4ceef3363b729a56 Mon Sep 17 00:00:00 2001 From: Jakob Jordan Date: Sat, 20 Mar 2021 15:22:57 +0100 Subject: [PATCH] Redesign signatures of compilation targets --- cgp/cartesian_graph.py | 100 ++++++++++++------ cgp/node_impl.py | 4 +- cgp/node_input_output.py | 2 +- cgp/node_validation.py | 12 +-- cgp/utils.py | 18 ++-- examples/example_caching.py | 2 +- .../example_differential_evo_regression.py | 2 +- examples/example_evo_regression.py | 6 +- examples/example_fec_caching.py | 2 +- examples/example_hurdles.py | 14 +-- ...ample_local_search_evolution_strategies.py | 8 +- examples/example_minimal.py | 10 +- examples/example_mountain_car.py | 10 +- examples/example_multi_genome.py | 10 +- examples/example_parametrized_nodes.py | 2 +- examples/example_piecewise_target_function.py | 11 +- examples/example_reorder.py | 6 +- test/test_cartesian_graph.py | 53 +++++----- test/test_ea_mu_plus_lambda.py | 4 +- test/test_hl_api.py | 9 +- test/test_individual.py | 16 +-- test/test_ls_evolution_strategies.py | 22 ++-- test/test_node.py | 47 ++++---- test/test_utils.py | 28 +++-- 24 files changed, 225 insertions(+), 173 deletions(-) diff --git a/cgp/cartesian_graph.py b/cgp/cartesian_graph.py index f825af16..3cab1afc 100644 --- a/cgp/cartesian_graph.py +++ b/cgp/cartesian_graph.py @@ -2,7 +2,7 @@ import copy import math # noqa: F401 import re -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Union import numpy as np # noqa: F401 @@ -230,24 +230,35 @@ def _fill_parameter_values(self, func_str: str) -> str: ) return func_str - def to_func(self) -> Callable[[List[float]], List[float]]: - """Compile the function(s) represented by the graph. + def to_func(self) -> Callable[..., List[float]]: + """Create a Python callable implementing the function described by + this graph. - Generates a definition of the function in Python code and - executes the function definition to create a Callable. + The returned callable expects as many arguments as the number + of inputs defined in the genome. The function returns a tuple + with length equal to the number of outputs defined in the + genome. For convenience, if only a single output is defined + the function will *not* return a tuple but only its first + element. Returns ------- Callable - Callable executing the function(s) represented by the graph. + """ self._format_output_str_of_all_nodes() s = ", ".join(node.output_str for node in self.output_nodes) func_str = f"""\ -def _f(x): +def _f(*x): if len(x) != {self._n_inputs}: raise ValueError(f'input has length {{len(x)}}, expected {self._n_inputs}') - return [{s}] + + res = [{s}] + + if len(res) == 1: + return res[0] + else: + return res """ func_str = self._fill_parameter_values(func_str) exec(func_str, {**globals(), **CUSTOM_ATOMIC_OPERATORS}, locals()) @@ -263,30 +274,38 @@ def _format_output_str_numpy_of_all_nodes(self): for node in active_nodes[hidden_column_idx]: node.format_output_str_numpy(self) - def to_numpy(self) -> Callable[[np.ndarray], np.ndarray]: - """Compile the function(s) represented by the graph to NumPy - expression(s). + def to_numpy(self) -> Callable[..., List[np.ndarray]]: + """Create a NumPy-array-compatible Python callable implementing the + function described by this graph. - Generates a definition of the function in Python code and - executes the function definition to create a Callable - accepting NumPy arrays. 
+ The returned callable expects as many arguments as the number + of inputs defined in the genome. Every argument needs to be a + NumPy array of equal length. The function returns a tuple with + length equal to the number of outputs defined in the + genome. Each element will have the same length as the input + arrays. For convenience, if only a single output is defined + the function will *not* return a tuple but only its first + element. Returns ------- Callable - Callable executing the function(s) represented by the graph. + """ self._format_output_str_numpy_of_all_nodes() s = ", ".join(node.output_str for node in self.output_nodes) func_str = f"""\ -def _f(x): - if (len(x.shape) != 2) or (x.shape[1] != {self._n_inputs}): - raise ValueError( - f"input has shape {{tuple(x.shape)}}, expected (<batch size>, {self._n_inputs})" - ) +def _f(*x): + if len(x) != {self._n_inputs}: + raise ValueError(f'input has length {{len(x)}}, expected {self._n_inputs}') - return np.stack([{s}], axis=1) + res = [{s}] + + if len(res) == 1: + return res[0] + else: + return res """ func_str = self._fill_parameter_values(func_str) exec(func_str, {**globals(), **CUSTOM_ATOMIC_OPERATORS}, locals()) @@ -294,15 +313,17 @@ def _f(x): return locals()["_f"] def to_torch(self) -> "torch.nn.Module": - """Compile the function(s) represented by the graph to a Torch class. + """Create a Torch nn.Module instance implementing the function defined + by this graph. - Generates a definition of the Torch class in Python code and - executes it to create an instance of the class. + The generated instance will have a `forward` method accepting + a Torch tensor of dimension (<batch size>, n_inputs) and + returning a tensor of dimension (<batch size>, n_outputs). Returns ------- torch.nn.Module - Instance of the PyTorch class. + """ if not torch_available: raise ModuleNotFoundError("No module named 'torch' (extra requirement)") @@ -359,10 +380,15 @@ def _format_output_str_sympy_of_all_nodes(self): for node in active_nodes[hidden_column_idx]: node.format_output_str_sympy(self) - def to_sympy(self, simplify: Optional[bool] = True) -> List["sympy_expr.Expr"]: - """Compile the function(s) represented by the graph to a SymPy expression. + def to_sympy( + self, simplify: Optional[bool] = True + ) -> Union["sympy_expr.Expr", List["sympy_expr.Expr"]]: + """Create SymPy expression(s) representing the function(s) described + by this graph. - Generates one SymPy expression for each output node. + Returns a list of SymPy expressions, one for each output + node. For convenience, if only one output node is defined, it + directly returns its expression. Parameters ---------- @@ -372,15 +398,17 @@ def to_sympy(self, simplify: Optional[bool] = True) -> List["sympy_expr.Expr"]: Returns ---------- - List[sympy.core.expr.Expr] - List of SymPy expressions. + List[sympy.core.expr.Expr] or sympy.core.expr.Expr + List of SymPy expressions or single expression.
+ """ + if not sympy_available: raise ModuleNotFoundError("No module named 'sympy' (extra requirement)") self._format_output_str_sympy_of_all_nodes() - sympy_exprs = [] + sympy_exprs: List = [] for output_node in self.output_nodes: # replace all input-variable strings with sympy-compatible symbol @@ -396,12 +424,16 @@ def to_sympy(self, simplify: Optional[bool] = True) -> List["sympy_expr.Expr"]: # sympy should not automatically simplify the expression sympy_exprs.append(sympy.sympify(s, evaluate=False)) - if not simplify: - return sympy_exprs - else: # simplify expression if desired and possible + if simplify: for i, expr in enumerate(sympy_exprs): try: sympy_exprs[i] = expr.simplify() except TypeError: RuntimeWarning(f"SymPy could not simplify expression: {expr}") + + # if the genome encodes only a single function we directly + # return the sympy expression instead of a list of length 1 + if len(sympy_exprs) == 1: + return sympy_exprs[0] + else: return sympy_exprs diff --git a/cgp/node_impl.py b/cgp/node_impl.py index 907c731a..716f08e7 100644 --- a/cgp/node_impl.py +++ b/cgp/node_impl.py @@ -6,7 +6,7 @@ class ConstantFloat(OperatorNode): _arity = 0 _def_output = "1.0" - _def_numpy_output = "np.ones(x.shape[0]) * 1.0" + _def_numpy_output = "np.ones(len(x[0])) * 1.0" _def_torch_output = "torch.ones(1).expand(x.shape[0]) * 1.0" @@ -57,7 +57,7 @@ class Parameter(OperatorNode): _arity = 0 _initial_values = {"
<p>
": lambda: 1.0} _def_output = "
<p>
" - _def_numpy_output = "np.ones(x.shape[0]) *
<p>
" + _def_numpy_output = "np.ones(len(x[0])) *
<p>
" _def_torch_output = "torch.ones(1).expand(x.shape[0]) *
<p>
" diff --git a/cgp/node_input_output.py b/cgp/node_input_output.py index d610b12d..a771977a 100644 --- a/cgp/node_input_output.py +++ b/cgp/node_input_output.py @@ -22,7 +22,7 @@ def format_output_str(self, graph: "CartesianGraph") -> None: self._output_str = f"x[{self._idx}]" def format_output_str_numpy(self, graph: "CartesianGraph") -> None: - self._output_str = f"x[:, {self._idx}]" + self.format_output_str(graph) def format_output_str_torch(self, graph: "CartesianGraph") -> None: self._output_str = f"x[:, {self._idx}]" diff --git a/cgp/node_validation.py b/cgp/node_validation.py index e2118e94..aea9dbc6 100644 --- a/cgp/node_validation.py +++ b/cgp/node_validation.py @@ -3,7 +3,6 @@ import numpy as np try: - import sympy # noqa: F401 from sympy.core import expr as sympy_expr # noqa: F401 sympy_available = True @@ -51,8 +50,8 @@ def check_to_func(cls: Type["OperatorNode"]) -> None: genome = _create_genome(cls) f = CartesianGraph(genome).to_func() - x = [1.0] - f(x)[0] + x = 1.0 + f(x) def check_to_numpy(cls: Type["OperatorNode"]) -> None: @@ -62,8 +61,8 @@ def check_to_numpy(cls: Type["OperatorNode"]) -> None: genome = _create_genome(cls) f = CartesianGraph(genome).to_numpy() - x = np.ones((3, 1)) - f(x)[0] + x = np.ones(3) + f(x) def check_to_torch(cls: Type["OperatorNode"]) -> None: @@ -91,6 +90,7 @@ def check_to_sympy(cls: Type["OperatorNode"]) -> None: genome = _create_genome(cls) - f = CartesianGraph(genome).to_sympy()[0] + f = CartesianGraph(genome).to_sympy() + assert isinstance(f, sympy_expr.Expr) x = [1.0] f.subs("x_0", x[0]).evalf() diff --git a/cgp/utils.py b/cgp/utils.py index 74a6f8d2..f91dd8f0 100644 --- a/cgp/utils.py +++ b/cgp/utils.py @@ -106,16 +106,22 @@ def compute_key_from_numpy_evaluation_and_args( ind = args[0] if isinstance(ind, IndividualSingleGenome): f_single = ind.to_numpy() - x = rng.uniform(_min_value, _max_value, (_batch_size, ind.genome._n_inputs)) - y = f_single(x) - s = np.array_str(y, precision=15) + x = [ + rng.uniform(_min_value, _max_value, size=_batch_size) + for _ in range(ind.genome._n_inputs) + ] + y = f_single(*x) + s = np.array_str(np.array(y), precision=15) elif isinstance(ind, IndividualMultiGenome): f_multi = ind.to_numpy() s = "" for i in range(len(ind.genome)): - x = rng.uniform(_min_value, _max_value, (_batch_size, ind.genome[i]._n_inputs)) - y = f_multi[i](x) - s += np.array_str(y, precision=15) + x = [ + rng.uniform(_min_value, _max_value, size=_batch_size) + for _ in range(ind.genome[i]._n_inputs) + ] + y = f_multi[i](*x) + s += np.array_str(np.array(y), precision=15) else: assert False # should never be reached diff --git a/examples/example_caching.py b/examples/example_caching.py index 4123bdd9..aafc4825 100644 --- a/examples/example_caching.py +++ b/examples/example_caching.py @@ -46,7 +46,7 @@ def inner_objective(ind): expr = ind.to_sympy() loss = [] for x0 in np.linspace(-2.0, 2.0, 100): - y = float(expr[0].subs({"x_0": x0}).evalf()) + y = float(expr.subs({"x_0": x0}).evalf()) loss.append((f_target(x0) - y) ** 2) time.sleep(0.25) # emulate long fitness evaluation diff --git a/examples/example_differential_evo_regression.py b/examples/example_differential_evo_regression.py index 18f1a958..06e388c6 100644 --- a/examples/example_differential_evo_regression.py +++ b/examples/example_differential_evo_regression.py @@ -176,7 +176,7 @@ def recording_callback(pop): ax_function.set_xlabel(r"$x$") -print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}") +print(f"Final expression {pop.champion.to_sympy()} 
with fitness {pop.champion.fitness}") history_fitness = np.array(history["fitness_parents"]) ax_fitness.plot(np.max(history_fitness, axis=1), label="Champion") diff --git a/examples/example_evo_regression.py b/examples/example_evo_regression.py index 57744749..a070081f 100644 --- a/examples/example_evo_regression.py +++ b/examples/example_evo_regression.py @@ -84,7 +84,7 @@ def objective(individual, target_function, seed): "ignore", message="invalid value encountered in double_scalars" ) try: - y[i] = f_graph(x_i)[0] + y[i] = f_graph(x_i[0], x_i[1]) except ZeroDivisionError: individual.fitness = -np.inf return individual @@ -123,7 +123,7 @@ def evolution(f_target): Individual Individual with the highest fitness in the last generation """ - population_params = {"n_parents": 10, "seed": 8188211} + population_params = {"n_parents": 10, "seed": 818821} genome_params = { "n_inputs": 2, @@ -191,7 +191,7 @@ def recording_callback(pop): x_0_range = np.linspace(-5.0, 5.0, 20) x_1_range = np.ones_like(x_0_range) * 2.0 # fix x_1 such than 1d plot makes sense - y = [f_graph([x_0, x_1_range[0]]) for x_0 in x_0_range] + y = [f_graph(x_0, x_1_range[0]) for x_0 in x_0_range] y_target = target_function(np.hstack([x_0_range.reshape(-1, 1), x_1_range.reshape(-1, 1)])) ax_function.plot(x_0_range, y_target, lw=2, alpha=0.5, label="Target") diff --git a/examples/example_fec_caching.py b/examples/example_fec_caching.py index 03f6e168..c6d9795d 100644 --- a/examples/example_fec_caching.py +++ b/examples/example_fec_caching.py @@ -61,7 +61,7 @@ def inner_objective(ind): loss = [] for x_0 in np.linspace(-2.0, 2.0, 100): - y = f([x_0]) + y = f(x_0) loss.append((f_target(x_0) - y) ** 2) time.sleep(0.25) # emulate long fitness evaluation diff --git a/examples/example_hurdles.py b/examples/example_hurdles.py index 1db7b52b..c31013f7 100644 --- a/examples/example_hurdles.py +++ b/examples/example_hurdles.py @@ -44,7 +44,7 @@ def f_target(x): - return x[0] ** 2 + 1.0 + return x ** 2 + 1.0 # %% @@ -73,8 +73,8 @@ def objective_one(individual): # the callable returned from `to_func` accepts and returns # lists; accordingly we need to pack the argument and unpack # the return value - y = f([x])[0] - loss += (f_target([x]) - y) ** 2 + y = f(x) + loss += (f_target(x) - y) ** 2 individual.fitness = -loss / n_function_evaluations @@ -97,8 +97,8 @@ def objective_two(individual): # the callable returned from `to_func` accepts and returns # lists; accordingly we need to pack the argument and unpack # the return value - y = f([x])[0] - loss += (f_target([x]) - y) ** 2 + y = f(x) + loss += (f_target(x) - y) ** 2 individual.fitness = -loss / n_function_evaluations @@ -184,8 +184,8 @@ def recording_callback(pop): f = pop.champion.to_func() x = np.linspace(-5.0, 5.0, 20) -y = [f([x_i]) for x_i in x] -y_target = [f_target([x_i]) for x_i in x] +y = [f(x_i) for x_i in x] +y_target = [f_target(x_i) for x_i in x] ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target") ax_function.plot(x, y, "x", label="Champion") diff --git a/examples/example_local_search_evolution_strategies.py b/examples/example_local_search_evolution_strategies.py index 85702669..c2d4ba32 100644 --- a/examples/example_local_search_evolution_strategies.py +++ b/examples/example_local_search_evolution_strategies.py @@ -37,7 +37,7 @@ def f_target(x): - return np.e * x[:, 0] ** 2 + 1.0 + np.pi + return np.e * x ** 2 + 1.0 + np.pi # %% @@ -61,9 +61,9 @@ def inner_objective(ind, seed): f = ind.to_numpy() rng = np.random.RandomState(seed) batch_size = 500 - x = 
rng.uniform(-5, 5, size=(batch_size, 1)) + x = rng.uniform(-5, 5, size=batch_size) y = f(x) - return -np.mean((f_target(x) - y[:, 0]) ** 2) + return -np.mean((f_target(x) - y) ** 2) def objective(individual, seed): @@ -165,7 +165,7 @@ def recording_callback(pop): ax_function.set_xlabel(r"$x$") -print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}") +print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}") history_fitness = np.array(history["fitness_parents"]) ax_fitness.plot(np.max(history_fitness, axis=1), label="Champion") diff --git a/examples/example_minimal.py b/examples/example_minimal.py index 70640e50..64f85dec 100644 --- a/examples/example_minimal.py +++ b/examples/example_minimal.py @@ -31,7 +31,7 @@ def f_target(x): - return x[0] ** 2 + 1.0 + return x ** 2 + 1.0 # %% @@ -56,8 +56,8 @@ def objective(individual): # the callable returned from `to_func` accepts and returns # lists; accordingly we need to pack the argument and unpack # the return value - y = f([x])[0] - loss += (f_target([x]) - y) ** 2 + y = f(x) + loss += (f_target(x) - y) ** 2 individual.fitness = -loss / n_function_evaluations @@ -128,8 +128,8 @@ def recording_callback(pop): f = pop.champion.to_func() x = np.linspace(-5.0, 5.0, 20) -y = [f([x_i]) for x_i in x] -y_target = [f_target([x_i]) for x_i in x] +y = [f(x_i) for x_i in x] +y_target = [f_target(x_i) for x_i in x] ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target") ax_function.plot(x, y, "x", label="Champion") diff --git a/examples/example_mountain_car.py b/examples/example_mountain_car.py index c9caab51..d3813fae 100644 --- a/examples/example_mountain_car.py +++ b/examples/example_mountain_car.py @@ -83,8 +83,8 @@ def inner_objective(f, seed, n_runs_per_individual, n_total_steps, *, render): if render: env.render() - continuous_action = f(observation) - observation, reward, done, _ = env.step(continuous_action) + continuous_action = f(*observation) + observation, reward, done, _ = env.step([continuous_action]) cum_reward_this_episode += reward if done: @@ -231,8 +231,8 @@ def evaluate_champion(ind): cum_reward_this_episode = 0 while len(cum_reward_all_episodes) < 100: - continuous_action = f(observation) - observation, reward, done, _ = env.step(continuous_action) + continuous_action = f(*observation) + observation, reward, done, _ = env.step([continuous_action]) cum_reward_this_episode += reward if done: @@ -299,7 +299,7 @@ def f(x): print("evolution ended") max_fitness = history["fitness_champion"][-1] - best_expr = history["expr_champion"][-1][0] + best_expr = history["expr_champion"][-1] best_expr_str = str(best_expr).replace("x_0", "x").replace("x_1", "dx/dt") print(f'solution with highest fitness: "{best_expr_str}" (fitness: {max_fitness:.05f})') diff --git a/examples/example_multi_genome.py b/examples/example_multi_genome.py index ce077ae1..a0295850 100644 --- a/examples/example_multi_genome.py +++ b/examples/example_multi_genome.py @@ -56,9 +56,9 @@ def objective(individual): # Note that f is now a list of functions because individual is an instance # of `InvidividualMultiGenome` f = individual.to_numpy() - x = np.random.uniform(-4, 4, (n_function_evaluations, 1)) - y = np.piecewise(x, [x[:, 0] < 0, x[:, 0] >= 0], f)[:, 0] - loss = np.sum((f_target(x[:, 0]) - y) ** 2) + x = np.random.uniform(-4, 4, n_function_evaluations) + y = np.piecewise(x, [x < 0, x >= 0], f) + loss = np.sum((f_target(x) - y) ** 2) individual.fitness = -loss / n_function_evaluations return individual 
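Usage sketch, not part of the diff: the per-array calling convention this hunk relies on, assuming only API already exercised in this patch (the positional `cgp.Genome(n_inputs, n_outputs, n_columns, n_rows, primitives)` constructor, `genome.randomize(rng)`, and `cgp.CartesianGraph(genome).to_numpy()`).

    import numpy as np
    import cgp

    # single-input, single-output genome built from addition nodes only
    genome = cgp.Genome(1, 1, 2, 1, (cgp.Add,))
    genome.randomize(np.random.RandomState(1234))
    f = cgp.CartesianGraph(genome).to_numpy()

    x = np.linspace(-4.0, 4.0, 10)  # one 1d array per input, no batch dimension
    y = f(x)  # single output, so a plain 1d array comes back, not a list
    assert y.shape == x.shape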
@@ -131,8 +131,8 @@ def recording_callback(pop): f = pop.champion.to_numpy() x = np.linspace(-5.0, 5.0, 20)[:, np.newaxis] -y = np.piecewise(x, [x[:, 0] < 0, x[:, 0] >= 0], f)[:, 0] -y_target = f_target(x[:, 0]) +y = np.piecewise(x, [x < 0, x >= 0], f) +y_target = f_target(x) ax_function.plot(x, y_target, lw=2, alpha=0.5, label="Target") ax_function.plot(x, y, "x", label="Champion") diff --git a/examples/example_parametrized_nodes.py b/examples/example_parametrized_nodes.py index e5135f9c..ccbb489c 100644 --- a/examples/example_parametrized_nodes.py +++ b/examples/example_parametrized_nodes.py @@ -158,7 +158,7 @@ def recording_callback(pop): # After the evolutionary search has ended, we print the expression # with the highest fitness and plot the progression of the search. -print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}") +print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}") print("Best performing expression per generation (for fitness increase > 0.5):") old_fitness = -np.inf diff --git a/examples/example_piecewise_target_function.py b/examples/example_piecewise_target_function.py index 2aee6360..d16980f0 100644 --- a/examples/example_piecewise_target_function.py +++ b/examples/example_piecewise_target_function.py @@ -14,7 +14,7 @@ Options: -h --help - --max-generations= Maximum number of generations [default: 5000] + --max-generations= Maximum number of generations [default: 2000] """ import functools @@ -61,7 +61,7 @@ def objective(individual, rng): n_function_evaluations = 1000 f = individual.to_numpy() - x = rng.uniform(-5, 5, size=(n_function_evaluations, 1)) + x = rng.uniform(-5, 5, size=n_function_evaluations) y = f(x) loss = np.mean((f_target(x) - y) ** 2) @@ -125,7 +125,7 @@ def recording_callback(pop): # %% # After the evolutionary search has ended, we print the expression # with the highest fitness and plot the search progression and target and evolved functions. 
-print(f"Final expression {pop.champion.to_sympy()[0]} with fitness {pop.champion.fitness}") +print(f"Final expression {pop.champion.to_sympy()} with fitness {pop.champion.fitness}") fig = plt.figure(1) plt.plot(history["fitness_champion"]) @@ -133,11 +133,10 @@ def recording_callback(pop): plt.xlabel("Generation") plt.ylabel("Loss (Fitness)") plt.legend(["Champion loss per generation"]) -plt.title({pop.champion.to_sympy()[0]}) +plt.title({pop.champion.to_sympy()}) fig.savefig("example_piecewise_fitness_history.pdf") x = np.arange(-5, 5, 0.01) -x.reshape(x.size, 1) champion_numpy = pop.champion.to_numpy() fig = plt.figure(2) @@ -148,7 +147,7 @@ def recording_callback(pop): plt.title("Target function") plt.legend(["target"]) plt.subplot(122) -plt.plot(x, champion_numpy(x.reshape(x.size, 1)), "r") +plt.plot(x, champion_numpy(x), "r") plt.xlabel("x") plt.ylabel("y") plt.title("Evolved function") diff --git a/examples/example_reorder.py b/examples/example_reorder.py index 47da116b..1f52eb74 100644 --- a/examples/example_reorder.py +++ b/examples/example_reorder.py @@ -36,7 +36,7 @@ def f_target(x): - return x[0] ** 2 + 1.0 + return x ** 2 + 1.0 # %% @@ -60,8 +60,8 @@ def objective(individual): # the callable returned from `to_func` accepts and returns # lists; accordingly we need to pack the argument and unpack # the return value - y = f([x])[0] - loss += (f_target([x]) - y) ** 2 + y = f(x) + loss += (f_target(x) - y) ** 2 individual.fitness = -loss / n_function_evaluations diff --git a/test/test_cartesian_graph.py b/test/test_cartesian_graph.py index 21353892..ddfc6925 100644 --- a/test/test_cartesian_graph.py +++ b/test/test_cartesian_graph.py @@ -29,9 +29,9 @@ def test_to_func_simple(): f = graph.to_func() x = [5.0, 2.0] - y = f(x) + y = f(*x) - assert x[0] + x[1] == pytest.approx(y[0]) + assert x[0] + x[1] == pytest.approx(y) primitives = (cgp.Sub,) genome = cgp.Genome(2, 1, 1, 1, primitives) @@ -54,9 +54,9 @@ def test_to_func_simple(): f = graph.to_func() x = [5.0, 2.0] - y = f(x) + y = f(*x) - assert x[0] - x[1] == pytest.approx(y[0]) + assert x[0] - x[1] == pytest.approx(y) def test_compile_two_columns(): @@ -84,9 +84,9 @@ def test_compile_two_columns(): f = graph.to_func() x = [5.0, 2.0] - y = f(x) + y = f(*x) - assert x[0] - (x[0] + x[1]) == pytest.approx(y[0]) + assert x[0] - (x[0] + x[1]) == pytest.approx(y) def test_compile_two_columns_two_rows(): @@ -123,7 +123,7 @@ def test_compile_two_columns_two_rows(): f = graph.to_func() x = [5.0, 2.0] - y = f(x) + y = f(*x) assert x[0] + (x[0] + x[1]) == pytest.approx(y[0]) assert (x[0] + x[1]) + (x[0] - x[1]) == pytest.approx(y[1]) @@ -168,9 +168,9 @@ def test_compile_addsubmul(): f = graph.to_func() x = [5.0, 2.0] - y = f(x) + y = f(*x) - assert (x[0] * x[1]) - (x[0] - x[1]) == pytest.approx(y[0]) + assert (x[0] * x[1]) - (x[0] - x[1]) == pytest.approx(y) def test_to_numpy(): @@ -200,7 +200,7 @@ def test_to_numpy(): graph = cgp.CartesianGraph(genome) f = graph.to_numpy() - x = np.random.normal(size=(100, 1)) + x = np.random.normal(size=100) y = f(x) y_target = x ** 2 + 1.0 @@ -308,9 +308,14 @@ def test_to_numpy(): def test_compile_numpy_output_shape(genome, batch_size): c = cgp.CartesianGraph(genome).to_numpy() - x = np.random.normal(size=(batch_size, 1)) + x = np.random.normal(size=batch_size) y = c(x) - assert y.shape == (batch_size, genome._n_outputs) + + if genome._n_outputs == 1: + assert y.shape == (batch_size,) + else: + assert len(y) == genome._n_outputs + assert y[0].shape == (batch_size,) @pytest.mark.parametrize("genome, 
batch_size", itertools.product(genome, batch_sizes)) @@ -353,11 +358,11 @@ def test_to_sympy(): graph = cgp.CartesianGraph(genome) y_0_target = sympy.sympify("x_0 + x_0 + 1.0", evaluate=False) - y_0 = graph.to_sympy(simplify=False)[0] + y_0 = graph.to_sympy(simplify=False) assert y_0_target == y_0 y_0_target = sympy.sympify("2 * x_0 + 1.0", evaluate=True) - y_0 = graph.to_sympy()[0] + y_0 = graph.to_sympy() assert y_0_target == y_0 for x in np.random.normal(size=100): @@ -387,7 +392,7 @@ def test_allow_sympy_expr_with_infinities(): ] graph = cgp.CartesianGraph(genome) - expr = graph.to_sympy(simplify=True)[0] + expr = graph.to_sympy(simplify=True) # complex infinity should appear in expression assert "zoo" in str(expr) @@ -426,14 +431,14 @@ def test_input_dim_python(rng_seed): # fail for too short input with pytest.raises(ValueError): - f([None]) + f(None) # fail for too long input with pytest.raises(ValueError): - f([None, None, None]) + f(None, None, None) # do not fail for input with correct length - f([None, None]) + f(None, None) def test_input_dim_numpy(rng_seed): @@ -443,20 +448,20 @@ def test_input_dim_numpy(rng_seed): genome.randomize(rng) f = cgp.CartesianGraph(genome).to_numpy() - # fail for missing batch dimension - with pytest.raises(ValueError): - f(np.array([1.0])) + # # fail for missing batch dimension + # with pytest.raises(ValueError): + # f(np.array([1.0])) # fail for too short input with pytest.raises(ValueError): - f(np.array([1.0]).reshape(-1, 1)) + f(np.array([1.0])) # fail for too long input with pytest.raises(ValueError): - f(np.array([1.0, 1.0, 1.0]).reshape(-1, 3)) + f(np.array([1.0]), np.array([1.0]), np.array([1.0])) # do not fail for input with correct shape - f(np.array([1.0, 1.0]).reshape(-1, 2)) + f(np.array([1.0]), np.array([1.0])) def test_input_dim_torch(rng_seed): diff --git a/test/test_ea_mu_plus_lambda.py b/test/test_ea_mu_plus_lambda.py index 27d3a691..1bfe4a81 100644 --- a/test/test_ea_mu_plus_lambda.py +++ b/test/test_ea_mu_plus_lambda.py @@ -102,7 +102,9 @@ def test_local_search_is_only_applied_to_best_k_individuals( torch = pytest.importorskip("torch") def inner_objective(f): - return torch.nn.MSELoss()(torch.Tensor([[1.1]]), f(torch.zeros(1, 1))) + return torch.nn.MSELoss()( + torch.DoubleTensor([[1.1]]), f(torch.zeros(1, 1, dtype=torch.double)) + ) def objective(ind): if not ind.fitness_is_None(): diff --git a/test/test_hl_api.py b/test/test_hl_api.py index fd87a176..ff47ec87 100644 --- a/test/test_hl_api.py +++ b/test/test_hl_api.py @@ -22,9 +22,10 @@ def f_target(x): # target function return x[:, 0] - x[:, 1] x = np.random.normal(size=(n_function_evaluations, 2)) - y = np.empty(n_function_evaluations) - for i, x_i in enumerate(x): - y[i] = f_graph(x_i)[0] + y = f_graph(x[:, 0], x[:, 1]) + # y = np.empty(n_function_evaluations) + # for i, x_i in enumerate(x): + # y[i] = f_graph(x_i) loss = np.mean((f_target(x) - y) ** 2) individual.fitness = -loss @@ -96,7 +97,7 @@ def f1(x): x1 = np.random.uniform(size=2) loss += float((f0(x0) - y0(x0)) ** 2) - loss += float((f1(x1) - y1(x1)) ** 2) + loss += float((f1(x1) - y1(x1[0], x1[1])) ** 2) individual.fitness = -loss return individual diff --git a/test/test_individual.py b/test/test_individual.py index 3fe8719a..5d0f2c2d 100644 --- a/test/test_individual.py +++ b/test/test_individual.py @@ -105,8 +105,8 @@ def test_individual_with_parameter_python(individual_type, params, graph_input_v f = _unpack_evaluation(individual.to_func(), individual_type) for xi in x: - y = f([xi]) - assert y[0] == 
pytest.approx(target_function(xi, c)) + y = f(xi) + assert y == pytest.approx(target_function(xi, c)) @pytest.mark.parametrize("individual_type", ["SingleGenome", "MultiGenome"]) @@ -138,7 +138,7 @@ def test_individual_with_parameter_sympy(individual_type, params, graph_input_va x, c = graph_input_values.x, graph_input_values.c _unpack_genome(individual, individual_type)._parameter_names_to_values[""] = c - f = _unpack_evaluation(individual.to_sympy(), individual_type)[0] + f = _unpack_evaluation(individual.to_sympy(), individual_type) for xi in x: y = f.subs("x_0", xi).evalf() @@ -216,17 +216,17 @@ def f_target(x): assert loss.detach().numpy() < 1e-15 # use old parameter values to compile function - x = [3.0] + x = 3.0 f_func = _unpack_evaluation(individual.to_func(), individual_type) y = f_func(x) - assert y[0] != pytest.approx(f_target(x[0])) + assert y != pytest.approx(f_target(x)) # update parameter values from torch class and compile new # function with new parameter values individual.update_parameters_from_torch_class(f) f_func = _unpack_evaluation(individual.to_func(), individual_type) y = f_func(x) - assert y[0] == pytest.approx(f_target(x[0])) + assert y == pytest.approx(f_target(x)) @pytest.mark.parametrize("individual_type", ["SingleGenome", "MultiGenome"]) @@ -262,7 +262,7 @@ def test_update_parameters_from_torch_class_resets_fitness(individual_type): g = _unpack_evaluation(individual.to_func(), individual_type) x = 2.0 - assert g([x])[0] == pytest.approx(math.pi * x) + assert g(x) == pytest.approx(math.pi * x) @pytest.mark.parametrize("individual_type", ["SingleGenome", "MultiGenome"]) @@ -298,7 +298,7 @@ def test_update_parameters_from_torch_class_does_not_reset_fitness_for_unused_pa g = _unpack_evaluation(individual.to_func(), individual_type) x = 2.0 - assert g([x])[0] == pytest.approx(x ** 2) + assert g(x) == pytest.approx(x ** 2) @pytest.mark.parametrize("individual_type", ["SingleGenome", "MultiGenome"]) diff --git a/test/test_ls_evolution_strategies.py b/test/test_ls_evolution_strategies.py index 5ceaf4f5..f1d4cd84 100644 --- a/test/test_ls_evolution_strategies.py +++ b/test/test_ls_evolution_strategies.py @@ -49,9 +49,11 @@ def objective(ind): def _objective_convergence_to_maximum(ind): f = ind.to_numpy() - x_dummy = np.zeros((1, 1)) # input, not used - target_value = np.array([[1.0, 1.1]]) - return -np.sum((f(x_dummy) - target_value) ** 2) + x_dummy = np.zeros(1) # input, not used + target_value_0 = 1.0 + target_value_1 = 1.1 + y = f(x_dummy) + return -((y[0] - target_value_0) ** 2) - (y[1] - target_value_1) ** 2 def test_convergence_to_maximum(rng_seed): @@ -106,10 +108,16 @@ def test_step_towards_maximum_multi_genome(rng_seed): def objective(ind): f = ind.to_numpy() - x_dummy = np.zeros((1, 1)) # input, not used - target_value = np.array([[1.0, 1.1]]) - return -np.sum((f[0](x_dummy) - target_value) ** 2) - np.sum( - (f[1](x_dummy) - target_value) ** 2 + x_dummy = np.zeros(1) # input, not used + target_value_0 = 1.0 + target_value_1 = 1.1 + y0 = f[0](x_dummy) + y1 = f[1](x_dummy) + return ( + -((y0[0] - target_value_0) ** 2) + - (y0[1] - target_value_1) ** 2 + - (y1[0] - target_value_0) ** 2 + - (y1[1] - target_value_1) ** 2 ) # test increase parameter value if too small first genome, diff --git a/test/test_node.py b/test/test_node.py index 6177f7a2..e0aa1158 100644 --- a/test/test_node.py +++ b/test/test_node.py @@ -46,12 +46,13 @@ def _test_to_x_compilations( def _test_to_func(genome, x, y_target): graph = cgp.CartesianGraph(genome) - assert graph.to_func()(x) == 
pytest.approx(y_target) + assert graph.to_func()(*x) == pytest.approx(y_target) def _test_to_numpy(genome, x, y_target): graph = cgp.CartesianGraph(genome) - assert graph.to_numpy()(np.array(x).reshape(1, -1)) == pytest.approx(y_target) + args = [np.array([xi]) for xi in x] + assert graph.to_numpy()(*args) == pytest.approx(y_target) def _test_to_torch(genome, x, y_target): @@ -63,7 +64,7 @@ def _test_to_torch(genome, x, y_target): def _test_to_sympy(genome, x, y_target): pytest.importorskip("sympy") graph = cgp.CartesianGraph(genome) - assert [graph.to_sympy()[0].subs({f"x_{i}": x[i] for i in range(len(x))})] == pytest.approx( + assert graph.to_sympy().subs({f"x_{i}": x[i] for i in range(len(x))}) == pytest.approx( y_target ) @@ -92,7 +93,7 @@ def test_add(): ] x = [5.0, 1.5] - y_target = [x[0] + x[1]] + y_target = x[0] + x[1] _test_to_x_compilations(genome, x, y_target) @@ -121,7 +122,7 @@ def test_sub(): ] x = [5.0, 1.5] - y_target = [x[0] - x[1]] + y_target = x[0] - x[1] _test_to_x_compilations(genome, x, y_target) @@ -150,7 +151,7 @@ def test_mul(): ] x = [5.0, 1.5] - y_target = [x[0] * x[1]] + y_target = x[0] * x[1] _test_to_x_compilations(genome, x, y_target) @@ -179,7 +180,7 @@ def test_div(): ] x = [5.0, 1.5] - y_target = [x[0] / x[1]] + y_target = x[0] / x[1] _test_to_x_compilations(genome, x, y_target) @@ -208,7 +209,7 @@ def test_pow(): ] x = [5.0, 1.5] - y_target = [x[0] ** x[1]] + y_target = x[0] ** x[1] _test_to_x_compilations(genome, x, y_target) @@ -233,7 +234,7 @@ def test_constant_float(): ] x = [1.0, 2.0] - y_target = [1.0] # by default the output value of the ConstantFloat node is 1.0 + y_target = 1.0 # by default the output value of the ConstantFloat node is 1.0 _test_to_x_compilations(genome, x, y_target) @@ -251,7 +252,7 @@ def test_parameter(): genome.dna = [ID_INPUT_NODE, ID_NON_CODING_GENE, 0, 0, ID_OUTPUT_NODE, 1] x = [1.0] - y_target = [1.0] # by default the output value of the Parameter node is 1.0 + y_target = 1.0 # by default the output value of the Parameter node is 1.0 _test_to_x_compilations(genome, x, y_target) @@ -274,7 +275,7 @@ class CustomParameter(cgp.Parameter): genome.dna = [ID_INPUT_NODE, ID_NON_CODING_GENE, 0, 0, ID_OUTPUT_NODE, 1] x = [1.0] - y_target = [initial_value] + y_target = initial_value _test_to_x_compilations(genome, x, y_target) @@ -310,7 +311,7 @@ def test_parameter_two_nodes(): x = [1.0] # by default the output value of the Parameter node is 1.0, # hence the sum of two Parameter nodes is 2.0 - y_target = [2.0] + y_target = 2.0 _test_to_x_compilations(genome, x, y_target) @@ -335,7 +336,7 @@ class CustomParameter(cgp.Parameter): # f(x) = c genome.dna = [ID_INPUT_NODE, ID_NON_CODING_GENE, 0, 0, ID_OUTPUT_NODE, 1] f = cgp.CartesianGraph(genome).to_func() - y = f([0.0])[0] + y = f(0.0) assert min_val <= y assert y <= max_val @@ -351,7 +352,7 @@ class DoubleParameter(cgp.OperatorNode): _arity = 0 _initial_values = {"
<p>
": lambda: p, "": lambda: q} _def_output = "
<p>
+ " - _def_numpy_output = "np.ones(x.shape[0]) * (
<p>
+ )" + _def_numpy_output = "np.ones(len(x[0])) * (
<p>
+ )" _def_torch_output = "torch.ones(1).expand(x.shape[0]) * (
<p>
+ )" genome_params = { @@ -365,7 +366,7 @@ class DoubleParameter(cgp.OperatorNode): # f(x) = p + q genome.dna = [ID_INPUT_NODE, ID_NON_CODING_GENE, 0, 0, ID_OUTPUT_NODE, 1] f = cgp.CartesianGraph(genome).to_func() - y = f([0.0])[0] + y = f(0.0) assert y == pytest.approx(p + q) @@ -387,7 +388,7 @@ class CustomParameter(cgp.Parameter): genome._parameter_names_to_values[""] = 1.0 f = cgp.CartesianGraph(genome).to_func() - y = f([0.0])[0] + y = f(0.0) assert y == pytest.approx(1.0) # now mutate the genome, since there is only one other option for @@ -400,7 +401,7 @@ class CustomParameter(cgp.Parameter): genome.dna = [ID_INPUT_NODE, ID_NON_CODING_GENE, 0, 0, ID_OUTPUT_NODE, 1] f = cgp.CartesianGraph(genome).to_func() - y = f([0.0])[0] + y = f(0.0) assert y == pytest.approx(np.pi) @@ -440,15 +441,15 @@ def test_if_else_operator(): ] x_0 = [1.0, 10.0, -20.0] - y_target_0 = [10.0] + y_target_0 = 10.0 _test_to_x_compilations(genome, x_0, y_target_0) x_1 = [0.0, 10.0, -20.0] - y_target_1 = [10.0] + y_target_1 = 10.0 _test_to_x_compilations(genome, x_1, y_target_1) x_2 = [-1.0, 10.0, -20.0] - y_target_2 = [-20.0] + y_target_2 = -20.0 _test_to_x_compilations(genome, x_2, y_target_2) @@ -544,7 +545,7 @@ class MyScaledAdd(cgp.node.OperatorNode): ] x = [5.0, 1.5] - y_target = [2 * (x[0] + x[1])] + y_target = 2 * (x[0] + x[1]) _test_to_x_compilations(genome, x, y_target) @@ -588,7 +589,7 @@ class MyScaledAdd(cgp.node.OperatorNode): ] x = [5.0, 1.5] - y_target = [2 * (x[0] + x[1])] + y_target = 2 * (x[0] + x[1]) _test_to_x_compilations(genome, x, y_target, test_to_sympy=False) @@ -634,6 +635,6 @@ class MyScaledAdd(cgp.node.OperatorNode): ] x = [5.0, 1.5] - y_target = [scipy_const.golden_ratio * (x[0] + x[1])] + y_target = scipy_const.golden_ratio * (x[0] + x[1]) _test_to_x_compilations(genome, x, y_target, test_to_sympy=False) diff --git a/test/test_utils.py b/test/test_utils.py index a32233f3..0bcf8feb 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -27,9 +27,9 @@ def inner_objective(ind): np.random.seed(rng_seed) if individual_type == "SingleGenome": - expr_unpacked = expr[0] + expr_unpacked = expr elif individual_type == "MultiGenome": - expr_unpacked = expr[0][0] + expr_unpacked = expr[0] else: raise NotImplementedError @@ -87,7 +87,7 @@ def test_fec_cache_decorator_produces_identical_history( evolve_params = {"max_generations": 10, "min_fitness": 0.0} def f_target(x): - return x[0] - x[1] + return x[:, 0] - x[:, 1] def inner_objective(ind): np.random.seed(rng_seed) @@ -99,9 +99,8 @@ def inner_objective(ind): else: raise NotImplementedError - loss = 0 - for x in np.random.uniform(size=(5, 2)): - loss += (f_target(x) - f(x)[0]) ** 2 + x = np.random.uniform(size=(5, 2)) + loss = np.sum((f(x[:, 0], x[:, 1]) - f_target(x)) ** 2) return loss @cgp.utils.disk_cache( @@ -149,7 +148,7 @@ def recording_callback(pop): def _fec_cache_decorator_with_multiple_inputs_multiple_outputs_objective(ind): f = ind.to_numpy() x = np.array([[1.0, 2.0], [3.0, 4.0]]) - y = f(x) + y = f(x[:, 0], x[:, 1]) return y @@ -212,8 +211,8 @@ def test_fec_cache_decorator_with_multiple_inputs_multiple_outputs(genome_params # dimension being identical y1 = _fec_cache_decorator_with_multiple_inputs_multiple_outputs_objective(ind1) - assert y0[:, 0] == pytest.approx(y1[:, 0]) - assert y0[:, 1] != pytest.approx(y1[:, 1]) + assert y0[0] == pytest.approx(y1[0]) + assert y0[1] != pytest.approx(y1[1]) @cgp.utils.disk_cache(tempfile.mkstemp()[1]) @@ -391,7 +390,7 @@ def test_primitives_from_class_names_for_genome(genome_params): def 
test_fec_cache_decorator_with_additional_arguments(genome_params, rng, rng_seed): def f_target(x): - return x[0] - x[1] + return x[:, 0] - x[:, 1] @cgp.utils.disk_cache( tempfile.mkstemp()[1], compute_key=cgp.utils.compute_key_from_numpy_evaluation_and_args @@ -401,9 +400,8 @@ def inner_objective(ind, n_samples): f = ind.to_func() - loss = 0 - for x in np.random.uniform(size=(n_samples, 2)): - loss += (f_target(x) - f(x)[0]) ** 2 + x = np.random.uniform(size=(n_samples, 2)) + loss = np.sum((f(x[:, 0], x[:, 1]) - f_target(x)) ** 2) return loss g = cgp.Genome(**genome_params) @@ -423,14 +421,14 @@ def test_custom_compute_key_for_disk_cache(individual, rng): tempfile.mkstemp()[1], compute_key=cgp.utils.compute_key_from_numpy_evaluation_and_args ) def inner_objective(ind): - return ind.to_func()([1.0, 2.0])[0] + return ind.to_func()(1.0, 2.0) def my_compute_key(ind): return 0 @cgp.utils.disk_cache(tempfile.mkstemp()[1], compute_key=my_compute_key) def inner_objective_custom_compute_key(ind): - return ind.to_func()([1.0, 2.0])[0] + return ind.to_func()(1.0, 2.0) individual0 = individual.clone() individual0.genome.randomize(rng)
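Usage sketch, not part of the diff: the new single-output conveniences of `to_func` and `to_sympy`, assuming the same public API used by the tests above (`to_sympy` additionally requires the optional sympy dependency).

    import numpy as np
    import cgp

    genome = cgp.Genome(2, 1, 1, 1, (cgp.Add,))
    genome.randomize(np.random.RandomState(1234))
    graph = cgp.CartesianGraph(genome)

    # to_func: positional scalar arguments, scalar return for a single output
    y = graph.to_func()(1.0, 2.0)

    # to_sympy: a single expression instead of a list of length one
    expr = graph.to_sympy()

    # with several outputs the compiled callable still returns one value per
    # output node, so the result can be unpacked directly
    genome2 = cgp.Genome(2, 2, 1, 1, (cgp.Add,))
    genome2.randomize(np.random.RandomState(1234))
    y0, y1 = cgp.CartesianGraph(genome2).to_func()(1.0, 2.0)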