[WIP] Restructured optimization problem class to support multiple solvers.
alucantonio committed Jul 4, 2023
1 parent 2df1ba1 commit bec1e4f
Showing 6 changed files with 165 additions and 112 deletions.
259 changes: 156 additions & 103 deletions src/dctkit/math/opt/optctrl.py
@@ -2,7 +2,7 @@
from jax import grad, jit, jacrev, value_and_grad, Array
from typing import Callable
import numpy.typing as npt
from typing import List, Dict, Tuple
from typing import List, Dict
import pygmo as pg
from typeguard import check_type
from petsc4py import PETSc, init
@@ -23,24 +23,30 @@ class OptimizationProblem():
def __init__(self, dim: int, state_dim: int,
objfun: Callable[..., float | npt.NDArray | Array |
np.float32 | np.float64],
objandgrad: Callable[...,
Tuple[float | npt.NDArray | Array |
np.float32 |
np.float64,
npt.NDArray | Array]]
| None = None,
constrfun: Callable[..., float | npt.NDArray | Array |
np.float32 | np.float64] | None = None,
constr_args: Dict[str, float | np.float32 |
np.float64 | npt.NDArray | Array] = {},
framework: str = 'pygmo'):
solver_lib: str = 'pygmo'):

self.dim = dim
self.state_dim = state_dim
self.obj = jit(objfun)

self.obj_args = {}
self.__register_obj_and_grad_fn(objfun, constrfun, constr_args)

self.framework = framework
if solver_lib == "petsc":
self.solver = PETScSolver(self)
else:
self.solver = PygmoSolver(self)

self.last_opt_result = -1

def __register_obj_and_grad_fn(self, objfun, constrfun, constr_args):
self.obj = jit(objfun)
# gradient of the objective function wrt parameters array
self.grad_obj = jit(grad(objfun))

self.objandgrad = jit(value_and_grad(objfun))

self.constr_problem = False
# constrained optimization problem
@@ -53,104 +59,23 @@ def __init__(self, dim: int, state_dim: int,
npt.NDArray | Array])
self.constr_args = constr_args
self.constr_problem = True
# gradient of the objective function wrt parameters array
self.grad_obj = jit(grad(objfun))
self.last_opt_result = -1

if framework == "petsc":
self.objandgrad = jit(value_and_grad(objfun))
init()
self.tao = PETSc.TAO().create()
self.tao.setType(PETSc.TAO.Type.LMVM) # Specify the solver type
self.g = PETSc.Vec().createSeq(self.dim)
self.tao.setObjectiveGradient(
self.objective_and_gradient, self.g, kargs=self.obj_args)

def set_obj_args(self, args: dict) -> None:
"""Sets the additional arguments to be passed to the objective function."""
check_type(args, Dict[str, float | np.float32 | np.float64
| npt.NDArray | Array])
self.obj_args = args
if self.framework == "petsc":
self.tao.setObjectiveGradient(
self.objective_and_gradient, self.g, kargs=self.obj_args)

def get_nec(self) -> int:
"""Returns the number of equality constraints: for a constrained problem, it
is equal to the number of state variables, otherwise it is zero.
"""
if self.constr_problem:
return self.state_dim
else:
return 0

def fitness(self, x: npt.NDArray | Array) -> npt.NDArray | List[float]:
fit = self.obj(x, **self.obj_args)
check_type(fit, float | np.float32 | np.float64 | npt.NDArray | Array)
if self.constr_problem:
constr_res = self.constr(x, **self.constr_args)
return np.concatenate(([fit], constr_res))
else:
return [fit]
self.solver.set_obj_args(args)

def gradient(self, x: npt.NDArray | Array) -> npt.NDArray | Array:
grad = self.grad_obj(x, **self.obj_args)
check_type(grad, float | np.float32 | np.float64 | npt.NDArray | Array)
if self.constr_problem:
constr_jac = self.constr_grad(x, **self.constr_args)
# first dim components are grad of objective wrt parameters, then grad of
# constraint equations wrt parameters.
return np.concatenate((grad, np.ravel(constr_jac)))
else:
return grad
def solve(self, x0: npt.NDArray, algo: str = "tnewton", ftol_abs: float = 1e-5,
ftol_rel: float = 1e-5, gatol=None, grtol=None, gttol=None,
maxeval: int = 500, verbose: bool = False) -> npt.NDArray:

# FIXME: these parameters should be set through a separate function of the
# solver

def objective_and_gradient(self, tao, x: Vec, g: Vec, *args: tuple, **kargs: dict):
"""PETSc-compatible wrapper for the function that returns the objective value
and the gradient."""
fval, grad = self.objandgrad(x.getArray(), **kargs)
g.setArray(grad)
return fval

def get_bounds(self):
return ([-100]*self.dim, [100]*self.dim)

def get_name(self) -> str:
"""Returns the name of the optimization problem. Override this method to set
another name.
"""
return "Optimization problem"
kwargs = {"algo": algo, "ftol_abs": ftol_abs, "ftol_rel": ftol_rel,
"gatol": gatol, "grtol": grtol, "gttol": gttol, "maxeval": maxeval,
"verbose": verbose}

def run(self, x0: npt.NDArray, algo: str = "tnewton", ftol_abs: float = 1e-5,
ftol_rel: float = 1e-5, gatol=None, grtol=None, gttol=None,
maxeval: int = 500, verbose: bool = False) -> npt.NDArray:

if self.framework == "pygmo":
prb = pg.problem(self)

if self.constr_problem:
algo = "slsqp"
algo = pg.algorithm(pg.nlopt(solver=algo))
algo.extract(pg.nlopt).ftol_abs = ftol_abs # type: ignore
algo.extract(pg.nlopt).ftol_rel = ftol_rel # type: ignore
algo.extract(pg.nlopt).maxeval = maxeval # type: ignore
pop = pg.population(prb)
pop.push_back(x0)
algo.set_verbosity(verbose)
pop = algo.evolve(pop) # type: ignore
self.last_opt_result = algo.extract( # type: ignore
pg.nlopt).get_last_opt_result()
u = pop.champion_x
elif self.framework == "petsc":
x = PETSc.Vec().createWithArray(x0)
self.tao.setSolution(x)
self.tao.setMaximumIterations(maxeval)
self.tao.setTolerances(gatol=gatol, grtol=grtol, gttol=gttol)
self.tao.setFromOptions() # Set options for the solver
self.tao.solve()
if verbose:
self.tao.view()
u = self.tao.getSolution().getArray()
objective_value = self.tao.getObjectiveValue()
u = self.solver.run(x0, **kwargs)

check_type(u, npt.NDArray)
return u
@@ -189,3 +114,131 @@ def __init__(self, objfun: Callable[..., np.float32 | np.float64 |
super().__init__(dim=nparams, state_dim=state_dim, objfun=objfun,
constrfun=statefun, constr_args=constraint_args)
super().set_obj_args(obj_args)
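
# The classes below implement a simple strategy pattern: OptimizationSolver
# defines the backend-agnostic solver interface, while PETScSolver and
# PygmoSolver are the concrete backends selected via the solver_lib argument.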


class OptimizationSolver():
def __init__(self, prb: OptimizationProblem):
self.prb = prb

def set_obj_args(self, args: Dict):
"""Sets the additional keyword arguments to be passed to the objective
function."""
raise NotImplementedError

def run(self, x0: npt.NDArray, **kwargs: Dict) -> npt.NDArray | Array:
# FIXME: remove kwargs from run method and implement separate methods for
# solver settings
raise NotImplementedError


class PETScSolver(OptimizationSolver):
def __init__(self, prb: OptimizationProblem):
super().__init__(prb)
init()
# create default solver and settings
self.tao = PETSc.TAO().create()
self.tao.setType(PETSc.TAO.Type.LMVM) # Specify the solver type
# create variable to store the gradient of the objective function
self.g = PETSc.Vec().createSeq(self.prb.dim)

def set_obj_args(self, args: dict) -> None:
check_type(args, Dict[str, float | np.float32 | np.float64
| npt.NDArray | Array])
self.tao.setObjectiveGradient(
self.objective_and_gradient, self.g, kargs=args)

def objective_and_gradient(self, tao, x: Vec, g: Vec, **kwargs: Dict):
"""PETSc-compatible wrapper for the function that returns the objective value
and the gradient."""
fval, grad = self.prb.objandgrad(x.getArray(), **kwargs)
g.setArray(grad)
return fval

def run(self, x0: npt.NDArray, **kwargs: Dict) -> npt.NDArray:
maxeval = kwargs["maxeval"]
gatol = kwargs["gatol"]
grtol = kwargs["grtol"]
gttol = kwargs["gttol"]
verbose = kwargs["verbose"]
x = PETSc.Vec().createWithArray(x0)
self.tao.setSolution(x)
self.tao.setMaximumIterations(maxeval)
self.tao.setTolerances(gatol=gatol, grtol=grtol, gttol=gttol)
self.tao.setFromOptions() # Set options for the solver
self.tao.solve()
if verbose:
self.tao.view()
u = self.tao.getSolution().getArray()
objective_value = self.tao.getObjectiveValue()
return u


class PygmoSolver(OptimizationSolver):
def __init__(self, prb: OptimizationProblem):
super().__init__(prb)
self.prb.obj_args = {}

def set_obj_args(self, args: Dict):
self.prb.obj_args = args

def get_nec(self) -> int:
"""Returns the number of equality constraints: for a constrained problem, it
is equal to the number of state variables, otherwise it is zero.
"""
if self.prb.constr_problem:
return self.prb.state_dim
else:
return 0

def fitness(self, x: npt.NDArray | Array) -> npt.NDArray | List[float]:
fit = self.prb.obj(x, **self.prb.obj_args)
check_type(fit, float | np.float32 | np.float64 | npt.NDArray | Array)
if self.prb.constr_problem:
constr_res = self.prb.constr(x, **self.prb.constr_args)
return np.concatenate(([fit], constr_res))
else:
return [fit]

def gradient(self, x: npt.NDArray | Array) -> npt.NDArray | Array:
grad = self.prb.grad_obj(x, **self.prb.obj_args)
check_type(grad, float | np.float32 | np.float64 | npt.NDArray | Array)
if self.prb.constr_problem:
constr_jac = self.prb.constr_grad(x, **self.prb.constr_args)
# first dim components are grad of objective wrt parameters, then grad of
# constraint equations wrt parameters.
return np.concatenate((grad, np.ravel(constr_jac)))
else:
return grad

def get_bounds(self):
return ([-100]*self.prb.dim, [100]*self.prb.dim)

def get_name(self) -> str:
"""Returns the name of the optimization problem. Override this method to set
another name.
"""
return "Optimization problem"

def run(self, x0: npt.NDArray, **kwargs) -> npt.NDArray:
algo = kwargs["algo"]
ftol_abs = kwargs["ftol_abs"]
ftol_rel = kwargs["ftol_rel"]
maxeval = kwargs["maxeval"]
verbose = kwargs["verbose"]

pygmo_prb = pg.problem(self)

if self.prb.constr_problem:
algo = "slsqp"
algo = pg.algorithm(pg.nlopt(solver=algo))
algo.extract(pg.nlopt).ftol_abs = ftol_abs # type: ignore
algo.extract(pg.nlopt).ftol_rel = ftol_rel # type: ignore
algo.extract(pg.nlopt).maxeval = maxeval # type: ignore
pop = pg.population(pygmo_prb)
pop.push_back(x0)
algo.set_verbosity(verbose)
pop = algo.evolve(pop) # type: ignore
self.prb.last_opt_result = algo.extract( # type: ignore
pg.nlopt).get_last_opt_result()
u = pop.champion_x
return u
6 changes: 3 additions & 3 deletions tests/test_elastica.py
@@ -101,7 +101,7 @@ def obj(x: npt.NDArray, theta_true: npt.NDArray) -> Array:
# initial guess for the bending stiffness
B_in = 1*np.ones(1, dtype=dt.float_dtype)
x0 = np.concatenate((theta_in, B_in))
x = prb.run(x0=x0)
x = prb.solve(x0=x0)

# extend solution array with boundary element
theta = x[:-1]
@@ -175,7 +175,7 @@ def obj(x: npt.NDArray) -> Array:
dim=num_elements-1, state_dim=num_elements-1, objfun=obj)

prb.set_obj_args({})
sol = prb.run(x0=theta_0)
sol = prb.solve(x0=theta_0)
theta = jnp.insert(sol, 0, theta_true[0])

x, y = reconstruct_xy(theta, h, num_nodes)
@@ -272,7 +272,7 @@ def obj(x: npt.NDArray, theta_true: npt.NDArray) -> Array:
# initial guess for the bending stiffness
B_in = 1*np.ones(1, dtype=dt.float_dtype)
x0 = np.concatenate((theta_in, B_in))
x = prb.run(x0=x0)
x = prb.solve(x0=x0)

print(x[-1])
# extend solution array with boundary element
2 changes: 1 addition & 1 deletion tests/test_linear_elasticity.py
@@ -76,7 +76,7 @@ def test_linear_elasticity(setup_test):

prb.set_obj_args(obj_args)
node_coords_flattened = S.node_coords.flatten()
sol = prb.run(x0=node_coords_flattened)
sol = prb.solve(x0=node_coords_flattened)
curr_node_coords = sol.reshape(S.node_coords.shape)

assert np.sum((true_curr_node_coords - curr_node_coords)**2) < 1e-6
4 changes: 2 additions & 2 deletions tests/test_optctrl.py
@@ -51,7 +51,7 @@ def objfun(x: npt.NDArray) -> jax.Array:
prb = optctrl.OptimalControlProblem(
objfun=objfun, statefun=statefun, state_dim=state_dim, nparams=nparams)

x = prb.run(x0=x0)
x = prb.solve(x0=x0)
u = x[:state_dim]
y = x[state_dim:]

@@ -119,7 +119,7 @@ def objfun(x: npt.NDArray) -> jax.Array:

prb = optctrl.OptimalControlProblem(
objfun=objfun, statefun=statefun, state_dim=dim_0, nparams=dim_0 + len(f0))
x = prb.run(x0=x0)
x = prb.solve(x0=x0)
u = x[:dim_0]
f = x[dim_0:]
print("u = ", u)
4 changes: 2 additions & 2 deletions src/dctkit/experimental/petsc_test.py → tests/test_petsc.py
@@ -43,14 +43,14 @@ def energy_poisson(x, f, boundary_values, gamma):


prb = oc.OptimizationProblem(
num_nodes, num_nodes, energy_poisson, framework="petsc")
num_nodes, num_nodes, energy_poisson, solver_lib="petsc")

kargs = {"f": f_vec, "boundary_values": boundary_values, "gamma": gamma}
prb.set_obj_args(kargs)

tic = time.time()

u = prb.run(u_0)
u = prb.solve(u_0)

toc = time.time()

2 changes: 1 addition & 1 deletion tests/test_poisson.py
@@ -106,7 +106,7 @@ def obj_poisson(x, f, k, boundary_values, gamma, mask):

prb = oc.OptimizationProblem(dim=dim_0, state_dim=dim_0, objfun=obj)
prb.set_obj_args(args)
u = prb.run(u_0, algo="lbfgs").astype(dt.float_dtype)
u = prb.solve(u_0, algo="lbfgs").astype(dt.float_dtype)

elif optimizer == "jaxopt":
print("Using jaxopt optimizer...")
