8 changes: 5 additions & 3 deletions .github/workflows/Documentation.yml
@@ -4,7 +4,7 @@ on:
push:
branches:
- master
tags: '*'
tags: "*"
pull_request:

jobs:
@@ -14,9 +14,11 @@ jobs:
- uses: actions/checkout@v6
- uses: julia-actions/setup-julia@latest
with:
version: '1'
version: "1"
- name: Add the HolyLabRegistry
run: julia --project -e 'using Pkg; Pkg.Registry.add(); Pkg.Registry.add(RegistrySpec(url = "https://github.com/HolyLab/HolyLabRegistry.git"))'
- name: Install dependencies
run: julia --project=docs/ -e 'using Pkg; Pkg.develop(vcat(PackageSpec(path = pwd()), [PackageSpec(path = joinpath("lib", dir)) for dir in readdir("lib") if (dir !== "OptimizationQuadDIRECT" && dir !== "OptimizationMultistartOptimization")])); Pkg.instantiate()'
run: julia --project=docs/ -e 'using Pkg; Pkg.develop(vcat(PackageSpec(path = pwd()), [PackageSpec(path = joinpath("lib", dir)) for dir in readdir("lib") if (dir !== "OptimizationMultistartOptimization")])); Pkg.instantiate()'
- name: Build and deploy
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token
25 changes: 25 additions & 0 deletions docs/Project.toml
@@ -2,6 +2,7 @@
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
AmplNLWriter = "7c4d4715-977e-5154-bfe0-e096adeac482"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
@@ -19,6 +20,7 @@ NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856"
NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd"
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
OptimizationAuglag = "2ea93f80-9333-43a1-a68d-1f53b957a421"
OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b"
OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
OptimizationCMAEvolutionStrategy = "bd407f91-200f-4536-9381-e4ba712f53f8"
@@ -27,18 +29,26 @@ OptimizationGCMAES = "6f0a0517-dbc2-4a7a-8a20-99ae7f27e911"
OptimizationIpopt = "43fad042-7963-4b32-ab19-e2a4f9a67124"
OptimizationLBFGSB = "22f7324a-a79d-40f2-bebe-3af60c77bd15"
OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
OptimizationMadNLP = "5d9c809f-c847-4062-9fba-1793bbfef577"
OptimizationManopt = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6"
OptimizationMetaheuristics = "3aafef2f-86ae-4776-b337-85a36adf0b55"
OptimizationMultistartOptimization = "e4316d97-8bbb-4fd3-a7d8-3851d2a72823"
OptimizationNLPModels = "064b21be-54cf-11ef-1646-cdfee32b588f"
OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
OptimizationNOMAD = "2cab0595-8222-4775-b714-9828e6a9e01b"
OptimizationODE = "dfa73e59-e644-4d8a-bf84-188d7ecb34e4"
OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
OptimizationPRIMA = "72f8369c-a2ea-4298-9126-56167ce9cbc2"
OptimizationPolyalgorithms = "500b13db-7e66-49ce-bda4-eed966be6282"
OptimizationPyCMA = "fb0822aa-1fe5-41d8-99a6-e7bf6c238d3b"
OptimizationQuadDIRECT = "842ac81e-713d-465f-80f7-84eddaced298"
OptimizationSciPy = "cce07bd8-c79b-4b00-aee8-8db9cce22837"
OptimizationSophia = "892fee11-dca1-40d6-b698-84ba0d87399a"
OptimizationSpeedMapping = "3d669222-0d7d-4eb9-8a9f-d8528b0d9b91"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
QuadDIRECT = "dae52e8d-d666-5120-a592-9e15c33b8d7a"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
@@ -50,6 +60,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[sources]
Optimization = {path = ".."}
OptimizationAuglag = {path = "../lib/OptimizationAuglag"}
OptimizationBBO = {path = "../lib/OptimizationBBO"}
OptimizationBase = {path = "../lib/OptimizationBase"}
OptimizationCMAEvolutionStrategy = {path = "../lib/OptimizationCMAEvolutionStrategy"}
@@ -58,15 +69,22 @@ OptimizationGCMAES = {path = "../lib/OptimizationGCMAES"}
OptimizationIpopt = {path = "../lib/OptimizationIpopt"}
OptimizationLBFGSB = {path = "../lib/OptimizationLBFGSB"}
OptimizationMOI = {path = "../lib/OptimizationMOI"}
OptimizationMadNLP = {path = "../lib/OptimizationMadNLP"}
OptimizationManopt = {path = "../lib/OptimizationManopt"}
OptimizationMetaheuristics = {path = "../lib/OptimizationMetaheuristics"}
OptimizationMultistartOptimization = {path = "../lib/OptimizationMultistartOptimization"}
OptimizationNLPModels = {path = "../lib/OptimizationNLPModels"}
OptimizationNLopt = {path = "../lib/OptimizationNLopt"}
OptimizationNOMAD = {path = "../lib/OptimizationNOMAD"}
OptimizationODE = {path = "../lib/OptimizationODE"}
OptimizationOptimJL = {path = "../lib/OptimizationOptimJL"}
OptimizationOptimisers = {path = "../lib/OptimizationOptimisers"}
OptimizationPRIMA = {path = "../lib/OptimizationPRIMA"}
OptimizationPolyalgorithms = {path = "../lib/OptimizationPolyalgorithms"}
OptimizationPyCMA = {path = "../lib/OptimizationPyCMA"}
OptimizationQuadDIRECT = {path = "../lib/OptimizationQuadDIRECT"}
OptimizationSciPy = {path = "../lib/OptimizationSciPy"}
OptimizationSophia = {path = "../lib/OptimizationSophia"}
OptimizationSpeedMapping = {path = "../lib/OptimizationSpeedMapping"}

[compat]
@@ -89,22 +107,29 @@ NLPModels = "0.21"
NLPModelsTest = "0.10"
NLopt = "0.6, 1"
Optimization = "5"
OptimizationAuglag = "1"
OptimizationBBO = "0.4"
OptimizationBase = "4"
OptimizationCMAEvolutionStrategy = "0.3"
OptimizationEvolutionary = "0.4"
OptimizationGCMAES = "0.3"
OptimizationIpopt = "0.2"
OptimizationMOI = "0.5"
OptimizationMadNLP = "0.3"
OptimizationManopt = "1"
OptimizationMetaheuristics = "0.3"
OptimizationMultistartOptimization = "0.3"
OptimizationNLPModels = "0.0.2, 1"
OptimizationNLopt = "0.3"
OptimizationNOMAD = "0.3"
OptimizationODE = "0.1"
OptimizationOptimJL = "0.4"
OptimizationOptimisers = "0.3"
OptimizationPRIMA = "0.3"
OptimizationPolyalgorithms = "0.3"
OptimizationQuadDIRECT = "0.3"
OptimizationSciPy = "0.4"
OptimizationSophia = "1"
OptimizationSpeedMapping = "0.2"
OrdinaryDiffEq = "6"
Plots = "1"
11 changes: 5 additions & 6 deletions docs/make.jl
@@ -1,16 +1,15 @@
using Documenter, Optimization
using FiniteDiff, ForwardDiff, ModelingToolkit, ReverseDiff, Tracker, Zygote
using ADTypes
using OptimizationLBFGSB, OptimizationSophia

cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)
cp(joinpath(@__DIR__, "Manifest.toml"), joinpath(@__DIR__, "src/assets/Manifest.toml"), force = true)
cp(joinpath(@__DIR__, "Project.toml"), joinpath(@__DIR__, "src/assets/Project.toml"), force = true)

include("pages.jl")

makedocs(sitename = "Optimization.jl",
authors = "Chris Rackauckas, Vaibhav Kumar Dixit et al.",
modules = [Optimization, Optimization.SciMLBase, Optimization.OptimizationBase,
FiniteDiff, ForwardDiff, ModelingToolkit, ReverseDiff, Tracker, Zygote, ADTypes],
modules = [Optimization, Optimization.SciMLBase, Optimization.OptimizationBase, Optimization.ADTypes,
OptimizationLBFGSB, OptimizationSophia],
clean = true, doctest = false, linkcheck = true,
warnonly = [:missing_docs, :cross_references],
format = Documenter.HTML(assets = ["assets/favicon.ico"],
16 changes: 8 additions & 8 deletions docs/src/API/ad.md
@@ -13,15 +13,15 @@ The choices for the auto-AD fill-ins with quick descriptions are:

## Automatic Differentiation Choice API

The following sections describe the Auto-AD choices in detail.
The following sections describe the Auto-AD choices in detail. These types are defined in the [ADTypes.jl](https://github.com/SciML/ADTypes.jl) package.

```@docs
OptimizationBase.AutoForwardDiff
OptimizationBase.AutoFiniteDiff
OptimizationBase.AutoReverseDiff
OptimizationBase.AutoZygote
OptimizationBase.AutoTracker
OptimizationBase.AutoSymbolics
OptimizationBase.AutoEnzyme
ADTypes.AutoForwardDiff
ADTypes.AutoFiniteDiff
ADTypes.AutoReverseDiff
ADTypes.AutoZygote
ADTypes.AutoTracker
ADTypes.AutoSymbolics
ADTypes.AutoEnzyme
ADTypes.AutoMooncake
```
2 changes: 1 addition & 1 deletion docs/src/API/optimization_state.md
@@ -1,5 +1,5 @@
# [OptimizationState](@id optstate)

```@docs
Optimization.OptimizationState
OptimizationBase.OptimizationState
```
45 changes: 23 additions & 22 deletions docs/src/examples/rosenbrock.md
@@ -41,15 +41,16 @@ An optimization problem can now be defined and solved to estimate the values for

```@example rosenbrock
# Define the problem to solve
using Optimization, ForwardDiff, Zygote
using SciMLBase, OptimizationBase
using ADTypes, ForwardDiff, Zygote

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
f = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff())
l1 = rosenbrock(x0, _p)
prob = OptimizationProblem(f, x0, _p)
prob = SciMLBase.OptimizationProblem(f, x0, _p)
```

## Optim.jl Solvers
@@ -59,19 +60,19 @@ prob = OptimizationProblem(f, x0, _p)
```@example rosenbrock
using OptimizationOptimJL
sol = solve(prob, SimulatedAnnealing())
prob = OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
prob = SciMLBase.OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, SAMIN())

l1 = rosenbrock(x0, _p)
prob = OptimizationProblem(rosenbrock, x0, _p)
prob = SciMLBase.OptimizationProblem(rosenbrock, x0, _p)
sol = solve(prob, NelderMead())
```

### Now a gradient-based optimizer with forward-mode automatic differentiation

```@example rosenbrock
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, x0, _p)
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff())
prob = SciMLBase.OptimizationProblem(optf, x0, _p)
sol = solve(prob, BFGS())
```

@@ -91,19 +92,19 @@ sol = solve(prob, Optim.KrylovTrustRegion())

```@example rosenbrock
cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2]
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff(); cons = cons)

prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf])
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf])
sol = solve(prob, IPNewton()) # Note that -Inf < x[1]^2 + x[2]^2 < Inf is always true

prob = OptimizationProblem(optf, x0, _p, lcons = [-5.0], ucons = [10.0])
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [-5.0], ucons = [10.0])
sol = solve(prob, IPNewton()) # Again, -5.0 < x[1]^2 + x[2]^2 < 10.0

prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf],
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [Inf],
lb = [-500.0, -500.0], ub = [50.0, 50.0])
sol = solve(prob, IPNewton())

prob = OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [0.5], ucons = [0.5],
lb = [-500.0, -500.0], ub = [50.0, 50.0])
sol = solve(prob, IPNewton())

@@ -118,8 +119,8 @@ function con_c(res, x, p)
res .= [x[1]^2 + x[2]^2]
end

optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = con_c)
prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff(); cons = con_c)
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [-Inf], ucons = [0.25^2])
sol = solve(prob, IPNewton()) # -Inf < cons_circ(sol.u, _p) = 0.25^2
```

@@ -139,17 +140,17 @@ function con2_c(res, x, p)
res .= [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]]
end

optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote(); cons = con2_c)
prob = OptimizationProblem(optf, x0, _p, lcons = [-Inf, -Inf], ucons = [100.0, 100.0])
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoZygote(); cons = con2_c)
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lcons = [-Inf, -Inf], ucons = [100.0, 100.0])
sol = solve(prob, Ipopt.Optimizer())
```

## Now let's switch over to OptimizationOptimisers with reverse-mode AD

```@example rosenbrock
import OptimizationOptimisers
optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
prob = OptimizationProblem(optf, x0, _p)
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoZygote())
prob = SciMLBase.OptimizationProblem(optf, x0, _p)
sol = solve(prob, OptimizationOptimisers.Adam(0.05), maxiters = 1000, progress = false)
```

@@ -164,8 +165,8 @@ sol = solve(prob, CMAEvolutionStrategyOpt())

```@example rosenbrock
using OptimizationNLopt, ModelingToolkit
optf = OptimizationFunction(rosenbrock, Optimization.AutoSymbolics())
prob = OptimizationProblem(optf, x0, _p)
optf = SciMLBase.OptimizationFunction(rosenbrock, ADTypes.AutoSymbolics())
prob = SciMLBase.OptimizationProblem(optf, x0, _p)

sol = solve(prob, Opt(:LN_BOBYQA, 2))
sol = solve(prob, Opt(:LD_LBFGS, 2))
@@ -174,7 +175,7 @@ sol = solve(prob, Opt(:LD_LBFGS, 2))
### Add some box constraints and solve with a few NLopt.jl methods

```@example rosenbrock
prob = OptimizationProblem(optf, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
prob = SciMLBase.OptimizationProblem(optf, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, Opt(:LD_LBFGS, 2))
sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2), maxiters = 10000) #a global optimizer with random starts of local optimization
```
@@ -183,7 +184,7 @@ sol = solve(prob, Opt(:G_MLSL_LDS, 2), local_method = Opt(:LD_LBFGS, 2), maxiter

```@example rosenbrock
using OptimizationBBO
prob = Optimization.OptimizationProblem(rosenbrock, [0.0, 0.3], _p, lb = [-1.0, 0.2],
prob = SciMLBase.OptimizationProblem(rosenbrock, [0.0, 0.3], _p, lb = [-1.0, 0.2],
ub = [0.8, 0.43])
sol = solve(prob, BBO_adaptive_de_rand_1_bin()) # -1.0 ≤ x[1] ≤ 0.8, 0.2 ≤ x[2] ≤ 0.43
```
12 changes: 6 additions & 6 deletions docs/src/getting_started.md
@@ -14,15 +14,15 @@ The simplest copy-pasteable code using a quasi-Newton method (LBFGS) to solve th

```@example intro
# Import the package and define the problem to optimize
using Optimization, OptimizationLBFGSB, Zygote
using OptimizationBase, OptimizationLBFGSB, ADTypes, Zygote
rosenbrock(u, p) = (p[1] - u[1])^2 + p[2] * (u[2] - u[1]^2)^2
u0 = zeros(2)
p = [1.0, 100.0]

optf = OptimizationFunction(rosenbrock, AutoZygote())
optf = OptimizationFunction(rosenbrock, ADTypes.AutoZygote())
prob = OptimizationProblem(optf, u0, p)

sol = solve(prob, OptimizationLBFGSB.LBFGS())
sol = solve(prob, OptimizationLBFGSB.LBFGSB())
```

```@example intro
@@ -131,8 +131,8 @@ automatically construct the derivative functions using ForwardDiff.jl. This
looks like:

```@example intro
using ForwardDiff
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
using ForwardDiff, ADTypes
optf = OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff())
prob = OptimizationProblem(optf, u0, p)
sol = solve(prob, OptimizationOptimJL.BFGS())
```
@@ -155,7 +155,7 @@ We can demonstrate this via:

```@example intro
using Zygote
optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
optf = OptimizationFunction(rosenbrock, ADTypes.AutoZygote())
prob = OptimizationProblem(optf, u0, p)
sol = solve(prob, OptimizationOptimJL.BFGS())
```
2 changes: 1 addition & 1 deletion docs/src/optimization_packages/blackboxoptim.md
@@ -63,7 +63,7 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = [1.0, 100.0]
f = OptimizationFunction(rosenbrock)
prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
prob = SciMLBase.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited(), maxiters = 100000,
maxtime = 1000.0)
```
2 changes: 1 addition & 1 deletion docs/src/optimization_packages/cmaevolutionstrategy.md
@@ -30,6 +30,6 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = [1.0, 100.0]
f = OptimizationFunction(rosenbrock)
prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
prob = SciMLBase.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
sol = solve(prob, CMAEvolutionStrategyOpt())
```
2 changes: 1 addition & 1 deletion docs/src/optimization_packages/evolutionary.md
@@ -38,6 +38,6 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = [1.0, 100.0]
f = OptimizationFunction(rosenbrock)
prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
prob = SciMLBase.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
sol = solve(prob, Evolutionary.CMAES(μ = 40, λ = 100))
```
7 changes: 4 additions & 3 deletions docs/src/optimization_packages/gcmaes.md
@@ -30,14 +30,15 @@ rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = [1.0, 100.0]
f = OptimizationFunction(rosenbrock)
prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
prob = SciMLBase.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
sol = solve(prob, GCMAESOpt())
```

We can also utilize the gradient information of the optimization problem to aid the optimization as follows:

```@example GCMAES
f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = Optimization.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
using ADTypes, ForwardDiff
f = OptimizationFunction(rosenbrock, ADTypes.AutoForwardDiff())
prob = SciMLBase.OptimizationProblem(f, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
sol = solve(prob, GCMAESOpt())
```