From 880d24902504bb6d997c54f1b4d2c9ba7f4228bd Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Thu, 2 Oct 2025 12:13:45 +0200 Subject: [PATCH 01/17] Fix dependency structure of solvers to OptimizationBase --- lib/OptimizationAuglag/Project.toml | 8 +-- .../src/OptimizationAuglag.jl | 3 +- lib/OptimizationBBO/Project.toml | 14 +++-- lib/OptimizationBBO/src/OptimizationBBO.jl | 26 ++++----- lib/OptimizationBase/src/OptimizationBase.jl | 2 + {src => lib/OptimizationBase/src}/state.jl | 0 {src => lib/OptimizationBase/src}/utils.jl | 0 .../Project.toml | 14 +++-- .../src/OptimizationCMAEvolutionStrategy.jl | 17 +++--- lib/OptimizationEvolutionary/Project.toml | 14 +++-- .../src/OptimizationEvolutionary.jl | 16 ++--- lib/OptimizationGCMAES/Project.toml | 18 +++--- .../src/OptimizationGCMAES.jl | 14 ++--- lib/OptimizationIpopt/Project.toml | 4 +- .../src/OptimizationIpopt.jl | 10 ++-- lib/OptimizationIpopt/src/callback.jl | 2 +- lib/OptimizationLBFGSB/Project.toml | 6 +- .../src/OptimizationLBFGSB.jl | 18 +++--- lib/OptimizationMOI/Project.toml | 54 ++++++++--------- lib/OptimizationMOI/src/OptimizationMOI.jl | 4 +- lib/OptimizationMOI/src/moi.jl | 8 +-- lib/OptimizationMOI/src/nlp.jl | 8 +-- lib/OptimizationManopt/Project.toml | 36 ++++++------ .../src/OptimizationManopt.jl | 8 +-- lib/OptimizationMetaheuristics/Project.toml | 14 +++-- .../src/OptimizationMetaheuristics.jl | 14 ++--- .../Project.toml | 20 ++++--- .../src/OptimizationMultistartOptimization.jl | 8 +-- lib/OptimizationNLPModels/Project.toml | 26 +++++---- .../src/OptimizationNLPModels.jl | 2 +- lib/OptimizationNLopt/Project.toml | 20 ++++--- .../src/OptimizationNLopt.jl | 18 +++--- lib/OptimizationNOMAD/Project.toml | 12 ++-- .../src/OptimizationNOMAD.jl | 10 ++-- lib/OptimizationODE/Project.toml | 4 +- lib/OptimizationODE/src/OptimizationODE.jl | 16 ++--- lib/OptimizationOptimJL/Project.toml | 30 +++++----- .../src/OptimizationOptimJL.jl | 28 ++++----- lib/OptimizationOptimisers/Project.toml | 28 ++++----- .../src/OptimizationOptimisers.jl | 16 ++--- lib/OptimizationPRIMA/Project.toml | 17 +++--- .../src/OptimizationPRIMA.jl | 14 ++--- lib/OptimizationPolyalgorithms/Project.toml | 20 ++++--- .../src/OptimizationPolyalgorithms.jl | 4 +- lib/OptimizationPyCMA/Project.toml | 16 ++--- .../src/OptimizationPyCMA.jl | 16 ++--- lib/OptimizationQuadDIRECT/Project.toml | 14 +++-- .../src/OptimizationQuadDIRECT.jl | 8 +-- lib/OptimizationSciPy/Project.toml | 24 ++++---- .../src/OptimizationSciPy.jl | 58 +++++++++---------- lib/OptimizationSophia/Project.toml | 6 +- .../src/OptimizationSophia.jl | 12 ++-- lib/OptimizationSpeedMapping/Project.toml | 18 +++--- .../src/OptimizationSpeedMapping.jl | 14 ++--- 54 files changed, 425 insertions(+), 386 deletions(-) rename {src => lib/OptimizationBase/src}/state.jl (100%) rename {src => lib/OptimizationBase/src}/utils.jl (100%) diff --git a/lib/OptimizationAuglag/Project.toml b/lib/OptimizationAuglag/Project.toml index ea8341e10..341ee27e2 100644 --- a/lib/OptimizationAuglag/Project.toml +++ b/lib/OptimizationAuglag/Project.toml @@ -5,21 +5,21 @@ version = "1.0.0" [deps] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" [extras] Test = 
"8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] ForwardDiff = "1.0.1" -MLUtils = "0.4.8" -Optimization = "4.4.0" OptimizationBase = "2.10.0" +MLUtils = "0.4.8" OptimizationOptimisers = "0.3.8" Test = "1.10.0" +SciMLBase = "2.58" julia = "1.10" [targets] diff --git a/lib/OptimizationAuglag/src/OptimizationAuglag.jl b/lib/OptimizationAuglag/src/OptimizationAuglag.jl index 2d97f0e59..9c5ff8308 100644 --- a/lib/OptimizationAuglag/src/OptimizationAuglag.jl +++ b/lib/OptimizationAuglag/src/OptimizationAuglag.jl @@ -1,7 +1,6 @@ module OptimizationAuglag -using Optimization -using OptimizationBase.SciMLBase: OptimizationProblem, OptimizationFunction, OptimizationStats +using SciMLBase: OptimizationProblem, OptimizationFunction, OptimizationStats using OptimizationBase.LinearAlgebra: norm @kwdef struct AugLag diff --git a/lib/OptimizationBBO/Project.toml b/lib/OptimizationBBO/Project.toml index 7bdd8e5c4..42b148cf0 100644 --- a/lib/OptimizationBBO/Project.toml +++ b/lib/OptimizationBBO/Project.toml @@ -5,17 +5,19 @@ version = "0.4.2" [deps] BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + [compat] +julia = "1.10" BlackBoxOptim = "0.6" -Optimization = "4.4" +OptimizationBase = "2.10" +SciMLBase = "2.58" Reexport = "1.2" -julia = "1.10" - -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] test = ["Test"] diff --git a/lib/OptimizationBBO/src/OptimizationBBO.jl b/lib/OptimizationBBO/src/OptimizationBBO.jl index 57f874356..cb62b3324 100644 --- a/lib/OptimizationBBO/src/OptimizationBBO.jl +++ b/lib/OptimizationBBO/src/OptimizationBBO.jl @@ -1,10 +1,10 @@ module OptimizationBBO using Reexport -import Optimization -import Optimization: OptimizationBase -import BlackBoxOptim, Optimization.SciMLBase -import Optimization.SciMLBase: MultiObjectiveOptimizationFunction +import OptimizationBase +import OptimizationBase: SciMLBase +import BlackBoxOptim +import SciMLBase: MultiObjectiveOptimizationFunction abstract type BBO end @@ -48,7 +48,7 @@ function decompose_trace(opt::BlackBoxOptim.OptRunController, progress) return BlackBoxOptim.best_candidate(opt) end -function __map_optimizer_args(prob::Optimization.OptimizationCache, opt::BBO; +function __map_optimizer_args(prob::OptimizationBase.OptimizationCache, opt::BBO; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -96,7 +96,7 @@ function map_objective(obj::BlackBoxOptim.IndexedTupleFitness) obj.orig end -function SciMLBase.__solve(cache::Optimization.OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -123,13 +123,13 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ C } function _cb(trace) - if cache.callback === Optimization.DEFAULT_CALLBACK + if cache.callback === OptimizationBase.DEFAULT_CALLBACK cb_call = false else n_steps = BlackBoxOptim.num_steps(trace) curr_u = decompose_trace(trace, cache.progress) objective = map_objective(BlackBoxOptim.best_fitness(trace)) - opt_state = Optimization.OptimizationState(; + opt_state = OptimizationBase.OptimizationState(; iter = n_steps, u = curr_u, p = cache.p, @@ -148,15 +148,15 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ cb_call end - maxiters = 
Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) _loss = function (θ) cache.f(θ, cache.p) end opt_args = __map_optimizer_args(cache, cache.opt; - callback = cache.callback === Optimization.DEFAULT_CALLBACK ? + callback = cache.callback === OptimizationBase.DEFAULT_CALLBACK ? nothing : _cb, cache.solver_args..., maxiters = maxiters, @@ -176,8 +176,8 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ end # Use the improved convert function - opt_ret = Optimization.deduce_retcode(opt_res.stop_reason) - stats = Optimization.OptimizationStats(; + opt_ret = OptimizationBase.deduce_retcode(opt_res.stop_reason) + stats = OptimizationBase.OptimizationStats(; iterations = opt_res.iterations, time = opt_res.elapsed_time, fevals = opt_res.f_calls) diff --git a/lib/OptimizationBase/src/OptimizationBase.jl b/lib/OptimizationBase/src/OptimizationBase.jl index e6965a542..244f086ce 100644 --- a/lib/OptimizationBase/src/OptimizationBase.jl +++ b/lib/OptimizationBase/src/OptimizationBase.jl @@ -28,6 +28,8 @@ include("OptimizationDIExt.jl") include("OptimizationDISparseExt.jl") include("function.jl") include("solve.jl") +include("utils.jl") +include("state.jl") export solve, OptimizationCache, DEFAULT_CALLBACK, DEFAULT_DATA, IncompatibleOptimizerError, OptimizerMissingError, _check_opt_alg, diff --git a/src/state.jl b/lib/OptimizationBase/src/state.jl similarity index 100% rename from src/state.jl rename to lib/OptimizationBase/src/state.jl diff --git a/src/utils.jl b/lib/OptimizationBase/src/utils.jl similarity index 100% rename from src/utils.jl rename to lib/OptimizationBase/src/utils.jl diff --git a/lib/OptimizationCMAEvolutionStrategy/Project.toml b/lib/OptimizationCMAEvolutionStrategy/Project.toml index 533d07c94..6633beb3e 100644 --- a/lib/OptimizationCMAEvolutionStrategy/Project.toml +++ b/lib/OptimizationCMAEvolutionStrategy/Project.toml @@ -5,17 +5,19 @@ version = "0.3.2" [deps] CMAEvolutionStrategy = "8d3b24bd-414e-49e0-94fb-163cc3a3e411" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + [compat] -julia = "1.10" CMAEvolutionStrategy = "0.2" -Optimization = "4.4" +julia = "1.10" +OptimizationBase = "2.10" +SciMLBase = "2.58" Reexport = "1.2" -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - [targets] test = ["Test"] diff --git a/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl index fa6e9d8f4..90b65b975 100644 --- a/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl +++ b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl @@ -1,8 +1,9 @@ module OptimizationCMAEvolutionStrategy using Reexport -@reexport using Optimization -using CMAEvolutionStrategy, Optimization.SciMLBase +@reexport using OptimizationBase +using CMAEvolutionStrategy +using OptimizationBase: SciMLBase export CMAEvolutionStrategyOpt @@ -20,7 +21,7 @@ SciMLBase.requireshessian(::CMAEvolutionStrategyOpt) = false SciMLBase.requiresconsjac(::CMAEvolutionStrategyOpt) = 
false SciMLBase.requiresconshess(::CMAEvolutionStrategyOpt) = false -function __map_optimizer_args(prob::OptimizationCache, opt::CMAEvolutionStrategyOpt; +function __map_optimizer_args(prob::OptimizationBase.OptimizationCache, opt::CMAEvolutionStrategyOpt; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -52,7 +53,7 @@ function __map_optimizer_args(prob::OptimizationCache, opt::CMAEvolutionStrategy return mapped_args end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -82,7 +83,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ function _cb(opt, y, fvals, perm) curr_u = xbest(opt) - opt_state = Optimization.OptimizationState(; iter = length(opt.logger.fmedian), + opt_state = OptimizationBase.OptimizationState(; iter = length(opt.logger.fmedian), u = curr_u, p = cache.p, objective = fbest(opt), @@ -95,8 +96,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ cb_call end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) _loss = function (θ) x = cache.f(θ, cache.p) @@ -112,7 +113,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ t1 = time() opt_ret = opt_res.stop.reason - stats = Optimization.OptimizationStats(; + stats = OptimizationBase.OptimizationStats(; iterations = length(opt_res.logger.fmedian), time = t1 - t0, fevals = length(opt_res.logger.fmedian)) diff --git a/lib/OptimizationEvolutionary/Project.toml b/lib/OptimizationEvolutionary/Project.toml index ab8167cde..c86f60289 100644 --- a/lib/OptimizationEvolutionary/Project.toml +++ b/lib/OptimizationEvolutionary/Project.toml @@ -4,19 +4,21 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.4.2" [deps] +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + [compat] julia = "1.10" +OptimizationBase = "2.10" Evolutionary = "0.11" -Optimization = "4.4" +SciMLBase = "2.58" Reexport = "1.2" -[extras] -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - [targets] test = ["Random", "Test"] diff --git a/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl index 8d6ee4a02..2d9c4c305 100644 --- a/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl +++ b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl @@ -1,8 +1,8 @@ module OptimizationEvolutionary using Reexport -@reexport using Evolutionary, Optimization -using Optimization.SciMLBase +@reexport using Evolutionary, OptimizationBase +using SciMLBase SciMLBase.allowsbounds(opt::Evolutionary.AbstractOptimizer) = true SciMLBase.allowsconstraints(opt::Evolutionary.AbstractOptimizer) = true @@ -43,7 +43,7 @@ function Evolutionary.trace!(tr, iteration, objfun, state, population, options.callback) end -function __map_optimizer_args(cache::OptimizationCache, +function 
__map_optimizer_args(cache::OptimizationBase.OptimizationCache, opt::Evolutionary.AbstractOptimizer; callback = nothing, maxiters::Union{Number, Nothing} = nothing, @@ -76,7 +76,7 @@ function __map_optimizer_args(cache::OptimizationCache, return Evolutionary.Options(; mapped_args...) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -106,7 +106,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ function _cb(trace) curr_u = decompose_trace(trace).metadata["curr_u"] - opt_state = Optimization.OptimizationState(; + opt_state = OptimizationBase.OptimizationState(; iter = decompose_trace(trace).iteration, u = curr_u, p = cache.p, @@ -119,8 +119,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ cb_call end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) f = cache.f @@ -174,7 +174,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ end t1 = time() opt_ret = Symbol(Evolutionary.converged(opt_res)) - stats = Optimization.OptimizationStats(; iterations = opt_res.iterations, + stats = OptimizationBase.OptimizationStats(; iterations = opt_res.iterations, time = t1 - t0, fevals = opt_res.f_calls) if !isa(f, MultiObjectiveOptimizationFunction) SciMLBase.build_solution(cache, cache.opt, diff --git a/lib/OptimizationGCMAES/Project.toml b/lib/OptimizationGCMAES/Project.toml index cf60a8107..90ec0a797 100644 --- a/lib/OptimizationGCMAES/Project.toml +++ b/lib/OptimizationGCMAES/Project.toml @@ -4,19 +4,21 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.1" [deps] -GCMAES = "4aa9d100-eb0f-11e8-15f1-25748831eb3b" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" - -[compat] -julia = "1.10" -Optimization = "4.4" -GCMAES = "0.1" -Reexport = "1.2" +GCMAES = "4aa9d100-eb0f-11e8-15f1-25748831eb3b" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +[compat] +julia = "1.10" +OptimizationBase = "2.10" +SciMLBase = "2.58" +Reexport = "1.2" +GCMAES = "0.1" + [targets] test = ["ForwardDiff", "Test"] diff --git a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl index 68fce38ab..02e840548 100644 --- a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl +++ b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl @@ -1,8 +1,8 @@ module OptimizationGCMAES using Reexport -@reexport using Optimization -using GCMAES, Optimization.SciMLBase +@reexport using OptimizationBase +using GCMAES, SciMLBase export GCMAESOpt @@ -22,7 +22,7 @@ SciMLBase.requireshessian(::GCMAESOpt) = false SciMLBase.requiresconsjac(::GCMAESOpt) = false SciMLBase.requiresconshess(::GCMAESOpt) = false -function __map_optimizer_args(cache::OptimizationCache, opt::GCMAESOpt; +function __map_optimizer_args(cache::OptimizationBase.OptimizationCache, opt::GCMAESOpt; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -61,7 +61,7 @@ function SciMLBase.__init(prob::SciMLBase.OptimizationProblem, kwargs...) 
end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -102,8 +102,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ end end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) opt_args = __map_optimizer_args(cache, cache.opt; cache.solver_args..., maxiters = maxiters, @@ -126,7 +126,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ cache.ub; opt_args...) end t1 = time() - stats = Optimization.OptimizationStats(; + stats = OptimizationBase.OptimizationStats(; iterations = maxiters === nothing ? 0 : maxiters, time = t1 - t0) SciMLBase.build_solution(cache, cache.opt, diff --git a/lib/OptimizationIpopt/Project.toml b/lib/OptimizationIpopt/Project.toml index a19b58137..fbbbeb66d 100644 --- a/lib/OptimizationIpopt/Project.toml +++ b/lib/OptimizationIpopt/Project.toml @@ -6,7 +6,7 @@ version = "0.2.2" [deps] Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" @@ -15,7 +15,7 @@ SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" Ipopt = "1.10.3" LinearAlgebra = "1.10.0" ModelingToolkit = "10.23" -Optimization = "4.3.0" +OptimizationBase = "2.10" SciMLBase = "2.90.0" SparseArrays = "1.10.0" SymbolicIndexingInterface = "0.3.40" diff --git a/lib/OptimizationIpopt/src/OptimizationIpopt.jl b/lib/OptimizationIpopt/src/OptimizationIpopt.jl index a877977f3..6b3222c7e 100644 --- a/lib/OptimizationIpopt/src/OptimizationIpopt.jl +++ b/lib/OptimizationIpopt/src/OptimizationIpopt.jl @@ -1,6 +1,6 @@ module OptimizationIpopt -using Optimization +using OptimizationBase using Ipopt using LinearAlgebra using SparseArrays @@ -84,7 +84,7 @@ The following common optimization arguments can be passed to `solve`: # Examples ```julia -using Optimization, OptimizationIpopt +using OptimizationBase, OptimizationIpopt # Basic usage with default settings opt = IpoptOptimizer() @@ -331,8 +331,8 @@ function map_retcode(solvestat) end function SciMLBase.__solve(cache::IpoptCache) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) opt_setup = __map_optimizer_args(cache, cache.opt; @@ -359,7 +359,7 @@ function SciMLBase.__solve(cache::IpoptCache) minimum = opt_setup.obj_val minimizer = opt_setup.x - stats = Optimization.OptimizationStats(; time = time() - start_time, + stats = OptimizationBase.OptimizationStats(; time = time() - start_time, iterations = cache.iterations[], fevals = cache.f_calls, gevals = cache.f_grad_calls) finalize(opt_setup) diff --git a/lib/OptimizationIpopt/src/callback.jl b/lib/OptimizationIpopt/src/callback.jl index ebe644f86..c1d08f810 100644 --- a/lib/OptimizationIpopt/src/callback.jl +++ 
b/lib/OptimizationIpopt/src/callback.jl @@ -78,7 +78,7 @@ function (cb::IpoptProgressLogger)( cb.lambda ) - opt_state = Optimization.OptimizationState(; + opt_state = OptimizationBase.OptimizationState(; iter = Int(iter_count), cb.u, objective = obj_value, original) cb.iterations[] = Int(iter_count) diff --git a/lib/OptimizationLBFGSB/Project.toml b/lib/OptimizationLBFGSB/Project.toml index 83abb72e9..c27054335 100644 --- a/lib/OptimizationLBFGSB/Project.toml +++ b/lib/OptimizationLBFGSB/Project.toml @@ -6,8 +6,8 @@ version = "1.0.0" [deps] DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" LBFGSB = "5be7bae1-8223-5378-bac3-9e7378a2f6e6" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -20,8 +20,8 @@ DocStringExtensions = "0.9.5" ForwardDiff = "1.0.1" LBFGSB = "0.4.1" MLUtils = "0.4.8" -Optimization = "4.4.0" -OptimizationBase = "2.10.0" +OptimizationBase = "2.10" +SciMLBase = "2.58" Zygote = "0.7.10" julia = "1.10" diff --git a/lib/OptimizationLBFGSB/src/OptimizationLBFGSB.jl b/lib/OptimizationLBFGSB/src/OptimizationLBFGSB.jl index e8917fc3b..c39ac837b 100644 --- a/lib/OptimizationLBFGSB/src/OptimizationLBFGSB.jl +++ b/lib/OptimizationLBFGSB/src/OptimizationLBFGSB.jl @@ -1,12 +1,12 @@ module OptimizationLBFGSB -using Optimization +using OptimizationBase using DocStringExtensions import LBFGSB as LBFGSBJL -using OptimizationBase.SciMLBase: OptimizationStats, OptimizationFunction +using SciMLBase: OptimizationStats, OptimizationFunction using OptimizationBase: ReturnCode using OptimizationBase.LinearAlgebra: norm -using Optimization: deduce_retcode +using OptimizationBase: deduce_retcode """ $(TYPEDEF) @@ -46,7 +46,7 @@ function task_message_to_string(task::Vector{UInt8}) return String(task) end -function __map_optimizer_args(cache::OptimizationCache, opt::LBFGSB; +function __map_optimizer_args(cache::OptimizationBase.OptimizationCache, opt::LBFGSB; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -78,7 +78,7 @@ function __map_optimizer_args(cache::OptimizationCache, opt::LBFGSB; return mapped_args end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -104,7 +104,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ P, C } - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) local x @@ -137,7 +137,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ cache.f.cons(cons_tmp, θ) cons_tmp[eq_inds] .= cons_tmp[eq_inds] - cache.lcons[eq_inds] cons_tmp[ineq_inds] .= cons_tmp[ineq_inds] .- cache.ucons[ineq_inds] - opt_state = Optimization.OptimizationState( + opt_state = OptimizationBase.OptimizationState( u = θ, objective = x[1]) if cache.callback(opt_state, x...) error("Optimization halted by callback.") @@ -227,7 +227,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ _loss = function (θ) x = cache.f(θ, cache.p) iter_count[] += 1 - opt_state = Optimization.OptimizationState( + opt_state = OptimizationBase.OptimizationState( u = θ, objective = x[1]) if cache.callback(opt_state, x...) 
error("Optimization halted by callback.") @@ -262,7 +262,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ t1 = time() - stats = Optimization.OptimizationStats(; iterations = optimizer.isave[30], + stats = OptimizationBase.OptimizationStats(; iterations = optimizer.isave[30], time = t1 - t0, fevals = optimizer.isave[34], gevals = optimizer.isave[34]) return SciMLBase.build_solution(cache, cache.opt, res[2], res[1], stats = stats, diff --git a/lib/OptimizationMOI/Project.toml b/lib/OptimizationMOI/Project.toml index 28f39cbb3..dc97f8713 100644 --- a/lib/OptimizationMOI/Project.toml +++ b/lib/OptimizationMOI/Project.toml @@ -4,46 +4,48 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.5.7" [deps] +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLStructures = "53ae85a6-f571-4167-b2af-e1d143709226" -SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" + +[extras] +HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" +AmplNLWriter = "7c4d4715-977e-5154-bfe0-e096adeac482" +ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Ipopt_jll = "9cc047cb-c261-5740-88fc-0cf96f7bdcc7" +Juniper = "2ddba703-00a4-53a7-87a5-e8b9971dde84" +Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [compat] -AmplNLWriter = "1" HiGHS = "1" -Ipopt = "1" +OptimizationBase = "2.10" +Test = "1.6" +Symbolics = "6" +AmplNLWriter = "1" +LinearAlgebra = "1" Ipopt_jll = "300.1400" Juniper = "0.9" -LinearAlgebra = "1" -MathOptInterface = "1" -ModelingToolkit = "10.23" +Ipopt = "1" NLopt = "1" -Optimization = "4.4" -Reexport = "1.2" -SciMLStructures = "1" +SciMLBase = "2.58" SparseArrays = "1.6" +ModelingToolkit = "10.23" SymbolicIndexingInterface = "0.3" -Symbolics = "6" -Test = "1.6" -Zygote = "0.6, 0.7" julia = "1.10" - -[extras] -AmplNLWriter = "7c4d4715-977e-5154-bfe0-e096adeac482" -HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b" -Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" -Ipopt_jll = "9cc047cb-c261-5740-88fc-0cf96f7bdcc7" -Juniper = "2ddba703-00a4-53a7-87a5-e8b9971dde84" -NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" -ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +Zygote = "0.6, 0.7" +MathOptInterface = "1" +Reexport = "1.2" +SciMLStructures = "1" [targets] test = ["AmplNLWriter", "HiGHS", "Ipopt", "Ipopt_jll", "Juniper", "NLopt", "ReverseDiff", "Test", "Zygote"] diff --git a/lib/OptimizationMOI/src/OptimizationMOI.jl b/lib/OptimizationMOI/src/OptimizationMOI.jl index 41eaf7f90..ee3c2c6af 100644 --- a/lib/OptimizationMOI/src/OptimizationMOI.jl +++ b/lib/OptimizationMOI/src/OptimizationMOI.jl @@ -1,9 +1,9 @@ module OptimizationMOI using Reexport -@reexport using Optimization +@reexport using OptimizationBase using MathOptInterface -using 
Optimization.SciMLBase +using SciMLBase using SciMLStructures using SymbolicIndexingInterface using SparseArrays diff --git a/lib/OptimizationMOI/src/moi.jl b/lib/OptimizationMOI/src/moi.jl index 3481fd9be..455155a11 100644 --- a/lib/OptimizationMOI/src/moi.jl +++ b/lib/OptimizationMOI/src/moi.jl @@ -107,8 +107,8 @@ function _add_moi_variables!(opt_setup, cache::MOIOptimizationCache) end function SciMLBase.__solve(cache::MOIOptimizationCache) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) opt_setup = __map_optimizer_args(cache, cache.opt; abstol = cache.solver_args.abstol, @@ -171,7 +171,7 @@ function SciMLBase.__solve(cache::MOIOptimizationCache) minimum = NaN opt_ret = SciMLBase.ReturnCode.Default end - stats = Optimization.OptimizationStats() + stats = OptimizationBase.OptimizationStats() return SciMLBase.build_solution(cache, cache.opt, minimizer, diff --git a/lib/OptimizationMOI/src/nlp.jl b/lib/OptimizationMOI/src/nlp.jl index 5c8b9be00..f78fdd7f7 100644 --- a/lib/OptimizationMOI/src/nlp.jl +++ b/lib/OptimizationMOI/src/nlp.jl @@ -237,7 +237,7 @@ function MOI.eval_objective(evaluator::MOIOptimizationNLPEvaluator, x) else l = evaluator.f(x, evaluator.p) evaluator.iteration += 1 - state = Optimization.OptimizationState(iter = evaluator.iteration, + state = OptimizationBase.OptimizationState(iter = evaluator.iteration, u = x, p = evaluator.p, objective = l[1]) @@ -521,8 +521,8 @@ function _add_moi_variables!(opt_setup, evaluator::MOIOptimizationNLPEvaluator) end function SciMLBase.__solve(cache::MOIOptimizationNLPCache) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) opt_setup = __map_optimizer_args(cache, cache.opt; abstol = cache.solver_args.abstol, @@ -572,7 +572,7 @@ function SciMLBase.__solve(cache::MOIOptimizationNLPCache) 0 end - stats = Optimization.OptimizationStats(; time = MOI.get(opt_setup, MOI.SolveTimeSec()), + stats = OptimizationBase.OptimizationStats(; time = MOI.get(opt_setup, MOI.SolveTimeSec()), iterations) return SciMLBase.build_solution(cache, cache.opt, diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml index 44c623104..a9d23bd1b 100644 --- a/lib/OptimizationManopt/Project.toml +++ b/lib/OptimizationManopt/Project.toml @@ -4,34 +4,36 @@ authors = ["Mateusz Baran ", "Ronny Bergmann and contributors"] version = "0.3.2" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + [compat] julia = "1.10"
+OptimizationBase = "2.10" Metaheuristics = "3" +SciMLBase = "2.58" Reexport = "1.2" -Optimization = "4.4" - -[extras] -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] test = ["Random", "Test"] diff --git a/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl b/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl index c71b1b256..65ae4c935 100644 --- a/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl +++ b/lib/OptimizationMetaheuristics/src/OptimizationMetaheuristics.jl @@ -1,8 +1,8 @@ module OptimizationMetaheuristics using Reexport -@reexport using Metaheuristics, Optimization -using Optimization.SciMLBase +@reexport using Metaheuristics, OptimizationBase +using SciMLBase SciMLBase.requiresbounds(opt::Metaheuristics.AbstractAlgorithm) = true SciMLBase.allowsbounds(opt::Metaheuristics.AbstractAlgorithm) = true @@ -31,7 +31,7 @@ function initial_population!(opt, cache, bounds, f) return nothing end -function __map_optimizer_args!(cache::OptimizationCache, +function __map_optimizer_args!(cache::OptimizationBase.OptimizationCache, opt::Metaheuristics.AbstractAlgorithm; callback = nothing, maxiters::Union{Number, Nothing} = nothing, @@ -79,7 +79,7 @@ function SciMLBase.__init(prob::SciMLBase.OptimizationProblem, kwargs...) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -107,8 +107,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ } local x - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) f = cache.f _loss = function (θ) @@ -148,7 +148,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ t0 = time() opt_res = Metaheuristics.optimize(_loss, opt_bounds, cache.opt) t1 = time() - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(cache, cache.opt, Metaheuristics.minimizer(opt_res), Metaheuristics.minimum(opt_res); original = opt_res, diff --git a/lib/OptimizationMultistartOptimization/Project.toml b/lib/OptimizationMultistartOptimization/Project.toml index 6b5b30a0a..46455c097 100644 --- a/lib/OptimizationMultistartOptimization/Project.toml +++ b/lib/OptimizationMultistartOptimization/Project.toml @@ -4,22 +4,24 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.1" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" MultistartOptimization = "3933049c-43be-478e-a8bb-6e0f7fd53575" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -[compat] -julia = "1.10" -MultistartOptimization = "0.2, 0.3" -Optimization = "4.4" -Reexport = "1.2" - [extras] +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -OptimizationNLopt= "4e6fcdb7-1186-4e1f-a706-475e75c168bb" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" -Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" + +[compat] +julia = "1.10" +OptimizationBase = "2.10" +MultistartOptimization = "0.2, 0.3" +SciMLBase 
= "2.58" +Reexport = "1.2" [targets] test = ["ForwardDiff", "OptimizationNLopt", "ReverseDiff", "Pkg", "Test"] diff --git a/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl index 4516100d2..8bc79152f 100644 --- a/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl +++ b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl @@ -1,8 +1,8 @@ module OptimizationMultistartOptimization using Reexport -@reexport using MultistartOptimization, Optimization -using Optimization.SciMLBase +@reexport using MultistartOptimization, OptimizationBase +using SciMLBase SciMLBase.requiresbounds(opt::MultistartOptimization.TikTak) = true SciMLBase.allowsbounds(opt::MultistartOptimization.TikTak) = true @@ -24,7 +24,7 @@ function SciMLBase.__init(prob::SciMLBase.OptimizationProblem, kwargs...) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -73,7 +73,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ use_threads = cache.solver_args.use_threads) t1 = time() opt_ret = hasproperty(opt_res, :ret) ? opt_res.ret : nothing - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(cache, (cache.opt, cache.solver_args.local_opt), opt_res.location, opt_res.value; diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml index dba2a6912..b89c1774c 100644 --- a/lib/OptimizationNLPModels/Project.toml +++ b/lib/OptimizationNLPModels/Project.toml @@ -4,26 +4,28 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.0.2" [deps] -ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -[compat] -ADTypes = "1.7" -NLPModels = "0.21" -Optimization = "4.4" -Reexport = "1.2" -julia = "1.10" - [extras] +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856" -OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" -Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1" +Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" + +[compat] +julia = "1.10" +NLPModels = "0.21" +ADTypes = "1.7" +OptimizationBase = "2.10" +SciMLBase = "2.58" +Reexport = "1.2" [targets] test = ["Test", "NLPModelsTest", "OptimizationOptimJL", "ReverseDiff", "Zygote", "Ipopt", "OptimizationMOI"] diff --git a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl index 946707b0f..2533cb3ae 100644 --- a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl +++ b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl @@ -1,7 +1,7 @@ module OptimizationNLPModels using Reexport -@reexport using NLPModels, Optimization, ADTypes +@reexport using NLPModels, OptimizationBase, ADTypes """ 
OptimizationFunction(nlpmodel::AbstractNLPModel, adtype::AbstractADType = NoAD()) diff --git a/lib/OptimizationNLopt/Project.toml b/lib/OptimizationNLopt/Project.toml index 6f0b75bfd..4b8611573 100644 --- a/lib/OptimizationNLopt/Project.toml +++ b/lib/OptimizationNLopt/Project.toml @@ -4,21 +4,23 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.4" [deps] -NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -[compat] -NLopt = "1.1" -Optimization = "4.4" -Reexport = "1.2" -julia = "1.10" - [extras] +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + +[compat] +julia = "1.10" +OptimizationBase = "2.10" +NLopt = "1.1" +SciMLBase = "2.58" +Reexport = "1.2" [targets] test = ["ReverseDiff", "Test", "Zygote"] diff --git a/lib/OptimizationNLopt/src/OptimizationNLopt.jl b/lib/OptimizationNLopt/src/OptimizationNLopt.jl index 3803cc46f..3b0abcf8e 100644 --- a/lib/OptimizationNLopt/src/OptimizationNLopt.jl +++ b/lib/OptimizationNLopt/src/OptimizationNLopt.jl @@ -1,9 +1,9 @@ module OptimizationNLopt using Reexport -@reexport using NLopt, Optimization -using Optimization.SciMLBase -using Optimization: deduce_retcode +@reexport using NLopt, OptimizationBase +using SciMLBase +using OptimizationBase: deduce_retcode (f::NLopt.Algorithm)() = f @@ -57,7 +57,7 @@ function SciMLBase.__init(prob::SciMLBase.OptimizationProblem, opt::NLopt.Algori kwargs...) end -function __map_optimizer_args!(cache::OptimizationCache, opt::NLopt.Opt; +function __map_optimizer_args!(cache::OptimizationBase.OptimizationCache, opt::NLopt.Opt; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -136,7 +136,7 @@ function __map_optimizer_args!(cache::OptimizationCache, opt::NLopt.Opt; return nothing end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -169,7 +169,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ _loss = function (θ) x = cache.f(θ, cache.p) - opt_state = Optimization.OptimizationState(u = θ, p = cache.p, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ, p = cache.p, objective = x[1]) if cache.callback(opt_state, x...) 
NLopt.force_stop!(opt_setup) end @@ -252,8 +252,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ end end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) __map_optimizer_args!(cache, opt_setup; callback = cache.callback, maxiters = maxiters, maxtime = maxtime, @@ -267,7 +267,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ if retcode == ReturnCode.Failure @warn "NLopt failed to converge: $(ret)" end - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(cache, cache.opt, minx, minf; original = opt_setup, retcode = retcode, stats = stats) diff --git a/lib/OptimizationNOMAD/Project.toml b/lib/OptimizationNOMAD/Project.toml index f66e906d9..0d7e17538 100644 --- a/lib/OptimizationNOMAD/Project.toml +++ b/lib/OptimizationNOMAD/Project.toml @@ -4,18 +4,20 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.2" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" NOMAD = "02130f1c-4665-5b79-af82-ff1385104aa0" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + [compat] julia = "1.10" +OptimizationBase = "2.10" NOMAD = "2.4.1" -Optimization = "4.4" +SciMLBase = "2.58" Reexport = "1.2" -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - [targets] test = ["Test"] diff --git a/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl b/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl index 9bfd28b61..bd7834b91 100644 --- a/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl +++ b/lib/OptimizationNOMAD/src/OptimizationNOMAD.jl @@ -1,8 +1,8 @@ module OptimizationNOMAD using Reexport -@reexport using Optimization -using NOMAD, Optimization.SciMLBase +@reexport using OptimizationBase +using NOMAD, SciMLBase export NOMADOpt struct NOMADOpt end @@ -54,8 +54,8 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::NOMADOpt; kwargs...) 
local x - maxiters = Optimization._check_and_convert_maxiters(maxiters) - maxtime = Optimization._check_and_convert_maxtime(maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(maxtime) _loss = function (θ) x = prob.f(θ, prob.p) @@ -108,7 +108,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::NOMADOpt; t0 = time() opt_res = NOMAD.solve(opt_setup, prob.u0) t1 = time() - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(SciMLBase.DefaultOptimizationCache(prob.f, prob.p), opt, opt_res.x_best_feas, first(opt_res.bbo_best_feas); original = opt_res, stats = stats) diff --git a/lib/OptimizationODE/Project.toml b/lib/OptimizationODE/Project.toml index c21a81607..651663ea5 100644 --- a/lib/OptimizationODE/Project.toml +++ b/lib/OptimizationODE/Project.toml @@ -6,7 +6,7 @@ version = "0.1.2" [deps] DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -17,7 +17,7 @@ NonlinearSolve = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" [compat] DiffEqBase = "6.190" ForwardDiff = "0.10, 1" -Optimization = "4.4" +OptimizationBase = "2.10" OrdinaryDiffEq = "6.70" NonlinearSolve = "4" Reexport = "1" diff --git a/lib/OptimizationODE/src/OptimizationODE.jl b/lib/OptimizationODE/src/OptimizationODE.jl index 5edc8e020..eb8638b9b 100644 --- a/lib/OptimizationODE/src/OptimizationODE.jl +++ b/lib/OptimizationODE/src/OptimizationODE.jl @@ -1,7 +1,7 @@ module OptimizationODE using Reexport -@reexport using Optimization, SciMLBase +@reexport using OptimizationBase, SciMLBase using LinearAlgebra, ForwardDiff using DiffEqBase @@ -49,21 +49,21 @@ SciMLBase.requiresconshess(::DAEOptimizer) = false function SciMLBase.__init(prob::OptimizationProblem, opt::ODEOptimizer; - callback=Optimization.DEFAULT_CALLBACK, progress=false, dt=nothing, + callback=OptimizationBase.DEFAULT_CALLBACK, progress=false, dt=nothing, maxiters=nothing, kwargs...) return OptimizationCache(prob, opt; callback=callback, progress=progress, dt=dt, maxiters=maxiters, kwargs...) end function SciMLBase.__init(prob::OptimizationProblem, opt::DAEOptimizer; - callback=Optimization.DEFAULT_CALLBACK, progress=false, dt=nothing, + callback=OptimizationBase.DEFAULT_CALLBACK, progress=false, dt=nothing, maxiters=nothing, kwargs...) return OptimizationCache(prob, opt; callback=callback, progress=progress, dt=dt, maxiters=maxiters, kwargs...) end function SciMLBase.__solve( - cache::OptimizationCache{F,RC,LB,UB,LC,UC,S,O,D,P,C} + cache::OptimizationBase.OptimizationCache{F,RC,LB,UB,LC,UC,S,O,D,P,C} ) where {F,RC,LB,UB,LC,UC,S,O<:Union{ODEOptimizer,DAEOptimizer},D,P,C} dt = get(cache.solver_args, :dt, nothing) @@ -97,7 +97,7 @@ function solve_ode(cache, dt, maxit, u0, p) algorithm = DynamicSS(cache.opt.solver) - if cache.callback !== Optimization.DEFAULT_CALLBACK + if cache.callback !== OptimizationBase.DEFAULT_CALLBACK condition = (u, t, integrator) -> true affect! = (integrator) -> begin u_opt = integrator.u isa AbstractArray ? 
integrator.u : integrator.u.u @@ -123,7 +123,7 @@ function solve_ode(cache, dt, maxit, u0, p) has_destats = hasproperty(sol, :destats) has_t = hasproperty(sol, :t) && !isempty(sol.t) - stats = Optimization.OptimizationStats( + stats = OptimizationBase.OptimizationStats( iterations = has_destats ? get(sol.destats, :iters, 10) : (has_t ? length(sol.t) - 1 : 10), time = has_t ? sol.t[end] : 0.0, fevals = has_destats ? get(sol.destats, :f_calls, 0) : 0, @@ -158,7 +158,7 @@ function solve_dae_mass_matrix(cache, dt, maxit, u0, p) ss_prob = SteadyStateProblem(ODEFunction(f_mass!, mass_matrix = M), u0, p) - if cache.callback !== Optimization.DEFAULT_CALLBACK + if cache.callback !== OptimizationBase.DEFAULT_CALLBACK condition = (u, t, integrator) -> true affect! = (integrator) -> begin u_opt = integrator.u isa AbstractArray ? integrator.u : integrator.u.u @@ -209,7 +209,7 @@ function solve_dae_implicit(cache, dt, maxit, u0, p) du0 = zero(u0) prob = DAEProblem(dae_residual!, du0, u0, tspan, p) - if cache.callback !== Optimization.DEFAULT_CALLBACK + if cache.callback !== OptimizationBase.DEFAULT_CALLBACK condition = (u, t, integrator) -> true affect! = (integrator) -> begin u_opt = integrator.u isa AbstractArray ? integrator.u : integrator.u.u diff --git a/lib/OptimizationOptimJL/Project.toml b/lib/OptimizationOptimJL/Project.toml index 7feb64eaf..a547dac62 100644 --- a/lib/OptimizationOptimJL/Project.toml +++ b/lib/OptimizationOptimJL/Project.toml @@ -4,27 +4,29 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.4.5" [deps] -Optim = "429524aa-4258-5aef-a3af-852621145aeb" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - -[compat] -Optim = "1" -Optimization = "4.4" -PrecompileTools = "1.2" -Reexport = "1.2" -SparseArrays = "1.6" -julia = "1.10" +Optim = "429524aa-4258-5aef-a3af-852621145aeb" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" + +[compat] +julia = "1.10" +PrecompileTools = "1.2" +OptimizationBase = "2.10" +SparseArrays = "1.6" +Optim = "1" +Reexport = "1.2" +SciMLBase = "2.58" [targets] test = ["ForwardDiff", "ModelingToolkit", "Random", "ReverseDiff", "Test", "Zygote"] diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl index ca51d29b2..262fcb27c 100644 --- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl +++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl @@ -1,8 +1,8 @@ module OptimizationOptimJL using Reexport -@reexport using Optim, Optimization -using Optimization.SciMLBase, SparseArrays +@reexport using Optim, OptimizationBase +using SciMLBase, SparseArrays decompose_trace(trace::Optim.OptimizationTrace) = last(trace) decompose_trace(trace::Optim.OptimizationState) = trace @@ -35,7 +35,7 @@ end SciMLBase.requiresgradient(opt::Optim.Fminbox) = true # 
SciMLBase.allowsfg(opt::Union{Optim.AbstractOptimizer, Optim.ConstrainedOptimizer, Optim.Fminbox, Optim.SAMIN}) = true -function __map_optimizer_args(cache::OptimizationCache, +function __map_optimizer_args(cache::OptimizationBase.OptimizationCache, opt::Union{Optim.AbstractOptimizer, Optim.Fminbox, Optim.SAMIN, Optim.ConstrainedOptimizer}; callback = nothing, @@ -112,14 +112,14 @@ function SciMLBase.__init(prob::OptimizationProblem, end end - maxiters = Optimization._check_and_convert_maxiters(maxiters) - maxtime = Optimization._check_and_convert_maxtime(maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(maxtime) return OptimizationCache(prob, opt; callback, maxiters, maxtime, abstol, reltol, progress, kwargs...) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -149,7 +149,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ trace_state = decompose_trace(trace) metadata = trace_state.metadata θ = metadata[cache.opt isa Optim.NelderMead ? "centroid" : "x"] - opt_state = Optimization.OptimizationState(iter = trace_state.iteration, + opt_state = OptimizationBase.OptimizationState(iter = trace_state.iteration, u = θ, p = cache.p, objective = trace_state.value, @@ -227,7 +227,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ opt_res = Optim.optimize(optim_f, cache.u0, cache.opt, opt_args) t1 = time() opt_ret = Symbol(Optim.converged(opt_res)) - stats = Optimization.OptimizationStats(; iterations = opt_res.iterations, + stats = OptimizationBase.OptimizationStats(; iterations = opt_res.iterations, time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls) SciMLBase.build_solution(cache, cache.opt, @@ -237,7 +237,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -270,7 +270,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ θ = !(cache.opt isa Optim.SAMIN) && cache.opt.method == Optim.NelderMead() ? 
metadata["centroid"] : metadata["x"] - opt_state = Optimization.OptimizationState(iter = trace_state.iteration, + opt_state = OptimizationBase.OptimizationState(iter = trace_state.iteration, u = θ, p = cache.p, objective = trace_state.value, @@ -323,7 +323,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ opt_res = Optim.optimize(optim_f, cache.lb, cache.ub, cache.u0, cache.opt, opt_args) t1 = time() opt_ret = Symbol(Optim.converged(opt_res)) - stats = Optimization.OptimizationStats(; iterations = opt_res.iterations, + stats = OptimizationBase.OptimizationStats(; iterations = opt_res.iterations, time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls) SciMLBase.build_solution(cache, cache.opt, @@ -331,7 +331,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ original = opt_res, retcode = opt_ret, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -357,7 +357,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ function _cb(trace) metadata = decompose_trace(trace).metadata - opt_state = Optimization.OptimizationState(iter = trace.iteration, + opt_state = OptimizationBase.OptimizationState(iter = trace.iteration, u = metadata["x"], p = cache.p, grad = get(metadata, "g(x)", nothing), @@ -467,7 +467,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ end t1 = time() opt_ret = Symbol(Optim.converged(opt_res)) - stats = Optimization.OptimizationStats(; iterations = opt_res.iterations, + stats = OptimizationBase.OptimizationStats(; iterations = opt_res.iterations, time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls) SciMLBase.build_solution(cache, cache.opt, diff --git a/lib/OptimizationOptimisers/Project.toml b/lib/OptimizationOptimisers/Project.toml index e88a0de9c..1c9c67061 100644 --- a/lib/OptimizationOptimisers/Project.toml +++ b/lib/OptimizationOptimisers/Project.toml @@ -4,28 +4,30 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.11" [deps] -Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" +Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -[compat] -Optimisers = "0.2, 0.3, 0.4" -Optimization = "4.4" -ProgressLogging = "0.1" -Reexport = "1.2" -julia = "1.10" - [extras] -ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -Lux = "b2108857-7c20-44ae-9111-449ecde12c47" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" +Lux = "b2108857-7c20-44ae-9111-449ecde12c47" + +[compat] +julia = "1.10" +OptimizationBase = "2.10" +ProgressLogging = "0.1" +SciMLBase = "2.58" +Optimisers = "0.2, 0.3, 0.4" +Reexport = "1.2" [targets] test = ["ComponentArrays", "ForwardDiff", "Lux", "MLDataDevices", "MLUtils", "Random", "Test", "Zygote"] diff --git 
a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl index 6244a1694..c718bb555 100644 --- a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl +++ b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl @@ -1,8 +1,8 @@ module OptimizationOptimisers using Reexport, Printf, ProgressLogging -@reexport using Optimisers, Optimization -using Optimization.SciMLBase, Optimization.OptimizationBase +@reexport using Optimisers, OptimizationBase +using SciMLBase @static if isdefined(SciMLBase, :supports_opt_cache_interface) SciMLBase.supports_opt_cache_interface(opt::AbstractRule) = true @@ -23,7 +23,7 @@ function SciMLBase.__init( save_best, progress, kwargs...) end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -73,8 +73,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ elseif isnothing(cache.solver_args.epochs) cache.solver_args.maxiters / length(data), cache.solver_args.maxiters end - epochs = Optimization._check_and_convert_maxiters(epochs) - maxiters = Optimization._check_and_convert_maxiters(maxiters) + epochs = OptimizationBase._check_and_convert_maxiters(epochs) + maxiters = OptimizationBase._check_and_convert_maxiters(maxiters) # At this point, both of them should be fine; but, let's assert it. @assert (!isnothing(epochs)&&!isnothing(maxiters) && @@ -124,7 +124,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ fevals += 2 gevals += 1 end - opt_state = Optimization.OptimizationState( + opt_state = OptimizationBase.OptimizationState( iter = i + (epoch - 1) * length(data), u = θ, p = d, @@ -151,7 +151,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ x = min_err θ = min_θ cache.f.grad(G, θ, d) - opt_state = Optimization.OptimizationState(iter = iterations, + opt_state = OptimizationBase.OptimizationState(iter = iterations, u = θ, p = d, objective = x[1], @@ -167,7 +167,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ end t1 = time() - stats = Optimization.OptimizationStats(; iterations, + stats = OptimizationBase.OptimizationStats(; iterations, time = t1 - t0, fevals, gevals) SciMLBase.build_solution(cache, cache.opt, θ, first(x)[1], stats = stats) end diff --git a/lib/OptimizationPRIMA/Project.toml b/lib/OptimizationPRIMA/Project.toml index 7aa307a76..7eaa036e7 100644 --- a/lib/OptimizationPRIMA/Project.toml +++ b/lib/OptimizationPRIMA/Project.toml @@ -4,22 +4,23 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.1" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" PRIMA = "0a7d04aa-8ac2-47b3-b7a7-9dbd6ad661ed" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +[extras] +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" [compat] julia = "1.10" -Optimization = "4.4" +OptimizationBase = "2.10" PRIMA = "0.2.0" +SciMLBase = "2.58" Reexport = "1" -[extras] -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - [targets] test = ["Test", "ForwardDiff", "ModelingToolkit", "ReverseDiff"] diff --git a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl 
b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl index 4cb5293c5..3d4e158e5 100644 --- a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl +++ b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl @@ -1,6 +1,6 @@ module OptimizationPRIMA -using Optimization, Optimization.SciMLBase, Reexport +using OptimizationBase, SciMLBase, Reexport @reexport using PRIMA abstract type PRIMASolvers end @@ -25,7 +25,7 @@ SciMLBase.requiresconshess(opt::COBYLA) = true function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem, opt::PRIMASolvers; - callback = Optimization.DEFAULT_CALLBACK, + callback = OptimizationBase.DEFAULT_CALLBACK, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, abstol::Union{Number, Nothing} = nothing, @@ -109,7 +109,7 @@ function sciml_prima_retcode(rc::AbstractString) end end -function SciMLBase.__solve(cache::Optimization.OptimizationCache{ +function SciMLBase.__solve(cache::Optimization.OptimizationBase.OptimizationCache{ F, RC, LB, @@ -138,7 +138,7 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ _loss = function (θ) x = cache.f(θ, cache.p) iter += 1 - opt_state = Optimization.OptimizationState(u = θ, p = cache.p, objective = x[1], iter = iter) + opt_state = OptimizationBase.OptimizationState(u = θ, p = cache.p, objective = x[1], iter = iter) if cache.callback(opt_state, x...) error("Optimization halted by callback.") end @@ -147,8 +147,8 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ optfunc = get_solve_func(cache.opt) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) kws = __map_optimizer_args!(cache, cache.opt; callback = cache.callback, maxiters = maxiters, @@ -197,7 +197,7 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{ t1 = time() retcode = sciml_prima_retcode(PRIMA.reason(inf)) - stats = Optimization.OptimizationStats(; time = t1 - t0, fevals = inf.nf) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0, fevals = inf.nf) SciMLBase.build_solution(cache, cache.opt, minx, inf.fx; retcode = retcode, stats = stats, original = inf) diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml index d5b1de7db..7ef8147d3 100644 --- a/lib/OptimizationPolyalgorithms/Project.toml +++ b/lib/OptimizationPolyalgorithms/Project.toml @@ -4,21 +4,23 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.1" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" - -[compat] -Optimization = "4.4" -OptimizationOptimJL = "0.4" -OptimizationOptimisers = "0.3" -Reexport = "1.2" -julia = "1.10" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +[compat] +julia = "1.10" +OptimizationBase = "2.10" +OptimizationOptimisers = "0.3" +SciMLBase = "2.58" +Reexport = "1.2" +OptimizationOptimJL = "0.4" + [targets] test = ["ForwardDiff", "Test"] diff --git 
a/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl index 8c19e394e..700d10669 100644 --- a/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl +++ b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl @@ -1,8 +1,8 @@ module OptimizationPolyalgorithms using Reexport -@reexport using Optimization -using Optimization.SciMLBase, OptimizationOptimJL, OptimizationOptimisers +@reexport using OptimizationBase +using SciMLBase, OptimizationOptimJL, OptimizationOptimisers struct PolyOpt end diff --git a/lib/OptimizationPyCMA/Project.toml b/lib/OptimizationPyCMA/Project.toml index fcf95a382..2fb54bf9f 100644 --- a/lib/OptimizationPyCMA/Project.toml +++ b/lib/OptimizationPyCMA/Project.toml @@ -4,16 +4,18 @@ authors = ["Maximilian Pochapski <67759684+mxpoch@users.noreply.github.com>"] version = "1.1.0" [deps] +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" -Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" [compat] +julia = "1.10" +OptimizationBase = "2.10" CondaPkg = "0.2" -Optimization = "4.4" -PythonCall = "0.9" -Reexport = "1.2" Test = "1.10" -julia = "1.10" +SciMLBase = "2.58" +Reexport = "1.2" +PythonCall = "0.9" diff --git a/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl b/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl index 61827e8ae..2503e5636 100644 --- a/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl +++ b/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl @@ -1,8 +1,8 @@ module OptimizationPyCMA using Reexport -@reexport using Optimization -using PythonCall, Optimization.SciMLBase +@reexport using OptimizationBase +using PythonCall, SciMLBase export PyCMAOpt @@ -32,7 +32,7 @@ SciMLBase.requiresconsjac(::PyCMAOpt) = false SciMLBase.requiresconshess(::PyCMAOpt) = false # wrapping Optimization.jl args into a python dict as arguments to PyCMA opts -function __map_optimizer_args(prob::OptimizationCache, opt::PyCMAOpt; +function __map_optimizer_args(prob::OptimizationBase.OptimizationCache, opt::PyCMAOpt; maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, abstol::Union{Number, Nothing} = nothing, @@ -95,7 +95,7 @@ function __map_pycma_retcode(stop_dict::Dict{String, Any}) end end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -130,7 +130,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ end _cb = function (es) - opt_state = Optimization.OptimizationState(; iter = pyconvert(Int, es.countiter), + opt_state = OptimizationBase.OptimizationState(; iter = pyconvert(Int, es.countiter), u = pyconvert(Vector{Float64}, es.best.x), p = cache.p, objective = pyconvert(Float64, es.best.f), @@ -146,8 +146,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ end # doing conversions - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) # 
converting the Optimization.jl Args to PyCMA format opt_args = __map_optimizer_args(cache, cache.opt; cache.solver_args..., @@ -167,7 +167,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ retcode = __map_pycma_retcode(pyconvert(Dict{String, Any}, opt_ret_dict)) # logging and returning results of the optimization - stats = Optimization.OptimizationStats(; + stats = OptimizationBase.OptimizationStats(; iterations = pyconvert(Int, es.countiter), time = t1 - t0, fevals = pyconvert(Int, es.countevals)) diff --git a/lib/OptimizationQuadDIRECT/Project.toml b/lib/OptimizationQuadDIRECT/Project.toml index c584d0379..dd3520a15 100644 --- a/lib/OptimizationQuadDIRECT/Project.toml +++ b/lib/OptimizationQuadDIRECT/Project.toml @@ -4,18 +4,20 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.3.1" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" QuadDIRECT = "dae52e8d-d666-5120-a592-9e15c33b8d7a" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -[compat] -Optimization = "4.4" -Reexport = "1.2" -julia = "1.10" - [extras] Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +[compat] +julia = "1.10" +OptimizationBase = "2.10" +SciMLBase = "2.58" +Reexport = "1.2" + [targets] test = ["Pkg", "Test"] diff --git a/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl b/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl index 4ef8b1e1a..035c3dedd 100644 --- a/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl +++ b/lib/OptimizationQuadDIRECT/src/OptimizationQuadDIRECT.jl @@ -1,8 +1,8 @@ module OptimizationQuadDIRECT using Reexport -@reexport using Optimization -using QuadDIRECT, Optimization.SciMLBase +@reexport using OptimizationBase +using QuadDIRECT, SciMLBase export QuadDirect @@ -48,7 +48,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::QuadDirect; kwargs...) local x, _loss - maxiters = Optimization._check_and_convert_maxiters(maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(maxiters) if splits === nothing error("You must provide the initial locations at which to evaluate the function in `splits` (a list of 3-vectors with values in strictly increasing order and within the specified bounds).") @@ -66,7 +66,7 @@ function SciMLBase.__solve(prob::OptimizationProblem, opt::QuadDirect; root, x0 = QuadDIRECT.analyze(_loss, splits, prob.lb, prob.ub; opt_arg...) 
box = minimum(root) t1 = time() - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(SciMLBase.DefaultOptimizationCache(prob.f, prob.p), opt, QuadDIRECT.position(box, x0), QuadDIRECT.value(box); original = root, stats = stats) diff --git a/lib/OptimizationSciPy/Project.toml b/lib/OptimizationSciPy/Project.toml index 5a0c695eb..7225ac632 100644 --- a/lib/OptimizationSciPy/Project.toml +++ b/lib/OptimizationSciPy/Project.toml @@ -4,23 +4,25 @@ authors = ["Aditya Pandey and contributors"] version = "0.4.2" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" - -[compat] -Optimization = "4.4" -PythonCall = "0.9" -Reexport = "1.2" -julia = "1.10" +PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" [extras] ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" + +[compat] +julia = "1.10" +OptimizationBase = "2.10" +SciMLBase = "2.58" +Reexport = "1.2" +PythonCall = "0.9" [targets] test = ["ForwardDiff", "ModelingToolkit", "Random", "ReverseDiff", "Test", "Zygote"] diff --git a/lib/OptimizationSciPy/src/OptimizationSciPy.jl b/lib/OptimizationSciPy/src/OptimizationSciPy.jl index a01f90ead..47cba7414 100644 --- a/lib/OptimizationSciPy/src/OptimizationSciPy.jl +++ b/lib/OptimizationSciPy/src/OptimizationSciPy.jl @@ -2,8 +2,8 @@ module OptimizationSciPy using Reexport -@reexport using Optimization -using Optimization.SciMLBase +@reexport using OptimizationBase +using SciMLBase using PythonCall # We keep a handle to the actual Python SciPy module here. @@ -72,7 +72,7 @@ function extract_stats(result, time_elapsed) if pyhasattr(result, "nhev") && !pyis(result.nhev, pybuiltins.None) stats_dict[:hevals] = pyconvert(Int, result.nhev) end - return Optimization.OptimizationStats(; stats_dict...) + return OptimizationBase.OptimizationStats(; stats_dict...) end # Map SciPy status integers onto SciML ReturnCode symbols. @@ -323,7 +323,7 @@ function SciMLBase.__init(prob::SciMLBase.OptimizationProblem, opt::ScipyOptimiz return OptimizationCache(prob, opt; cons_tol, callback, progress, kwargs...) 
end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyMinimize, D, P, C} local cons_cache = nothing @@ -331,7 +331,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, cons_cache = zeros(eltype(cache.u0), length(cache.lcons)) end _loss = _create_loss(cache) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) abstol = cache.solver_args.abstol reltol = cache.solver_args.reltol options = Dict{String, Any}() @@ -515,7 +515,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyMinimizeScalar, D, P, C} maxtime = get(cache.solver_args, :maxtime, nothing) @@ -527,7 +527,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, θ_vec = [θ] x = cache.f(θ_vec, cache.p) x = isa(x, Tuple) ? x : (x,) - opt_state = Optimization.OptimizationState(u = θ_vec, p = cache.p, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ_vec, p = cache.p, objective = x[1]) if cache.callback(opt_state, x...) error("Optimization halted by callback") end @@ -584,7 +584,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyLeastSquares, D, P, C} _residuals = nothing @@ -606,7 +606,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, @warn "Method 'lm' does not support bounds. Ignoring bounds." end kwargs[:jac] = "2-point" - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) if !isnothing(maxiters) kwargs[:max_nfev] = maxiters end @@ -669,7 +669,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyRootScalar, D, P, C} x0 = cache.u0[1] @@ -682,7 +682,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, θ_vec = [θ] x = cache.f(θ_vec, cache.p) x = isa(x, Tuple) ? x : (x,) - opt_state = Optimization.OptimizationState(u = θ_vec, p = cache.p, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ_vec, p = cache.p, objective = x[1]) if cache.callback(opt_state, x...) error("Optimization halted by callback") end @@ -778,14 +778,14 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, ; end end - stats = Optimization.OptimizationStats(; stats_dict...) + stats = OptimizationBase.OptimizationStats(; stats_dict...) 
return SciMLBase.build_solution(cache, cache.opt, minimizer, minimum; original = result, retcode = retcode, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyRoot, D, P, C} _func = _create_loss(cache, vector_output = true) @@ -858,7 +858,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyLinprog, D, P, C} c = cache.f(cache.u0, cache.p) @@ -902,7 +902,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, if !(isnothing(A_eq) == isnothing(b_eq)) throw(ArgumentError("Both A_eq and b_eq must be provided together")) end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) options = nothing if !isnothing(maxiters) options = pydict(Dict("maxiter" => maxiters)) @@ -961,7 +961,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyMilp, D, P, C} c = cache.f(cache.u0, cache.p) @@ -1041,12 +1041,12 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyDifferentialEvolution, D, P, C} _loss = _create_loss(cache) bounds = _build_bounds(cache.lb, cache.ub) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) de_kwargs = Dict{Symbol, Any}() de_kwargs[:maxiter] = isnothing(maxiters) ? 1000 : maxiters de_kwargs[:popsize] = 15 @@ -1107,11 +1107,11 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyBasinhopping, D, P, C} _loss = _create_loss(cache) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) bh_kwargs = Dict{Symbol, Any}() bh_kwargs[:niter] = isnothing(maxiters) ? 
100 : maxiters bh_kwargs[:T] = 1.0 @@ -1167,14 +1167,14 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyDualAnnealing, D, P, C} _loss = _create_loss(cache) bounds = _build_bounds(cache.lb, cache.ub) da_kwargs = Dict{Symbol, Any}() da_kwargs[:maxiter] = begin - mi = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + mi = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) isnothing(mi) ? 1000 : mi end da_kwargs[:initial_temp] = 5230.0 @@ -1232,7 +1232,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyShgo, D, P, C} local cons_cache = nothing @@ -1328,12 +1328,12 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyDirect, D, P, C} _loss = _create_loss(cache) bounds = _build_bounds(cache.lb, cache.ub) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) direct_kwargs = Dict{Symbol, Any}() direct_kwargs[:eps] = 0.0001 direct_kwargs[:maxiter] = isnothing(maxiters) ? 1000 : maxiters @@ -1389,7 +1389,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, stats = stats) end -function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, UB, LC, UC, S, O, D, P, C}) where {F, RC, LB, UB, LC, UC, S, O <: ScipyBrute, D, P, C} _loss = _create_loss(cache) @@ -1436,7 +1436,7 @@ function SciMLBase.__solve(cache::OptimizationCache{F, RC, LB, UB, LC, UC, S, O, minimum = -minimum end retcode = SciMLBase.ReturnCode.Success - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) return SciMLBase.build_solution(cache, cache.opt, minimizer, minimum; original = result, retcode = retcode, @@ -1469,7 +1469,7 @@ function _create_loss(cache; vector_output::Bool = false) elseif isa(x, Number) x = (x,) end - opt_state = Optimization.OptimizationState(u = θ_julia, p = cache.p, objective = sum(abs2, x)) + opt_state = OptimizationBase.OptimizationState(u = θ_julia, p = cache.p, objective = sum(abs2, x)) if cache.callback(opt_state, x...) error("Optimization halted by callback") end @@ -1489,7 +1489,7 @@ function _create_loss(cache; vector_output::Bool = false) elseif isa(x, Number) x = (x,) end - opt_state = Optimization.OptimizationState(u = θ_julia, p = cache.p, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ_julia, p = cache.p, objective = x[1]) if cache.callback(opt_state, x...) 
error("Optimization halted by callback") end diff --git a/lib/OptimizationSophia/Project.toml b/lib/OptimizationSophia/Project.toml index a6d2f2468..97d877f89 100644 --- a/lib/OptimizationSophia/Project.toml +++ b/lib/OptimizationSophia/Project.toml @@ -4,9 +4,9 @@ authors = ["paramthakkar123 "] version = "1.0.0" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" [extras] ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" @@ -21,10 +21,10 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" ComponentArrays = "0.15.29" Lux = "1.16.0" MLUtils = "0.4.8" -Optimization = "4.5.0" -OptimizationBase = "2.10.0" +OptimizationBase = "2.10" OrdinaryDiffEqTsit5 = "1.2.0" Random = "1.10.0" +SciMLBase = "2.58" SciMLSensitivity = "7.88.0" Test = "1.10.0" Zygote = "0.7.10" diff --git a/lib/OptimizationSophia/src/OptimizationSophia.jl b/lib/OptimizationSophia/src/OptimizationSophia.jl index c5ce693b6..0b100abc2 100644 --- a/lib/OptimizationSophia/src/OptimizationSophia.jl +++ b/lib/OptimizationSophia/src/OptimizationSophia.jl @@ -1,8 +1,8 @@ module OptimizationSophia -using OptimizationBase.SciMLBase +using SciMLBase using OptimizationBase: OptimizationCache -using Optimization +using OptimizationBase using Random """ @@ -28,7 +28,7 @@ first-order methods like Adam and SGD while avoiding the computational cost of f ## Example ```julia -using Optimization, OptimizationOptimisers +using OptimizationBase, OptimizationOptimisers # Define optimization problem rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 @@ -84,7 +84,7 @@ function SciMLBase.__init(prob::OptimizationProblem, opt::Sophia; save_best, kwargs...) 
end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -118,7 +118,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ λ = uType(cache.opt.λ) ρ = uType(cache.opt.ρ) - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) if OptimizationBase.isa_dataiterator(cache.p) data = cache.p @@ -146,7 +146,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ cache.f.grad(gₜ, θ) x = cache.f(θ) end - opt_state = Optimization.OptimizationState(; + opt_state = OptimizationBase.OptimizationState(; iter = i + (epoch - 1) * length(data), u = θ, objective = first(x), diff --git a/lib/OptimizationSpeedMapping/Project.toml b/lib/OptimizationSpeedMapping/Project.toml index 4f0b89ee7..235e5e2ef 100644 --- a/lib/OptimizationSpeedMapping/Project.toml +++ b/lib/OptimizationSpeedMapping/Project.toml @@ -4,19 +4,21 @@ authors = ["Vaibhav Dixit and contributors"] version = "0.2.1" [deps] -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" +SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" + +[extras] +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] julia = "1.10" -Optimization = "4.4" -Reexport = "1.2" +OptimizationBase = "2.10" SpeedMapping = "0.3" - -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +SciMLBase = "2.58" +Reexport = "1.2" [targets] test = ["ForwardDiff", "Test"] diff --git a/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl b/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl index 5dbcd93f9..e7fd92d10 100644 --- a/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl +++ b/lib/OptimizationSpeedMapping/src/OptimizationSpeedMapping.jl @@ -1,8 +1,8 @@ module OptimizationSpeedMapping using Reexport -@reexport using Optimization -using SpeedMapping, Optimization.SciMLBase +@reexport using OptimizationBase +using SpeedMapping, SciMLBase export SpeedMappingOpt @@ -18,7 +18,7 @@ end end SciMLBase.requiresgradient(opt::SpeedMappingOpt) = true -function __map_optimizer_args(cache::OptimizationCache, opt::SpeedMappingOpt; +function __map_optimizer_args(cache::OptimizationBase.OptimizationCache, opt::SpeedMappingOpt; callback = nothing, maxiters::Union{Number, Nothing} = nothing, maxtime::Union{Number, Nothing} = nothing, @@ -48,7 +48,7 @@ function __map_optimizer_args(cache::OptimizationCache, opt::SpeedMappingOpt; return mapped_args end -function SciMLBase.__solve(cache::OptimizationCache{ +function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ F, RC, LB, @@ -85,8 +85,8 @@ function SciMLBase.__solve(cache::OptimizationCache{ @info "SpeedMapping's ForwardDiff AD backend is used to calculate the gradient information." 
end - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) - maxtime = Optimization._check_and_convert_maxtime(cache.solver_args.maxtime) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) + maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime) opt_args = __map_optimizer_args(cache, cache.opt, maxiters = maxiters, maxtime = maxtime, abstol = cache.solver_args.abstol, @@ -98,7 +98,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ upper = cache.ub, opt_args...) t1 = time() opt_ret = Symbol(opt_res.converged) - stats = Optimization.OptimizationStats(; time = t1 - t0) + stats = OptimizationBase.OptimizationStats(; time = t1 - t0) SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer, _loss(opt_res.minimizer); original = opt_res, retcode = opt_ret, stats = stats) From f61c79d09e1043c7ae6c141b6f3946719acdced2 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Thu, 2 Oct 2025 12:25:45 +0200 Subject: [PATCH 02/17] remove includes --- src/Optimization.jl | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Optimization.jl b/src/Optimization.jl index 47797ee04..e419377ca 100644 --- a/src/Optimization.jl +++ b/src/Optimization.jl @@ -20,9 +20,6 @@ import SciMLBase: OptimizationProblem, MaxSense, MinSense, OptimizationStats export ObjSense, MaxSense, MinSense -include("utils.jl") -include("state.jl") - export solve end # module From 320cf9e413610479a233db5c9ffaa447e9209492 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Thu, 2 Oct 2025 12:49:35 +0200 Subject: [PATCH 03/17] finish refactor --- .../src/OptimizationAuglag.jl | 4 +-- .../src/augmented_lagrangian.jl | 2 +- .../src/OptimizationGCMAES.jl | 2 +- lib/OptimizationIpopt/src/cache.jl | 12 ++++---- lib/OptimizationMOI/src/moi.jl | 8 ++--- lib/OptimizationMOI/src/nlp.jl | 6 ++-- .../src/OptimizationManopt.jl | 4 +-- .../src/OptimizationMultistartOptimization.jl | 16 +++++----- .../src/OptimizationNLPModels.jl | 4 +-- .../src/OptimizationNLopt.jl | 2 +- .../src/OptimizationOptimJL.jl | 30 +++++++++---------- .../src/OptimizationOptimisers.jl | 2 +- .../src/OptimizationPRIMA.jl | 16 +++++----- .../src/OptimizationPolyalgorithms.jl | 10 +++---- .../src/OptimizationSciPy.jl | 28 ++++++++--------- .../src/OptimizationSophia.jl | 2 +- 16 files changed, 74 insertions(+), 74 deletions(-) diff --git a/lib/OptimizationAuglag/src/OptimizationAuglag.jl b/lib/OptimizationAuglag/src/OptimizationAuglag.jl index 9c5ff8308..c5088c8db 100644 --- a/lib/OptimizationAuglag/src/OptimizationAuglag.jl +++ b/lib/OptimizationAuglag/src/OptimizationAuglag.jl @@ -83,7 +83,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ P, C } - maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters) + maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters) local x @@ -115,7 +115,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ cache.f.cons(cons_tmp, θ) cons_tmp[eq_inds] .= cons_tmp[eq_inds] - cache.lcons[eq_inds] cons_tmp[ineq_inds] .= cons_tmp[ineq_inds] .- cache.ucons[ineq_inds] - opt_state = Optimization.OptimizationState(u = θ, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ, objective = x[1]) if cache.callback(opt_state, x...) 
error("Optimization halted by callback.") end diff --git a/lib/OptimizationBase/src/augmented_lagrangian.jl b/lib/OptimizationBase/src/augmented_lagrangian.jl index 879090089..a09cc2c06 100644 --- a/lib/OptimizationBase/src/augmented_lagrangian.jl +++ b/lib/OptimizationBase/src/augmented_lagrangian.jl @@ -4,7 +4,7 @@ function generate_auglag(θ) cache.f.cons(cons_tmp, θ) cons_tmp[eq_inds] .= cons_tmp[eq_inds] - cache.lcons[eq_inds] cons_tmp[ineq_inds] .= cons_tmp[ineq_inds] .- cache.ucons[ineq_inds] - opt_state = Optimization.OptimizationState(u = θ, objective = x[1]) + opt_state = OptimizationBase.OptimizationState(u = θ, objective = x[1]) if cache.callback(opt_state, x...) error("Optimization halted by callback.") end diff --git a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl index 02e840548..471198447 100644 --- a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl +++ b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl @@ -110,7 +110,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ maxtime = maxtime) t0 = time() - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense opt_xmin, opt_fmin, opt_ret = GCMAES.maximize( isnothing(cache.f.grad) ? _loss : diff --git a/lib/OptimizationIpopt/src/cache.jl b/lib/OptimizationIpopt/src/cache.jl index 972b5fb83..5e78bb490 100644 --- a/lib/OptimizationIpopt/src/cache.jl +++ b/lib/OptimizationIpopt/src/cache.jl @@ -80,11 +80,11 @@ function IpoptCache(prob, opt; num_cons = prob.ucons === nothing ? 0 : length(prob.ucons) if prob.f.adtype isa ADTypes.AutoSymbolics || (prob.f.adtype isa ADTypes.AutoSparse && prob.f.adtype.dense_ad isa ADTypes.AutoSymbolics) - f = Optimization.instantiate_function( + f = OptimizationBase.instantiate_function( prob.f, reinit_cache, prob.f.adtype, num_cons; g = true, h = true, cons_j = true, cons_h = true) else - f = Optimization.instantiate_function( + f = OptimizationBase.instantiate_function( prob.f, reinit_cache, prob.f.adtype, num_cons; g = true, h = true, cons_j = true, cons_vjp = true, lag_h = true) end @@ -150,7 +150,7 @@ end function eval_objective(cache::IpoptCache, x) l = cache.f(x, cache.p) cache.f_calls += 1 - return cache.sense === Optimization.MaxSense ? -l : l + return cache.sense === OptimizationBase.MaxSense ? -l : l end function eval_constraint(cache::IpoptCache, g, x) @@ -167,7 +167,7 @@ function eval_objective_gradient(cache::IpoptCache, G, x) cache.f.grad(G, x) cache.f_grad_calls += 1 - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end @@ -256,7 +256,7 @@ function eval_hessian_lagrangian(cache::IpoptCache{T}, if cache.f.lag_h !== nothing cache.f.lag_h(h, x, σ, Vector(μ)) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense h .*= -one(eltype(h)) end @@ -320,7 +320,7 @@ function eval_hessian_lagrangian(cache::IpoptCache{T}, end end - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense h .*= -one(eltype(h)) end diff --git a/lib/OptimizationMOI/src/moi.jl b/lib/OptimizationMOI/src/moi.jl index 455155a11..8b2cf5f0f 100644 --- a/lib/OptimizationMOI/src/moi.jl +++ b/lib/OptimizationMOI/src/moi.jl @@ -14,11 +14,11 @@ end function MOIOptimizationCache(prob::OptimizationProblem, opt; kwargs...) 
f = prob.f - reinit_cache = Optimization.ReInitCache(prob.u0, prob.p) + reinit_cache = OptimizationBase.ReInitCache(prob.u0, prob.p) if isnothing(f.sys) - if f.adtype isa Optimization.AutoModelingToolkit + if f.adtype isa OptimizationBase.AutoModelingToolkit num_cons = prob.ucons === nothing ? 0 : length(prob.ucons) - f = Optimization.instantiate_function(prob.f, + f = OptimizationBase.instantiate_function(prob.f, reinit_cache, prob.f.adtype, num_cons) @@ -120,7 +120,7 @@ function SciMLBase.__solve(cache::MOIOptimizationCache) Theta = _add_moi_variables!(opt_setup, cache) MOI.set(opt_setup, MOI.ObjectiveSense(), - cache.sense === Optimization.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE) + cache.sense === OptimizationBase.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE) if !isnothing(cache.cons_expr) for cons_expr in cache.cons_expr diff --git a/lib/OptimizationMOI/src/nlp.jl b/lib/OptimizationMOI/src/nlp.jl index f78fdd7f7..fb8aabe71 100644 --- a/lib/OptimizationMOI/src/nlp.jl +++ b/lib/OptimizationMOI/src/nlp.jl @@ -113,11 +113,11 @@ function MOIOptimizationNLPCache(prob::OptimizationProblem, num_cons = prob.ucons === nothing ? 0 : length(prob.ucons) if prob.f.adtype isa ADTypes.AutoSymbolics || (prob.f.adtype isa ADTypes.AutoSparse && prob.f.adtype.dense_ad isa ADTypes.AutoSymbolics) - f = Optimization.instantiate_function( + f = OptimizationBase.instantiate_function( prob.f, reinit_cache, prob.f.adtype, num_cons; g = true, h = true, cons_j = true, cons_h = true) else - f = Optimization.instantiate_function( + f = OptimizationBase.instantiate_function( prob.f, reinit_cache, prob.f.adtype, num_cons; g = true, h = true, cons_j = true, cons_vjp = true, lag_h = true) end @@ -534,7 +534,7 @@ function SciMLBase.__solve(cache::MOIOptimizationNLPCache) θ = _add_moi_variables!(opt_setup, cache.evaluator) MOI.set(opt_setup, MOI.ObjectiveSense(), - cache.evaluator.sense === Optimization.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE) + cache.evaluator.sense === OptimizationBase.MaxSense ? MOI.MAX_SENSE : MOI.MIN_SENSE) xor(isnothing(cache.evaluator.lcons), isnothing(cache.evaluator.ucons)) && throw(ArgumentError("Expected `cache.evaluator.lcons` and `cache.evaluator.lcons` to be supplied both or none.")) if isnothing(cache.evaluator.lcons) && isnothing(cache.evaluator.ucons) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 369da04ee..98cc6e30a 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -251,7 +251,7 @@ function build_loss(f::OptimizationFunction, prob, cb) x = f.f(θ, prob.p) cb(x, θ) __x = first(x) - return prob.sense === Optimization.MaxSense ? -__x : __x + return prob.sense === OptimizationBase.MaxSense ? -__x : __x end end @@ -367,7 +367,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ return SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer, - cache.sense === Optimization.MaxSense ? + cache.sense === OptimizationBase.MaxSense ? 
-opt_res.minimum : opt_res.minimum;
        original = opt_res.options,
        retcode = opt_ret)
diff --git a/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl
index 8bc79152f..e0bef218d 100644
--- a/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl
+++ b/lib/OptimizationMultistartOptimization/src/OptimizationMultistartOptimization.jl
@@ -4,18 +4,18 @@ using Reexport
 @reexport using MultistartOptimization, OptimizationBase
 using SciMLBase
 
-SciMLBase.requiresbounds(opt::MultistartOptimization.TikTak) = true
-SciMLBase.allowsbounds(opt::MultistartOptimization.TikTak) = true
-SciMLBase.allowscallback(opt::MultistartOptimization.TikTak) = false
+SciMLBase.requiresbounds(opt::MultistartOptimization.TikTak) = true
+SciMLBase.allowsbounds(opt::MultistartOptimization.TikTak) = true
+SciMLBase.allowscallback(opt::MultistartOptimization.TikTak) = false
 @static if isdefined(SciMLBase, :supports_opt_cache_interface)
-    SciMLBase.supports_opt_cache_interface(opt::MultistartOptimization.TikTak) = true
+    SciMLBase.supports_opt_cache_interface(opt::MultistartOptimization.TikTak) = true
 end
 @static if isdefined(OptimizationBase, :supports_opt_cache_interface)
-    OptimizationBase.supports_opt_cache_interface(opt::MultistartOptimization.TikTak) = true
+    OptimizationBase.supports_opt_cache_interface(opt::MultistartOptimization.TikTak) = true
 end
 
 function SciMLBase.__init(prob::SciMLBase.OptimizationProblem,
-        opt::MultistartOptimization.TikTak,
+        opt::MultistartOptimization.TikTak,
         local_opt;
         use_threads = true,
         kwargs...)
@@ -45,7 +45,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{
         UC,
         S,
         O <:
-        MultistartOptimization.TikTak,
+        MultistartOptimization.TikTak,
         D,
         P,
         C
@@ -57,7 +57,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{
         return first(x)
     end
 
-    opt_setup = MultistartOptimization.MinimizationProblem(_loss, cache.lb, cache.ub)
+    opt_setup = MultistartOptimization.MinimizationProblem(_loss, cache.lb, cache.ub)
 
     _local_optimiser = function (pb, θ0, prob)
         prob_tmp = remake(prob, u0 = θ0)
diff --git a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
index 2533cb3ae..5636547fb 100644
--- a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
+++ b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
@@ -51,12 +51,12 @@ function SciMLBase.OptimizationProblem(nlpmodel::AbstractNLPModel,
     else
         (nothing, nothing)
     end
-    sense = nlpmodel.meta.minimize ? Optimization.MinSense : Optimization.MaxSense
+    sense = nlpmodel.meta.minimize ? OptimizationBase.MinSense : OptimizationBase.MaxSense
 
     # The number of variables, geometry of u0, etc.. are valid and were checked when the
     # nlpmodel was created.
-    return Optimization.OptimizationProblem(
+    return OptimizationBase.OptimizationProblem(
         f, u0; lb = lb, ub = ub, lcons = lcons, ucons = ucons,
         sense = sense, kwargs...)
end diff --git a/lib/OptimizationNLopt/src/OptimizationNLopt.jl b/lib/OptimizationNLopt/src/OptimizationNLopt.jl index 3b0abcf8e..ee9640024 100644 --- a/lib/OptimizationNLopt/src/OptimizationNLopt.jl +++ b/lib/OptimizationNLopt/src/OptimizationNLopt.jl @@ -192,7 +192,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ NLopt.Opt(cache.opt, length(cache.u0)) end - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense NLopt.max_objective!(opt_setup, fg!) else NLopt.min_objective!(opt_setup, fg!) diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl index 262fcb27c..8af15fdbb 100644 --- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl +++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl @@ -166,14 +166,14 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ _loss = function (θ) x = cache.f.f(θ, cache.p) __x = first(x) - return cache.sense === Optimization.MaxSense ? -__x : __x + return cache.sense === OptimizationBase.MaxSense ? -__x : __x end if cache.f.fg === nothing fg! = function (G, θ) if G !== nothing cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end @@ -186,7 +186,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ if cache.opt isa Optim.KrylovTrustRegion hv = function (H, θ, v) cache.f.hv(H, θ, v) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense H .*= -one(eltype(H)) end end @@ -194,14 +194,14 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ else gg = function (G, θ) cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end hh = function (H, θ) cache.f.hess(H, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense H .*= -one(eltype(H)) end end @@ -232,7 +232,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ hevals = opt_res.h_calls) SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer, - cache.sense === Optimization.MaxSense ? -opt_res.minimum : + cache.sense === OptimizationBase.MaxSense ? -opt_res.minimum : opt_res.minimum; original = opt_res, retcode = opt_ret, stats = stats) end @@ -287,14 +287,14 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ _loss = function (θ) x = cache.f.f(θ, cache.p) __x = first(x) - return cache.sense === Optimization.MaxSense ? -__x : __x + return cache.sense === OptimizationBase.MaxSense ? -__x : __x end if cache.f.fg === nothing fg! = function (G, θ) if G !== nothing cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end @@ -306,7 +306,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ gg = function (G, θ) cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end @@ -374,14 +374,14 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ _loss = function (θ) x = cache.f.f(θ, cache.p) __x = first(x) - return cache.sense === Optimization.MaxSense ? -__x : __x + return cache.sense === OptimizationBase.MaxSense ? -__x : __x end if cache.f.fg === nothing fg! 
= function (G, θ) if G !== nothing cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end @@ -393,14 +393,14 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ gg = function (G, θ) cache.f.grad(G, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense G .*= -one(eltype(G)) end end hh = function (H, θ) cache.f.hess(H, θ) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense H .*= -one(eltype(H)) end end @@ -485,8 +485,8 @@ PrecompileTools.@compile_workload begin end function solve_nonnegative_least_squares(A, b, solver) - optf = Optimization.OptimizationFunction(obj_f, Optimization.AutoForwardDiff()) - prob = Optimization.OptimizationProblem(optf, ones(size(A, 2)), (A, b), + optf = OptimizationBase.OptimizationFunction(obj_f, OptimizationBase.AutoForwardDiff()) + prob = OptimizationBase.OptimizationProblem(optf, ones(size(A, 2)), (A, b), lb = zeros(size(A, 2)), ub = Inf * ones(size(A, 2))) x = OptimizationOptimJL.solve(prob, solver, maxiters = 5000, maxtime = 100) diff --git a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl index c718bb555..9b8b4f82c 100644 --- a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl +++ b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl @@ -95,7 +95,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{ gevals = 0 t0 = time() breakall = false - Optimization.@withprogress cache.progress name="Training" begin + OptimizationBase.@withprogress cache.progress name="Training" begin for epoch in 1:epochs if breakall break diff --git a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl index 3d4e158e5..2ba274ea2 100644 --- a/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl +++ b/lib/OptimizationPRIMA/src/OptimizationPRIMA.jl @@ -23,7 +23,7 @@ SciMLBase.requiresconstraints(opt::COBYLA) = true SciMLBase.requiresconsjac(opt::COBYLA) = true SciMLBase.requiresconshess(opt::COBYLA) = true -function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem, +function OptimizationBase.OptimizationCache(prob::SciMLBase.OptimizationProblem, opt::PRIMASolvers; callback = OptimizationBase.DEFAULT_CALLBACK, maxiters::Union{Number, Nothing} = nothing, @@ -32,26 +32,26 @@ function Optimization.OptimizationCache(prob::SciMLBase.OptimizationProblem, reltol::Union{Number, Nothing} = nothing, progress = false, kwargs...) - reinit_cache = Optimization.ReInitCache(prob.u0, prob.p) + reinit_cache = OptimizationBase.ReInitCache(prob.u0, prob.p) num_cons = prob.ucons === nothing ? 
0 : length(prob.ucons)
 
     if prob.f.adtype isa SciMLBase.NoAD && opt isa COBYLA
         throw("We evaluate the jacobian and hessian of the constraints once to automatically detect linear and nonlinear constraints, please provide a valid AD backend for using COBYLA.")
     else
         if opt isa COBYLA
-            f = Optimization.instantiate_function(
+            f = OptimizationBase.instantiate_function(
                 prob.f, reinit_cache.u0, prob.f.adtype, reinit_cache.p,
                 num_cons, cons_j = true, cons_h = true)
         else
-            f = Optimization.instantiate_function(
+            f = OptimizationBase.instantiate_function(
                 prob.f, reinit_cache.u0, prob.f.adtype, reinit_cache.p, num_cons)
         end
     end
 
-    return Optimization.OptimizationCache(f, reinit_cache, prob.lb, prob.ub, prob.lcons,
+    return OptimizationBase.OptimizationCache(f, reinit_cache, prob.lb, prob.ub, prob.lcons,
         prob.ucons, prob.sense,
         opt, progress, callback, nothing,
-        Optimization.OptimizationBase.AnalysisResults(nothing, nothing),
+        OptimizationBase.AnalysisResults(nothing, nothing),
         merge((; maxiters, maxtime, abstol, reltol),
             NamedTuple(kwargs)))
 end
@@ -70,7 +70,7 @@ function get_solve_func(opt::PRIMASolvers)
     end
 end
 
-function __map_optimizer_args!(cache::Optimization.OptimizationCache, opt::PRIMASolvers;
+function __map_optimizer_args!(cache::OptimizationBase.OptimizationCache, opt::PRIMASolvers;
         callback = nothing,
         maxiters::Union{Number, Nothing} = nothing,
         maxtime::Union{Number, Nothing} = nothing,
@@ -109,7 +109,7 @@ function sciml_prima_retcode(rc::AbstractString)
     end
 end
 
-function SciMLBase.__solve(cache::Optimization.OptimizationBase.OptimizationCache{
+function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{
         F,
         RC,
         LB,
diff --git a/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl
index 700d10669..b03cd88b0 100644
--- a/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl
+++ b/lib/OptimizationPolyalgorithms/src/OptimizationPolyalgorithms.jl
@@ -23,21 +23,21 @@ function SciMLBase.__solve(prob::OptimizationProblem,
     if isempty(args) && deterministic && prob.lb === nothing && prob.ub === nothing
         # If deterministic then ADAM -> finish with BFGS
         if maxiters === nothing
-            res1 = Optimization.solve(prob, Optimisers.ADAM(0.01), args...; maxiters = 300,
+            res1 = OptimizationBase.solve(prob, Optimisers.ADAM(0.01), args...; maxiters = 300,
                 kwargs...)
         else
-            res1 = Optimization.solve(prob, Optimisers.ADAM(0.01), args...; maxiters,
+            res1 = OptimizationBase.solve(prob, Optimisers.ADAM(0.01), args...; maxiters,
                 kwargs...)
         end
 
         optprob2 = remake(prob, u0 = res1.u)
-        res1 = Optimization.solve(optprob2, BFGS(initial_stepnorm = 0.01), args...;
+        res1 = OptimizationBase.solve(optprob2, BFGS(initial_stepnorm = 0.01), args...;
             maxiters, kwargs...)
     elseif isempty(args) && deterministic
-        res1 = Optimization.solve(prob, BFGS(initial_stepnorm = 0.01), args...; maxiters,
+        res1 = OptimizationBase.solve(prob, BFGS(initial_stepnorm = 0.01), args...; maxiters,
             kwargs...)
     else
-        res1 = Optimization.solve(prob, Optimisers.ADAM(0.1), args...; maxiters, kwargs...)
+        res1 = OptimizationBase.solve(prob, Optimisers.ADAM(0.1), args...; maxiters, kwargs...)
end end diff --git a/lib/OptimizationSciPy/src/OptimizationSciPy.jl b/lib/OptimizationSciPy/src/OptimizationSciPy.jl index 47cba7414..e1feb2d3a 100644 --- a/lib/OptimizationSciPy/src/OptimizationSciPy.jl +++ b/lib/OptimizationSciPy/src/OptimizationSciPy.jl @@ -370,7 +370,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, θ_julia = ensure_julia_array(θ, eltype(cache.u0)) grad = zeros(eltype(cache.u0), length(θ_julia)) cache.f.grad(grad, θ_julia, cache.p) - return cache.sense === Optimization.MaxSense ? -grad : grad + return cache.sense === OptimizationBase.MaxSense ? -grad : grad end jac = _grad end @@ -381,7 +381,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, θ_julia = ensure_julia_array(θ, eltype(cache.u0)) H = zeros(eltype(cache.u0), length(θ_julia), length(θ_julia)) cache.f.hess(H, θ_julia, cache.p) - return cache.sense === Optimization.MaxSense ? -H : H + return cache.sense === OptimizationBase.MaxSense ? -H : H end hess = _hess else @@ -501,7 +501,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, catch end end - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = scipy_status_to_retcode(status, py_success) @@ -573,7 +573,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, end minimum = pyis(result.fun, pybuiltins.None) ? NaN : safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -940,7 +940,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = pyis(result.fun, pybuiltins.None) ? NaN : safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end status = 0 @@ -1027,7 +1027,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = pyis(result.fun, pybuiltins.None) ? NaN : safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1093,7 +1093,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1153,7 +1153,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, lowest_result = result.lowest_optimization_result py_success = pyconvert(Bool, pybool(lowest_result.success)) py_message = safe_get_message(lowest_result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? 
SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1218,7 +1218,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1314,7 +1314,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1375,7 +1375,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimum = safe_to_float(result.fun) py_success = pyconvert(Bool, pybool(result.success)) py_message = safe_get_message(result) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = py_success ? SciMLBase.ReturnCode.Success : SciMLBase.ReturnCode.Failure @@ -1432,7 +1432,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{F, RC, LB, minimizer = pyconvert(Vector{eltype(cache.u0)}, result[0]) end minimum = safe_to_float(result[1]) - if cache.sense === Optimization.MaxSense + if cache.sense === OptimizationBase.MaxSense minimum = -minimum end retcode = SciMLBase.ReturnCode.Success @@ -1474,7 +1474,7 @@ function _create_loss(cache; vector_output::Bool = false) error("Optimization halted by callback") end - arr = cache.sense === Optimization.MaxSense ? -x : x + arr = cache.sense === OptimizationBase.MaxSense ? -x : x return arr end else @@ -1493,7 +1493,7 @@ function _create_loss(cache; vector_output::Bool = false) if cache.callback(opt_state, x...) error("Optimization halted by callback") end - return cache.sense === Optimization.MaxSense ? -x[1] : x[1] + return cache.sense === OptimizationBase.MaxSense ? 
-x[1] : x[1] end end end diff --git a/lib/OptimizationSophia/src/OptimizationSophia.jl b/lib/OptimizationSophia/src/OptimizationSophia.jl index 0b100abc2..f02dfff1e 100644 --- a/lib/OptimizationSophia/src/OptimizationSophia.jl +++ b/lib/OptimizationSophia/src/OptimizationSophia.jl @@ -33,7 +33,7 @@ using OptimizationBase, OptimizationOptimisers # Define optimization problem rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) -optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) +optf = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optf, x0) # Solve with Sophia From eb4dff7bec6a54bc31d3497b0820e080ca392349 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Thu, 2 Oct 2025 14:52:29 +0200 Subject: [PATCH 04/17] Update tests to LBFGSB --- lib/OptimizationBase/test/cvxtest.jl | 8 ++++---- lib/OptimizationNLPModels/test/runtests.jl | 7 +++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/OptimizationBase/test/cvxtest.jl b/lib/OptimizationBase/test/cvxtest.jl index cef7859bb..3df0d964d 100644 --- a/lib/OptimizationBase/test/cvxtest.jl +++ b/lib/OptimizationBase/test/cvxtest.jl @@ -1,5 +1,5 @@ using Optimization, OptimizationBase, ForwardDiff, SymbolicAnalysis, LinearAlgebra, - Manifolds, OptimizationManopt + Manifolds, OptimizationManopt, OptimizationLBFGSB function f(x, p = nothing) return exp(x[1]) + x[1]^2 @@ -8,7 +8,7 @@ end optf = OptimizationFunction(f, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optf, [0.4], structural_analysis = true) -@time sol = solve(prob, Optimization.LBFGS(), maxiters = 1000) +@time sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @test sol.cache.analysis_results.objective.curvature == SymbolicAnalysis.Convex @test sol.cache.analysis_results.constraints === nothing @@ -18,7 +18,7 @@ l1 = rosenbrock(x0) optf = OptimizationFunction(rosenbrock, AutoEnzyme()) prob = OptimizationProblem(optf, x0, structural_analysis = true) -@time res = solve(prob, Optimization.LBFGS(), maxiters = 100) +@time res = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 100) @test res.cache.analysis_results.objective.curvature == SymbolicAnalysis.UnknownCurvature function con2_c(res, x, p) @@ -28,7 +28,7 @@ end optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c) prob = OptimizationProblem(optf, x0, lcons = [1.0, -Inf], ucons = [1.0, 0.0], lb = [-1.0, -1.0], ub = [1.0, 1.0], structural_analysis = true) -@time res = solve(prob, Optimization.LBFGS(), maxiters = 100) +@time res = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 100) @test res.cache.analysis_results.objective.curvature == SymbolicAnalysis.UnknownCurvature @test res.cache.analysis_results.constraints[1].curvature == SymbolicAnalysis.Convex @test res.cache.analysis_results.constraints[2].curvature == diff --git a/lib/OptimizationNLPModels/test/runtests.jl b/lib/OptimizationNLPModels/test/runtests.jl index d96d2010d..26358240f 100644 --- a/lib/OptimizationNLPModels/test/runtests.jl +++ b/lib/OptimizationNLPModels/test/runtests.jl @@ -1,6 +1,5 @@ using OptimizationNLPModels, Optimization, NLPModelsTest, Ipopt, OptimizationMOI, Zygote, - ReverseDiff, - OptimizationOptimJL + ReverseDiff, OptimizationLBFGSB, OptimizationOptimJL using Test @testset "NLPModels" begin @@ -18,8 +17,8 @@ using Test nlpmo = NLPModelsTest.HS5() converted = OptimizationNLPModels.OptimizationProblem(nlpmo, Optimization.AutoZygote()) - sol_native = solve(oprob, Optimization.LBFGS(), maxiters = 1000) - 
sol_converted = solve(converted, Optimization.LBFGS(), maxiters = 1000) + sol_native = solve(oprob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) + sol_converted = solve(converted, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @test sol_converted.retcode == sol_native.retcode @test sol_converted.u ≈ sol_native.u From 234cc1d2c841600d3b310a2d19a4d91bad75bc12 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Fri, 3 Oct 2025 08:56:31 +0200 Subject: [PATCH 05/17] Fix dependency structures --- lib/OptimizationAuglag/test/runtests.jl | 2 +- lib/OptimizationBBO/test/runtests.jl | 14 ++--- .../ext/OptimizationEnzymeExt.jl | 4 +- .../ext/OptimizationMTKExt.jl | 4 +- .../ext/OptimizationZygoteExt.jl | 2 +- lib/OptimizationBase/src/OptimizationDIExt.jl | 2 +- .../src/OptimizationDISparseExt.jl | 2 +- lib/OptimizationBase/src/function.jl | 4 +- lib/OptimizationBase/src/solve.jl | 4 +- lib/OptimizationBase/test/cvxtest.jl | 4 +- lib/OptimizationEvolutionary/test/runtests.jl | 6 +- lib/OptimizationGCMAES/test/runtests.jl | 14 ++--- .../src/OptimizationIpopt.jl | 2 +- lib/OptimizationIpopt/src/cache.jl | 2 +- lib/OptimizationIpopt/src/callback.jl | 2 +- .../test/additional_tests.jl | 20 +++---- .../test/advanced_features.jl | 22 +++---- lib/OptimizationIpopt/test/problem_types.jl | 10 ++-- lib/OptimizationIpopt/test/runtests.jl | 16 ++--- lib/OptimizationLBFGSB/test/runtests.jl | 2 +- lib/OptimizationMOI/src/nlp.jl | 2 +- lib/OptimizationMOI/test/runtests.jl | 30 +++++----- .../src/OptimizationManopt.jl | 2 +- lib/OptimizationManopt/test/runtests.jl | 36 ++++++------ .../test/runtests.jl | 2 +- .../test/runtests.jl | 4 +- lib/OptimizationNLPModels/test/runtests.jl | 44 +++++++------- lib/OptimizationNLopt/test/runtests.jl | 26 ++++----- lib/OptimizationNOMAD/test/runtests.jl | 8 +-- lib/OptimizationOptimJL/test/runtests.jl | 48 +++++++-------- lib/OptimizationOptimisers/test/runtests.jl | 24 ++++---- lib/OptimizationPRIMA/test/runtests.jl | 18 +++--- .../test/runtests.jl | 4 +- .../src/OptimizationPyCMA.jl | 10 ++-- .../src/OptimizationSciPy.jl | 2 +- lib/OptimizationSciPy/test/runtests.jl | 58 +++++++++---------- lib/OptimizationSophia/test/runtests.jl | 4 +- lib/OptimizationSpeedMapping/test/runtests.jl | 10 ++-- 38 files changed, 235 insertions(+), 235 deletions(-) diff --git a/lib/OptimizationAuglag/test/runtests.jl b/lib/OptimizationAuglag/test/runtests.jl index 60f994265..46d2155df 100644 --- a/lib/OptimizationAuglag/test/runtests.jl +++ b/lib/OptimizationAuglag/test/runtests.jl @@ -4,7 +4,7 @@ using OptimizationOptimisers using OptimizationAuglag using ForwardDiff using OptimizationBase: OptimizationCache -using OptimizationBase.SciMLBase: OptimizationFunction +using SciMLBase: OptimizationFunction using Test @testset "OptimizationAuglag.jl" begin diff --git a/lib/OptimizationBBO/test/runtests.jl b/lib/OptimizationBBO/test/runtests.jl index 1295465fc..e6575bc02 100644 --- a/lib/OptimizationBBO/test/runtests.jl +++ b/lib/OptimizationBBO/test/runtests.jl @@ -1,5 +1,5 @@ using OptimizationBBO, Optimization, BlackBoxOptim -using Optimization.SciMLBase: MultiObjectiveOptimizationFunction +using SciMLBase: MultiObjectiveOptimizationFunction using Test @testset "OptimizationBBO.jl" begin @@ -9,14 +9,14 @@ using Test l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = Optimization.OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], + prob = OptimizationBase.OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8]) sol = solve(prob, 
BBO_adaptive_de_rand_1_bin_radiuslimited()) @test 10 * sol.objective < l1 @test (@allocated solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited())) < 1e7 - prob = Optimization.OptimizationProblem(optprob, nothing, _p, lb = [-1.0, -1.0], + prob = OptimizationBase.OptimizationProblem(optprob, nothing, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8]) sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited()) @test 10 * sol.objective < l1 @@ -74,7 +74,7 @@ using Test end mof_1 = MultiObjectiveOptimizationFunction(multi_obj_func_1) - prob_1 = Optimization.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) + prob_1 = OptimizationBase.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) sol_1 = solve(prob_1, opt, NumDimensions = 2, FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) @@ -101,7 +101,7 @@ using Test end mof_1 = MultiObjectiveOptimizationFunction(multi_obj_func_1) - prob_1 = Optimization.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) + prob_1 = OptimizationBase.OptimizationProblem(mof_1, u0; lb = lb, ub = ub) sol_1 = solve(prob_1, opt, NumDimensions = 2, FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true), callback = cb) @@ -127,7 +127,7 @@ using Test end mof_2 = MultiObjectiveOptimizationFunction(multi_obj_func_2) - prob_2 = Optimization.OptimizationProblem(mof_2, u0; lb = lb, ub = ub) + prob_2 = OptimizationBase.OptimizationProblem(mof_2, u0; lb = lb, ub = ub) sol_2 = solve(prob_2, opt, NumDimensions = 2, FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) @@ -147,7 +147,7 @@ using Test end mof_3 = MultiObjectiveOptimizationFunction(multi_obj_func_3) - prob_3 = Optimization.OptimizationProblem(mof_3, u0; lb = lb, ub = ub) + prob_3 = OptimizationBase.OptimizationProblem(mof_3, u0; lb = lb, ub = ub) sol_3 = solve(prob_3, opt, NumDimensions = 2, FitnessScheme = ParetoFitnessScheme{2}(is_minimizing = true)) diff --git a/lib/OptimizationBase/ext/OptimizationEnzymeExt.jl b/lib/OptimizationBase/ext/OptimizationEnzymeExt.jl index 03cf45794..5774c360b 100644 --- a/lib/OptimizationBase/ext/OptimizationEnzymeExt.jl +++ b/lib/OptimizationBase/ext/OptimizationEnzymeExt.jl @@ -1,8 +1,8 @@ module OptimizationEnzymeExt import OptimizationBase, OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase: OptimizationFunction -import OptimizationBase.SciMLBase +import SciMLBase: OptimizationFunction +import SciMLBase import OptimizationBase.LinearAlgebra: I, dot import OptimizationBase.ADTypes: AutoEnzyme using Enzyme diff --git a/lib/OptimizationBase/ext/OptimizationMTKExt.jl b/lib/OptimizationBase/ext/OptimizationMTKExt.jl index 3526cb06f..99194dab3 100644 --- a/lib/OptimizationBase/ext/OptimizationMTKExt.jl +++ b/lib/OptimizationBase/ext/OptimizationMTKExt.jl @@ -1,8 +1,8 @@ module OptimizationMTKExt import OptimizationBase, OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase -import OptimizationBase.SciMLBase: OptimizationFunction +import SciMLBase +import SciMLBase: OptimizationFunction import OptimizationBase.ADTypes: AutoModelingToolkit, AutoSymbolics, AutoSparse using ModelingToolkit diff --git a/lib/OptimizationBase/ext/OptimizationZygoteExt.jl b/lib/OptimizationBase/ext/OptimizationZygoteExt.jl index ca5e2a80b..461efe699 100644 --- a/lib/OptimizationBase/ext/OptimizationZygoteExt.jl +++ b/lib/OptimizationBase/ext/OptimizationZygoteExt.jl @@ -3,7 +3,7 @@ module OptimizationZygoteExt using OptimizationBase, SparseArrays using OptimizationBase.FastClosures import OptimizationBase.ArrayInterface -import OptimizationBase.SciMLBase: OptimizationFunction 
+import SciMLBase: OptimizationFunction
 import OptimizationBase.LinearAlgebra: I, dot
 import DifferentiationInterface
 import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp,
diff --git a/lib/OptimizationBase/src/OptimizationDIExt.jl b/lib/OptimizationBase/src/OptimizationDIExt.jl
index 9b814241f..adb84b55f 100644
--- a/lib/OptimizationBase/src/OptimizationDIExt.jl
+++ b/lib/OptimizationBase/src/OptimizationDIExt.jl
@@ -1,6 +1,6 @@
 using OptimizationBase
 import OptimizationBase.ArrayInterface
-import OptimizationBase.SciMLBase: OptimizationFunction
+import SciMLBase: OptimizationFunction
 import OptimizationBase.LinearAlgebra: I
 import DifferentiationInterface
 import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp,
diff --git a/lib/OptimizationBase/src/OptimizationDISparseExt.jl b/lib/OptimizationBase/src/OptimizationDISparseExt.jl
index e135339fe..8ed5f46d7 100644
--- a/lib/OptimizationBase/src/OptimizationDISparseExt.jl
+++ b/lib/OptimizationBase/src/OptimizationDISparseExt.jl
@@ -1,6 +1,6 @@
 using OptimizationBase
 import OptimizationBase.ArrayInterface
-import OptimizationBase.SciMLBase: OptimizationFunction
+import SciMLBase: OptimizationFunction
 import OptimizationBase.LinearAlgebra: I
 import DifferentiationInterface
 import DifferentiationInterface: prepare_gradient, prepare_hessian, prepare_hvp,
diff --git a/lib/OptimizationBase/src/function.jl b/lib/OptimizationBase/src/function.jl
index 9c07554ce..ae770b339 100644
--- a/lib/OptimizationBase/src/function.jl
+++ b/lib/OptimizationBase/src/function.jl
@@ -21,9 +21,9 @@ function rep_pars_vals!(e, p)
 end
 """
     instantiate_function(f, x, ::AbstractADType, p, num_cons = 0)::OptimizationFunction
-This function is used internally by Optimization.jl to construct
+This function is used internally by OptimizationBase.jl to construct
 the necessary extra functions (gradients, Hessians, etc.) before
-optimization. Each of the ADType dispatches use the supplied automatic
+optimization. Each of the ADType dispatches uses the supplied automatic
 differentiation type in order to specify how the construction process
 occurs.
diff --git a/lib/OptimizationBase/src/solve.jl b/lib/OptimizationBase/src/solve.jl
index ecd060267..70688bb54 100644
--- a/lib/OptimizationBase/src/solve.jl
+++ b/lib/OptimizationBase/src/solve.jl
@@ -12,11 +12,11 @@ end
 const OPTIMIZER_MISSING_ERROR_MESSAGE = """
     Optimization algorithm not found. Either the chosen algorithm is not a valid solver
     choice for the `OptimizationProblem`, or the Optimization solver library is not loaded.
-    Make sure that you have loaded an appropriate Optimization.jl solver library, for example,
+    Make sure that you have loaded an appropriate OptimizationBase.jl solver library, for example,
     `solve(prob,Optim.BFGS())` requires `using OptimizationOptimJL` and
     `solve(prob,Adam())` requires `using OptimizationOptimisers`.
-    For more information, see the Optimization.jl documentation: .
+    For more information, see the OptimizationBase.jl documentation: .
""" struct OptimizerMissingError <: Exception diff --git a/lib/OptimizationBase/test/cvxtest.jl b/lib/OptimizationBase/test/cvxtest.jl index 3df0d964d..c2d7ad470 100644 --- a/lib/OptimizationBase/test/cvxtest.jl +++ b/lib/OptimizationBase/test/cvxtest.jl @@ -5,7 +5,7 @@ function f(x, p = nothing) return exp(x[1]) + x[1]^2 end -optf = OptimizationFunction(f, Optimization.AutoForwardDiff()) +optf = OptimizationFunction(f, OptimizationBase.AutoForwardDiff()) prob = OptimizationProblem(optf, [0.4], structural_analysis = true) @time sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @@ -42,7 +42,7 @@ M = SymmetricPositiveDefinite(5) data2 = [exp(M, q, σ * rand(M; vector_at = q)) for i in 1:m]; f(x, p = nothing) = sum(SymbolicAnalysis.distance(M, data2[i], x)^2 for i in 1:5) -optf = OptimizationFunction(f, Optimization.AutoForwardDiff()) +optf = OptimizationFunction(f, OptimizationBase.AutoForwardDiff()) prob = OptimizationProblem(optf, data2[1]; manifold = M, structural_analysis = true) opt = OptimizationManopt.GradientDescentOptimizer() diff --git a/lib/OptimizationEvolutionary/test/runtests.jl b/lib/OptimizationEvolutionary/test/runtests.jl index 1bd810664..3af7250b0 100644 --- a/lib/OptimizationEvolutionary/test/runtests.jl +++ b/lib/OptimizationEvolutionary/test/runtests.jl @@ -1,5 +1,5 @@ using OptimizationEvolutionary, Optimization, Random -using Optimization.SciMLBase: MultiObjectiveOptimizationFunction +using SciMLBase: MultiObjectiveOptimizationFunction using Test Random.seed!(1234) @@ -9,12 +9,12 @@ Random.seed!(1234) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = Optimization.OptimizationProblem(optprob, x0, _p) + prob = OptimizationBase.OptimizationProblem(optprob, x0, _p) sol = solve(prob, CMAES(μ = 40, λ = 100), abstol = 1e-15) @test 10 * sol.objective < l1 x0 = [-0.7, 0.3] - prob = Optimization.OptimizationProblem(optprob, x0, _p, lb = [0.0, 0.0], + prob = OptimizationBase.OptimizationProblem(optprob, x0, _p, lb = [0.0, 0.0], ub = [0.5, 0.5]) sol = solve(prob, CMAES(μ = 50, λ = 60)) @test sol.u == zeros(2) diff --git a/lib/OptimizationGCMAES/test/runtests.jl b/lib/OptimizationGCMAES/test/runtests.jl index a2818b7fb..b17c18509 100644 --- a/lib/OptimizationGCMAES/test/runtests.jl +++ b/lib/OptimizationGCMAES/test/runtests.jl @@ -6,15 +6,15 @@ using Test x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - f_ad = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + f_ad = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff()) f_noad = OptimizationFunction(rosenbrock) - prob = Optimization.OptimizationProblem(f_ad, x0, _p, lb = [-1.0, -1.0], + prob = OptimizationBase.OptimizationProblem(f_ad, x0, _p, lb = [-1.0, -1.0], ub = [1.0, 1.0]) sol = solve(prob, GCMAESOpt(), maxiters = 1000) @test 10 * sol.objective < l1 - prob = Optimization.OptimizationProblem(f_noad, x0, _p, lb = [-1.0, -1.0], + prob = OptimizationBase.OptimizationProblem(f_noad, x0, _p, lb = [-1.0, -1.0], ub = [1.0, 1.0]) sol = solve(prob, GCMAESOpt(), maxiters = 1000) @test 10 * sol.objective < l1 @@ -25,12 +25,12 @@ using Test p = [1.0] prob = OptimizationProblem(objective, x0, p, lb = [-10.0], ub = [10.0]) - cache = Optimization.init(prob, GCMAESOpt()) - sol = Optimization.solve!(cache) + cache = OptimizationBase.init(prob, GCMAESOpt()) + sol = OptimizationBase.solve!(cache) @test sol.u≈[1.0] atol=1e-3 - cache = Optimization.reinit!(cache; p = [2.0]) - sol = Optimization.solve!(cache) + cache = OptimizationBase.reinit!(cache; p 
= [2.0])
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[2.0] atol=1e-3
     end
 end
diff --git a/lib/OptimizationIpopt/src/OptimizationIpopt.jl b/lib/OptimizationIpopt/src/OptimizationIpopt.jl
index 6b3222c7e..269a127ff 100644
--- a/lib/OptimizationIpopt/src/OptimizationIpopt.jl
+++ b/lib/OptimizationIpopt/src/OptimizationIpopt.jl
@@ -12,7 +12,7 @@ export IpoptOptimizer
 """
     IpoptOptimizer(; kwargs...)
 
 Optimizer using the Interior Point Optimizer (Ipopt) for nonlinear optimization.
 
 Ipopt is designed to find (local) solutions of mathematical optimization problems
 of the form:
 
diff --git a/lib/OptimizationIpopt/src/cache.jl b/lib/OptimizationIpopt/src/cache.jl
index 5e78bb490..170fda265 100644
--- a/lib/OptimizationIpopt/src/cache.jl
+++ b/lib/OptimizationIpopt/src/cache.jl
@@ -239,7 +239,7 @@ function hessian_lagrangian_structure(cache::IpoptCache)
             end
         end
     elseif !sparse_obj
         # Performance optimization. If both are dense, no need to repeat
     else
         for col in 1:N, row in 1:col
             push!(inds, (row, col))
diff --git a/lib/OptimizationIpopt/src/callback.jl b/lib/OptimizationIpopt/src/callback.jl
index c1d08f810..a6d8b88fb 100644
--- a/lib/OptimizationIpopt/src/callback.jl
+++ b/lib/OptimizationIpopt/src/callback.jl
@@ -94,7 +94,7 @@ function (cb::IpoptProgressLogger)(
     end
     if !isnothing(cb.callback)
         # return `true` to keep going, or `false` to terminate the optimization
-        # this is the other way around compared to Optimization.jl callbacks
+        # this is the other way around compared to OptimizationBase.jl callbacks
         !cb.callback(opt_state, obj_value)
     else
         true
diff --git a/lib/OptimizationIpopt/test/additional_tests.jl b/lib/OptimizationIpopt/test/additional_tests.jl
index ec2836703..18b425f14 100644
--- a/lib/OptimizationIpopt/test/additional_tests.jl
+++ b/lib/OptimizationIpopt/test/additional_tests.jl
@@ -22,7 +22,7 @@ using LinearAlgebra
         res[1] = x[2] - x[1]^2
     end
 
-    optfunc = OptimizationFunction(simple_objective, Optimization.AutoZygote();
+    optfunc = OptimizationFunction(simple_objective, OptimizationBase.AutoZygote();
         cons = simple_constraint)
     prob = OptimizationProblem(optfunc, [0.0, 0.0], nothing;
         lb = [-1.0, -Inf],
@@ -62,7 +62,7 @@ using LinearAlgebra
     n = 5
     x0 = fill(3.0, n)
 
-    optfunc = OptimizationFunction(lv1_objective, Optimization.AutoZygote();
+    optfunc = OptimizationFunction(lv1_objective, OptimizationBase.AutoZygote();
         cons = lv1_constraints)
     prob = OptimizationProblem(optfunc, x0, nothing;
         lcons = fill(4.0, n-2),
@@ -84,7 +84,7 @@ using LinearAlgebra
 
     quadratic(x, p) = (x[1] - 2)^2 + (x[2] - 3)^2
 
-    optfunc = OptimizationFunction(quadratic, Optimization.AutoZygote())
+    optfunc = OptimizationFunction(quadratic, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optfunc, [0.5, 1.0], nothing;
         lb = [0.0, 0.0],
         ub = [1.0, 2.0])
@@ -112,7 +112,7 @@ using LinearAlgebra
         return sum_sq
     end
 
-    optfunc = OptimizationFunction(nls_objective, Optimization.AutoZygote())
+    optfunc = OptimizationFunction(nls_objective, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optfunc, [1.0, 0.5], nothing)
 
     sol = solve(prob, IpoptOptimizer())
@@ -133,7 +133,7 @@ using LinearAlgebra
         res[1] = x[1] + x[2]
     end
 
-    optfunc = OptimizationFunction(objective, Optimization.AutoZygote();
+    optfunc = OptimizationFunction(objective, OptimizationBase.AutoZygote();
         cons = constraint)
     prob = OptimizationProblem(optfunc, [2.0, 2.0], nothing;
         lb = [0.0, 0.0],
@@
-164,7 +164,7 @@ using LinearAlgebra res[1] = x[1] + x[2] end - optfunc = OptimizationFunction(barrier_objective, Optimization.AutoZygote(); + optfunc = OptimizationFunction(barrier_objective, OptimizationBase.AutoZygote(); cons = barrier_constraint) prob = OptimizationProblem(optfunc, [0.5, 0.5], nothing; lb = [1e-6, 1e-6], @@ -196,7 +196,7 @@ using LinearAlgebra res[1] = x[1] + x[n] end - optfunc = OptimizationFunction(sparse_objective, Optimization.AutoZygote(); + optfunc = OptimizationFunction(sparse_objective, OptimizationBase.AutoZygote(); cons = sparse_constraint) x0 = fill(0.1, n) prob = OptimizationProblem(optfunc, x0, nothing; @@ -218,7 +218,7 @@ end p = [1.0, 100.0] @testset "BFGS approximation" begin - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) sol = solve(prob, IpoptOptimizer( hessian_approximation = "limited-memory")) @@ -228,7 +228,7 @@ end end @testset "SR1 approximation" begin - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) sol = solve(prob, IpoptOptimizer( hessian_approximation = "limited-memory", @@ -247,7 +247,7 @@ end x0 = [0.5, 0.5] p = [1.0, 100.0] - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) # First solve diff --git a/lib/OptimizationIpopt/test/advanced_features.jl b/lib/OptimizationIpopt/test/advanced_features.jl index aa42b6900..e1724a900 100644 --- a/lib/OptimizationIpopt/test/advanced_features.jl +++ b/lib/OptimizationIpopt/test/advanced_features.jl @@ -17,7 +17,7 @@ using SparseArrays x0 = [0.0, 0.0] p = [1.0, 100.0] - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) # Test with tight tolerances @@ -41,7 +41,7 @@ using SparseArrays res[2] = x[1]^2 + x[2]^2 - 2.0 end - optfunc = OptimizationFunction(obj, Optimization.AutoZygote(); cons = cons) + optfunc = OptimizationFunction(obj, OptimizationBase.AutoZygote(); cons = cons) prob = OptimizationProblem(optfunc, [0.5, 0.5], nothing; lcons = [0.0, 0.0], ucons = [0.0, 0.0]) @@ -60,7 +60,7 @@ using SparseArrays return sin(x[1]) * cos(x[2]) + exp(-x[1]^2 - x[2]^2) end - optfunc = OptimizationFunction(complex_obj, Optimization.AutoZygote()) + optfunc = OptimizationFunction(complex_obj, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, [0.1, 0.1], nothing) # Run with derivative test level 1 (first derivatives only) @@ -90,7 +90,7 @@ using SparseArrays return sum end - optfunc = OptimizationFunction(rosenbrock_n, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock_n, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) # Test with different linear solver strategies @@ -112,7 +112,7 @@ using SparseArrays res[1] = 1e3 * x[1] + 1e-3 * x[2] - 1.0 end - optfunc = OptimizationFunction(scaled_obj, Optimization.AutoZygote(); + optfunc = OptimizationFunction(scaled_obj, OptimizationBase.AutoZygote(); cons = scaled_cons) prob = OptimizationProblem(optfunc, [1.0, 1.0], nothing; lcons = [0.0], @@ -140,7 +140,7 @@ using SparseArrays res[2] = x[1]^2 + x[2]^2 - 0.5 end - optfunc = OptimizationFunction(difficult_obj, 
Optimization.AutoZygote(); + optfunc = OptimizationFunction(difficult_obj, OptimizationBase.AutoZygote(); cons = difficult_cons) # Start from an infeasible point prob = OptimizationProblem(optfunc, [2.0, 2.0], nothing; @@ -167,7 +167,7 @@ using SparseArrays x0 = [0.0, 0.0] p = [1.0, 100.0] - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) # Test adaptive mu strategy @@ -192,7 +192,7 @@ using SparseArrays return (x[1] - 1)^2 + (x[2] - 2)^2 + (x[3] - 3)^2 end - optfunc = OptimizationFunction(fixed_var_obj, Optimization.AutoZygote()) + optfunc = OptimizationFunction(fixed_var_obj, OptimizationBase.AutoZygote()) # Fix x[2] = 2.0 by setting equal bounds prob = OptimizationProblem(optfunc, [0.0, 2.0, 0.0], nothing; lb = [-Inf, 2.0, -Inf], @@ -215,9 +215,9 @@ using SparseArrays n = 5 optfunc = OptimizationFunction(slow_converge_obj, - Optimization.AutoZygote()) + OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, zeros(n), nothing; - sense = Optimization.MaxSense) + sense = OptimizationBase.MaxSense) sol = solve(prob, IpoptOptimizer( acceptable_tol = 1e-4, @@ -235,7 +235,7 @@ end x0 = [0.0, 0.0] p = [1.0, 100.0] - optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optfunc, x0, p) @testset "Verbose levels" begin diff --git a/lib/OptimizationIpopt/test/problem_types.jl b/lib/OptimizationIpopt/test/problem_types.jl index d47558629..9461a438c 100644 --- a/lib/OptimizationIpopt/test/problem_types.jl +++ b/lib/OptimizationIpopt/test/problem_types.jl @@ -196,7 +196,7 @@ using SparseArrays res[4] = flows[4] + flows[5] - required_flow end - optfunc = OptimizationFunction(flow_cost, Optimization.AutoZygote(); + optfunc = OptimizationFunction(flow_cost, OptimizationBase.AutoZygote(); cons = flow_constraints) flows0 = fill(required_flow / 2, 5) prob = OptimizationProblem(optfunc, flows0, nothing; @@ -233,7 +233,7 @@ using SparseArrays end n = 3 - optfunc = OptimizationFunction(robust_objective, Optimization.AutoZygote(); + optfunc = OptimizationFunction(robust_objective, OptimizationBase.AutoZygote(); cons = robust_constraints) x0 = fill(1.0/n, n) prob = OptimizationProblem(optfunc, x0, nothing; @@ -263,7 +263,7 @@ using SparseArrays # res[2] = x[1] + x[2] - 1.0 # end - # optfunc = OptimizationFunction(mpcc_objective, Optimization.AutoZygote(); + # optfunc = OptimizationFunction(mpcc_objective, OptimizationBase.AutoZygote(); # cons = mpcc_constraints) # x0 = [0.5, 0.5] # prob = OptimizationProblem(optfunc, x0, nothing; @@ -294,7 +294,7 @@ end return 0.5 * dot(x, Q * x) - dot(b, x) end - optfunc = OptimizationFunction(large_quadratic, Optimization.AutoZygote()) + optfunc = OptimizationFunction(large_quadratic, OptimizationBase.AutoZygote()) x0 = randn(n) prob = OptimizationProblem(optfunc, x0) @@ -316,7 +316,7 @@ end end n = 10 - optfunc = OptimizationFunction(trig_objective, Optimization.AutoZygote()) + optfunc = OptimizationFunction(trig_objective, OptimizationBase.AutoZygote()) x0 = randn(n) prob = OptimizationProblem(optfunc, x0; lb = fill(-2π, n), diff --git a/lib/OptimizationIpopt/test/runtests.jl b/lib/OptimizationIpopt/test/runtests.jl index 8d335ec0c..8fbf9a6eb 100644 --- a/lib/OptimizationIpopt/test/runtests.jl +++ b/lib/OptimizationIpopt/test/runtests.jl @@ -11,8 +11,8 @@ x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, 
_p)
 
-optfunc = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote())
-prob = OptimizationProblem(optfunc, x0, _p; sense = Optimization.MaxSense)
+optfunc = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote())
+prob = OptimizationProblem(optfunc, x0, _p; sense = OptimizationBase.MaxSense)
 
 callback = function (state, l)
     display(l)
@@ -40,7 +40,7 @@ function _test_sparse_derivatives_hs071(backend, optimizer)
     prob = OptimizationProblem(
         OptimizationFunction(objective, backend; cons = constraints),
         [1.0, 5.0, 5.0, 1.0];
-        sense = Optimization.MinSense,
+        sense = OptimizationBase.MinSense,
         lb = [1.0, 1.0, 1.0, 1.0],
         ub = [5.0, 5.0, 5.0, 5.0],
         lcons = [25.0, 40.0],
@@ -91,7 +91,7 @@ include("problem_types.jl")
     end
     lag_hess_prototype = sparse([1 1; 0 1])
 
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff();
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff();
         cons = cons, lag_h = lagh, lag_hess_prototype)
     prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5])
     sol = solve(prob, IpoptOptimizer())
@@ -111,7 +111,7 @@
     @test sol.u ≈ [1.0] # ≈ [1]
 
     @test_broken begin # needs reinit/remake fixes
-        cache = Optimization.reinit!(cache; p = [2.0])
+        cache = OptimizationBase.reinit!(cache; p = [2.0])
         sol = solve!(cache)
         @test sol.u ≈ [2.0] # ≈ [2]
     end
@@ -123,7 +123,7 @@ end
     p = [1.0, 100.0]
 
     @testset "additional_options dictionary" begin
-        optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+        optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
         prob = OptimizationProblem(optfunc, x0, p)
 
         # Test with various option types
@@ -150,7 +150,7 @@ end
     end
 
     @testset "Common interface arguments override" begin
-        optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+        optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
         prob = OptimizationProblem(optfunc, x0, p)
 
         # Test that reltol overrides default tolerance
@@ -175,7 +175,7 @@ end
     end
 
     @testset "Priority: struct < additional_options < solve args" begin
-        optfunc = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+        optfunc = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
         prob = OptimizationProblem(optfunc, x0, p)
 
         # Struct field is overridden by solve argument
diff --git a/lib/OptimizationLBFGSB/test/runtests.jl b/lib/OptimizationLBFGSB/test/runtests.jl
index c362a399a..90c11f0ee 100644
--- a/lib/OptimizationLBFGSB/test/runtests.jl
+++ b/lib/OptimizationLBFGSB/test/runtests.jl
@@ -1,6 +1,6 @@
 using OptimizationBase
 using OptimizationBase: ReturnCode
-using OptimizationBase.SciMLBase: OptimizationFunction, OptimizationProblem
+using SciMLBase: OptimizationFunction, OptimizationProblem
 using ForwardDiff, Zygote
 using OptimizationLBFGSB
 using MLUtils
diff --git a/lib/OptimizationMOI/src/nlp.jl b/lib/OptimizationMOI/src/nlp.jl
index fb8aabe71..57313dea6 100644
--- a/lib/OptimizationMOI/src/nlp.jl
+++ b/lib/OptimizationMOI/src/nlp.jl
@@ -359,7 +359,7 @@ function MOI.hessian_lagrangian_structure(evaluator::MOIOptimizationNLPEvaluator
         end
         end
     elseif !sparse_obj
         # Performance optimization. 
If both are dense, no need to repeat else for col in 1:N, row in 1:col diff --git a/lib/OptimizationMOI/test/runtests.jl b/lib/OptimizationMOI/test/runtests.jl index 81f15fb1c..691b66797 100644 --- a/lib/OptimizationMOI/test/runtests.jl +++ b/lib/OptimizationMOI/test/runtests.jl @@ -17,7 +17,7 @@ function _test_sparse_derivatives_hs071(backend, optimizer) prob = OptimizationProblem( OptimizationFunction(objective, backend; cons = constraints), [1.0, 5.0, 5.0, 1.0]; - sense = Optimization.MinSense, + sense = OptimizationBase.MinSense, lb = [1.0, 1.0, 1.0, 1.0], ub = [5.0, 5.0, 5.0, 5.0], lcons = [25.0, 40.0], @@ -37,7 +37,7 @@ end _p = [1.0, 100.0] cons_circ = (res, x, p) -> res .= [x[1]^2 + x[2]^2] optprob = OptimizationFunction( - rosenbrock, Optimization.AutoZygote(); + rosenbrock, OptimizationBase.AutoZygote(); cons = cons_circ) prob = OptimizationProblem(optprob, x0, _p, ucons = [Inf], lcons = [0.0]) evaluator = init(prob, Ipopt.Optimizer()).evaluator @@ -63,8 +63,8 @@ end _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense) callback = function (state, l) display(l) @@ -79,8 +79,8 @@ end sol = solve!(cache) @test 10 * sol.objective < l1 - optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MinSense) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MinSense) opt = Ipopt.Optimizer() sol = solve(prob, opt) @@ -126,7 +126,7 @@ end cons_circ = (res, x, p) -> res .= [x[1]^2 + x[2]^2] optprob = OptimizationFunction( - rosenbrock, Optimization.AutoModelingToolkit(true, true); + rosenbrock, OptimizationBase.AutoModelingToolkit(true, true); cons = cons_circ) prob = OptimizationProblem(optprob, x0, _p, ucons = [Inf], lcons = [0.0]) @@ -141,10 +141,10 @@ end @testset "backends" begin backends = ( - Optimization.AutoModelingToolkit(false, false), - Optimization.AutoModelingToolkit(true, false), - Optimization.AutoModelingToolkit(false, true), - Optimization.AutoModelingToolkit(true, true)) + OptimizationBase.AutoModelingToolkit(false, false), + OptimizationBase.AutoModelingToolkit(true, false), + OptimizationBase.AutoModelingToolkit(false, true), + OptimizationBase.AutoModelingToolkit(true, true)) for backend in backends @testset "$backend" begin _test_sparse_derivatives_hs071(backend, Ipopt.Optimizer()) @@ -167,7 +167,7 @@ end u0 = [0.0, 0.0, 0.0, 1.0] optfun = OptimizationFunction((u, p) -> -v'u, cons = (res, u, p) -> res .= w'u, - Optimization.AutoForwardDiff()) + OptimizationBase.AutoForwardDiff()) optprob = OptimizationProblem(optfun, u0; lb = zero.(u0), ub = one.(u0), int = ones(Bool, length(u0)), @@ -185,7 +185,7 @@ end u0 = [1.0] optfun = OptimizationFunction((u, p) -> sum(abs2, x * u[1] .- y), - Optimization.AutoForwardDiff()) + OptimizationBase.AutoForwardDiff()) optprob = OptimizationProblem(optfun, u0; lb = one.(u0), ub = 6.0 .* u0, int = ones(Bool, length(u0))) @@ -264,7 +264,7 @@ end cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]]) - optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(); + optprob = OptimizationFunction(rosenbrock, 
OptimizationBase.AutoModelingToolkit(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5]) sol = solve(prob, AmplNLWriter.Optimizer(Ipopt_jll.amplexe)) @@ -285,7 +285,7 @@ end end lag_hess_prototype = sparse([1 1; 0 1]) - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = cons, lag_h = lagh, lag_hess_prototype) prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5]) sol = solve(prob, Ipopt.Optimizer()) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 98cc6e30a..ecae49c4e 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -231,7 +231,7 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end -## Optimization.jl stuff +## OptimizationBase.jl stuff function SciMLBase.requiresgradient(opt::Union{ GradientDescentOptimizer, ConjugateGradientDescentOptimizer, QuasiNewtonOptimizer, ConvexBundleOptimizer, FrankWolfeOptimizer, diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl index 09074e602..5eaefb42e 100644 --- a/lib/OptimizationManopt/test/runtests.jl +++ b/lib/OptimizationManopt/test/runtests.jl @@ -4,7 +4,7 @@ using Manifolds using ForwardDiff, Zygote, Enzyme, FiniteDiff, ReverseDiff using Manopt, RipQP, QuadraticModels using Test -using Optimization.SciMLBase +using SciMLBase using LinearAlgebra rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 @@ -23,9 +23,9 @@ R2 = Euclidean(2) stepsize = Manopt.ArmijoLinesearch(R2) opt = OptimizationManopt.GradientDescentOptimizer() - optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + optprob_forwarddiff = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff()) prob_forwarddiff = OptimizationProblem(optprob_forwarddiff, x0, p) - @test_throws ArgumentError("Manifold not specified in the problem for e.g. `OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") Optimization.solve( + @test_throws ArgumentError("Manifold not specified in the problem for e.g. `OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") OptimizationBase.solve( prob_forwarddiff, opt) end @@ -36,15 +36,15 @@ R2 = Euclidean(2) stepsize = Manopt.ArmijoLinesearch(R2) opt = OptimizationManopt.GradientDescentOptimizer() - optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoEnzyme()) + optprob_forwarddiff = OptimizationFunction(rosenbrock, OptimizationBase.AutoEnzyme()) prob_forwarddiff = OptimizationProblem( optprob_forwarddiff, x0, p; manifold = R2, stepsize = stepsize) - sol = Optimization.solve(prob_forwarddiff, opt) + sol = OptimizationBase.solve(prob_forwarddiff, opt) @test sol.minimum < 0.2 optprob_grad = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!) 
prob_grad = OptimizationProblem(optprob_grad, x0, p; manifold = R2, stepsize = stepsize) - sol = Optimization.solve(prob_grad, opt) + sol = OptimizationBase.solve(prob_grad, opt) @test sol.minimum < 0.2 end @@ -57,7 +57,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) + sol = OptimizationBase.solve(prob, opt) @test sol.minimum < 0.7 end @@ -68,10 +68,10 @@ R2 = Euclidean(2) stepsize = Manopt.ArmijoLinesearch(R2) opt = OptimizationManopt.ConjugateGradientDescentOptimizer() - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt, stepsize = stepsize) + sol = OptimizationBase.solve(prob, opt, stepsize = stepsize) @test sol.minimum < 0.5 end @@ -85,10 +85,10 @@ R2 = Euclidean(2) println(l) return false end - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt, callback = callback, maxiters = 30) + sol = OptimizationBase.solve(prob, opt, callback = callback, maxiters = 30) @test sol.minimum < 1e-14 end @@ -101,7 +101,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) + sol = OptimizationBase.solve(prob, opt) @test sol.minimum < 0.1 end @@ -114,7 +114,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) + sol = OptimizationBase.solve(prob, opt) @test sol.minimum < 0.1 end @@ -127,7 +127,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve( + sol = OptimizationBase.solve( prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver) @test sol.minimum < 0.1 end @@ -141,7 +141,7 @@ R2 = Euclidean(2) # optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) # prob = OptimizationProblem(optprob, x0, p; manifold = R2) - # sol = Optimization.solve(prob, opt) + # sol = OptimizationBase.solve(prob, opt) # @test_broken sol.minimum < 0.1 # end @@ -156,7 +156,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) + sol = OptimizationBase.solve(prob, opt) @test sol.minimum < 0.1 end @@ -171,7 +171,7 @@ R2 = Euclidean(2) optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) + sol = OptimizationBase.solve(prob, opt) @test sol.minimum < 0.1 end @@ -185,6 +185,6 @@ R2 = Euclidean(2) optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons) prob_cons = OptimizationProblem(optprob_cons, x0, p) #TODO: What is this? 
- @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt) + @test_throws SciMLBase.IncompatibleOptimizerError OptimizationBase.solve(prob_cons, opt) end end \ No newline at end of file diff --git a/lib/OptimizationMetaheuristics/test/runtests.jl b/lib/OptimizationMetaheuristics/test/runtests.jl index 8ec272254..b78714760 100644 --- a/lib/OptimizationMetaheuristics/test/runtests.jl +++ b/lib/OptimizationMetaheuristics/test/runtests.jl @@ -8,7 +8,7 @@ Random.seed!(42) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) optprob = OptimizationFunction(rosenbrock) - prob = Optimization.OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], + prob = OptimizationBase.OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [1.0, 1.0]) sol = solve(prob, ECA()) @test 10 * sol.objective < l1 diff --git a/lib/OptimizationMultistartOptimization/test/runtests.jl b/lib/OptimizationMultistartOptimization/test/runtests.jl index 58109d2ba..43572bb7e 100644 --- a/lib/OptimizationMultistartOptimization/test/runtests.jl +++ b/lib/OptimizationMultistartOptimization/test/runtests.jl @@ -6,8 +6,8 @@ using Test, ReverseDiff x0 = zeros(2) _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) - prob = Optimization.OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [1.5, 1.5]) + f = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff()) + prob = OptimizationBase.OptimizationProblem(f, x0, _p, lb = [-1.0, -1.0], ub = [1.5, 1.5]) sol = solve(prob, OptimizationMultistartOptimization.TikTak(100), OptimizationNLopt.Opt(:LD_LBFGS, 2)) @test 10 * sol.objective < l1 diff --git a/lib/OptimizationNLPModels/test/runtests.jl b/lib/OptimizationNLPModels/test/runtests.jl index 26358240f..b1075e832 100644 --- a/lib/OptimizationNLPModels/test/runtests.jl +++ b/lib/OptimizationNLPModels/test/runtests.jl @@ -7,15 +7,15 @@ using Test # https://jso.dev/NLPModelsTest.jl/dev/reference/#NLPModelsTest.HS5 # Problem with box bounds hs5f(u, p) = sin(u[1] + u[2]) + (u[1] - u[2])^2 - (3 / 2) * u[1] + (5 / 2)u[2] + 1 - f = Optimization.OptimizationFunction(hs5f, Optimization.AutoZygote()) + f = OptimizationBase.OptimizationFunction(hs5f, OptimizationBase.AutoZygote()) lb = [-1.5; -3] ub = [4.0; 3.0] u0 = [0.0; 0.0] - oprob = Optimization.OptimizationProblem( - f, u0, lb = lb, ub = ub, sense = Optimization.MinSense) + oprob = OptimizationBase.OptimizationProblem( + f, u0, lb = lb, ub = ub, sense = OptimizationBase.MinSense) nlpmo = NLPModelsTest.HS5() - converted = OptimizationNLPModels.OptimizationProblem(nlpmo, Optimization.AutoZygote()) + converted = OptimizationNLPModels.OptimizationProblem(nlpmo, OptimizationBase.AutoZygote()) sol_native = solve(oprob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) sol_converted = solve(converted, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @@ -29,12 +29,12 @@ using Test function brown_dennis(u, p) return sum([((u[1] + (i / 5) * u[2] - exp(i / 5))^2 + (u[3] + sin(i / 5) * u[4] - cos(i / 5))^2)^2 for i in 1:20]) end - f = Optimization.OptimizationFunction(brown_dennis, Optimization.AutoZygote()) + f = OptimizationBase.OptimizationFunction(brown_dennis, OptimizationBase.AutoZygote()) u0 = [25.0; 5.0; -5.0; -1.0] - oprob = Optimization.OptimizationProblem(f, u0, sense = Optimization.MinSense) + oprob = OptimizationBase.OptimizationProblem(f, u0, sense = OptimizationBase.MinSense) nlpmo = NLPModelsTest.BROWNDEN() - converted = OptimizationNLPModels.OptimizationProblem(nlpmo, Optimization.AutoZygote()) + 
converted = OptimizationNLPModels.OptimizationProblem(nlpmo, OptimizationBase.AutoZygote()) sol_native = solve(oprob, BFGS()) sol_converted = solve(converted, BFGS()) @@ -50,14 +50,14 @@ using Test lcons = [0.0] ucons = [Inf] u0 = [-10.0; 10.0] - f = Optimization.OptimizationFunction( - hs10, Optimization.AutoForwardDiff(); cons = hs10_cons) - oprob = Optimization.OptimizationProblem( - f, u0, lcons = lcons, ucons = ucons, sense = Optimization.MinSense) + f = OptimizationBase.OptimizationFunction( + hs10, OptimizationBase.AutoForwardDiff(); cons = hs10_cons) + oprob = OptimizationBase.OptimizationProblem( + f, u0, lcons = lcons, ucons = ucons, sense = OptimizationBase.MinSense) nlpmo = NLPModelsTest.HS10() converted = OptimizationNLPModels.OptimizationProblem( - nlpmo, Optimization.AutoForwardDiff()) + nlpmo, OptimizationBase.AutoForwardDiff()) sol_native = solve(oprob, Ipopt.Optimizer()) sol_converted = solve(converted, Ipopt.Optimizer()) @@ -75,14 +75,14 @@ using Test lb = [0.0; 0.0] ub = [Inf; Inf] u0 = [-2.0; -2.0] - f = Optimization.OptimizationFunction( - hs13, Optimization.AutoForwardDiff(); cons = hs13_cons) - oprob = Optimization.OptimizationProblem(f, u0, lb = lb, ub = ub, lcons = lcons, - ucons = ucons, sense = Optimization.MinSense) + f = OptimizationBase.OptimizationFunction( + hs13, OptimizationBase.AutoForwardDiff(); cons = hs13_cons) + oprob = OptimizationBase.OptimizationProblem(f, u0, lb = lb, ub = ub, lcons = lcons, + ucons = ucons, sense = OptimizationBase.MinSense) nlpmo = NLPModelsTest.HS13() converted = OptimizationNLPModels.OptimizationProblem( - nlpmo, Optimization.AutoForwardDiff()) + nlpmo, OptimizationBase.AutoForwardDiff()) sol_native = solve(oprob, Ipopt.Optimizer()) sol_converted = solve(converted, Ipopt.Optimizer()) @@ -99,14 +99,14 @@ using Test lcons = [-1.0; 0.0] ucons = [-1.0; Inf] u0 = [2.0; 2.0] - f = Optimization.OptimizationFunction( - hs14, Optimization.AutoForwardDiff(); cons = hs14_cons) - oprob = Optimization.OptimizationProblem( - f, u0, lcons = lcons, ucons = ucons, sense = Optimization.MinSense) + f = OptimizationBase.OptimizationFunction( + hs14, OptimizationBase.AutoForwardDiff(); cons = hs14_cons) + oprob = OptimizationBase.OptimizationProblem( + f, u0, lcons = lcons, ucons = ucons, sense = OptimizationBase.MinSense) nlpmo = NLPModelsTest.HS14() converted = OptimizationNLPModels.OptimizationProblem( - nlpmo, Optimization.AutoForwardDiff()) + nlpmo, OptimizationBase.AutoForwardDiff()) sol_native = solve(oprob, Ipopt.Optimizer()) sol_converted = solve(converted, Ipopt.Optimizer()) diff --git a/lib/OptimizationNLopt/test/runtests.jl b/lib/OptimizationNLopt/test/runtests.jl index 44898bc56..d00010017 100644 --- a/lib/OptimizationNLopt/test/runtests.jl +++ b/lib/OptimizationNLopt/test/runtests.jl @@ -7,13 +7,13 @@ using Test, Random _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense) sol = solve(prob, NLopt.Opt(:LN_BOBYQA, 2)) @test sol.retcode == ReturnCode.Success @test 10 * sol.objective < l1 - optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optprob, x0, _p) sol = solve(prob, NLopt.Opt(:LD_LBFGS, 
2)) @@ -63,15 +63,15 @@ using Test, Random x0 = zeros(1) p = [1.0] - optf = OptimizationFunction(objective, Optimization.AutoZygote()) + optf = OptimizationFunction(objective, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optf, x0, p) - cache = Optimization.init(prob, NLopt.Opt(:LD_LBFGS, 1)) - sol = Optimization.solve!(cache) + cache = OptimizationBase.init(prob, NLopt.Opt(:LD_LBFGS, 1)) + sol = OptimizationBase.solve!(cache) @test sol.retcode == ReturnCode.Success @test sol.u≈[1.0] atol=1e-3 - cache = Optimization.reinit!(cache; p = [2.0]) - sol = Optimization.solve!(cache) + cache = OptimizationBase.reinit!(cache; p = [2.0]) + sol = OptimizationBase.solve!(cache) # @test sol.retcode == ReturnCode.Success @test sol.u≈[2.0] atol=1e-3 end @@ -94,8 +94,8 @@ using Test, Random system(x, p) = sum((A * x - b) .^ 2) x0 = zeros(n) __p = Float64[] - optprob = OptimizationFunction((x, p) -> -system(x, p), Optimization.AutoZygote()) - prob = OptimizationProblem(optprob, x0, __p; sense = Optimization.MaxSense) + optprob = OptimizationFunction((x, p) -> -system(x, p), OptimizationBase.AutoZygote()) + prob = OptimizationProblem(optprob, x0, __p; sense = OptimizationBase.MaxSense) sol = solve(prob, NLopt.Opt(:LD_LBFGS, n), maxtime = 1e-6) @test sol.retcode == ReturnCode.MaxTime end @@ -104,7 +104,7 @@ using Test, Random # Test that dual_ftol_rel parameter can be passed to NLopt without errors # This parameter is specific to MMA/CCSA algorithms for dual optimization tolerance x0_test = zeros(2) - optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optprob, x0_test, _p) # Test with NLopt.Opt interface @@ -127,7 +127,7 @@ using Test, Random Random.seed!(1) cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2 - 1.0] x0 = zeros(2) - optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [0.0], ucons = [0.0]) sol = solve(prob, NLopt.LN_COBYLA()) @@ -159,7 +159,7 @@ using Test, Random # FTOL_REACHED optprob = OptimizationFunction( - rosenbrock, Optimization.AutoForwardDiff(); cons = con2_c) + rosenbrock, OptimizationBase.AutoForwardDiff(); cons = con2_c) Random.seed!(1) prob = OptimizationProblem( optprob, rand(2), _p, lcons = [0.0, -Inf], ucons = [0.0, 0.0]) diff --git a/lib/OptimizationNOMAD/test/runtests.jl b/lib/OptimizationNOMAD/test/runtests.jl index ef800c666..8f03bd5d4 100644 --- a/lib/OptimizationNOMAD/test/runtests.jl +++ b/lib/OptimizationNOMAD/test/runtests.jl @@ -10,17 +10,17 @@ using Test f = OptimizationFunction(rosenbrock) prob = OptimizationProblem(f, x0, _p) - sol = Optimization.solve(prob, NOMADOpt()) + sol = OptimizationBase.solve(prob, NOMADOpt()) @test 10 * sol.objective < l1 prob = OptimizationProblem(f, x0, _p; lb = [-1.0, -1.0], ub = [1.5, 1.5]) - sol = Optimization.solve(prob, NOMADOpt()) + sol = OptimizationBase.solve(prob, NOMADOpt()) @test 10 * sol.objective < l1 cons = (res, x, p) -> (res[1] = x[1]^2 + x[2]^2; nothing) f = OptimizationFunction(rosenbrock, cons = cons) prob = OptimizationProblem(f, x0, _p; lcons = [-Inf], ucons = [1.0]) - sol = Optimization.solve(prob, NOMADOpt(), maxiters = 5000) + sol = OptimizationBase.solve(prob, NOMADOpt(), maxiters = 5000) @test 10 * sol.objective < l1 function con2_c(res, x, p) @@ -29,6 +29,6 @@ using Test f = OptimizationFunction(rosenbrock, cons = con2_c) prob = 
OptimizationProblem(f, x0, _p; lcons = [-Inf, -Inf], ucons = [0.5, 0.0]) - sol = Optimization.solve(prob, NOMADOpt(), maxiters = 5000) + sol = OptimizationBase.solve(prob, NOMADOpt(), maxiters = 5000) @test sol.objective < l1 end diff --git a/lib/OptimizationOptimJL/test/runtests.jl b/lib/OptimizationOptimJL/test/runtests.jl index 52f01eee8..4f59bff76 100644 --- a/lib/OptimizationOptimJL/test/runtests.jl +++ b/lib/OptimizationOptimJL/test/runtests.jl @@ -1,6 +1,6 @@ using OptimizationOptimJL, OptimizationOptimJL.Optim, Optimization, ForwardDiff, Zygote, ReverseDiff, - Random, ModelingToolkit, Optimization.OptimizationBase.DifferentiationInterface + Random, ModelingToolkit, OptimizationBase.OptimizationBase.DifferentiationInterface using Test struct CallbackTester @@ -85,14 +85,14 @@ end @test sol.original.iterations > 2 cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2] - optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoModelingToolkit(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [-5.0], ucons = [10.0]) sol = solve(prob, IPNewton()) @test 10 * sol.objective < l1 - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [-Inf], ucons = [Inf]) @@ -108,23 +108,23 @@ end res .= [x[1]^2 + x[2]^2, x[2] * sin(x[1]) - x[1]] end - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = con2_c) prob = OptimizationProblem(optprob, x0, _p, lcons = [-Inf, -Inf], ucons = [Inf, Inf]) sol = solve(prob, IPNewton()) @test 10 * sol.objective < l1 cons_circ = (res, x, p) -> res .= [x[1]^2 + x[2]^2] - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = cons_circ) prob = OptimizationProblem(optprob, x0, _p, lcons = [-Inf], ucons = [0.25^2]) - cache = Optimization.init(prob, Optim.IPNewton()) - sol = Optimization.solve!(cache) + cache = OptimizationBase.init(prob, Optim.IPNewton()) + sol = OptimizationBase.solve!(cache) res = Array{Float64}(undef, 1) cons(res, sol.u, nothing) @test sqrt(res[1])≈0.25 rtol=1e-6 - optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8]) sol = solve( @@ -133,12 +133,12 @@ end Random.seed!(1234) prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8]) - cache = Optimization.init(prob, Optim.SAMIN()) - sol = Optimization.solve!(cache) + cache = OptimizationBase.init(prob, Optim.SAMIN()) + sol = OptimizationBase.solve!(cache) @test 10 * sol.objective < l1 - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote()) - prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote()) + prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense) sol = solve(prob, NelderMead()) @test 10 * sol.objective < l1 @@ -150,19 +150,19 @@ end G[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1] G[2] = 200.0 * (x[2] - x[1]^2) end - optprob = OptimizationFunction((x, p) 
-> -rosenbrock(x, p), Optimization.AutoZygote(),
+    optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote(),
         grad = g!)
-    prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense)
+    prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense)
     sol = solve(prob, BFGS())
     @test 10 * sol.objective < l1
 
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoModelingToolkit())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, Optim.BFGS())
     @test 10 * sol.objective < l1
 
     optprob = OptimizationFunction(rosenbrock,
-        Optimization.AutoModelingToolkit(true, false))
+        OptimizationBase.AutoModelingToolkit(true, false))
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, Optim.Newton())
     @test 10 * sol.objective < l1
@@ -171,7 +171,7 @@ end
     @test 10 * sol.objective < l1
 
     prob = OptimizationProblem(
-        optprob, x0, _p; sense = Optimization.MaxSense, lb = [-1.0, -1.0], ub = [0.8, 0.8])
+        optprob, x0, _p; sense = OptimizationBase.MaxSense, lb = [-1.0, -1.0], ub = [0.8, 0.8])
     sol = solve(prob, BFGS())
     @test 10 * sol.objective < l1
 
@@ -181,13 +181,13 @@ end
         return nothing
     end
 
     # https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with explicit gradient function
     optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
     prob = OptimizationProblem(optprob, x0, _p)
     @test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
     @test 10 * sol.objective < l1
 
     # https://github.com/SciML/Optimization.jl/issues/754 Optim.BFGS() with bounds and explicit gradient function
     optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
 prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
 @test (sol = solve(prob, Optim.BFGS())) isa Any # test exception not thrown
@@ -205,12 +205,12 @@ end
     p = [1.0]
     prob = OptimizationProblem(objective, x0, p)
-    cache = Optimization.init(prob, Optim.NelderMead())
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.init(prob, Optim.NelderMead())
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[1.0] atol=1e-3
-    cache = Optimization.reinit!(cache; p = [2.0])
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.reinit!(cache; p = [2.0])
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[2.0] atol=1e-3
 end
@@ -226,7 +226,7 @@ end
     @test sol isa Any # just test it doesn't throw
     # Test with Fminbox(NelderMead)
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff())
     prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
     sol = solve(prob, Optim.Fminbox(NelderMead()), store_trace = true)
     @test sol isa Any # just test it doesn't throw
diff --git a/lib/OptimizationOptimisers/test/runtests.jl b/lib/OptimizationOptimisers/test/runtests.jl
index d6be11a82..8a5fc0b50 100644
--- a/lib/OptimizationOptimisers/test/runtests.jl
+++ b/lib/OptimizationOptimisers/test/runtests.jl
@@ -9,7 +9,7 @@ using Lux, MLUtils, Random, ComponentArrays, Printf, MLDataDevices
     _p = [1.0, 100.0]
     l1 = rosenbrock(x0, _p)
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
@@ -22,7 +22,7 @@ using Lux, MLUtils, Random, ComponentArrays, Printf, MLDataDevices
     sumfunc(x0, _p) = sum(abs2, (x0 - _p))
     l1 = sumfunc(x0, _p)
-    optprob = OptimizationFunction(sumfunc, Optimization.AutoZygote())
+    optprob = OptimizationFunction(sumfunc, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
@@ -34,7 +34,7 @@ using Lux, MLUtils, Random, ComponentArrays, Printf, MLDataDevices
 @testset "epochs & maxiters" begin
     optprob = SciMLBase.OptimizationFunction(
-        (u, data) -> sum(u) + sum(data), Optimization.AutoZygote())
+        (u, data) -> sum(u) + sum(data), OptimizationBase.AutoZygote())
     prob = SciMLBase.OptimizationProblem(
         optprob, ones(2), MLUtils.DataLoader(ones(2, 2)))
     @test_throws ArgumentError("The number of iterations must be specified with either the epochs or maxiters kwarg. Where maxiters = epochs * length(data).") solve(
@@ -58,14 +58,14 @@ using Lux, MLUtils, Random, ComponentArrays, Printf, MLDataDevices
     prob = OptimizationProblem(
         OptimizationFunction(objective,
-            Optimization.AutoForwardDiff()), x0,
+            OptimizationBase.AutoForwardDiff()), x0,
         p)
-    cache = Optimization.init(prob, Optimisers.Adam(0.1), maxiters = 1000)
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.init(prob, Optimisers.Adam(0.1), maxiters = 1000)
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[1.0] atol=1e-3
-    cache = Optimization.reinit!(cache; p = [2.0])
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.reinit!(cache; p = [2.0])
+    sol = OptimizationBase.solve!(cache)
     @test_broken sol.u≈[2.0] atol=1e-3
 end
@@ -75,7 +75,7 @@ using Lux, MLUtils, Random, ComponentArrays, Printf, MLDataDevices
     _p = [1.0, 100.0]
     l1 = rosenbrock(x0, _p)
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     function callback(state, l)
@@ -116,13 +116,13 @@ end
     optf = OptimizationFunction(loss, AutoZygote())
     prob = OptimizationProblem(optf, ps_ca, data)
-    res = Optimization.solve(prob, Optimisers.Adam(), epochs = 50)
+    res = OptimizationBase.solve(prob, Optimisers.Adam(), epochs = 50)
     @test res.stats.iterations == 50 * length(data)
     @test res.stats.fevals == 50 * length(data)
     @test res.stats.gevals == 50 * length(data)
-    res = Optimization.solve(prob, Optimisers.Adam(), callback = callback, epochs = 100)
+    res = OptimizationBase.solve(prob, Optimisers.Adam(), callback = callback, epochs = 100)
     @test res.objective < 1e-3
@@ -130,7 +130,7 @@ end
     optf = OptimizationFunction(loss, AutoZygote())
     prob = OptimizationProblem(optf, ps_ca, data)
-    res = Optimization.solve(prob, Optimisers.Adam(), callback = callback, epochs = 10000)
+    res = OptimizationBase.solve(prob, Optimisers.Adam(), callback = callback, epochs = 10000)
     @test res.objective < 1e-4
 end
diff --git a/lib/OptimizationPRIMA/test/runtests.jl b/lib/OptimizationPRIMA/test/runtests.jl
index dace6ce6c..cb5ee8d9b 100644
--- a/lib/OptimizationPRIMA/test/runtests.jl
+++ b/lib/OptimizationPRIMA/test/runtests.jl
@@ -8,15 +8,15 @@ using Test
     l1 = rosenbrock(x0, _p)
     prob = OptimizationProblem(rosenbrock, x0, _p)
-    sol = Optimization.solve(prob, UOBYQA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, UOBYQA(), maxiters = 1000)
     @test 10 * sol.objective < l1
-    sol = Optimization.solve(prob, NEWUOA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, NEWUOA(), maxiters = 1000)
     @test 10 * sol.objective < l1
-    sol = Optimization.solve(prob, BOBYQA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, BOBYQA(), maxiters = 1000)
     @test 10 * sol.objective < l1
-    sol = Optimization.solve(prob, LINCOA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, LINCOA(), maxiters = 1000)
     @test 10 * sol.objective < l1
-    @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob,
+    @test_throws SciMLBase.IncompatibleOptimizerError OptimizationBase.solve(prob,
         COBYLA(), maxiters = 1000)
@@ -25,7 +25,7 @@ using Test
     end
     optprob = OptimizationFunction(rosenbrock, AutoForwardDiff(), cons = con2_c)
     prob = OptimizationProblem(optprob, x0, _p, lcons = [1, -100], ucons = [1, 100])
-    sol = Optimization.solve(prob, COBYLA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, COBYLA(), maxiters = 1000)
     @test sol.objective < l1
     function con2_c(res, x, p)
@@ -33,11 +33,11 @@ using Test
     end
     optprob = OptimizationFunction(rosenbrock, AutoForwardDiff(), cons = con2_c)
     prob = OptimizationProblem(optprob, x0, _p, lcons = [1], ucons = [1])
-    sol = Optimization.solve(prob, COBYLA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, COBYLA(), maxiters = 1000)
    @test sol.objective < l1
     prob = OptimizationProblem(optprob, x0, _p, lcons = [1], ucons = [5])
-    sol = Optimization.solve(prob, COBYLA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, COBYLA(), maxiters = 1000)
     @test sol.objective < l1
     function con2_c(res, x, p)
@@ -45,6 +45,6 @@ using Test
     end
     optprob = OptimizationFunction(rosenbrock, AutoModelingToolkit(), cons = con2_c)
     prob = OptimizationProblem(optprob, x0, _p, lcons = [10], ucons = [50])
-    sol = Optimization.solve(prob, COBYLA(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, COBYLA(), maxiters = 1000)
     @test 10 * sol.objective < l1
 end
diff --git a/lib/OptimizationPolyalgorithms/test/runtests.jl b/lib/OptimizationPolyalgorithms/test/runtests.jl
index 75abe52d0..d93c831f4 100644
--- a/lib/OptimizationPolyalgorithms/test/runtests.jl
+++ b/lib/OptimizationPolyalgorithms/test/runtests.jl
@@ -7,8 +7,8 @@ using Test
     _p = [1.0, 100.0]
     l1 = rosenbrock(x0, _p)
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff())
     prob = OptimizationProblem(optprob, x0, _p)
-    sol = Optimization.solve(prob, PolyOpt(), maxiters = 1000)
+    sol = OptimizationBase.solve(prob, PolyOpt(), maxiters = 1000)
     @test 10 * sol.objective < l1
 end
diff --git a/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl b/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl
index 2503e5636..312732b5f 100644
--- a/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl
+++ b/lib/OptimizationPyCMA/src/OptimizationPyCMA.jl
@@ -31,7 +31,7 @@ SciMLBase.requireshessian(::PyCMAOpt) = false
 SciMLBase.requiresconsjac(::PyCMAOpt) = false
 SciMLBase.requiresconshess(::PyCMAOpt) = false
-# wrapping Optimization.jl args into a python dict as arguments to PyCMA opts
+# wrapping OptimizationBase.jl args into a python dict as arguments to PyCMA opts
 function __map_optimizer_args(prob::OptimizationBase.OptimizationCache, opt::PyCMAOpt;
         maxiters::Union{Number, Nothing} = nothing,
         maxtime::Union{Number, Nothing} = nothing,
@@ -42,15 +42,15 @@ function __map_optimizer_args(prob::OptimizationBase.OptimizationCache, opt::PyC
         @warn "common reltol is currently not used by $(opt)"
     end
-    # Converting Optimization.jl args to PyCMA opts
-    # Optimization.jl kwargs will overwrite PyCMA kwargs supplied to solve()
+    # Converting OptimizationBase.jl args to PyCMA opts
+    # OptimizationBase.jl kwargs will overwrite PyCMA kwargs supplied to solve()
     mapped_args = Dict{String, Any}()
     # adding PyCMA args
     merge!(mapped_args, Dict(string(k) => v for (k, v) in PyCMAargs))
-    # mapping Optimization.jl args
+    # mapping OptimizationBase.jl args
     mapped_args["bounds"] = (prob.lb, prob.ub)
     if !("verbose" ∈ keys(mapped_args))
@@ -149,7 +149,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{
     maxiters = OptimizationBase._check_and_convert_maxiters(cache.solver_args.maxiters)
     maxtime = OptimizationBase._check_and_convert_maxtime(cache.solver_args.maxtime)
-    # converting the Optimization.jl Args to PyCMA format
+    # converting the OptimizationBase.jl Args to PyCMA format
     opt_args = __map_optimizer_args(cache, cache.opt; cache.solver_args...,
         maxiters = maxiters, maxtime = maxtime)
diff --git a/lib/OptimizationSciPy/src/OptimizationSciPy.jl b/lib/OptimizationSciPy/src/OptimizationSciPy.jl
index e1feb2d3a..712958b8d 100644
--- a/lib/OptimizationSciPy/src/OptimizationSciPy.jl
+++ b/lib/OptimizationSciPy/src/OptimizationSciPy.jl
@@ -1,4 +1,4 @@
-#This file lets you drive SciPy optimizers through SciML's Optimization.jl API.
+#This file lets you drive SciPy optimizers through SciML's OptimizationBase.jl API.
 module OptimizationSciPy
 using Reexport
diff --git a/lib/OptimizationSciPy/test/runtests.jl b/lib/OptimizationSciPy/test/runtests.jl
index 0dcd18365..9138def9d 100644
--- a/lib/OptimizationSciPy/test/runtests.jl
+++ b/lib/OptimizationSciPy/test/runtests.jl
@@ -1,6 +1,6 @@
 using OptimizationSciPy, Optimization, Zygote, ReverseDiff, ForwardDiff
 using Test, Random
-using Optimization.SciMLBase: ReturnCode, NonlinearLeastSquaresProblem
+using SciMLBase: ReturnCode, NonlinearLeastSquaresProblem
 using PythonCall
 function rosenbrock(x, p)
@@ -21,15 +21,15 @@ end
 l1 = rosenbrock(x0, _p)
 @testset "MaxSense" begin
-    optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), Optimization.AutoZygote())
-    prob = OptimizationProblem(optprob, x0, _p; sense = Optimization.MaxSense)
+    optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote())
+    prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense)
     sol = solve(prob, ScipyNelderMead())
     @test sol.retcode == ReturnCode.Success
     @test 10 * sol.objective < l1
 end
 @testset "unconstrained with gradient" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, ScipyBFGS())
     @test sol.retcode == ReturnCode.Success
@@ -40,7 +40,7 @@ end
 end
 @testset "bounded" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
     sol = solve(prob, ScipyLBFGSB())
     @test sol.retcode == ReturnCode.Success
@@ -48,7 +48,7 @@ end
 end
 @testset "global optimization" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
     sol = solve(prob, ScipyDifferentialEvolution(), maxiters = 100)
     @test sol.retcode == ReturnCode.Success
@@ -73,7 +73,7 @@ end
 end
 @testset "various methods" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, ScipyNelderMead())
     @test sol.retcode == ReturnCode.Success
@@ -90,7 +90,7 @@ end
 end
 @testset "with Hessian" begin
-    optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); hess = rosenbrock_hess)
+    optf = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); hess = rosenbrock_hess)
     prob = OptimizationProblem(optf, x0, _p)
     sol = solve(prob, ScipyNewtonCG(), maxiters = 200)
     @test sol.retcode == ReturnCode.Success
@@ -98,7 +98,7 @@ end
 end
 @testset "bounded optimization" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p, lb = [-1.0, -1.0], ub = [0.8, 0.8])
     sol = solve(prob, ScipyLBFGSB())
     @test sol.retcode == ReturnCode.Success
@@ -109,7 +109,7 @@ end
 end
 @testset "trust region with Hessian" begin
-    optf_hess = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); hess = rosenbrock_hess)
+    optf_hess = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); hess = rosenbrock_hess)
     x0_trust = [0.5, 0.5]
     prob = OptimizationProblem(optf_hess, x0_trust, _p)
     for method in
@@ -162,7 +162,7 @@ end
     prob_multidim = OptimizationProblem(rosenbrock, x0, _p)
     @test_throws ArgumentError solve(prob_multidim, ScipyMinimizeScalar("brent"))
     @test_throws ArgumentError solve(prob, ScipyBounded())
-    optf_grad = OptimizationFunction(f_scalar, Optimization.AutoZygote())
+    optf_grad = OptimizationFunction(f_scalar, OptimizationBase.AutoZygote())
     prob_grad = OptimizationProblem(optf_grad, x0_scalar, p_scalar)
     sol = solve(prob_grad, ScipyBrent())
     @test sol.retcode == ReturnCode.Success
@@ -276,14 +276,14 @@ end
     objective(x, p) = (p[1] - x[1])^2
     x0 = zeros(1)
     p = [1.0]
-    optf = OptimizationFunction(objective, Optimization.AutoZygote())
+    optf = OptimizationFunction(objective, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optf, x0, p)
-    cache = Optimization.init(prob, ScipyBFGS())
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.init(prob, ScipyBFGS())
+    sol = OptimizationBase.solve!(cache)
     @test sol.retcode == ReturnCode.Success
     @test sol.u ≈ [1.0] atol=1e-3
-    cache = Optimization.reinit!(cache; p = [2.0])
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.reinit!(cache; p = [2.0])
+    sol = OptimizationBase.solve!(cache)
     @test sol.u ≈ [2.0] atol=1e-3
 end
@@ -291,7 +291,7 @@ end
     cbstopping = function (state, loss)
         return state.objective < 0.7
     end
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     @test_throws ErrorException solve(prob, ScipyBFGS(), callback = cbstopping)
 end
@@ -304,7 +304,7 @@ end
         res[1, 2] = 2*x[2]
     end
     x0 = zeros(2)
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons, cons_j = cons_j)
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = cons, cons_j = cons_j)
     prob_cobyla = OptimizationProblem(optprob, x0, _p, lcons = [-1e-6], ucons = [1e-6])
     sol = solve(prob_cobyla, ScipyCOBYLA(), maxiters = 10000)
     @test sol.retcode == ReturnCode.Success
@@ -322,7 +322,7 @@ end
     function con2_c(res, x, p)
         res .= [x[1]^2 + x[2]^2 - 1.0, x[2] * sin(x[1]) - x[1] - 2.0]
     end
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = con2_c)
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = con2_c)
     Random.seed!(456)
     prob = OptimizationProblem(
         optprob, rand(2), _p, lcons = [0.0, -Inf], ucons = [0.0, 0.0])
@@ -338,7 +338,7 @@ end
 end
 @testset "method-specific options" begin
-    simple_optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    simple_optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     unconstrained_prob = OptimizationProblem(
         simple_optprob, x0, _p, lb = [-1.0, -1.0], ub = [1.0, 1.0])
     sol = solve(unconstrained_prob, ScipyDifferentialEvolution(),
@@ -370,9 +370,9 @@ end
 end
 @testset "AutoDiff backends" begin
-    for adtype in [Optimization.AutoZygote(),
-        Optimization.AutoReverseDiff(),
-        Optimization.AutoForwardDiff()]
+    for adtype in [OptimizationBase.AutoZygote(),
+        OptimizationBase.AutoReverseDiff(),
+        OptimizationBase.AutoForwardDiff()]
         optf = OptimizationFunction(rosenbrock, adtype)
         prob = OptimizationProblem(optf, x0, _p)
         sol = solve(prob, ScipyBFGS())
@@ -382,14 +382,14 @@ end
 end
 @testset "optimization stats" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, ScipyBFGS())
     @test sol.stats.time > 0
 end
 @testset "original result access" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, ScipyBFGS())
     @test !isnothing(sol.original)
@@ -398,7 +398,7 @@ end
 end
 @testset "tolerance settings" begin
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     sol = solve(prob, ScipyNelderMead(), abstol = 1e-8)
     @test sol.objective < 1e-7
@@ -408,7 +408,7 @@ end
 @testset "constraint satisfaction" begin
     cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2 - 1.0]
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff(); cons = cons)
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); cons = cons)
     prob = OptimizationProblem(optprob, [0.5, 0.5], _p, lcons = [-0.01], ucons = [0.01])
     sol = solve(prob, ScipySLSQP())
     @test sol.retcode == ReturnCode.Success
@@ -433,7 +433,7 @@ end
     sol = solve(prob, ScipyBFGS())
     @test sol.retcode == ReturnCode.Success
     @test sol.u ≈ [3.0] atol=1e-6
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0, _p)
     @test_throws SciMLBase.IncompatibleOptimizerError solve(prob, ScipyDifferentialEvolution())
     @test_throws SciMLBase.IncompatibleOptimizerError solve(prob, ScipyDirect())
@@ -446,7 +446,7 @@ end
 @testset "Type stability" begin
     x0_f32 = Float32[0.0, 0.0]
     p_f32 = Float32[1.0, 100.0]
-    optprob = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
+    optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote())
     prob = OptimizationProblem(optprob, x0_f32, p_f32)
     sol = solve(prob, ScipyBFGS())
     @test sol.retcode == ReturnCode.Success
diff --git a/lib/OptimizationSophia/test/runtests.jl b/lib/OptimizationSophia/test/runtests.jl
index 63f17408d..efd24f863 100644
--- a/lib/OptimizationSophia/test/runtests.jl
+++ b/lib/OptimizationSophia/test/runtests.jl
@@ -1,5 +1,5 @@
 using OptimizationBase, Optimization
-using OptimizationBase.SciMLBase: solve, OptimizationFunction, OptimizationProblem
+using SciMLBase: solve, OptimizationFunction, OptimizationProblem
 using OptimizationSophia
 using Lux, MLUtils, Random, ComponentArrays
 using SciMLSensitivity
@@ -75,4 +75,4 @@ optf_sophia = OptimizationFunction(rosenbrock_comp, AutoEnzyme())
 prob_sophia = OptimizationProblem(optf_sophia, x0_comp)
 res_sophia = solve(prob_sophia, OptimizationSophia.Sophia(η=0.01, k=5), maxiters = 50)
 @test res_sophia.objective < rosenbrock_comp(x0_comp) # Test optimization progress
-@test res_sophia.retcode == Optimization.SciMLBase.ReturnCode.Success
+@test res_sophia.retcode == SciMLBase.ReturnCode.Success
diff --git a/lib/OptimizationSpeedMapping/test/runtests.jl b/lib/OptimizationSpeedMapping/test/runtests.jl
index 33cb82561..12e0f03c9 100644
--- a/lib/OptimizationSpeedMapping/test/runtests.jl
+++ b/lib/OptimizationSpeedMapping/test/runtests.jl
@@ -6,7 +6,7 @@ using Test
     x0 = zeros(2)
     _p = [1.0, 100.0]
     l1 = rosenbrock(x0, _p)
-    f = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+    f = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff())
     prob = OptimizationProblem(f, x0, _p)
     sol = solve(prob, SpeedMappingOpt())
     @test 10 * sol.objective < l1
@@ -30,12 +30,12 @@ using Test
     p = [1.0]
     prob = OptimizationProblem(objective, x0, p)
-    cache = Optimization.init(prob, SpeedMappingOpt())
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.init(prob, SpeedMappingOpt())
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[1.0] atol=1e-3
-    cache = Optimization.reinit!(cache; p = [2.0])
-    sol = Optimization.solve!(cache)
+    cache = OptimizationBase.reinit!(cache; p = [2.0])
+    sol = OptimizationBase.solve!(cache)
     @test sol.u≈[2.0] atol=1e-3
 end
 end

From a7d2429322573b723630691bfe2147302a7c4c47 Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Fri, 3 Oct 2025 11:42:48 +0200
Subject: [PATCH 06/17] added sources

---
 lib/OptimizationAuglag/Project.toml                 | 5 ++++-
 lib/OptimizationBBO/Project.toml                    | 5 ++++-
 lib/OptimizationBase/Project.toml                   | 2 +-
 lib/OptimizationCMAEvolutionStrategy/Project.toml   | 5 ++++-
 lib/OptimizationEvolutionary/Project.toml           | 5 ++++-
 lib/OptimizationGCMAES/Project.toml                 | 5 ++++-
 lib/OptimizationIpopt/Project.toml                  | 5 ++++-
 lib/OptimizationLBFGSB/Project.toml                 | 5 ++++-
 lib/OptimizationMOI/Project.toml                    | 5 ++++-
 lib/OptimizationManopt/Project.toml                 | 5 ++++-
 lib/OptimizationMetaheuristics/Project.toml         | 5 ++++-
 lib/OptimizationMultistartOptimization/Project.toml | 5 ++++-
 lib/OptimizationNLPModels/Project.toml              | 5 ++++-
 lib/OptimizationNLopt/Project.toml                  | 5 ++++-
 lib/OptimizationNOMAD/Project.toml                  | 5 ++++-
 lib/OptimizationODE/Project.toml                    | 5 ++++-
 lib/OptimizationOptimJL/Project.toml                | 5 ++++-
 lib/OptimizationOptimisers/Project.toml             | 5 ++++-
 lib/OptimizationPRIMA/Project.toml                  | 5 ++++-
 lib/OptimizationPolyalgorithms/Project.toml         | 5 ++++-
 lib/OptimizationPyCMA/Project.toml                  | 5 ++++-
 lib/OptimizationQuadDIRECT/Project.toml             | 5 ++++-
 lib/OptimizationSciPy/Project.toml                  | 5 ++++-
 lib/OptimizationSophia/Project.toml                 | 5 ++++-
 lib/OptimizationSpeedMapping/Project.toml           | 5 ++++-
 25 files changed, 97 insertions(+), 25 deletions(-)

diff --git a/lib/OptimizationAuglag/Project.toml b/lib/OptimizationAuglag/Project.toml
index 341ee27e2..6bf05b4cf 100644
--- a/lib/OptimizationAuglag/Project.toml
+++ b/lib/OptimizationAuglag/Project.toml
@@ -3,6 +3,9 @@ uuid = "2ea93f80-9333-43a1-a68d-1f53b957a421"
 authors = ["paramthakkar123 "]
 version = "1.0.0"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 ForwardDiff = "1.0.1"
-OptimizationBase = "2.10.0"
+OptimizationBase = "2.13"
 MLUtils = "0.4.8"
 OptimizationOptimisers = "0.3.8"
 Test = "1.10.0"
diff --git a/lib/OptimizationBBO/Project.toml b/lib/OptimizationBBO/Project.toml
index 42b148cf0..118d3f549 100644
--- a/lib/OptimizationBBO/Project.toml
+++ b/lib/OptimizationBBO/Project.toml
@@ -3,6 +3,9 @@ uuid = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.4.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209"
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
 BlackBoxOptim = "0.6"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationBase/Project.toml b/lib/OptimizationBase/Project.toml
index f2c165656..7f3bd3da6 100644
--- a/lib/OptimizationBase/Project.toml
+++ b/lib/OptimizationBase/Project.toml
@@ -1,7 +1,7 @@
 name = "OptimizationBase"
 uuid = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 authors = ["Vaibhav Dixit and contributors"]
-version = "2.12.0"
+version = "2.13.0"
 
 [deps]
 ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
diff --git a/lib/OptimizationCMAEvolutionStrategy/Project.toml b/lib/OptimizationCMAEvolutionStrategy/Project.toml
index 6633beb3e..f9e1d198a 100644
--- a/lib/OptimizationCMAEvolutionStrategy/Project.toml
+++ b/lib/OptimizationCMAEvolutionStrategy/Project.toml
@@ -3,6 +3,9 @@ uuid = "bd407f91-200f-4536-9381-e4ba712f53f8"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 CMAEvolutionStrategy = "8d3b24bd-414e-49e0-94fb-163cc3a3e411"
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 CMAEvolutionStrategy = "0.2"
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationEvolutionary/Project.toml b/lib/OptimizationEvolutionary/Project.toml
index c86f60289..1bc181d56 100644
--- a/lib/OptimizationEvolutionary/Project.toml
+++ b/lib/OptimizationEvolutionary/Project.toml
@@ -3,6 +3,9 @@ uuid = "cb963754-43f6-435e-8d4b-99009ff27753"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.4.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6"
@@ -15,7 +18,7 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 Evolutionary = "0.11"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationGCMAES/Project.toml b/lib/OptimizationGCMAES/Project.toml
index 90ec0a797..9b71c90c4 100644
--- a/lib/OptimizationGCMAES/Project.toml
+++ b/lib/OptimizationGCMAES/Project.toml
@@ -3,6 +3,9 @@ uuid = "6f0a0517-dbc2-4a7a-8a20-99ae7f27e911"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
 GCMAES = "0.1"
diff --git a/lib/OptimizationIpopt/Project.toml b/lib/OptimizationIpopt/Project.toml
index fbbbeb66d..a73e001fa 100644
--- a/lib/OptimizationIpopt/Project.toml
+++ b/lib/OptimizationIpopt/Project.toml
@@ -3,6 +3,9 @@ uuid = "43fad042-7963-4b32-ab19-e2a4f9a67124"
 authors = ["Sebastian Micluța-Câmpeanu and contributors"]
 version = "0.2.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -15,7 +18,7 @@ SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5"
 Ipopt = "1.10.3"
 LinearAlgebra = "1.10.0"
 ModelingToolkit = "10.23"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.90.0"
 SparseArrays = "1.10.0"
 SymbolicIndexingInterface = "0.3.40"
diff --git a/lib/OptimizationLBFGSB/Project.toml b/lib/OptimizationLBFGSB/Project.toml
index c27054335..e6bb6142c 100644
--- a/lib/OptimizationLBFGSB/Project.toml
+++ b/lib/OptimizationLBFGSB/Project.toml
@@ -3,6 +3,9 @@ uuid = "22f7324a-a79d-40f2-bebe-3af60c77bd15"
 authors = ["paramthakkar123 "]
 version = "1.0.0"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
 LBFGSB = "5be7bae1-8223-5378-bac3-9e7378a2f6e6"
@@ -20,7 +23,7 @@ DocStringExtensions = "0.9.5"
 ForwardDiff = "1.0.1"
 LBFGSB = "0.4.1"
 MLUtils = "0.4.8"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Zygote = "0.7.10"
 julia = "1.10"
diff --git a/lib/OptimizationMOI/Project.toml b/lib/OptimizationMOI/Project.toml
index dc97f8713..7918a1c55 100644
--- a/lib/OptimizationMOI/Project.toml
+++ b/lib/OptimizationMOI/Project.toml
@@ -3,6 +3,9 @@ uuid = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.5.7"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
@@ -28,7 +31,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 [compat]
 HiGHS = "1"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 Test = "1.6"
 Symbolics = "6"
 AmplNLWriter = "1"
diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml
index a9d23bd1b..672db60b6 100644
--- a/lib/OptimizationManopt/Project.toml
+++ b/lib/OptimizationManopt/Project.toml
@@ -3,6 +3,9 @@ uuid = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6"
 authors = ["Mateusz Baran ", "Ronny Bergmann "]
 version = "1.0.0"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 Manopt = "0fc0a36d-df90-57f3-8f93-d78a9fc72bb5"
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
@@ -27,7 +30,7 @@ Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
 [compat]
 julia = "1.10"
 Manopt = "0.5"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 LinearAlgebra = "1.10"
 ManifoldsBase = "1"
 ManifoldDiff = "0.4"
diff --git a/lib/OptimizationMetaheuristics/Project.toml b/lib/OptimizationMetaheuristics/Project.toml
index c3cc36998..a483e7f2c 100644
--- a/lib/OptimizationMetaheuristics/Project.toml
+++ b/lib/OptimizationMetaheuristics/Project.toml
@@ -3,6 +3,9 @@ uuid = "3aafef2f-86ae-4776-b337-85a36adf0b55"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898"
@@ -15,7 +18,7 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 Metaheuristics = "3"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationMultistartOptimization/Project.toml b/lib/OptimizationMultistartOptimization/Project.toml
index 46455c097..08b7020b3 100644
--- a/lib/OptimizationMultistartOptimization/Project.toml
+++ b/lib/OptimizationMultistartOptimization/Project.toml
@@ -3,6 +3,9 @@ uuid = "e4316d97-8bbb-4fd3-a7d8-3851d2a72823"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 MultistartOptimization = "3933049c-43be-478e-a8bb-6e0f7fd53575"
@@ -18,7 +21,7 @@ OptimizationNLopt = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 MultistartOptimization = "0.2, 0.3"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index b89c1774c..da186aa5a 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -3,6 +3,9 @@ uuid = "064b21be-54cf-11ef-1646-cdfee32b588f"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.0.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
@@ -23,7 +26,7 @@ OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
 julia = "1.10"
 NLPModels = "0.21"
 ADTypes = "1.7"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationNLopt/Project.toml b/lib/OptimizationNLopt/Project.toml
index 4b8611573..942f10ab5 100644
--- a/lib/OptimizationNLopt/Project.toml
+++ b/lib/OptimizationNLopt/Project.toml
@@ -3,6 +3,9 @@ uuid = "4e6fcdb7-1186-4e1f-a706-475e75c168bb"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.4"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -17,7 +20,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 NLopt = "1.1"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationNOMAD/Project.toml b/lib/OptimizationNOMAD/Project.toml
index 0d7e17538..854f31e81 100644
--- a/lib/OptimizationNOMAD/Project.toml
+++ b/lib/OptimizationNOMAD/Project.toml
@@ -3,6 +3,9 @@ uuid = "2cab0595-8222-4775-b714-9828e6a9e01b"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 NOMAD = "02130f1c-4665-5b79-af82-ff1385104aa0"
@@ -14,7 +17,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 NOMAD = "2.4.1"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationODE/Project.toml b/lib/OptimizationODE/Project.toml
index 651663ea5..4ff11893f 100644
--- a/lib/OptimizationODE/Project.toml
+++ b/lib/OptimizationODE/Project.toml
@@ -3,6 +3,9 @@ uuid = "dfa73e59-e644-4d8a-bf84-188d7ecb34e4"
 authors = ["Paras Puneet Singh "]
 version = "0.1.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
@@ -17,7 +20,7 @@ NonlinearSolve = "8913a72c-1f9b-4ce2-8d82-65094dcecaec"
 [compat]
 DiffEqBase = "6.190"
 ForwardDiff = "0.10, 1"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 OrdinaryDiffEq = "6.70"
 NonlinearSolve = "4"
 Reexport = "1"
diff --git a/lib/OptimizationOptimJL/Project.toml b/lib/OptimizationOptimJL/Project.toml
index a547dac62..6c80460dc 100644
--- a/lib/OptimizationOptimJL/Project.toml
+++ b/lib/OptimizationOptimJL/Project.toml
@@ -3,6 +3,9 @@ uuid = "36348300-93cb-4f02-beb5-3c3902f8871e"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.4.5"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
@@ -22,7 +25,7 @@ ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
 [compat]
 julia = "1.10"
 PrecompileTools = "1.2"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SparseArrays = "1.6"
 Optim = "1"
 Reexport = "1.2"
diff --git a/lib/OptimizationOptimisers/Project.toml b/lib/OptimizationOptimisers/Project.toml
index 1c9c67061..d672627d3 100644
--- a/lib/OptimizationOptimisers/Project.toml
+++ b/lib/OptimizationOptimisers/Project.toml
@@ -3,6 +3,9 @@ uuid = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.11"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c"
@@ -23,7 +26,7 @@ Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 ProgressLogging = "0.1"
 SciMLBase = "2.58"
 Optimisers = "0.2, 0.3, 0.4"
diff --git a/lib/OptimizationPRIMA/Project.toml b/lib/OptimizationPRIMA/Project.toml
index 7eaa036e7..48aec8e71 100644
--- a/lib/OptimizationPRIMA/Project.toml
+++ b/lib/OptimizationPRIMA/Project.toml
@@ -3,6 +3,9 @@ uuid = "72f8369c-a2ea-4298-9126-56167ce9cbc2"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 PRIMA = "0a7d04aa-8ac2-47b3-b7a7-9dbd6ad661ed"
@@ -17,7 +20,7 @@ ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 PRIMA = "0.2.0"
 SciMLBase = "2.58"
 Reexport = "1"
diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml
index 7ef8147d3..d7bc40d68 100644
--- a/lib/OptimizationPolyalgorithms/Project.toml
+++ b/lib/OptimizationPolyalgorithms/Project.toml
@@ -3,6 +3,9 @@ uuid = "500b13db-7e66-49ce-bda4-eed966be6282"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
@@ -16,7 +19,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 OptimizationOptimisers = "0.3"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationPyCMA/Project.toml b/lib/OptimizationPyCMA/Project.toml
index 2fb54bf9f..f7ea5bfeb 100644
--- a/lib/OptimizationPyCMA/Project.toml
+++ b/lib/OptimizationPyCMA/Project.toml
@@ -3,6 +3,9 @@ uuid = "fb0822aa-1fe5-41d8-99a6-e7bf6c238d3b"
 authors = ["Maximilian Pochapski <67759684+mxpoch@users.noreply.github.com>"]
 version = "1.1.0"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab"
@@ -13,7 +16,7 @@ PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 CondaPkg = "0.2"
 Test = "1.10"
 SciMLBase = "2.58"
diff --git a/lib/OptimizationQuadDIRECT/Project.toml b/lib/OptimizationQuadDIRECT/Project.toml
index dd3520a15..faa6da896 100644
--- a/lib/OptimizationQuadDIRECT/Project.toml
+++ b/lib/OptimizationQuadDIRECT/Project.toml
@@ -3,6 +3,9 @@ uuid = "842ac81e-713d-465f-80f7-84eddaced298"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.3.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 QuadDIRECT = "dae52e8d-d666-5120-a592-9e15c33b8d7a"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
diff --git a/lib/OptimizationSciPy/Project.toml b/lib/OptimizationSciPy/Project.toml
index 7225ac632..6b34960a5 100644
--- a/lib/OptimizationSciPy/Project.toml
+++ b/lib/OptimizationSciPy/Project.toml
@@ -3,6 +3,9 @@ uuid = "cce07bd8-c79b-4b00-aee8-8db9cce22837"
 authors = ["Aditya Pandey and contributors"]
 version = "0.4.2"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
@@ -19,7 +22,7 @@ ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SciMLBase = "2.58"
 Reexport = "1.2"
 PythonCall = "0.9"
diff --git a/lib/OptimizationSophia/Project.toml b/lib/OptimizationSophia/Project.toml
index 97d877f89..033f4b28e 100644
--- a/lib/OptimizationSophia/Project.toml
+++ b/lib/OptimizationSophia/Project.toml
@@ -3,6 +3,9 @@ uuid = "892fee11-dca1-40d6-b698-84ba0d87399a"
 authors = ["paramthakkar123 "]
 version = "1.0.0"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -21,7 +24,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 ComponentArrays = "0.15.29"
 Lux = "1.16.0"
 MLUtils = "0.4.8"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 OrdinaryDiffEqTsit5 = "1.2.0"
 Random = "1.10.0"
 SciMLBase = "2.58"
diff --git a/lib/OptimizationSpeedMapping/Project.toml b/lib/OptimizationSpeedMapping/Project.toml
index 235e5e2ef..bf785b9e8 100644
--- a/lib/OptimizationSpeedMapping/Project.toml
+++ b/lib/OptimizationSpeedMapping/Project.toml
@@ -3,6 +3,9 @@ uuid = "3d669222-0d7d-4eb9-8a9f-d8528b0d9b91"
 authors = ["Vaibhav Dixit and contributors"]
 version = "0.2.1"
 
+[sources]
+OptimizationBase = {path = "../OptimizationBase"}
+
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
 SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412"
@@ -15,7 +18,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 [compat]
 julia = "1.10"
-OptimizationBase = "2.10"
+OptimizationBase = "2.13"
 SpeedMapping = "0.3"
 SciMLBase = "2.58"
 Reexport = "1.2"

From 53e5b889c10a71960bf76c4489d1a819e911712e Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Fri, 3 Oct 2025 16:35:41 +0200
Subject: [PATCH 07/17] missing sources

---
 Project.toml                        | 3 +++
 lib/OptimizationAuglag/Project.toml | 1 +
 2 files changed, 4 insertions(+)

diff --git a/Project.toml b/Project.toml
index d70392538..4704275b2 100644
--- a/Project.toml
+++ b/Project.toml
@@ -20,6 +20,9 @@ TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
 
 [sources]
 OptimizationBase = {path = "lib/OptimizationBase"}
+OptimizationMOI = {path = "lib/OptimizationMOI"}
+OptimizationOptimJL = {path = "lib/OptimizationOptimJL"}
+OptimizationOptimisers = {path = "lib/OptimizationOptimisers"}
 
 [compat]
 ADTypes = "1.2"
diff --git a/lib/OptimizationAuglag/Project.toml b/lib/OptimizationAuglag/Project.toml
index 6bf05b4cf..c2e04244f 100644
--- a/lib/OptimizationAuglag/Project.toml
+++ b/lib/OptimizationAuglag/Project.toml
@@ -5,6 +5,7 @@ version = "1.0.0"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
+OptimizationOptimisers = {path = "../OptimizationOptimisers"}
 
 [deps]
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"

From 2b76183e1a451ada3b88bc8fdbb247a6f60bd7ce Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Sun, 5 Oct 2025 12:42:07 +0000
Subject: [PATCH 08/17] add missing sources

---
 lib/OptimizationMultistartOptimization/Project.toml | 1 +
 lib/OptimizationNLPModels/Project.toml              | 2 ++
 lib/OptimizationPolyalgorithms/Project.toml         | 2 ++
 3 files changed, 5 insertions(+)

diff --git a/lib/OptimizationMultistartOptimization/Project.toml b/lib/OptimizationMultistartOptimization/Project.toml
index 08b7020b3..a7069b6e1 100644
--- a/lib/OptimizationMultistartOptimization/Project.toml
+++ b/lib/OptimizationMultistartOptimization/Project.toml
@@ -5,6 +5,7 @@ version = "0.3.1"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
+OptimizationNLopt = {path = "../OptimizationNLopt"}
 
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index da186aa5a..8bf306cdf 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -5,6 +5,8 @@ version = "0.0.2"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
+OptimizationMOI = {path = "../OptimizationMOI"}
+OptimizationOptimJL = {path = "../OptimizationOptimJL"}
 
 [deps]
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml
index d7bc40d68..be00945c5 100644
--- a/lib/OptimizationPolyalgorithms/Project.toml
+++ b/lib/OptimizationPolyalgorithms/Project.toml
@@ -5,6 +5,8 @@ version = "0.3.1"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
+OptimizationOptimisers = {path = "../OptimizationOptimisers"}
+OptimizationOptimJL = {path = "../OptimizationOptimJL"}
 
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"

From 71aaec9871a9276eb7b9983888f90c807cfa31cf Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 07:16:57 +0000
Subject: [PATCH 09/17] Update lib/OptimizationMOI/src/moi.jl

---
 lib/OptimizationMOI/src/moi.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/OptimizationMOI/src/moi.jl b/lib/OptimizationMOI/src/moi.jl
index 8b2cf5f0f..829d3277c 100644
--- a/lib/OptimizationMOI/src/moi.jl
+++ b/lib/OptimizationMOI/src/moi.jl
@@ -1,4 +1,4 @@
-struct MOIOptimizationBase.OptimizationCache{F <: OptimizationFunction, RC, LB, UB, I, S, EX,
+struct MOIOptimizationCache{F <: OptimizationFunction, RC, LB, UB, I, S, EX,
                            CEX, O} <: SciMLBase.AbstractOptimizationCache
     f::F
     reinit_cache::RC

From a5e5ec3bf8377d78990bc1c9857bf3ab3fd8b57c Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 04:00:16 -0400
Subject: [PATCH 10/17] Update utils.jl

---
 lib/OptimizationBase/src/utils.jl | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/lib/OptimizationBase/src/utils.jl b/lib/OptimizationBase/src/utils.jl
index 36afe4c23..a96fd2286 100644
--- a/lib/OptimizationBase/src/utils.jl
+++ b/lib/OptimizationBase/src/utils.jl
@@ -4,36 +4,6 @@ function get_maxiters(data)
            typemax(Int) : length(data)
 end
 
-maybe_with_logger(f, logger) = logger === nothing ? f() : Logging.with_logger(f, logger)
-
-function default_logger(logger)
-    Logging.min_enabled_level(logger) ≤ ProgressLogging.ProgressLevel && return nothing
-    if Sys.iswindows() || (isdefined(Main, :IJulia) && Main.IJulia.inited)
-        progresslogger = ConsoleProgressMonitor.ProgressLogger()
-    else
-        progresslogger = TerminalLoggers.TerminalLogger()
-    end
-    logger1 = LoggingExtras.EarlyFilteredLogger(progresslogger) do log
-        log.level == ProgressLogging.ProgressLevel
-    end
-    logger2 = LoggingExtras.EarlyFilteredLogger(logger) do log
-        log.level != ProgressLogging.ProgressLevel
-    end
-    LoggingExtras.TeeLogger(logger1, logger2)
-end
-
-macro withprogress(progress, exprs...)
-    quote
-        if $progress
-            $maybe_with_logger($default_logger($Logging.current_logger())) do
-                $ProgressLogging.@withprogress $(exprs...)
-            end
-        else
-            $(exprs[end])
-        end
-    end |> esc
-end
-
 decompose_trace(trace) = trace
 
 function _check_and_convert_maxiters(maxiters)

From a5e5ec3bf8377d78990bc1c9857bf3ab3fd8b57c Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 04:01:01 -0400
Subject: [PATCH 11/17] Update OptimizationOptimisers.jl

---
 lib/OptimizationOptimisers/src/OptimizationOptimisers.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
index 9b8b4f82c..a86b23ac8 100644
--- a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
+++ b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
@@ -95,7 +95,7 @@ function SciMLBase.__solve(cache::OptimizationBase.OptimizationCache{
     gevals = 0
     t0 = time()
     breakall = false
-    OptimizationBase.@withprogress cache.progress name="Training" begin
+    begin
         for epoch in 1:epochs
             if breakall
                 break

From b9d43c2ac97455524de9af0ce7690989ccc02d6b Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 04:25:33 -0400
Subject: [PATCH 12/17] Update utils.jl

---
 test/utils.jl | 44 +-------------------------------------------
 1 file changed, 1 insertion(+), 43 deletions(-)

diff --git a/test/utils.jl b/test/utils.jl
index 27c83d347..8ca47aa7e 100644
--- a/test/utils.jl
+++ b/test/utils.jl
@@ -1,15 +1,10 @@
 using Test
 using Optimization
-using Optimization: get_maxiters, maybe_with_logger, default_logger, @withprogress,
+using Optimization: get_maxiters,
                     decompose_trace, _check_and_convert_maxiters, _check_and_convert_maxtime,
                     deduce_retcode, STOP_REASON_MAP
 using SciMLBase: ReturnCode
-using Logging
-using ProgressLogging
-using LoggingExtras
-using ConsoleProgressMonitor
-using TerminalLoggers
 
 @testset "Utils Tests" begin
     @testset "get_maxiters" begin
@@ -25,43 +20,6 @@ using TerminalLoggers
         end
     end
 
-    @testset "maybe_with_logger" begin
-        # Test with no logger (nothing)
-        result = maybe_with_logger(() -> 42, nothing)
-        @test result == 42
-
-        # Test with logger
-        test_logger = NullLogger()
-        result = maybe_with_logger(() -> 24, test_logger)
-        @test result == 24
-    end
-
-    @testset "default_logger" begin
-        # Test with logger that has progress level enabled
-        progress_logger = ConsoleLogger(stderr, Logging.Debug)
-        result = default_logger(progress_logger)
-        @test result === nothing
-
-        # Test with logger that doesn't have progress level enabled
-        info_logger = ConsoleLogger(stderr, Logging.Info)
-        result = default_logger(info_logger)
-        @test result isa LoggingExtras.TeeLogger
-    end
-
-    @testset "@withprogress macro" begin
-        # Test with progress = false
-        result = @withprogress false begin
-            42
-        end
-        @test result == 42
-
-        # Test with progress = true
-        result = @withprogress true begin
-            24
-        end
-        @test result == 24
-    end
-
     @testset "decompose_trace" begin
         # Test that it returns the input unchanged
         test_trace = [1, 2, 3]

From 051a0f133c2691fea81edfd4eb49fd98184f8947 Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 04:44:59 -0400
Subject: [PATCH 13/17] Update test/utils.jl

---
 test/utils.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/utils.jl b/test/utils.jl
index 8ca47aa7e..ccc05a621 100644
--- a/test/utils.jl
+++ b/test/utils.jl
@@ -1,6 +1,6 @@
 using Test
 using Optimization
-using Optimization: get_maxiters,
+using OptimizationBase: get_maxiters,
                     decompose_trace, _check_and_convert_maxiters, _check_and_convert_maxtime,
                     deduce_retcode, STOP_REASON_MAP

From 83cefb6bc2eff587001dfcfd3efb1fa1d81474f7 Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 09:12:53 +0000
Subject: [PATCH 14/17] fix AD Tests

---
 test/ADtests.jl | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/test/ADtests.jl b/test/ADtests.jl
index 716424645..ab59fc0f5 100644
--- a/test/ADtests.jl
+++ b/test/ADtests.jl
@@ -1,4 +1,5 @@
-using Optimization, OptimizationOptimJL, OptimizationMOI, Ipopt, Test
+using Optimization, OptimizationOptimJL, OptimizationMOI, OptimizationLBFGSB
+using Ipopt, Test
 using ForwardDiff, Zygote, ReverseDiff, FiniteDiff, Tracker, Mooncake
 using Enzyme, Random
@@ -22,7 +23,7 @@ end
     optf = OptimizationFunction(rosenbrock; grad = g!, hess = h!)
     prob = OptimizationProblem(optf, x0)
-    sol = solve(prob, Optimization.LBFGS())
+    sol = solve(prob, OptimizationLBFGSB.LBFGSB())
     @test 10 * sol.objective < l1
     @test sol.retcode == ReturnCode.Success
@@ -66,7 +67,7 @@ end
         end
     end
-    sol = solve(prob, Optimization.LBFGS(), maxiters = 1000)
+    sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000)
     @test 10 * sol.objective < l1
     @test sol.retcode == ReturnCode.Success
 end
@@ -82,7 +83,7 @@ end
     prob = OptimizationProblem(
         optf, x0, lb = [-1.0, -1.0], ub = [1.0, 1.0], lcons = [0.0], ucons = [0.0])
-    sol = solve(prob, Optimization.LBFGS(), maxiters = 1000)
+    sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000)
     @test 10 * sol.objective < l1
     # Requires Hession, which Mooncake doesn't support at the moment.
@@ -107,7 +108,7 @@ end
     prob = OptimizationProblem(optf, x0, lb = [-1.0, -1.0], ub = [1.0, 1.0],
         lcons = [1.0, -2.0], ucons = [1.0, 2.0])
-    sol = solve(prob, Optimization.LBFGS(), maxiters = 1000)
+    sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000)
     @test 10 * sol.objective < l1
     # Requires Hession, which Mooncake doesn't support at the moment.
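Taken together, the test changes in the last few patches swap the old Optimization.* entry points for their OptimizationBase equivalents and replace Optimization.LBFGS() with the solver from the new OptimizationLBFGSB subpackage. For reference, here is a minimal sketch of the post-patch call pattern, assembled from calls that appear verbatim in the updated tests above; it is an illustration under the assumption that OptimizationBase, OptimizationOptimJL, and OptimizationLBFGSB are installed, not part of the patch series itself:

    using OptimizationBase, OptimizationOptimJL, OptimizationLBFGSB
    using SciMLBase: OptimizationFunction, OptimizationProblem, solve

    # The standard 2-D Rosenbrock objective used throughout these test suites.
    rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
    optf = OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff())
    prob = OptimizationProblem(optf, zeros(2), [1.0, 100.0])

    # Direct solve with the L-BFGS-B solver from the new subpackage:
    sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000)

    # The caching interface now lives in OptimizationBase rather than Optimization
    # (Optim is reexported by OptimizationOptimJL):
    cache = OptimizationBase.init(prob, Optim.NelderMead())
    sol = OptimizationBase.solve!(cache)
    cache = OptimizationBase.reinit!(cache; p = [2.0, 100.0])  # reuse the cache with new parameters
    sol = OptimizationBase.solve!(cache)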
From cf8f67edfbab55d5a78154cac212c3c685c815fc Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 09:36:56 +0000
Subject: [PATCH 15/17] add OptimizationLBFGSB as a test dep

---
 Project.toml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 4704275b2..391c08743 100644
--- a/Project.toml
+++ b/Project.toml
@@ -87,6 +87,7 @@ Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
 MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
 ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
+OptimizationLBFGSB = "22f7324a-a79d-40f2-bebe-3af60c77bd15"
 Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
 OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
@@ -106,6 +107,6 @@ Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
 [targets]
 test = ["Aqua", "BenchmarkTools", "Boltz", "ComponentArrays", "DiffEqFlux", "Enzyme", "FiniteDiff", "Flux", "ForwardDiff",
-    "Ipopt", "IterTools", "Lux", "MLUtils", "ModelingToolkit", "Optim", "OptimizationMOI", "OptimizationOptimJL", "OptimizationOptimisers",
+    "Ipopt", "IterTools", "Lux", "MLUtils", "ModelingToolkit", "Optim", "OptimizationLBFGSB", "OptimizationMOI", "OptimizationOptimJL", "OptimizationOptimisers",
     "OrdinaryDiffEqTsit5", "Pkg", "Random", "ReverseDiff", "SafeTestsets", "SciMLSensitivity", "SparseArrays", "Symbolics", "Test", "Tracker", "Zygote", "Mooncake"]

From bf84af69fd7e43d64831915f47c0a3449e796fc2 Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 11:02:11 +0000
Subject: [PATCH 16/17] remove extra sources

---
 Project.toml                                        | 1 +
 lib/OptimizationAuglag/Project.toml                 | 1 -
 lib/OptimizationMultistartOptimization/Project.toml | 1 -
 lib/OptimizationNLPModels/Project.toml              | 2 --
 lib/OptimizationPolyalgorithms/Project.toml         | 2 --
 5 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/Project.toml b/Project.toml
index 391c08743..d7fbbf4ba 100644
--- a/Project.toml
+++ b/Project.toml
@@ -50,6 +50,7 @@ Mooncake = "0.4.138"
 Optim = ">= 1.4.1"
 Optimisers = ">= 0.2.5"
 OptimizationBase = "2"
+OptimizationLBFGSB = "1"
 OptimizationMOI = "0.5"
 OptimizationOptimJL = "0.4"
 OptimizationOptimisers = "0.3"
diff --git a/lib/OptimizationAuglag/Project.toml b/lib/OptimizationAuglag/Project.toml
index c2e04244f..6bf05b4cf 100644
--- a/lib/OptimizationAuglag/Project.toml
+++ b/lib/OptimizationAuglag/Project.toml
@@ -5,7 +5,6 @@ version = "1.0.0"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
-OptimizationOptimisers = {path = "../OptimizationOptimisers"}
 
 [deps]
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
diff --git a/lib/OptimizationMultistartOptimization/Project.toml b/lib/OptimizationMultistartOptimization/Project.toml
index a7069b6e1..08b7020b3 100644
--- a/lib/OptimizationMultistartOptimization/Project.toml
+++ b/lib/OptimizationMultistartOptimization/Project.toml
@@ -5,7 +5,6 @@ version = "0.3.1"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
-OptimizationNLopt = {path = "../OptimizationNLopt"}
 
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"
diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index 8bf306cdf..da186aa5a 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -5,8 +5,6 @@ version = "0.0.2"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
-OptimizationMOI = {path = "../OptimizationMOI"}
-OptimizationOptimJL = {path = "../OptimizationOptimJL"}
 
 [deps]
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml
index be00945c5..d7bc40d68 100644
--- a/lib/OptimizationPolyalgorithms/Project.toml
+++ b/lib/OptimizationPolyalgorithms/Project.toml
@@ -5,8 +5,6 @@ version = "0.3.1"
 
 [sources]
 OptimizationBase = {path = "../OptimizationBase"}
-OptimizationOptimisers = {path = "../OptimizationOptimisers"}
-OptimizationOptimJL = {path = "../OptimizationOptimJL"}
 
 [deps]
 OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb"

From fc3d2f111619aebb31524a6d87567aa1fe4c8cdd Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Mon, 6 Oct 2025 12:45:45 +0000
Subject: [PATCH 17/17] fix new test source

---
 Project.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Project.toml b/Project.toml
index d7fbbf4ba..a4c55c6f7 100644
--- a/Project.toml
+++ b/Project.toml
@@ -20,6 +20,7 @@ TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
 
 [sources]
 OptimizationBase = {path = "lib/OptimizationBase"}
+OptimizationLBFGSB = {path = "lib/OptimizationLBFGSB"}
 OptimizationMOI = {path = "lib/OptimizationMOI"}
 OptimizationOptimJL = {path = "lib/OptimizationOptimJL"}
 OptimizationOptimisers = {path = "lib/OptimizationOptimisers"}
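A closing note on the [sources] sections these patches converge on: a [sources] entry pins a dependency to a local path, so the subpackages in lib/ resolve against the in-tree OptimizationBase (and each other) during development and CI instead of against registered releases. For anyone on a Pkg version without [sources] support, the rough manual equivalent would be something like the following; this is an illustrative sketch only, with paths mirroring the entries above, and is not part of the patch series:

    using Pkg
    Pkg.activate(".")                            # the monorepo root environment
    Pkg.develop(path = "lib/OptimizationBase")   # resolve against the local subpackage
    Pkg.develop(path = "lib/OptimizationLBFGSB")
    Pkg.instantiate()

Keeping the path pins declaratively in [sources], rather than in CI scripts, means a plain Pkg.instantiate() reproduces the same dependency graph locally and on CI.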