From a0744228a0fb3e26e062213e0d3f80cb909583f0 Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Fri, 12 Sep 2025 10:39:58 +0200 Subject: [PATCH 01/12] Starts adapting and reworking to Manopt 0.5, Manifolds 0.10, ManifoldsBase 1.0 --- lib/OptimizationManopt/Project.toml | 8 +- .../src/OptimizationManopt.jl | 163 ++++-------------- 2 files changed, 38 insertions(+), 133 deletions(-) diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml index 8c6d7f149..8aa070a8d 100644 --- a/lib/OptimizationManopt/Project.toml +++ b/lib/OptimizationManopt/Project.toml @@ -14,10 +14,10 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69" [compat] LinearAlgebra = "1.10" -ManifoldDiff = "0.3.10" -Manifolds = "0.9.18" -ManifoldsBase = "0.15.10" -Manopt = "0.4.63" +ManifoldDiff = "0.4" +Manifolds = "0.10" +ManifoldsBase = "1" +Manopt = "0.5" Optimization = "4.4" Reexport = "1.2" julia = "1.10" diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 22244a0fe..80404cf1a 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -65,20 +65,14 @@ function call_manopt_optimizer( loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = Manopt.AllocatingEvaluation(), - stepsize::Stepsize = ArmijoLinesearch(M), kwargs...) - opts = gradient_descent(M, + opts = Manopt.gradient_descent(M, loss, gradF, x0; - return_state = true, - evaluation, - stepsize, - stopping_criterion, - kwargs...) - # we unwrap DebugOptions here + return_state = true, # return the (full, decorated) solver state + kwargs... + ) minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) end @@ -90,13 +84,8 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, opt::NelderMea loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - kwargs...) - opts = NelderMead(M, - loss; - return_state = true, - stopping_criterion, kwargs...) + opts = NelderMead(M, loss; return_state = true, kwargs...) minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) end @@ -109,19 +98,14 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - stepsize::Stepsize = ArmijoLinesearch(M), kwargs...) - opts = conjugate_gradient_descent(M, + opts = Manopt.conjugate_gradient_descent(M, loss, gradF, x0; return_state = true, - evaluation, - stepsize, - stopping_criterion, - kwargs...) + kwargs... 
+ ) # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) @@ -135,25 +119,10 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), population_size::Int = 100, - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - inverse_retraction_method::AbstractInverseRetractionMethod = default_inverse_retraction_method(M), - vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M), - kwargs...) - initial_population = vcat([x0], [rand(M) for _ in 1:(population_size - 1)]) - opts = particle_swarm(M, - loss; - x0 = initial_population, - n = population_size, - return_state = true, - retraction_method, - inverse_retraction_method, - vector_transport_method, - stopping_criterion, kwargs...) - # we unwrap DebugOptions here + swarm = [x0, [rand(M) for _ in 1:(population_size - 1)]...] + opts = particle_swarm(M, loss, swarm; return_state = true, kwargs...) minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) end @@ -167,27 +136,9 @@ function call_manopt_optimizer(M::Manopt.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M), - stepsize = WolfePowellLinesearch(M; - retraction_method = retraction_method, - vector_transport_method = vector_transport_method, - linesearch_stopsize = 1e-12), kwargs... ) - opts = quasi_Newton(M, - loss, - gradF, - x0; - return_state = true, - evaluation, - retraction_method, - vector_transport_method, - stepsize, - stopping_criterion, - kwargs...) + opts = quasi_Newton(M, loss, gradF, x0; return_state = true, kwargs...) # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) @@ -200,18 +151,8 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M), - basis = Manopt.DefaultOrthonormalBasis(), - kwargs...) - opt = cma_es(M, - loss, - x0; - return_state = true, - stopping_criterion, kwargs...) + opt = cma_es(M, loss, x0; return_state = true, kwargs...) # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) @@ -224,21 +165,8 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M), - kwargs...) 
- opt = convex_bundle_method!(M, - loss, - gradF, - x0; - return_state = true, - evaluation, - retraction_method, - vector_transport_method, - stopping_criterion, kwargs...) + opt = convex_bundle_method(M, loss, gradF, x0; return_state = true, kwargs...) # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) @@ -252,21 +180,13 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, gradF, x0; hessF = nothing, - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - kwargs...) - opt = adaptive_regularization_with_cubics(M, - loss, - gradF, - hessF, - x0; - return_state = true, - evaluation, - retraction_method, - stopping_criterion, kwargs...) - # we unwrap DebugOptions here + + opt = if isnothing(hessF) + adaptive_regularization_with_cubics(M, loss, gradF, x0; return_state = true, kwargs...) + else + adaptive_regularization_with_cubics(M, loss, gradF, hessF, x0; return_state = true, kwargs...) + end minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end @@ -279,20 +199,12 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, gradF, x0; hessF = nothing, - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - kwargs...) - opt = trust_regions(M, - loss, - gradF, - hessF, - x0; - return_state = true, - evaluation, - retraction = retraction_method, - stopping_criterion, kwargs...) + opt = if isnothing(hessF) + trust_regions(M, loss, gradF, x0; return_state = true, kwargs...) + else + trust_regions(M, loss, gradF, hessF, x0; return_state = true, kwargs...) + end # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) @@ -305,21 +217,8 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; - stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet}, - evaluation::AbstractEvaluationType = InplaceEvaluation(), - retraction_method::AbstractRetractionMethod = default_retraction_method(M), - stepsize::Stepsize = DecreasingStepsize(; length = 2.0, shift = 2), - kwargs...) - opt = Frank_Wolfe_method(M, - loss, - gradF, - x0; - return_state = true, - evaluation, - retraction_method, - stopping_criterion, - stepsize, kwargs...) + opt = Frank_Wolfe_method(M, loss, gradF, x0; return_state = true, kwargs...) # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) @@ -332,13 +231,14 @@ function SciMLBase.requiresgradient(opt::Union{ AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer}) true end +# TODO: WHY? 
they both still accept not passing it function SciMLBase.requireshessian(opt::Union{ AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer}) true end function build_loss(f::OptimizationFunction, prob, cb) - function (::AbstractManifold, θ) + return function (::AbstractManifold, θ) x = f.f(θ, prob.p) cb(x, θ) __x = first(x) @@ -346,6 +246,7 @@ function build_loss(f::OptimizationFunction, prob, cb) end end +#TODO: What does the “true” mean here? function build_gradF(f::OptimizationFunction{true}) function g(M::AbstractManifold, G, θ) f.grad(G, θ) @@ -356,6 +257,7 @@ function build_gradF(f::OptimizationFunction{true}) f.grad(G, θ) return riemannian_gradient(M, θ, G) end + return g end function build_hessF(f::OptimizationFunction{true}) @@ -373,6 +275,7 @@ function build_hessF(f::OptimizationFunction{true}) f.grad(G, θ) return riemannian_Hessian(M, θ, G, H, X) end + return h end function SciMLBase.__solve(cache::OptimizationCache{ @@ -395,8 +298,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ LC, UC, S, - O <: - AbstractManoptOptimizer, + O <: AbstractManoptOptimizer, D, P, C @@ -418,6 +320,7 @@ function SciMLBase.__solve(cache::OptimizationCache{ u = θ, p = cache.p, objective = x[1]) + #TODO: What is this callback for? cb_call = cache.callback(opt_state, x...) if !(cb_call isa Bool) error("The callback should return a boolean `halt` for whether to stop the optimization process.") @@ -448,10 +351,12 @@ function SciMLBase.__solve(cache::OptimizationCache{ stopping_criterion = Manopt.StopAfterIteration(500) end + # TODO: With the new keyword warnings we can not just always pass down hessF! opt_res = call_manopt_optimizer(manifold, cache.opt, _loss, gradF, cache.u0; solver_kwarg..., stopping_criterion = stopping_criterion, hessF) asc = get_stopping_criterion(opt_res.options) + # TODO: Switch to `has_converged` once that was released. opt_ret = Manopt.indicates_convergence(asc) ? ReturnCode.Success : ReturnCode.Failure return SciMLBase.build_solution(cache, From a06467213744806a36575d9cbf57aa6314270e70 Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Fri, 12 Sep 2025 10:46:10 +0200 Subject: [PATCH 02/12] fix a few tests, --- lib/OptimizationManopt/test/runtests.jl | 365 ++++++++++++------------ 1 file changed, 183 insertions(+), 182 deletions(-) diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl index 2c84d8623..532a67efe 100644 --- a/lib/OptimizationManopt/test/runtests.jl +++ b/lib/OptimizationManopt/test/runtests.jl @@ -15,233 +15,234 @@ function rosenbrock_grad!(storage, x, p) end R2 = Euclidean(2) +@testset "OptimizationManopt.jl" begin + @testset "Error on no or mismatching manifolds" begin + x0 = zeros(2) + p = [1.0, 100.0] + + stepsize = Manopt.ArmijoLinesearch(R2) + opt = OptimizationManopt.GradientDescentOptimizer() + + optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + prob_forwarddiff = OptimizationProblem(optprob_forwarddiff, x0, p) + @test_throws ArgumentError("Manifold not specified in the problem for e.g. 
`OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") Optimization.solve( + prob_forwarddiff, opt) + end -@testset "Error on no or mismatching manifolds" begin - x0 = zeros(2) - p = [1.0, 100.0] - - stepsize = Manopt.ArmijoLinesearch(R2) - opt = OptimizationManopt.GradientDescentOptimizer() - - optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) - prob_forwarddiff = OptimizationProblem(optprob_forwarddiff, x0, p) - @test_throws ArgumentError("Manifold not specified in the problem for e.g. `OptimizationProblem(f, x, p; manifold = SymmetricPositiveDefinite(5))`.") Optimization.solve( - prob_forwarddiff, opt) -end - -@testset "Gradient descent" begin - x0 = zeros(2) - p = [1.0, 100.0] - - stepsize = Manopt.ArmijoLinesearch(R2) - opt = OptimizationManopt.GradientDescentOptimizer() - - optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoEnzyme()) - prob_forwarddiff = OptimizationProblem( - optprob_forwarddiff, x0, p; manifold = R2, stepsize = stepsize) - sol = Optimization.solve(prob_forwarddiff, opt) - @test sol.minimum < 0.2 + @testset "Gradient descent" begin + x0 = zeros(2) + p = [1.0, 100.0] - optprob_grad = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!) - prob_grad = OptimizationProblem(optprob_grad, x0, p; manifold = R2, stepsize = stepsize) - sol = Optimization.solve(prob_grad, opt) - @test sol.minimum < 0.2 -end + stepsize = Manopt.ArmijoLinesearch(R2) + opt = OptimizationManopt.GradientDescentOptimizer() -@testset "Nelder-Mead" begin - x0 = zeros(2) - p = [1.0, 100.0] + optprob_forwarddiff = OptimizationFunction(rosenbrock, Optimization.AutoEnzyme()) + prob_forwarddiff = OptimizationProblem( + optprob_forwarddiff, x0, p; manifold = R2, stepsize = stepsize) + sol = Optimization.solve(prob_forwarddiff, opt) + @test sol.minimum < 0.2 - opt = OptimizationManopt.NelderMeadOptimizer() + optprob_grad = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!) 
+ prob_grad = OptimizationProblem(optprob_grad, x0, p; manifold = R2, stepsize = stepsize) + sol = Optimization.solve(prob_grad, opt) + @test sol.minimum < 0.2 + end - optprob = OptimizationFunction(rosenbrock) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + @testset "Nelder-Mead" begin + x0 = zeros(2) + p = [1.0, 100.0] - sol = Optimization.solve(prob, opt) - @test sol.minimum < 0.7 -end + opt = OptimizationManopt.NelderMeadOptimizer() -@testset "Conjugate gradient descent" begin - x0 = zeros(2) - p = [1.0, 100.0] + optprob = OptimizationFunction(rosenbrock) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - stepsize = Manopt.ArmijoLinesearch(R2) - opt = OptimizationManopt.ConjugateGradientDescentOptimizer() + sol = Optimization.solve(prob, opt) + @test sol.minimum < 0.7 + end - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + @testset "Conjugate gradient descent" begin + x0 = zeros(2) + p = [1.0, 100.0] - sol = Optimization.solve(prob, opt, stepsize = stepsize) - @test sol.minimum < 0.5 -end + stepsize = Manopt.ArmijoLinesearch(R2) + opt = OptimizationManopt.ConjugateGradientDescentOptimizer() -@testset "Quasi Newton" begin - x0 = zeros(2) - p = [1.0, 100.0] + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - opt = OptimizationManopt.QuasiNewtonOptimizer() - function callback(state, l) - println(state.u) - println(l) - return false + sol = Optimization.solve(prob, opt, stepsize = stepsize) + @test sol.minimum < 0.5 end - optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt, callback = callback, maxiters = 30) - @test sol.minimum < 1e-14 -end + @testset "Quasi Newton" begin + x0 = zeros(2) + p = [1.0, 100.0] + + opt = OptimizationManopt.QuasiNewtonOptimizer() + function callback(state, l) + println(state.u) + println(l) + return false + end + optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) + + sol = Optimization.solve(prob, opt, callback = callback, maxiters = 30) + @test sol.minimum < 1e-14 + end -@testset "Particle swarm" begin - x0 = zeros(2) - p = [1.0, 100.0] + @testset "Particle swarm" begin + x0 = zeros(2) + p = [1.0, 100.0] - opt = OptimizationManopt.ParticleSwarmOptimizer() + opt = OptimizationManopt.ParticleSwarmOptimizer() - optprob = OptimizationFunction(rosenbrock) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + optprob = OptimizationFunction(rosenbrock) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) - @test sol.minimum < 0.1 -end + sol = Optimization.solve(prob, opt) + @test sol.minimum < 0.1 + end -@testset "CMA-ES" begin - x0 = zeros(2) - p = [1.0, 100.0] + @testset "CMA-ES" begin + x0 = zeros(2) + p = [1.0, 100.0] - opt = OptimizationManopt.CMAESOptimizer() + opt = OptimizationManopt.CMAESOptimizer() - optprob = OptimizationFunction(rosenbrock) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + optprob = OptimizationFunction(rosenbrock) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) - @test sol.minimum < 0.1 -end + sol = Optimization.solve(prob, opt) + @test sol.minimum < 0.1 + end -@testset "ConvexBundle" begin - x0 = zeros(2) - p = [1.0, 100.0] + 
@testset "ConvexBundle" begin + x0 = zeros(2) + p = [1.0, 100.0] - opt = OptimizationManopt.ConvexBundleOptimizer() + opt = OptimizationManopt.ConvexBundleOptimizer() - optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve( - prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver!) - @test sol.minimum < 0.1 -end + sol = Optimization.solve( + prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver!) + @test sol.minimum < 0.1 + end -# @testset "TruncatedConjugateGradientDescent" begin -# x0 = zeros(2) -# p = [1.0, 100.0] + # @testset "TruncatedConjugateGradientDescent" begin + # x0 = zeros(2) + # p = [1.0, 100.0] -# opt = OptimizationManopt.TruncatedConjugateGradientDescentOptimizer() + # opt = OptimizationManopt.TruncatedConjugateGradientDescentOptimizer() -# optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) -# prob = OptimizationProblem(optprob, x0, p; manifold = R2) + # optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) + # prob = OptimizationProblem(optprob, x0, p; manifold = R2) -# sol = Optimization.solve(prob, opt) -# @test_broken sol.minimum < 0.1 -# end + # sol = Optimization.solve(prob, opt) + # @test_broken sol.minimum < 0.1 + # end -@testset "AdaptiveRegularizationCubic" begin - x0 = zeros(2) - p = [1.0, 100.0] + @testset "AdaptiveRegularizationCubic" begin + x0 = zeros(2) + p = [1.0, 100.0] - opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer() + opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer() - optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) - @test sol.minimum < 0.1 -end + sol = Optimization.solve(prob, opt) + @test sol.minimum < 0.1 + end -@testset "TrustRegions" begin - x0 = zeros(2) - p = [1.0, 100.0] + @testset "TrustRegions" begin + x0 = zeros(2) + p = [1.0, 100.0] - opt = OptimizationManopt.TrustRegionsOptimizer() + opt = OptimizationManopt.TrustRegionsOptimizer() - optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) - prob = OptimizationProblem(optprob, x0, p; manifold = R2) + optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) + prob = OptimizationProblem(optprob, x0, p; manifold = R2) - sol = Optimization.solve(prob, opt) - @test sol.minimum < 0.1 -end + sol = Optimization.solve(prob, opt) + @test sol.minimum < 0.1 + end -# @testset "Circle example from Manopt" begin -# Mc = Circle() -# pc = 0.0 -# data = [-π / 4, 0.0, π / 4] -# fc(y, _) = 1 / 2 * sum([distance(M, y, x)^2 for x in data]) -# sgrad_fc(G, y, _) = G .= -log(Mc, y, rand(data)) + # @testset "Circle example from Manopt" begin + # Mc = Circle() + # pc = 0.0 + # data = [-π / 4, 0.0, π / 4] + # fc(y, _) = 1 / 2 * sum([distance(M, y, x)^2 for x in data]) + # sgrad_fc(G, y, _) = G .= -log(Mc, y, rand(data)) -# opt = OptimizationManopt.StochasticGradientDescentOptimizer() + # opt = OptimizationManopt.StochasticGradientDescentOptimizer() -# optprob = OptimizationFunction(fc, grad = sgrad_fc) -# prob = OptimizationProblem(optprob, pc; manifold = Mc) + # optprob = OptimizationFunction(fc, grad = sgrad_fc) + # prob = OptimizationProblem(optprob, pc; manifold = Mc) -# sol = 
Optimization.solve(prob, opt) + # sol = Optimization.solve(prob, opt) -# @test all([is_point(Mc, q, true) for q in [q1, q2, q3, q4, q5]]) -# end + # @test all([is_point(Mc, q, true) for q in [q1, q2, q3, q4, q5]]) + # end -@testset "Custom constraints" begin - cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]]) + @testset "Custom constraints" begin + cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]]) - x0 = zeros(2) - p = [1.0, 100.0] - opt = OptimizationManopt.GradientDescentOptimizer() + x0 = zeros(2) + p = [1.0, 100.0] + opt = OptimizationManopt.GradientDescentOptimizer() - optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons) - prob_cons = OptimizationProblem(optprob_cons, x0, p) - @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt) -end + optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons) + prob_cons = OptimizationProblem(optprob_cons, x0, p) + @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt) + end -@testset "SPD Manifold" begin - M = SymmetricPositiveDefinite(5) - m = 100 - σ = 0.005 - q = Matrix{Float64}(I, 5, 5) .+ 2.0 - data2 = [exp(M, q, σ * rand(M; vector_at = q)) for i in 1:m] - - f(x, p = nothing) = sum(distance(M, x, data2[i])^2 for i in 1:m) - - optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) - prob = OptimizationProblem(optf, data2[1]; manifold = M, maxiters = 1000) - - opt = OptimizationManopt.GradientDescentOptimizer() - @time sol = Optimization.solve(prob, opt) - - @test sol.u≈q rtol=1e-2 - - function closed_form_solution!(M::SymmetricPositiveDefinite, q, L, U, p, X) - # extract p^1/2 and p^{-1/2} - (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p) - # Compute D & Q - e2 = eigen(p_sqrt_inv * X * p_sqrt_inv) # decompose Sk = QDQ' - D = Diagonal(1.0 .* (e2.values .< 0)) - Q = e2.vectors - Uprime = Q' * p_sqrt_inv * U * p_sqrt_inv * Q - Lprime = Q' * p_sqrt_inv * L * p_sqrt_inv * Q - P = cholesky(Hermitian(Uprime - Lprime)) - - z = P.U' * D * P.U + Lprime - copyto!(M, q, p_sqrt * Q * z * Q' * p_sqrt) - return q + @testset "SPD Manifold" begin + M = SymmetricPositiveDefinite(5) + m = 100 + σ = 0.005 + q = Matrix{Float64}(I, 5, 5) .+ 2.0 + data2 = [exp(M, q, σ * rand(M; vector_at = q)) for i in 1:m] + + f(x, p = nothing) = sum(distance(M, x, data2[i])^2 for i in 1:m) + + optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) + prob = OptimizationProblem(optf, data2[1]; manifold = M, maxiters = 1000) + + opt = OptimizationManopt.GradientDescentOptimizer() + @time sol = Optimization.solve(prob, opt) + + @test sol.u≈q rtol=1e-2 + + function closed_form_solution!(M::SymmetricPositiveDefinite, q, L, U, p, X) + # extract p^1/2 and p^{-1/2} + (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p) + # Compute D & Q + e2 = eigen(p_sqrt_inv * X * p_sqrt_inv) # decompose Sk = QDQ' + D = Diagonal(1.0 .* (e2.values .< 0)) + Q = e2.vectors + Uprime = Q' * p_sqrt_inv * U * p_sqrt_inv * Q + Lprime = Q' * p_sqrt_inv * L * p_sqrt_inv * Q + P = cholesky(Hermitian(Uprime - Lprime)) + + z = P.U' * D * P.U + Lprime + copyto!(M, q, p_sqrt * Q * z * Q' * p_sqrt) + return q + end + N = m + U = mean(data2) + L = inv(sum(1 / N * inv(matrix) for matrix in data2)) + + opt = OptimizationManopt.FrankWolfeOptimizer() + optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) + prob = OptimizationProblem(optf, data2[1]; manifold = M) + + @time sol = Optimization.solve( + prob, opt, sub_problem = (M, q, p, X) -> 
closed_form_solution!(M, q, L, U, p, X), + maxiters = 1000) + @test sol.u≈q rtol=1e-2 end - N = m - U = mean(data2) - L = inv(sum(1 / N * inv(matrix) for matrix in data2)) - - opt = OptimizationManopt.FrankWolfeOptimizer() - optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) - prob = OptimizationProblem(optf, data2[1]; manifold = M) - - @time sol = Optimization.solve( - prob, opt, sub_problem = (M, q, p, X) -> closed_form_solution!(M, q, L, U, p, X), - maxiters = 1000) - @test sol.u≈q rtol=1e-2 -end +end \ No newline at end of file From 98b43be15e642edd73ef363ac78f9b617389eb8c Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Fri, 12 Sep 2025 13:05:03 +0200 Subject: [PATCH 03/12] Move all tests to the allocating case default. --- lib/OptimizationManopt/test/runtests.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl index 532a67efe..9d71c5dd1 100644 --- a/lib/OptimizationManopt/test/runtests.jl +++ b/lib/OptimizationManopt/test/runtests.jl @@ -128,7 +128,7 @@ R2 = Euclidean(2) prob = OptimizationProblem(optprob, x0, p; manifold = R2) sol = Optimization.solve( - prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver!) + prob, opt, sub_problem = Manopt.convex_bundle_method_subsolver) @test sol.minimum < 0.1 end @@ -197,6 +197,7 @@ R2 = Euclidean(2) optprob_cons = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!, cons = cons) prob_cons = OptimizationProblem(optprob_cons, x0, p) + #TODO: What is this? @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt) end @@ -217,7 +218,8 @@ R2 = Euclidean(2) @test sol.u≈q rtol=1e-2 - function closed_form_solution!(M::SymmetricPositiveDefinite, q, L, U, p, X) + function closed_form_solution(M::SymmetricPositiveDefinite, L, U, p, X) + q = copy(M, p) # extract p^1/2 and p^{-1/2} (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p) # Compute D & Q From dbddd2521c59dc082b9b4eaac15f54f7593f3f34 Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Fri, 12 Sep 2025 13:20:45 +0200 Subject: [PATCH 04/12] Collect a few comments on why and where currently tests still fail. --- lib/OptimizationManopt/test/runtests.jl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl index 9d71c5dd1..9f12f6e30 100644 --- a/lib/OptimizationManopt/test/runtests.jl +++ b/lib/OptimizationManopt/test/runtests.jl @@ -151,6 +151,8 @@ R2 = Euclidean(2) opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer() + #TODO: This autodiff currently provides a Hessian that seem to not procide a Hessian + # ARC Fails but also AD before that warns. So it passes _some_ hessian but a wrong one, even in format optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) @@ -164,6 +166,8 @@ R2 = Euclidean(2) opt = OptimizationManopt.TrustRegionsOptimizer() + #TODO: This autodiff currently provides a Hessian that seem to not procide a Hessian + # TR Fails but also AD before that warns. 
So it passes _some_ hessian but a wrong one, even in format
         optprob = OptimizationFunction(rosenbrock, AutoForwardDiff())
         prob = OptimizationProblem(optprob, x0, p; manifold = R2)
 
@@ -219,7 +223,6 @@ R2 = Euclidean(2)
 
         @test sol.u≈q rtol=1e-2
 
         function closed_form_solution(M::SymmetricPositiveDefinite, L, U, p, X)
-            q = copy(M, p)
             # extract p^1/2 and p^{-1/2}
             (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p)
             # Compute D & Q
@@ -231,8 +234,7 @@ R2 = Euclidean(2)
             P = cholesky(Hermitian(Uprime - Lprime))
 
             z = P.U' * D * P.U + Lprime
-            copyto!(M, q, p_sqrt * Q * z * Q' * p_sqrt)
-            return q
+            return p_sqrt * Q * z * Q' * p_sqrt
         end
         N = m
         U = mean(data2)
@@ -243,7 +245,7 @@ R2 = Euclidean(2)
         prob = OptimizationProblem(optf, data2[1]; manifold = M)
 
         @time sol = Optimization.solve(
-            prob, opt, sub_problem = (M, q, p, X) -> closed_form_solution!(M, q, L, U, p, X),
+            prob, opt, sub_problem = (M, p, X) -> closed_form_solution(M, p, L, U, X),
            maxiters = 1000)
         @test sol.u≈q rtol=1e-2
     end

From 0ac3af5bf4054a5a210fa301531cdea38e8261eb Mon Sep 17 00:00:00 2001
From: Ronny Bergmann
Date: Tue, 23 Sep 2025 16:10:29 +0200
Subject: [PATCH 05/12] Fix a few bugs in the existing code.

---
 docs/src/optimization_packages/manopt.md | 8 ++-
 .../src/OptimizationManopt.jl | 21 +++---
 lib/OptimizationManopt/test/runtests.jl | 64 +------------------
 3 files changed, 18 insertions(+), 75 deletions(-)

diff --git a/docs/src/optimization_packages/manopt.md b/docs/src/optimization_packages/manopt.md
index 64cc69880..422337e15 100644
--- a/docs/src/optimization_packages/manopt.md
+++ b/docs/src/optimization_packages/manopt.md
@@ -1,7 +1,9 @@
 # Manopt.jl
 
-[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimization solvers on manifolds supported by
-[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl).
+[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package providing solvers
+for optimization problems defined on Riemannian manifolds.
+The implementation is based on the [ManifoldsBase.jl](https://github.com/JuliaManifolds/ManifoldsBase.jl) interface and can hence be used for all manifolds defined in
+[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl) or any other manifold implemented using the interface.
 
 ## Installation: OptimizationManopt.jl
 
@@ -29,7 +31,7 @@ The common kwargs `maxiters`, `maxtime` and `abstol` are supported by all the op
 function or `OptimizationProblem`.
 
 !!! note
-    
+    The `OptimizationProblem` has to be passed the manifold as the `manifold` keyword argument.
 
 ## Examples

diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl
index 80404cf1a..e72db9643 100644
--- a/lib/OptimizationManopt/src/OptimizationManopt.jl
+++ b/lib/OptimizationManopt/src/OptimizationManopt.jl
@@ -65,6 +65,7 @@ function call_manopt_optimizer(
         loss,
         gradF,
         x0;
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
     opts = Manopt.gradient_descent(M,
         loss,
@@ -84,6 +85,7 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, opt::NelderMea
         loss,
         gradF,
         x0;
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
     opts = NelderMead(M, loss; return_state = true, kwargs...)
     minimizer = Manopt.get_solver_result(opts)
@@ -98,6 +100,7 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
         loss,
         gradF,
         x0;
+        hessF=nothing, # ignore that keyword for this solver
         kwargs...)
opts = Manopt.conjugate_gradient_descent(M, loss, @@ -106,7 +109,6 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, return_state = true, kwargs... ) - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) end @@ -119,6 +121,7 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; + hessF=nothing, # ignore that keyword for this solver population_size::Int = 100, kwargs...) swarm = [x0, [rand(M) for _ in 1:(population_size - 1)]...] @@ -136,10 +139,10 @@ function call_manopt_optimizer(M::Manopt.AbstractManifold, loss, gradF, x0; + hessF=nothing, # ignore that keyword for this solver kwargs... ) opts = quasi_Newton(M, loss, gradF, x0; return_state = true, kwargs...) - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opts) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts) end @@ -151,9 +154,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; + hessF=nothing, # ignore that keyword for this solver kwargs...) opt = cma_es(M, loss, x0; return_state = true, kwargs...) - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end @@ -165,9 +168,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; + hessF=nothing, # ignore that keyword for this solver kwargs...) opt = convex_bundle_method(M, loss, gradF, x0; return_state = true, kwargs...) - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end @@ -205,7 +208,6 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, else trust_regions(M, loss, gradF, hessF, x0; return_state = true, kwargs...) end - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end @@ -217,9 +219,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, loss, gradF, x0; + hessF=nothing, # ignore that keyword for this solver kwargs...) opt = Frank_Wolfe_method(M, loss, gradF, x0; return_state = true, kwargs...) - # we unwrap DebugOptions here minimizer = Manopt.get_solver_result(opt) return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt) end @@ -238,6 +240,9 @@ function SciMLBase.requireshessian(opt::Union{ end function build_loss(f::OptimizationFunction, prob, cb) + # TODO: I do not understand this. Why is the manifold not used? + # Either this is an Euclidean cost, then we should probably still call `embed`, + # or it is not, then we need M. return function (::AbstractManifold, θ) x = f.f(θ, prob.p) cb(x, θ) @@ -351,13 +356,11 @@ function SciMLBase.__solve(cache::OptimizationCache{ stopping_criterion = Manopt.StopAfterIteration(500) end - # TODO: With the new keyword warnings we can not just always pass down hessF! opt_res = call_manopt_optimizer(manifold, cache.opt, _loss, gradF, cache.u0; solver_kwarg..., stopping_criterion = stopping_criterion, hessF) asc = get_stopping_criterion(opt_res.options) - # TODO: Switch to `has_converged` once that was released. - opt_ret = Manopt.indicates_convergence(asc) ? ReturnCode.Success : ReturnCode.Failure + opt_ret = Manopt.has_converged(asc) ? 
ReturnCode.Success : ReturnCode.Failure return SciMLBase.build_solution(cache, cache.opt, diff --git a/lib/OptimizationManopt/test/runtests.jl b/lib/OptimizationManopt/test/runtests.jl index 9f12f6e30..09074e602 100644 --- a/lib/OptimizationManopt/test/runtests.jl +++ b/lib/OptimizationManopt/test/runtests.jl @@ -151,7 +151,7 @@ R2 = Euclidean(2) opt = OptimizationManopt.AdaptiveRegularizationCubicOptimizer() - #TODO: This autodiff currently provides a Hessian that seem to not procide a Hessian + #TODO: This autodiff currently provides a Hessian that seem to not provide a Hessian # ARC Fails but also AD before that warns. So it passes _some_ hessian but a wrong one, even in format optprob = OptimizationFunction(rosenbrock, AutoForwardDiff()) prob = OptimizationProblem(optprob, x0, p; manifold = R2) @@ -175,23 +175,6 @@ R2 = Euclidean(2) @test sol.minimum < 0.1 end - # @testset "Circle example from Manopt" begin - # Mc = Circle() - # pc = 0.0 - # data = [-π / 4, 0.0, π / 4] - # fc(y, _) = 1 / 2 * sum([distance(M, y, x)^2 for x in data]) - # sgrad_fc(G, y, _) = G .= -log(Mc, y, rand(data)) - - # opt = OptimizationManopt.StochasticGradientDescentOptimizer() - - # optprob = OptimizationFunction(fc, grad = sgrad_fc) - # prob = OptimizationProblem(optprob, pc; manifold = Mc) - - # sol = Optimization.solve(prob, opt) - - # @test all([is_point(Mc, q, true) for q in [q1, q2, q3, q4, q5]]) - # end - @testset "Custom constraints" begin cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]]) @@ -204,49 +187,4 @@ R2 = Euclidean(2) #TODO: What is this? @test_throws SciMLBase.IncompatibleOptimizerError Optimization.solve(prob_cons, opt) end - - @testset "SPD Manifold" begin - M = SymmetricPositiveDefinite(5) - m = 100 - σ = 0.005 - q = Matrix{Float64}(I, 5, 5) .+ 2.0 - data2 = [exp(M, q, σ * rand(M; vector_at = q)) for i in 1:m] - - f(x, p = nothing) = sum(distance(M, x, data2[i])^2 for i in 1:m) - - optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) - prob = OptimizationProblem(optf, data2[1]; manifold = M, maxiters = 1000) - - opt = OptimizationManopt.GradientDescentOptimizer() - @time sol = Optimization.solve(prob, opt) - - @test sol.u≈q rtol=1e-2 - - function closed_form_solution(M::SymmetricPositiveDefinite, L, U, p, X) - # extract p^1/2 and p^{-1/2} - (p_sqrt_inv, p_sqrt) = Manifolds.spd_sqrt_and_sqrt_inv(p) - # Compute D & Q - e2 = eigen(p_sqrt_inv * X * p_sqrt_inv) # decompose Sk = QDQ' - D = Diagonal(1.0 .* (e2.values .< 0)) - Q = e2.vectors - Uprime = Q' * p_sqrt_inv * U * p_sqrt_inv * Q - Lprime = Q' * p_sqrt_inv * L * p_sqrt_inv * Q - P = cholesky(Hermitian(Uprime - Lprime)) - - z = P.U' * D * P.U + Lprime - return p_sqrt * Q * z * Q' * p_sqrt - end - N = m - U = mean(data2) - L = inv(sum(1 / N * inv(matrix) for matrix in data2)) - - opt = OptimizationManopt.FrankWolfeOptimizer() - optf = OptimizationFunction(f, Optimization.AutoFiniteDiff()) - prob = OptimizationProblem(optf, data2[1]; manifold = M) - - @time sol = Optimization.solve( - prob, opt, sub_problem = (M, p, X) -> closed_form_solution(M, p, L, U, X), - maxiters = 1000) - @test sol.u≈q rtol=1e-2 - end end \ No newline at end of file From efc42eacc435c71b14c25babe3d21ddad7c46026 Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Tue, 23 Sep 2025 16:31:27 +0200 Subject: [PATCH 06/12] Fix the tests. 
--- lib/OptimizationManopt/src/OptimizationManopt.jl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index e72db9643..48edd72b8 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -251,7 +251,9 @@ function build_loss(f::OptimizationFunction, prob, cb) end end -#TODO: What does the “true” mean here? +# cf. https://github.com/SciML/SciMLBase.jl/blob/master/src/problems/optimization_problems.jl +# {iip} is the parameter here – nowhere explained but very much probably “is in place” +# so this refers to whether the gradient/hessian is computed in place or not function build_gradF(f::OptimizationFunction{true}) function g(M::AbstractManifold, G, θ) f.grad(G, θ) @@ -268,14 +270,16 @@ end function build_hessF(f::OptimizationFunction{true}) function h(M::AbstractManifold, H1, θ, X) H = zeros(eltype(θ), length(θ)) + # an Optimization function has both hess (the matrix) and hv (Hessian with direction) + # we need hv here f.hv(H, θ, X) G = zeros(eltype(θ), length(θ)) f.grad(G, θ) riemannian_Hessian!(M, H1, θ, G, H, X) end function h(M::AbstractManifold, θ, X) - H = zeros(eltype(θ), length(θ), length(θ)) - f.hess(H, θ) + H = zeros(eltype(θ), length(θ)) + f.hv(H, θ, X) G = zeros(eltype(θ), length(θ)) f.grad(G, θ) return riemannian_Hessian(M, θ, G, H, X) From b488dc42b7b14cfc2df77e46ec609932029c4fcb Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Tue, 23 Sep 2025 17:27:27 +0200 Subject: [PATCH 07/12] Bump version number --- docs/Project.toml | 2 +- lib/OptimizationManopt/Project.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index aefa3caf2..5a9beddb0 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -72,7 +72,7 @@ OptimizationEvolutionary = "0.4" OptimizationGCMAES = "0.3" OptimizationIpopt = "0.1" OptimizationMOI = "0.5" -OptimizationManopt = "0.0.4" +OptimizationManopt = "0.1.0" OptimizationMetaheuristics = "0.3" OptimizationNLPModels = "0.0.2" OptimizationNLopt = "0.3" diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml index 8aa070a8d..c1351a2b9 100644 --- a/lib/OptimizationManopt/Project.toml +++ b/lib/OptimizationManopt/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationManopt" uuid = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6" authors = ["Mateusz Baran "] -version = "0.0.4" +version = "0.1.0" [deps] LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" From 1b4864db6d1104452ea1afee8f03fd55df8f02f1 Mon Sep 17 00:00:00 2001 From: Ronny Bergmann Date: Tue, 23 Sep 2025 17:41:28 +0200 Subject: [PATCH 08/12] =?UTF-8?q?Bump=20docs=20versions=20=E2=80=93=20also?= =?UTF-8?q?=20for=20OptimizationIpOpt=20since=20that=20currently=20errored?= =?UTF-8?q?=20on=20CI.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/Project.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index 5a9beddb0..7db9bd79d 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -58,8 +58,8 @@ IterTools = "1" Juniper = "0.9" Lux = "1" MLUtils = "0.4.4" -Manifolds = "0.9" -Manopt = "0.4" +Manifolds = "0.10" +Manopt = "0.5" ModelingToolkit = "10" NLPModels = "0.21" NLPModelsTest = "0.10" @@ -70,7 +70,7 @@ OptimizationBase = "2" OptimizationCMAEvolutionStrategy = "0.3" OptimizationEvolutionary = "0.4" OptimizationGCMAES = 
"0.3" -OptimizationIpopt = "0.1" +OptimizationIpopt = "0.2" OptimizationMOI = "0.5" OptimizationManopt = "0.1.0" OptimizationMetaheuristics = "0.3" From 213760edf8f10710404d6dc5e909535560e611f8 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 1 Oct 2025 12:51:41 -0400 Subject: [PATCH 09/12] Update lib/OptimizationManopt/src/OptimizationManopt.jl --- lib/OptimizationManopt/src/OptimizationManopt.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 9e9a092de..9aded6519 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -334,7 +334,6 @@ function SciMLBase.__solve(cache::OptimizationCache{ u = θ, p = cache.p, objective = x[1]) - #TODO: What is this callback for? cb_call = cache.callback(opt_state, x...) if !(cb_call isa Bool) error("The callback should return a boolean `halt` for whether to stop the optimization process.") From 0a47945fcc9d2d7b1d490359b6081781d18e8282 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 1 Oct 2025 12:51:47 -0400 Subject: [PATCH 10/12] Update lib/OptimizationManopt/src/OptimizationManopt.jl --- lib/OptimizationManopt/src/OptimizationManopt.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 9aded6519..ceb1d1ee1 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -275,8 +275,6 @@ end function build_hessF(f::OptimizationFunction{true}) function h(M::AbstractManifold, H1, θ, X) H = zeros(eltype(θ), length(θ)) - # an Optimization function has both hess (the matrix) and hv (Hessian with direction) - # we need hv here f.hv(H, θ, X) G = zeros(eltype(θ), length(θ)) f.grad(G, θ) From b9803059c2a0191e724631c96d7be2808887ca34 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 1 Oct 2025 12:51:53 -0400 Subject: [PATCH 11/12] Update lib/OptimizationManopt/src/OptimizationManopt.jl --- lib/OptimizationManopt/src/OptimizationManopt.jl | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index ceb1d1ee1..8ff2cdd76 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -256,9 +256,6 @@ function build_loss(f::OptimizationFunction, prob, cb) end end -# cf. 
https://github.com/SciML/SciMLBase.jl/blob/master/src/problems/optimization_problems.jl -# {iip} is the parameter here – nowhere explained but very much probably “is in place” -# so this refers to whether the gradient/hessian is computed in place or not function build_gradF(f::OptimizationFunction{true}) function g(M::AbstractManifold, G, θ) f.grad(G, θ) From 2e46ca121038f63e36431209cc8de18d64d6425f Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Wed, 1 Oct 2025 12:52:00 -0400 Subject: [PATCH 12/12] Update lib/OptimizationManopt/src/OptimizationManopt.jl --- lib/OptimizationManopt/src/OptimizationManopt.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/OptimizationManopt/src/OptimizationManopt.jl b/lib/OptimizationManopt/src/OptimizationManopt.jl index 8ff2cdd76..1bc3270f0 100644 --- a/lib/OptimizationManopt/src/OptimizationManopt.jl +++ b/lib/OptimizationManopt/src/OptimizationManopt.jl @@ -238,10 +238,9 @@ function SciMLBase.requiresgradient(opt::Union{ AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer}) true end -# TODO: WHY? they both still accept not passing it function SciMLBase.requireshessian(opt::Union{ AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer}) - true + false end function build_loss(f::OptimizationFunction, prob, cb)