6 changes: 3 additions & 3 deletions docs/Project.toml
@@ -59,8 +59,8 @@ IterTools = "1"
Juniper = "0.9"
Lux = "1"
MLUtils = "0.4.4"
Manifolds = "0.9"
Manopt = "0.4"
Manifolds = "0.10"
Manopt = "0.5"
ModelingToolkit = "10.23"
NLPModels = "0.21"
NLPModelsTest = "0.10"
@@ -73,7 +73,7 @@ OptimizationEvolutionary = "0.4"
OptimizationGCMAES = "0.3"
OptimizationIpopt = "0.2"
OptimizationMOI = "0.5"
OptimizationManopt = "0.0.5"
OptimizationManopt = "0.1.0"
OptimizationMetaheuristics = "0.3"
OptimizationNLPModels = "0.0.2"
OptimizationNLopt = "0.3"
8 changes: 5 additions & 3 deletions docs/src/optimization_packages/manopt.md
@@ -1,7 +1,9 @@
# Manopt.jl

[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package with implementations of a variety of optimization solvers on manifolds supported by
[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl).
[Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) is a package providing solvers
for optimization problems defined on Riemannian manifolds.
The implementation is based on the [ManifoldsBase.jl](https://github.com/JuliaManifolds/ManifoldsBase.jl) interface and can hence be used for all manifolds defined in
[Manifolds](https://github.com/JuliaManifolds/Manifolds.jl) or any other manifold implemented using the interface.

## Installation: OptimizationManopt.jl

@@ -29,7 +31,7 @@ The common kwargs `maxiters`, `maxtime` and `abstol` are supported by all the op
function or `OptimizationProblem`.

!!! note

The `OptimizationProblem` has to be passed the manifold as the `manifold` keyword argument.
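For instance, a minimal sketch of this setup (hypothetical objective; the solver name `GradientDescentOptimizer` is assumed to be exported by OptimizationManopt):

```julia
using Optimization, OptimizationManopt, Manifolds

# Hypothetical objective: a Rayleigh quotient, minimized over the unit sphere.
A = [2.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 0.5]
rayleigh(x, p) = x' * A * x

M = Sphere(2)             # unit sphere in ℝ³ from Manifolds.jl
x0 = [0.0, 0.0, 1.0]      # starting point on the manifold

optf = OptimizationFunction(rayleigh, Optimization.AutoForwardDiff())
# The manifold is attached to the problem via the `manifold` keyword argument:
prob = OptimizationProblem(optf, x0; manifold = M)

sol = solve(prob, GradientDescentOptimizer(); maxiters = 100)
```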

## Examples
12 changes: 6 additions & 6 deletions lib/OptimizationManopt/Project.toml
@@ -1,7 +1,7 @@
name = "OptimizationManopt"
uuid = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6"
authors = ["Mateusz Baran <mateuszbaran89@gmail.com>"]
version = "0.0.5"
authors = ["Mateusz Baran <mateuszbaran89@gmail.com>", "Ronny Bergmann <manopt@ronnybergmann.net>"]
version = "0.1.0"

[deps]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -14,10 +14,10 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69"

[compat]
LinearAlgebra = "1.10"
ManifoldDiff = "0.3.10"
Manifolds = "0.9.18"
ManifoldsBase = "0.15.10"
Manopt = "0.4.63"
ManifoldDiff = "0.4"
Manifolds = "0.10"
ManifoldsBase = "1"
Manopt = "0.5"
Optimization = "4.4"
Reexport = "1.2"
julia = "1.10"
183 changes: 44 additions & 139 deletions lib/OptimizationManopt/src/OptimizationManopt.jl
@@ -70,20 +70,15 @@ function call_manopt_optimizer(
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = Manopt.AllocatingEvaluation(),
stepsize::Stepsize = ArmijoLinesearch(M),
hessF=nothing, # ignore that keyword for this solver
kwargs...)
opts = gradient_descent(M,
opts = Manopt.gradient_descent(M,
loss,
gradF,
x0;
return_state = true,
evaluation,
stepsize,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here
return_state = true, # return the (full, decorated) solver state
kwargs...
)
minimizer = Manopt.get_solver_result(opts)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
end
@@ -95,13 +90,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold, opt::NelderMea
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
kwargs...)
opts = NelderMead(M,
loss;
return_state = true,
stopping_criterion,
hessF=nothing, # ignore that keyword for this solver
kwargs...)
opts = NelderMead(M, loss; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opts)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
end
@@ -114,20 +105,15 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
stepsize::Stepsize = ArmijoLinesearch(M),
hessF=nothing, # ignore that keyword for this solver
kwargs...)
opts = conjugate_gradient_descent(M,
opts = Manopt.conjugate_gradient_descent(M,
loss,
gradF,
x0;
return_state = true,
evaluation,
stepsize,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here
kwargs...
)
minimizer = Manopt.get_solver_result(opts)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
end
@@ -140,25 +126,11 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
hessF=nothing, # ignore that keyword for this solver
population_size::Int = 100,
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
inverse_retraction_method::AbstractInverseRetractionMethod = default_inverse_retraction_method(M),
vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
kwargs...)
initial_population = vcat([x0], [rand(M) for _ in 1:(population_size - 1)])
opts = particle_swarm(M,
loss;
x0 = initial_population,
n = population_size,
return_state = true,
retraction_method,
inverse_retraction_method,
vector_transport_method,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here
swarm = [x0, [rand(M) for _ in 1:(population_size - 1)]...]
opts = particle_swarm(M, loss, swarm; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opts)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
end
@@ -172,28 +144,10 @@ function call_manopt_optimizer(M::Manopt.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
stepsize = WolfePowellLinesearch(M;
retraction_method = retraction_method,
vector_transport_method = vector_transport_method,
linesearch_stopsize = 1e-12),
hessF=nothing, # ignore that keyword for this solver
kwargs...
)
opts = quasi_Newton(M,
loss,
gradF,
x0;
return_state = true,
evaluation,
retraction_method,
vector_transport_method,
stepsize,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here
opts = quasi_Newton(M, loss, gradF, x0; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opts)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opts)
end
@@ -205,19 +159,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
basis = Manopt.DefaultOrthonormalBasis(),
kwargs...)
opt = cma_es(M,
loss,
x0;
return_state = true,
stopping_criterion,
hessF=nothing, # ignore that keyword for this solver
kwargs...)
# we unwrap DebugOptions here
opt = cma_es(M, loss, x0; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opt)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
end
@@ -229,22 +173,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
vector_transport_method::AbstractVectorTransportMethod = default_vector_transport_method(M),
kwargs...)
opt = convex_bundle_method!(M,
loss,
gradF,
x0;
return_state = true,
evaluation,
retraction_method,
vector_transport_method,
stopping_criterion,
hessF=nothing, # ignore that keyword for this solver
kwargs...)
# we unwrap DebugOptions here
opt = convex_bundle_method(M, loss, gradF, x0; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opt)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
end
@@ -257,21 +188,13 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
gradF,
x0;
hessF = nothing,
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
kwargs...)
opt = adaptive_regularization_with_cubics(M,
loss,
gradF,
hessF,
x0;
return_state = true,
evaluation,
retraction_method,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here

opt = if isnothing(hessF)
adaptive_regularization_with_cubics(M, loss, gradF, x0; return_state = true, kwargs...)
else
adaptive_regularization_with_cubics(M, loss, gradF, hessF, x0; return_state = true, kwargs...)
end
minimizer = Manopt.get_solver_result(opt)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
end
@@ -284,21 +207,12 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
gradF,
x0;
hessF = nothing,
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
kwargs...)
opt = trust_regions(M,
loss,
gradF,
hessF,
x0;
return_state = true,
evaluation,
retraction = retraction_method,
stopping_criterion,
kwargs...)
# we unwrap DebugOptions here
opt = if isnothing(hessF)
trust_regions(M, loss, gradF, x0; return_state = true, kwargs...)
else
trust_regions(M, loss, gradF, hessF, x0; return_state = true, kwargs...)
end
minimizer = Manopt.get_solver_result(opt)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
end
@@ -310,22 +224,9 @@ function call_manopt_optimizer(M::ManifoldsBase.AbstractManifold,
loss,
gradF,
x0;
stopping_criterion::Union{Manopt.StoppingCriterion, Manopt.StoppingCriterionSet},
evaluation::AbstractEvaluationType = InplaceEvaluation(),
retraction_method::AbstractRetractionMethod = default_retraction_method(M),
stepsize::Stepsize = DecreasingStepsize(; length = 2.0, shift = 2),
kwargs...)
opt = Frank_Wolfe_method(M,
loss,
gradF,
x0;
return_state = true,
evaluation,
retraction_method,
stopping_criterion,
stepsize,
hessF=nothing, # ignore that keyword for this solver
kwargs...)
# we unwrap DebugOptions here
opt = Frank_Wolfe_method(M, loss, gradF, x0; return_state = true, kwargs...)
minimizer = Manopt.get_solver_result(opt)
return (; minimizer = minimizer, minimum = loss(M, minimizer), options = opt)
end
@@ -339,11 +240,14 @@ function SciMLBase.requiresgradient(opt::Union{
end
function SciMLBase.requireshessian(opt::Union{
AdaptiveRegularizationCubicOptimizer, TrustRegionsOptimizer})
true
false
end

function build_loss(f::OptimizationFunction, prob, cb)
function (::AbstractManifold, θ)
# TODO: I do not understand this. Why is the manifold not used?
# Either this is an Euclidean cost, then we should probably still call `embed`,
# or it is not, then we need M.
return function (::AbstractManifold, θ)
Comment on lines +247 to +250

Contributor Author: Here we should check what is best to do; the current approach works in some cases, but not all.

Member: This I don't know. Do you need to know the manifold to know how to calculate the loss? I guess to know the mapping for some parameter values in some representations?

Contributor Author: The signature of cost/grad/hess always has the manifold as its first parameter, since that allows implementing a single cost for arbitrary manifolds, e.g. the Karcher mean minimising the squared distances.

My main problem is that I do not understand which cost this is:

  1. on a manifold, it would contradict what the gradient function next does;
  2. in the embedding, we would have to call embed(M, θ) before passing it to the function f that is defined in the embedding.

As long as embed is the identity, as for SPDs and the sphere, the current code works. But for fixed rank, for example, it would not.
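A minimal sketch of the embedding-based variant discussed here (not part of the PR; `build_embedded_loss` is a hypothetical name, assuming ManifoldsBase's `embed(M, p)` and this module's existing imports):

```julia
# Sketch: evaluate the user's Euclidean cost on the embedded representation of θ.
# `embed` is the identity for e.g. the sphere or SPD matrices (where the current
# code already works), but not for manifolds such as fixed-rank matrices.
function build_embedded_loss(f::OptimizationFunction, prob, cb)
    return function (M::ManifoldsBase.AbstractManifold, θ)
        θe = ManifoldsBase.embed(M, θ)   # map the manifold point into its embedding
        x = f.f(θe, prob.p)              # the cost is defined on the embedding space
        cb(x, θ)
        return first(x)
    end
end
```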

x = f.f(θ, prob.p)
cb(x, θ)
__x = first(x)
@@ -361,6 +265,7 @@ function build_gradF(f::OptimizationFunction{true})
f.grad(G, θ)
return riemannian_gradient(M, θ, G)
end
return g
end

function build_hessF(f::OptimizationFunction{true})
@@ -372,12 +277,13 @@ function build_hessF(f::OptimizationFunction{true})
riemannian_Hessian!(M, H1, θ, G, H, X)
end
function h(M::AbstractManifold, θ, X)
H = zeros(eltype(θ), length(θ), length(θ))
f.hess(H, θ)
H = zeros(eltype(θ), length(θ))
f.hv(H, θ, X)
G = zeros(eltype(θ), length(θ))
f.grad(G, θ)
return riemannian_Hessian(M, θ, G, H, X)
end
return h
end

function SciMLBase.__solve(cache::OptimizationCache{
@@ -400,8 +306,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
LC,
UC,
S,
O <:
AbstractManoptOptimizer,
O <: AbstractManoptOptimizer,
D,
P,
C
@@ -457,7 +362,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
solver_kwarg..., stopping_criterion = stopping_criterion, hessF)

asc = get_stopping_criterion(opt_res.options)
opt_ret = Manopt.indicates_convergence(asc) ? ReturnCode.Success : ReturnCode.Failure
opt_ret = Manopt.has_converged(asc) ? ReturnCode.Success : ReturnCode.Failure

return SciMLBase.build_solution(cache,
cache.opt,