
User-defined gradients need to accept AbstractVector #2638

@odow

Description


Copying the multivariate user-defined function example from the documentation leads to a MethodError: https://jump.dev/JuMP.jl/stable/manual/nlp/#Multivariate-functions

julia> using JuMP, Ipopt

julia> f(x...) = (x[1] - 1)^2 + (x[2] - 2)^2
f (generic function with 1 method)

julia> function ∇f(g::Vector{T}, x::T...) where {T}
           g[1] = 2 * (x[1] - 1)
           g[2] = 2 * (x[2] - 2)
           return
       end
∇f (generic function with 1 method)

julia> model = Model(Ipopt.Optimizer)
A JuMP Model
Feasibility problem with:
Variables: 0
Model mode: AUTOMATIC
CachingOptimizer state: EMPTY_OPTIMIZER
Solver name: Ipopt

julia> register(model, :my_square, 2, f, ∇f)

julia> @variable(model, x[1:2] >= 0)
2-element Vector{VariableRef}:
 x[1]
 x[2]

julia> @NLobjective(model, Min, my_square(x...))

julia> optimize!(model)

******************************************************************************
This program contains Ipopt, a library for large-scale nonlinear optimization.
 Ipopt is released as open source code under the Eclipse Public License (EPL).
         For more information visit https://github.com/coin-or/Ipopt
******************************************************************************

This is Ipopt version 3.13.4, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).

Number of nonzeros in equality constraint Jacobian...:        0
Number of nonzeros in inequality constraint Jacobian.:        0
Number of nonzeros in Lagrangian Hessian.............:        0

ERROR: MethodError: no method matching ∇f(::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, ::Float64, ::Float64)
Closest candidates are:
  ∇f(::Vector{T}, ::T...) where T at REPL[6]:1
Stacktrace:
  [1] (::JuMP.var"#148#151"{typeof(∇f)})(g::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, x::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true})
    @ JuMP ~/.julia/packages/JuMP/Xrr7O/src/nlp.jl:1916
  [2] eval_objective_gradient(d::JuMP._UserFunctionEvaluator, grad::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true}, x::SubArray{Float64, 1, Vector{Float64}, Tuple{UnitRange{Int64}}, true})
    @ JuMP ~/.julia/packages/JuMP/Xrr7O/src/nlp.jl:1747
  [3] forward_eval(storage::Vector{Float64}, partials_storage::Vector{Float64}, nd::Vector{JuMP._Derivatives.NodeData}, adj::SparseArrays.SparseMatrixCSC{Bool, Int64}, const_values::Vector{Float64}, parameter_values::Vector{Float64}, x_values::Vector{Float64}, subexpression_values::Vector{Float64}, user_input_buffer::Vector{Float64}, user_output_buffer::Vector{Float64}, user_operators::JuMP._Derivatives.UserOperatorRegistry)
    @ JuMP._Derivatives ~/.julia/packages/JuMP/Xrr7O/src/_Derivatives/forward.jl:181
  [4] _forward_eval_all(d::NLPEvaluator, x::Vector{Float64})
    @ JuMP ~/.julia/packages/JuMP/Xrr7O/src/nlp.jl:667
  [5] macro expansion
    @ ~/.julia/packages/JuMP/Xrr7O/src/nlp.jl:736 [inlined]
  [6] macro expansion
    @ ./timing.jl:287 [inlined]
  [7] eval_objective_gradient(d::NLPEvaluator, g::Vector{Float64}, x::Vector{Float64})
    @ JuMP ~/.julia/packages/JuMP/Xrr7O/src/nlp.jl:734
  [8] eval_objective_gradient(model::Ipopt.Optimizer, grad::Vector{Float64}, x::Vector{Float64})
    @ Ipopt ~/.julia/packages/Ipopt/vtrOr/src/MOI_wrapper.jl:1322
  [9] (::Ipopt.var"#eval_grad_f_cb#43"{Ipopt.Optimizer})(x::Vector{Float64}, grad_f::Vector{Float64})
    @ Ipopt ~/.julia/packages/Ipopt/vtrOr/src/MOI_wrapper.jl:1571
 [10] eval_grad_f_wrapper(n::Int32, x_ptr::Ptr{Float64}, new_x::Int32, grad_f_ptr::Ptr{Float64}, user_data::Ptr{Nothing})
    @ Ipopt ~/.julia/packages/Ipopt/vtrOr/src/Ipopt.jl:163
 [11] solveProblem(prob::IpoptProblem)
    @ Ipopt ~/.julia/packages/Ipopt/vtrOr/src/Ipopt.jl:532
 [12] optimize!(model::Ipopt.Optimizer)
    @ Ipopt ~/.julia/packages/Ipopt/vtrOr/src/MOI_wrapper.jl:1713
 [13] optimize!(b::MathOptInterface.Bridges.LazyBridgeOptimizer{Ipopt.Optimizer})
    @ MathOptInterface.Bridges ~/.julia/packages/MathOptInterface/YDdD3/src/Bridges/bridge_optimizer.jl:319
 [14] optimize!(m::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer, MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.GenericModel{Float64, MathOptInterface.Utilities.ModelFunctionConstraints{Float64}}}})
    @ MathOptInterface.Utilities ~/.julia/packages/MathOptInterface/YDdD3/src/Utilities/cachingoptimizer.jl:252
 [15] optimize!(model::Model, optimizer_factory::Nothing; bridge_constraints::Bool, ignore_optimize_hook::Bool, kwargs::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ JuMP ~/.julia/packages/JuMP/Xrr7O/src/optimizer_interface.jl:185
 [16] optimize! (repeats 2 times)
    @ ~/.julia/packages/JuMP/Xrr7O/src/optimizer_interface.jl:157 [inlined]
 [17] top-level scope
    @ REPL[11]:1
The error occurs because JuMP passes the gradient buffer to ∇f as a view (a SubArray), not a Vector, so the documented signature

function ∇f(g::Vector{T}, x::T...) where {T}

should be

function ∇f(g::AbstractVector{T}, x::T...) where {T}

Reported on the forum: https://discourse.julialang.org/t/jump-optimization-with-vector-input-and-analytical-gradient/63664/2
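
For reference, a minimal standalone sketch (independent of JuMP and Ipopt, with a hypothetical name ∇f_fixed) showing why the relaxed signature works: the stack trace above shows the gradient buffer arriving as a SubArray, which is an AbstractVector but not a Vector.

# Hypothetical standalone check, not part of the JuMP API.
function ∇f_fixed(g::AbstractVector{T}, x::T...) where {T}
    g[1] = 2 * (x[1] - 1)
    g[2] = 2 * (x[2] - 2)
    return
end

buffer = zeros(4)
g = view(buffer, 1:2)   # a SubArray, like the buffer JuMP passes to the gradient
∇f_fixed(g, 1.0, 2.0)   # dispatches fine; with g::Vector{T} this call is a MethodError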
