16 changes: 13 additions & 3 deletions src/quasi-newton.jl
@@ -45,14 +45,14 @@ function NLPModels.reset_data!(nlp::QuasiNewtonModel)
end

# the following methods are not affected by the Hessian approximation
for meth in (:obj, :grad, :cons, :jac_coord, :jac)
for meth in (:obj, :grad, :cons, :cons_lin, :cons_nln, :jac_coord, :jac_lin_coord, :jac_nln_coord, :jac, :jac_lin, :jac_nln)
@eval NLPModels.$meth(nlp::QuasiNewtonModel, x::AbstractVector) = $meth(nlp.model, x)
end
for meth in (:grad!, :cons!, :jprod, :jtprod, :objgrad, :objgrad!, :jac_coord!)
for meth in (:grad!, :cons!, :cons_lin!, :cons_nln!, :jprod, :jprod_lin, :jprod_nln, :jtprod, :jtprod_lin, :jtprod_nln, :objgrad, :objgrad!, :jac_coord!, :jac_lin_coord!, :jac_nln_coord!)
@eval NLPModels.$meth(nlp::QuasiNewtonModel, x::AbstractVector, y::AbstractVector) =
$meth(nlp.model, x, y)
end
for meth in (:jprod!, :jtprod!)
for meth in (:jprod!, :jprod_lin!, :jprod_nln!, :jtprod!, :jtprod_lin!, :jtprod_nln!)
@eval NLPModels.$meth(
nlp::QuasiNewtonModel,
x::AbstractVector,
@@ -65,6 +65,16 @@ NLPModels.jac_structure!(
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
) = jac_structure!(nlp.model, rows, cols)
NLPModels.jac_lin_structure!(
nlp::QuasiNewtonModel,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
) = jac_lin_structure!(nlp.model, rows, cols)
NLPModels.jac_nln_structure!(
nlp::QuasiNewtonModel,
rows::AbstractVector{<:Integer},
cols::AbstractVector{<:Integer},
) = jac_nln_structure!(nlp.model, rows, cols)

# the following methods are affected by the Hessian approximation
NLPModels.hess_op(nlp::QuasiNewtonModel, x::AbstractVector; kwargs...) = nlp.op
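
For reference, the @eval loops above only generate thin forwarding methods. As an illustration (this hand-expansion is not part of the diff), the new :cons_lin entry of the single-argument loop is equivalent to writing

    NLPModels.cons_lin(nlp::QuasiNewtonModel, x::AbstractVector) = cons_lin(nlp.model, x)

so every linear/nonlinear constraint query is delegated to the wrapped model, and only the Hessian-related methods below involve the quasi-Newton operator nlp.op.
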
40 changes: 36 additions & 4 deletions test/nlp/quasi-newton.jl
@@ -3,7 +3,11 @@
f(x) = (x[1] - 2)^2 + (x[2] - 1)^2
∇f(x) = [2 * (x[1] - 2); 2 * (x[2] - 1)]
c(x) = [x[1] - 2x[2] + 1; -x[1]^2 / 4 - x[2]^2 + 1]
clin(x) = [x[1] - 2x[2] + 1]
cnln(x) = [-x[1]^2 / 4 - x[2]^2 + 1]
J(x) = [1.0 -2.0; -0.5x[1] -2.0x[2]]
Jlin(x) = [1.0 -2.0]
Jnln(x) = [-0.5x[1] -2.0x[2]]

for (QNM, QNO) in [(LSR1Model, LSR1Operator), (LBFGSModel, LBFGSOperator)],
T in [Float64, Float32],
@@ -37,6 +41,18 @@
@test jac(nlp, x) ≈ J(x)
@test jprod(nlp, x, v) ≈ J(x) * v
@test jtprod(nlp, x, w) ≈ J(x)' * w
if nlp.meta.lin_nnzj > 0
@test cons_lin(nlp, x) ≈ clin(x)
@test jac_lin(nlp, x) ≈ Jlin(x)
@test jprod_lin(nlp, x, v) ≈ Jlin(x) * v
@test jtprod_lin(nlp, x, w[nlp.meta.lin]) ≈ Jlin(x)' * w[nlp.meta.lin]
end
if nlp.meta.nln_nnzj > 0
@test cons_nln(nlp, x) ≈ cnln(x)
@test jac_nln(nlp, x) ≈ Jnln(x)
@test jprod_nln(nlp, x, v) ≈ Jnln(x) * v
@test jtprod_nln(nlp, x, w[nlp.meta.nln]) ≈ Jnln(x)' * w[nlp.meta.nln]
end

# Increasing coverage
fx, cx = objcons(nlp, x)
@@ -53,12 +69,28 @@
@test gx ≈ ∇f(x)
@test jprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), v, Jv) ≈ J(x) * v
@test jtprod!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), w, Jtw) ≈ J(x)' * w
@test jprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), v, Jv[nlp.meta.lin]) ≈ Jlin(x) * v
@test jtprod_lin!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), w[nlp.meta.lin], Jtw) ≈ Jlin(x)' * w[nlp.meta.lin]
@test jprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), v, Jv[nlp.meta.nln]) ≈ Jnln(x) * v
@test jtprod_nln!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), w[nlp.meta.nln], Jtw) ≈ Jnln(x)' * w[nlp.meta.nln]
Jop = jac_op!(nlp, x, Jv, Jtw)
@test Jop * v ≈ J(x) * v
@test Jop' * w ≈ J(x)' * w
Jop = jac_op!(nlp, jac_structure(nlp)..., jac_coord(nlp, x), Jv, Jtw)
@test Jop * v ≈ J(x) * v
@test Jop' * w ≈ J(x)' * w
Jop = jac_lin_op!(nlp, x, Jv[nlp.meta.lin], Jtw)
@test Jop * v ≈ Jlin(x) * v
@test Jop' * w[nlp.meta.lin] ≈ Jlin(x)' * w[nlp.meta.lin]
Jop = jac_lin_op!(nlp, jac_lin_structure(nlp)..., jac_lin_coord(nlp, x), Jv[nlp.meta.lin], Jtw)
@test Jop * v ≈ Jlin(x) * v
@test Jop' * w[nlp.meta.lin] ≈ Jlin(x)' * w[nlp.meta.lin]
Jop = jac_nln_op!(nlp, x, Jv[nlp.meta.nln], Jtw)
@test Jop * v ≈ Jnln(x) * v
@test Jop' * w[nlp.meta.nln] ≈ Jnln(x)' * w[nlp.meta.nln]
Jop = jac_nln_op!(nlp, jac_nln_structure(nlp)..., jac_nln_coord(nlp, x), Jv[nlp.meta.nln], Jtw)
@test Jop * v ≈ Jnln(x) * v
@test Jop' * w[nlp.meta.nln] ≈ Jnln(x)' * w[nlp.meta.nln]
Hop = hess_op(nlp, x)
@test Hop * v ≈ H(x) * v
Hop = hess_op!(nlp, x, Hv)
@@ -85,8 +117,8 @@
low/upp: ████████████████████ 2 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nnzh: ( 33.33% sparsity) 2 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nonlinear: ████████████████████ 2
nnzh: ( 33.33% sparsity) 2 linear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nonlinear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nnzj: ( 0.00% sparsity) 4

Counters:
@@ -114,8 +146,8 @@
low/upp: ████████████████████ 2 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nnzh: ( 33.33% sparsity) 2 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nonlinear: ████████████████████ 2
nnzh: ( 33.33% sparsity) 2 linear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nonlinear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nnzj: ( 0.00% sparsity) 4

Counters:
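
Beyond the test loop above, the new split API can be exercised directly on a wrapped model. A minimal sketch, assuming the SimpleNLPModel/SimpleNLPMeta helpers defined in test/nlp/simple-model.jl are already loaded and using the starting point stored in the meta:

    using NLPModels, NLPModelsModifiers

    nlp = LBFGSModel(SimpleNLPModel(Float64, SimpleNLPMeta))  # quasi-Newton wrapper around the test model
    x = nlp.meta.x0
    cons_lin(nlp, x)   # forwarded to the wrapped model: [x[1] - 2x[2] + 1]
    jac_nln(nlp, x)    # forwarded: 1x2 Jacobian of the nonlinear constraint row

Both calls dispatch to the forwarding methods generated in src/quasi-newton.jl and hence reuse the wrapped model's constraint code.
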
111 changes: 89 additions & 22 deletions test/nlp/simple-model.jl
@@ -47,6 +47,8 @@ mutable struct SimpleNLPMeta{T, S} <: AbstractNLPModelMeta{T, S}

nnzo::Int
nnzj::Int
lin_nnzj::Int
nln_nnzj::Int
nnzh::Int

nlin::Int
@@ -72,11 +74,10 @@ mutable struct SimpleNLPMeta{T, S} <: AbstractNLPModelMeta{T, S}
ucon::S = fill!(S(undef, ncon), T(Inf)),
nnzo = nvar,
nnzj = nvar * ncon,
lin_nnzj = 0,
nln_nnzj = nvar * ncon,
nnzh = nvar * (nvar + 1) / 2,
lin = Int[],
nln = 1:ncon,
nlin = length(lin),
nnln = length(nln),
minimize = true,
islp = false,
name = "Generic",
@@ -87,9 +88,7 @@ mutable struct SimpleNLPMeta{T, S} <: AbstractNLPModelMeta{T, S}

@lencheck nvar x0 lvar uvar
@lencheck ncon y0 lcon ucon
@lencheck nlin lin
@lencheck nnln nln
@rangecheck 1 ncon lin nln
@rangecheck 1 ncon lin

ifix = findall(lvar .== uvar)
ilow = findall((lvar .> T(-Inf)) .& (uvar .== T(Inf)))
@@ -108,6 +107,10 @@ mutable struct SimpleNLPMeta{T, S} <: AbstractNLPModelMeta{T, S}
nnzj = max(0, nnzj)
nnzh = max(0, nnzh)

nln = setdiff(1:ncon, lin)
nlin = length(lin)
nnln = length(nln)

new{T, S}(
nvar,
x0,
@@ -134,6 +137,8 @@ mutable struct SimpleNLPMeta{T, S} <: AbstractNLPModelMeta{T, S}
jinf,
nnzo,
nnzj,
lin_nnzj,
nln_nnzj,
nnzh,
nlin,
nnln,
@@ -159,6 +164,9 @@ function SimpleNLPModel(::Type{T}, ::Type{NLPModelMeta}) where {T}
lcon = T[0; 0],
ucon = T[0; Inf],
name = "Simple NLP Model",
lin_nnzj = 2,
nln_nnzj = 2,
lin = [1],
)
return SimpleNLPModel(meta, Counters())
end
@@ -173,6 +181,9 @@ function SimpleNLPModel(::Type{T}, ::Type{SimpleNLPMeta}) where {T}
lcon = T[0; 0],
ucon = T[0; Inf],
name = "Simple NLP Model",
lin_nnzj = 2,
nln_nnzj = 2,
lin = [1],
)
return SimpleNLPModel(meta, Counters())
end
@@ -235,53 +246,109 @@ function NLPModels.hprod!(
return Hv
end

function NLPModels.cons!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
@lencheck 2 x cx
function NLPModels.cons_lin!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
@lencheck 2 x
@lencheck 1 cx
increment!(nlp, :neval_cons)
cx .= [x[1] - 2 * x[2] + 1; -x[1]^2 / 4 - x[2]^2 + 1]
cx .= [x[1] - 2 * x[2] + 1]
return cx
end

function NLPModels.jac_structure!(
function NLPModels.cons_nln!(nlp::SimpleNLPModel, x::AbstractVector, cx::AbstractVector)
@lencheck 2 x
@lencheck 1 cx
increment!(nlp, :neval_cons)
cx .= [-x[1]^2 / 4 - x[2]^2 + 1]
return cx
end

function NLPModels.jac_lin_structure!(
nlp::SimpleNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 4 rows cols
rows .= [1, 2, 1, 2]
cols .= [1, 1, 2, 2]
@lencheck 2 rows cols
rows .= [1, 1]
cols .= [1, 2]
return rows, cols
end

function NLPModels.jac_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
function NLPModels.jac_nln_structure!(
nlp::SimpleNLPModel,
rows::AbstractVector{Int},
cols::AbstractVector{Int},
)
@lencheck 2 rows cols
rows .= [1, 1]
cols .= [1, 2]
return rows, cols
end

function NLPModels.jac_lin_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
@lencheck 2 x
@lencheck 4 vals
@lencheck 2 vals
increment!(nlp, :neval_jac)
vals .= [1, -x[1] / 2, -2, -2 * x[2]]
vals .= [1, -2]
return vals
end

function NLPModels.jprod!(
function NLPModels.jac_nln_coord!(nlp::SimpleNLPModel, x::AbstractVector, vals::AbstractVector)
@lencheck 2 x
@lencheck 2 vals
increment!(nlp, :neval_jac)
vals .= [-x[1] / 2, -2 * x[2]]
return vals
end

function NLPModels.jprod_lin!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
)
@lencheck 2 x v
@lencheck 1 Jv
increment!(nlp, :neval_jprod)
Jv .= [v[1] - 2 * v[2]]
return Jv
end

function NLPModels.jprod_nln!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
)
@lencheck 2 x v Jv
@lencheck 2 x v
@lencheck 1 Jv
increment!(nlp, :neval_jprod)
Jv .= [v[1] - 2 * v[2]; -x[1] * v[1] / 2 - 2 * x[2] * v[2]]
Jv .= [-x[1] * v[1] / 2 - 2 * x[2] * v[2]]
return Jv
end

function NLPModels.jtprod!(
function NLPModels.jtprod_lin!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
)
@lencheck 2 x Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod)
Jtv .= [v[1]; -2 * v[1]]
return Jtv
end

function NLPModels.jtprod_nln!(
nlp::SimpleNLPModel,
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
)
@lencheck 2 x v Jtv
@lencheck 2 x Jtv
@lencheck 1 v
increment!(nlp, :neval_jtprod)
Jtv .= [v[1] - x[1] * v[2] / 2; -2 * v[1] - 2 * x[2] * v[2]]
Jtv .= [-x[1] * v[1] / 2; -2 * x[2] * v[1]]
return Jtv
end

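
As a sanity check on the split encoded above (lin = [1], lin_nnzj = 2, nln_nnzj = 2): the first constraint x[1] - 2x[2] + 1 is linear with a constant Jacobian row, while -x[1]^2/4 - x[2]^2 + 1 is nonlinear. Hand-computed for illustration only, with nlp a SimpleNLPModel instance:

    # J_lin(x) = [1  -2]            -> 2 structural nonzeros, hence lin_nnzj = 2
    # J_nln(x) = [-x[1]/2  -2x[2]]  -> 2 structural nonzeros, hence nln_nnzj = 2
    # At x = [2.0, 1.0]:
    #   jprod_nln!(nlp, x, [1.0, 1.0], Jv)  fills Jv with [-1.0 - 2.0] = [-3.0]
    #   jtprod_nln!(nlp, x, [3.0], Jtv)     fills Jtv with [-1.0; -2.0] * 3.0 = [-3.0, -6.0]
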
4 changes: 2 additions & 2 deletions test/nlp/slack-model.jl
@@ -107,8 +107,8 @@
low/upp: ██████████████⋅⋅⋅⋅⋅⋅ 2 low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 fixed: ████████████████████ 2
infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0 infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nnzh: ( 66.67% sparsity) 2 linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
nonlinear: ████████████████████ 2
nnzh: ( 66.67% sparsity) 2 linear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nonlinear: ██████████⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 1
nnzj: ( 16.67% sparsity) 5

Counters: