Add single variable less than equal to #76

Merged
merged 4 commits on Mar 1, 2021
43 changes: 31 additions & 12 deletions src/MOI_wrapper.jl
@@ -74,7 +74,9 @@ Base.@kwdef struct QPCache
Matrix{Float64}, Vector{Float64}, # A, b
Int, Vector{VI}, # nz, var_list
Int, Vector{MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}}}, # nineq, ineq_con_idx
Int, Vector{MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}}} # neq, eq_con_idx
Int, Vector{MOI.ConstraintIndex{MOI.SingleVariable, MOI.LessThan{Float64}}}, # nineq_sv_le, ineq_con_sv_le_idx
Int, Vector{MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, MOI.EqualTo{Float64}}}, # neq, eq_con_idx
Int, Vector{MOI.ConstraintIndex{MOI.SingleVariable, MOI.EqualTo{Float64}}}, # neq_sv, eq_con_sv_idx
}
inequality_duals::Vector{Float64}
equality_duals::Vector{Float64}
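
A note on the cached tuple: `problem_data` stores the QP in the standard form of the paper linked below, with the new `SingleVariable` constraints appended as extra rows of the inequality and equality blocks. As a sketch (assuming the usual DiffOpt convention; the exact form lives in `get_problem_data` and `create_LHS_matrix`):

$$\min_{z \in \mathbb{R}^{nz}} \; \tfrac{1}{2} z^\top Q z + q^\top z \quad \text{s.t.} \quad G z \le h, \qquad A z = b$$

where, after this change, `G` is `(nineq + nineq_sv_le) × nz` and `A` is `(neq + neq_sv) × nz`.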
@@ -288,37 +290,54 @@ For more info refer eqn(7) and eqn(8) of https://arxiv.org/pdf/1703.00443.pdf
function backward!(model::Optimizer, params::Vector{String}, dl_dz::Vector{Float64})
z = model.primal_optimal
if model.gradient_cache !== nothing
(Q, q, G, h, A, b, nz, var_idx, nineq, ineq_con_idx, neq, eq_con_idx) = model.gradient_cache.problem_data
(
Q, q, G, h, A, b, nz, var_list,
nineq, ineq_con_idx, nineq_sv_le, ineq_con_sv_le_idx, neq, eq_con_idx,
neq_sv, eq_con_sv_idx,
) = model.gradient_cache.problem_data
λ = model.gradient_cache.inequality_duals
ν = model.gradient_cache.equality_duals
LHS = model.gradient_cache.lhs

else # full computation
Q, q, G, h, A, b, nz, var_idx, nineq, ineq_con_idx, neq, eq_con_idx = get_problem_data(model.optimizer)
problem_data = get_problem_data(model.optimizer)
(
Q, q, G, h, A, b, nz, var_list,
nineq, ineq_con_idx, nineq_sv_le, ineq_con_sv_le_idx, neq, eq_con_idx,
neq_sv, eq_con_sv_idx,
) = problem_data

# separate λ, ν

λ = [MOI.get(model.optimizer, MOI.ConstraintDual(), con) for con in ineq_con_idx]
ν = [MOI.get(model.optimizer, MOI.ConstraintDual(), con) for con in eq_con_idx]
λ = MOI.get.(model.optimizer, MOI.ConstraintDual(), ineq_con_idx)
append!(
λ,
MOI.get.(model.optimizer, MOI.ConstraintDual(), ineq_con_sv_le_idx)
)
ν = MOI.get.(model.optimizer, MOI.ConstraintDual(), eq_con_idx)
append!(
ν,
MOI.get.(model.optimizer, MOI.ConstraintDual(), eq_con_sv_idx)
)

LHS = create_LHS_matrix(z, λ, Q, G, h, A)
model.gradient_cache = QPCache(
(Q, q, G, h, A, b, nz, var_idx, nineq, ineq_con_idx, neq, eq_con_idx),
problem_data,
λ,
ν,
LHS,
)
end

RHS = [dl_dz; zeros(neq+nineq)]
partial_grads = -(LHS \ RHS)
RHS = [dl_dz; zeros(neq + nineq + neq_sv + nineq_sv_le)]
partial_grads = -LHS \ RHS
dz = partial_grads[1:nz]

if nineq > 0
dλ = partial_grads[nz+1:nz+nineq]
if nineq+nineq_sv_le > 0
dλ = partial_grads[nz+1:nz+nineq+nineq_sv_le]
end
if neq > 0
dν = partial_grads[nz+nineq+1:nz+nineq+neq]
if neq+neq_sv > 0
dν = partial_grads[nz+nineq+nineq_sv_le+1:nz+nineq+nineq_sv_le+neq+neq_sv]
end

grads = []
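
A note on the backward pass: per eqn (7) of https://arxiv.org/pdf/1703.00443.pdf, the solve above should correspond to the differentiated KKT system below — a hedged reconstruction, since the transpose bookkeeping is handled inside `create_LHS_matrix`, which is not shown in this diff:

$$\begin{bmatrix} d_z \\ d_\lambda \\ d_\nu \end{bmatrix} = -\begin{bmatrix} Q & G^\top D(\lambda^\star) & A^\top \\ G & D(Gz^\star - h) & 0 \\ A & 0 & 0 \end{bmatrix}^{-\top} \begin{bmatrix} \partial \ell / \partial z^\star \\ 0 \\ 0 \end{bmatrix}$$

`RHS = [dl_dz; zeros(...)]` supplies one zero per constraint row, and the index arithmetic that slices `partial_grads` into `dz`, `dλ`, `dν` now counts the `SingleVariable` rows (`nineq_sv_le`, `neq_sv`) because their duals are appended after the scalar-affine ones. The slices are then combined with `z`, `λ`, `ν` (eqn (8) of the same paper) to build the returned `grads`.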
50 changes: 44 additions & 6 deletions src/utils.jl
@@ -71,12 +71,19 @@ function get_problem_data(model::MOI.AbstractOptimizer)
model,
MOI.ListOfConstraintIndices{
MOI.ScalarAffineFunction{Float64},
MOI.LessThan{Float64}
MOI.LessThan{Float64},
}())
nineq = length(ineq_con_idx)
ineq_con_sv_le_idx = MOI.get(
model,
MOI.ListOfConstraintIndices{
MOI.SingleVariable,
MOI.LessThan{Float64},
}())
nineq_sv_le = length(ineq_con_sv_le_idx)

G = spzeros(nineq, nz)
h = spzeros(nineq)
G = spzeros(nineq + nineq_sv_le, nz)
h = spzeros(nineq + nineq_sv_le)

for i in 1:nineq
con = ineq_con_idx[i]
@@ -93,6 +100,14 @@ function get_problem_data(model::MOI.AbstractOptimizer)
end
h[i] = set.upper - func.constant
end
for i in eachindex(ineq_con_sv_le_idx)
con = ineq_con_sv_le_idx[i]
func = MOI.get(model, MOI.ConstraintFunction(), con)
set = MOI.get(model, MOI.ConstraintSet(), con)
vidx = findfirst(v -> v == func.variable, var_list)
G[i+nineq,vidx] = 1
h[i+nineq] = MOI.constant(set)
end

# handle equality constraints
eq_con_idx = MOI.get(
@@ -103,8 +118,16 @@
}())
neq = length(eq_con_idx)

A = spzeros(neq, nz)
b = spzeros(neq)
eq_con_sv_idx = MOI.get(
model,
MOI.ListOfConstraintIndices{
MOI.SingleVariable,
MOI.EqualTo{Float64}
}())
neq_sv = length(eq_con_sv_idx)

A = spzeros(neq + neq_sv, nz)
b = spzeros(neq + neq_sv)

for i in 1:neq
con = eq_con_idx[i]
@@ -119,6 +142,14 @@
end
b[i] = set.value - func.constant
end
for i in 1:neq_sv
con = eq_con_sv_idx[i]
func = MOI.get(model, MOI.ConstraintFunction(), con)
set = MOI.get(model, MOI.ConstraintSet(), con)
vidx = findfirst(v -> v == func.variable, var_list)
A[i+neq,vidx] = 1
b[i+neq] = set.value
end


# handle objective
@@ -146,7 +177,14 @@
q = MOI.coefficient.(objective_function.affine_terms)
end

return (Q, q, G, h, A, b, nz, var_list, nineq, ineq_con_idx, neq, eq_con_idx)
return (
Q, q, G, h, A, b,
nz, var_list,
nineq, ineq_con_idx,
nineq_sv_le, ineq_con_sv_le_idx,
neq, eq_con_idx,
neq_sv, eq_con_sv_idx,
)
end

# used for testing mostly
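
A note on the stacking pattern in `get_problem_data`: each `SingleVariable` bound becomes one extra row with a unit coefficient at the variable's index. A standalone sketch with made-up data — `nz`, `nineq`, `vidx` here are local stand-ins, not the function's actual state:

```julia
using SparseArrays

# Stand-ins: 3 variables, 2 scalar-affine <= rows, 1 variable bound.
nz, nineq, nineq_sv_le = 3, 2, 1
G = spzeros(nineq + nineq_sv_le, nz)
h = spzeros(nineq + nineq_sv_le)

# Scalar-affine rows: 3x + 2y + z <= 10 and 2x + 5y + 3z <= 15.
G[1, :] = [3.0, 2.0, 1.0]; h[1] = 10.0
G[2, :] = [2.0, 5.0, 3.0]; h[2] = 15.0

# The bound y <= 2 is appended as a unit row, as in the loops above:
vidx = 2                   # position of y in var_list
G[nineq + 1, vidx] = 1.0   # unit coefficient of the bounded variable
h[nineq + 1] = 2.0         # MOI.constant(set), i.e. the upper bound

# G is now 3×3 and h has 3 entries; EqualTo bounds go into (A, b) the same way.
```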
135 changes: 135 additions & 0 deletions test/MOI_wrapper.jl
@@ -558,6 +558,141 @@ end
@test grads[4] ≈ [0.0; 1/3; -1/3; 2/3; 0.0] atol=ATOL rtol=RTOL
end

@testset "Differentiating LP with variable bounds" begin
# max 2x + 3y + 4z
# s.t. 3x+2y+z <= 10
# 2x+5y+3z <= 15
# x ≤ 3
# 0 ≤ y ≤ 2
# z ≤ 6
# x,y,z >= 0
# variant of previous test with same solution

model = diff_optimizer(GLPK.Optimizer)
MOI.set(model, MOI.Silent(), true)
v = MOI.add_variables(model, 3)

# define objective
objective_function = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-2.0, -3.0, -4.0], v), 0.0)
MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), objective_function)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)

# set constraints
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([3.0, 2.0, 1.0], v), 0.),
MOI.LessThan(10.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 5.0, 3.0], v), 0.),
MOI.LessThan(15.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-1.0, 0.0, 0.0], v), 0.),
MOI.LessThan(0.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([0.0, -1.0, 0.0], v), 0.),
MOI.LessThan(0.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([0.0, 0.0, -1.0], v), 0.),
MOI.LessThan(0.0),
)
# x ≤ 3
MOI.add_constraint(
model,
MOI.SingleVariable(v[1]),
MOI.LessThan(3.0),
)
# 0 ≤ y ≤ 2
MOI.add_constraint(
model,
MOI.SingleVariable(v[2]),
MOI.LessThan(2.0),
)
# z ≤ 6
MOI.add_constraint(
model,
MOI.SingleVariable(v[3]),
MOI.LessThan(6.0),
)

MOI.optimize!(model)

# obtain gradients
grads = backward!(model, ["Q", "q", "G", "h"], ones(3)) # using dl_dz=[1,1,1]

@test grads[1] ≈ zeros(3,3) atol=ATOL rtol=RTOL
@test grads[2] ≈ zeros(3) atol=ATOL rtol=RTOL
@test grads[3] ≈ [0.0 0.0 0.0;
0.0 0.0 -5/3;
0.0 0.0 5/3;
0.0 0.0 -10/3;
0.0 0.0 0.0;
0.0 0.0 0.0;
0.0 0.0 0.0;
0.0 0.0 0.0
] atol=ATOL rtol=RTOL
@test grads[4] ≈ [0.0, 1/3, -1/3, 2/3, 0.0, 0.0, 0.0, 0.0] atol=ATOL rtol=RTOL

model = diff_optimizer(GLPK.Optimizer)
MOI.set(model, MOI.Silent(), true)
v = MOI.add_variables(model, 3)

# define objective
objective_function = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-2.0, -3.0, -4.0], v), 0.0)
MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), objective_function)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)

# set constraints
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([3.0, 2.0, 1.0], v), 0.0),
MOI.LessThan(10.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([2.0, 5.0, 3.0], v), 0.0),
MOI.LessThan(15.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([0.0, -1.0, 0.0], v), 0.0),
MOI.LessThan(0.0),
)
MOI.add_constraint(
model,
MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([0.0, 0.0, -1.0], v), 0.0),
MOI.LessThan(0.0),
)
# 0 = x
MOI.add_constraint(
model,
MOI.SingleVariable(v[1]),
MOI.EqualTo(0.0),
)

MOI.optimize!(model)

# obtain gradients
grads = backward!(model, ["Q", "q", "G", "h", "A", "b"], ones(3)) # using dl_dz=[1,1,1]

@test grads[1] ≈ zeros(3,3) atol=ATOL rtol=RTOL
@test grads[2] ≈ zeros(3) atol=ATOL rtol=RTOL
@test grads[3] ≈ [0.0 0.0 0.0;
0.0 0.0 -5/3;
0.0 0.0 -10/3;
0.0 0.0 0.0
] atol=ATOL rtol=RTOL
@test grads[4] ≈ [0.0, 1/3, 2/3, 0.0] atol=ATOL rtol=RTOL
@test grads[5] ≈ [0.0 0.0 -5/3] atol=ATOL rtol=RTOL
@test grads[6] ≈ [1/3] atol=ATOL rtol=RTOL
end

@testset "Differentiating simple SOCP" begin
# referred from _soc2test, https://github.com/jump-dev/MathOptInterface.jl/blob/master/src/Test/contconic.jl#L1355
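
A note on reading the new tests: `grads[3]` and `grads[4]` now carry one row/entry per inequality — the scalar-affine rows first, then the `SingleVariable` bounds — which is why the bound rows show up as trailing zeros when the bounds are inactive at the optimum. A hedged end-to-end sketch against the API as exercised in these tests (`diff_optimizer` and `backward!` as defined in this PR; later DiffOpt versions may differ):

```julia
using MathOptInterface, GLPK
using DiffOpt   # assumed, as in the test setup
const MOI = MathOptInterface

model = diff_optimizer(GLPK.Optimizer)
MOI.set(model, MOI.Silent(), true)
x = MOI.add_variable(model)

# min -x  s.t.  x <= 1 — a single variable bound, active at the optimum.
MOI.add_constraint(model, MOI.SingleVariable(x), MOI.LessThan(1.0))
obj = MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(-1.0, x)], 0.0)
MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), obj)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
MOI.optimize!(model)

# Differentiate with dl/dz = [1.0]; grads come back in the requested order.
grads = backward!(model, ["Q", "q", "G", "h"], [1.0])
```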
1 change: 1 addition & 0 deletions test/solver_interface.jl
@@ -97,6 +97,7 @@ end
"solve_qcp_edge_cases", # currently only affine or conic constraints
"solve_objbound_edge_cases",
"solve_qp_edge_cases", # No quadratics
"solve_qp_zero_offdiag",
"update_dimension_nonnegative_variables", # TODO: fix this

# see below