1 change: 1 addition & 0 deletions .gitignore
@@ -6,3 +6,4 @@
.ipynb_checkpoints/
Manifest.toml
benchmark/*.json
test/Project.toml
8 changes: 4 additions & 4 deletions Project.toml
@@ -6,26 +6,26 @@ version = "0.12.6"
AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MathProgBase = "fdba3010-5040-5b88-9595-932c9decdf73"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[compat]
AbstractTrees = "^0.2.1"
BenchmarkTools = "^0.4"
MathProgBase = "^0.7"
MathOptInterface = "~0.9"
OrderedCollections = "^1.0"
julia = "^1.0"

[extras]
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
GLPKMathProgInterface = "3c7084bd-78ad-589a-b5bb-dbd673274bea"
GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["ECOS", "GLPKMathProgInterface", "LinearAlgebra", "Random", "SCS", "Statistics", "Test"]
test = ["ECOS", "GLPK", "LinearAlgebra", "Random", "SCS", "Statistics", "Test"]
4 changes: 2 additions & 2 deletions README.md
@@ -5,7 +5,7 @@
[![](https://img.shields.io/badge/docs-stable-blue.svg)](https://www.juliaopt.org/Convex.jl/stable)
[![](https://img.shields.io/badge/docs-dev-blue.svg)](https://www.juliaopt.org/Convex.jl/dev)

**Convex.jl** is a [Julia](http://julialang.org) package for [Disciplined Convex Programming](http://dcp.stanford.edu/). Convex.jl can solve linear programs, mixed-integer linear programs, and DCP-compliant convex programs using a variety of solvers, including [Mosek](https://github.com/JuliaOpt/Mosek.jl), [Gurobi](https://github.com/JuliaOpt/Gurobi.jl), [ECOS](https://github.com/JuliaOpt/ECOS.jl), [SCS](https://github.com/JuliaOpt/SCS.jl), and [GLPK](https://github.com/JuliaOpt/GLPK.jl), through the [MathProgBase](http://mathprogbasejl.readthedocs.org/en/latest/) interface. It also supports optimization with complex variables and coefficients.
**Convex.jl** is a [Julia](http://julialang.org) package for [Disciplined Convex Programming](http://dcp.stanford.edu/). Convex.jl can solve linear programs, mixed-integer linear programs, and DCP-compliant convex programs using a variety of solvers, including [Mosek](https://github.com/JuliaOpt/Mosek.jl), [Gurobi](https://github.com/JuliaOpt/Gurobi.jl), [ECOS](https://github.com/JuliaOpt/ECOS.jl), [SCS](https://github.com/JuliaOpt/SCS.jl), and [GLPK](https://github.com/JuliaOpt/GLPK.jl), through [MathOptInterface](https://github.com/JuliaOpt/MathOptInterface.jl). It also supports optimization with complex variables and coefficients.

**Installation**: `julia> Pkg.add("Convex")`

@@ -38,7 +38,7 @@ x = Variable(n)
problem = minimize(sumsquares(A * x - b), [x >= 0])

# Solve the problem by calling solve!
solve!(problem, SCSSolver())
solve!(problem, SCS.Optimizer())

# Check the status of the problem
problem.status # :Optimal, :Infeasible, :Unbounded etc.
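The user-facing core of this migration is the second argument to `solve!`: MathProgBase solver objects such as `SCSSolver(verbose=0)` or `GLPKSolverMIP()` are replaced by MathOptInterface optimizer constructors such as `SCS.Optimizer(verbose=0)` or `GLPK.Optimizer()`. A minimal before/after sketch (toy problem for illustration; not taken from this diff):

```julia
using Convex, SCS

x = Variable(4)
p = minimize(sumsquares(x - 1), [x >= 0])

# Pre-migration (MathProgBase) call:
#   solve!(p, SCSSolver(verbose=0))

# Post-migration (MathOptInterface) call:
solve!(p, SCS.Optimizer(verbose=0))

p.optval
```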
11 changes: 8 additions & 3 deletions benchmark/benchmarks.jl
@@ -2,12 +2,14 @@ using Pkg
tempdir = mktempdir()
Pkg.activate(tempdir)
Pkg.develop(PackageSpec(path=joinpath(@__DIR__, "..")))
Pkg.add(["BenchmarkTools", "PkgBenchmark"])
Pkg.add(["BenchmarkTools", "PkgBenchmark", "MathOptInterface"])
Pkg.resolve()

using Convex: Convex, ProblemDepot
using BenchmarkTools

using MathOptInterface
const MOI = MathOptInterface
const MOIU = MOI.Utilities

const SUITE = BenchmarkGroup()

@@ -30,4 +32,7 @@ problems = [
"mip_integer_variables",
]

SUITE["formulation"] = ProblemDepot.benchmark_suite(Convex.conic_problem, problems)
SUITE["formulation"] = ProblemDepot.benchmark_suite(problems) do problem
model = MOIU.MockOptimizer(MOIU.Model{Float64}())
Convex.load_MOI_model!(model, problem)
end
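For context on the rewritten suite: the `do`-block loads each problem into a MathOptInterface `MockOptimizer` rather than going through the old `Convex.conic_problem` path, so no real solver runs and the "formulation" group times only problem formulation. A sketch of running the suite locally (assumes PkgBenchmark's standard `benchmarkpkg`/`export_markdown` entry points; not part of this diff):

```julia
using PkgBenchmark

# Run benchmark/benchmarks.jl against the local Convex.jl checkout and collect timings.
results = benchmarkpkg("Convex")

# Optionally write a human-readable report.
export_markdown("benchmark_results.md", results)
```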
2 changes: 1 addition & 1 deletion docs/Project.toml
@@ -6,7 +6,7 @@ DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
GLPKMathProgInterface = "3c7084bd-78ad-589a-b5bb-dbd673274bea"
GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
Interact = "c601a237-2ae4-5e1e-952c-7a85b0c7eef1"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
10 changes: 5 additions & 5 deletions docs/examples_literate/general_examples/basic_usage.jl
@@ -8,7 +8,7 @@ end

using SCS
## passing in verbose=0 to hide output from SCS
solver = SCSSolver(verbose=0)
solver = SCS.Optimizer(verbose=0)

# ### Linear program
#
@@ -73,7 +73,7 @@ p.optval

x = Variable(4)
p = satisfy(norm(x) <= 100, exp(x[1]) <= 5, x[2] >= 7, geomean(x[3], x[4]) >= x[2])
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
println(p.status)
x.value

@@ -82,7 +82,7 @@ x.value

y = Semidefinite(2)
p = maximize(lambdamin(y), tr(y)<=6)
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
p.optval

#-
@@ -105,10 +105,10 @@ y.value
# $$
#

using GLPKMathProgInterface
using GLPK
x = Variable(4, :Int)
p = minimize(sum(x), x >= 0.5)
solve!(p, GLPKSolverMIP())
solve!(p, GLPK.Optimizer())
x.value

#-
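A migration detail behind the mixed-integer change above: under MathProgBase, GLPK was used through GLPKMathProgInterface, which exposed separate `GLPKSolverLP` and `GLPKSolverMIP` types, whereas GLPK.jl's MathOptInterface wrapper exposes a single `GLPK.Optimizer` covering both continuous and integer models. A short sketch mirroring the example above:

```julia
using Convex, GLPK

x = Variable(4, :Int)            # integer-valued variable
p = minimize(sum(x), x >= 0.5)

# Previously: solve!(p, GLPKSolverMIP())
solve!(p, GLPK.Optimizer())      # one optimizer type for both LP and MIP under MOI
x.value
```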
@@ -25,7 +25,7 @@ p.constraints += a1' * x_c + r * norm(a1, 2) <= b[1];
p.constraints += a2' * x_c + r * norm(a2, 2) <= b[2];
p.constraints += a3' * x_c + r * norm(a3, 2) <= b[3];
p.constraints += a4' * x_c + r * norm(a4, 2) <= b[4];
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
p.optval

# Generate the figure
2 changes: 1 addition & 1 deletion docs/examples_literate/general_examples/control.jl
@@ -121,7 +121,7 @@ push!(constraints, velocity[:, T] == 0)

## Solve the problem
problem = minimize(sumsquares(force), constraints)
solve!(problem, SCSSolver(verbose=0))
solve!(problem, SCS.Optimizer(verbose=0))

# We can plot the trajectory taken by the object.

6 changes: 3 additions & 3 deletions docs/examples_literate/general_examples/huber_regression.jl
@@ -40,18 +40,18 @@ for i=1:length(p_vals)
fit = norm(beta - beta_true) / norm(beta_true);
cost = norm(X' * beta - Y);
prob = minimize(cost);
solve!(prob, SCSSolver(verbose=0));
solve!(prob, SCS.Optimizer(verbose=0));
lsq_data[i] = evaluate(fit);

## Form and solve a prescient regression problem,
## i.e., where the sign changes are known.
cost = norm(factor .* (X'*beta) - Y);
solve!(minimize(cost), SCSSolver(verbose=0))
solve!(minimize(cost), SCS.Optimizer(verbose=0))
prescient_data[i] = evaluate(fit);

## Form and solve the Huber regression problem.
cost = sum(huber(X' * beta - Y, 1));
solve!(minimize(cost), SCSSolver(verbose=0))
solve!(minimize(cost), SCS.Optimizer(verbose=0))
huber_data[i] = evaluate(fit);
end

@@ -23,7 +23,7 @@ X = hcat(ones(size(iris, 1)), iris.SepalLength, iris.SepalWidth, iris.PetalLengt
n, p = size(X)
beta = Variable(p)
problem = minimize(logisticloss(-Y.*(X*beta)))
solve!(problem, SCSSolver(verbose=false))
solve!(problem, SCS.Optimizer(verbose=false))

# Let's see how well the model fits.
using Plots
2 changes: 1 addition & 1 deletion docs/examples_literate/general_examples/max_entropy.jl
@@ -23,7 +23,7 @@ b = rand(m, 1);

x = Variable(n);
problem = maximize(entropy(x), sum(x) == 1, A * x <= b)
solve!(problem, SCSSolver(verbose=false))
solve!(problem, SCS.Optimizer(verbose=false))
problem.optval

#-
@@ -46,7 +46,7 @@ D = Variable(m, n);
Si = [min(R[i]*dot(P[i,:], D[i,:]'), B[i]) for i=1:m];
problem = maximize(sum(Si),
[D >= 0, sum(D, dims=1)' <= T, sum(D, dims=2) >= c]);
solve!(problem, SCSSolver(verbose=0));
solve!(problem, SCS.Optimizer(verbose=0));

#-

@@ -43,18 +43,18 @@ x = Variable(n)

# Case 1: Nominal optimal solution
p = minimize(norm(A * x - b, 2))
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
x_nom = evaluate(x)

# Case 2: Stochastic robust approximation
P = 1 / 3 * B' * B;
p = minimize(square(pos(norm(A * x - b))) + quadform(x, Symmetric(P)))
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
x_stoch = evaluate(x)

# Case 3: Worst-case robust approximation
p = minimize(max(norm((A - B) * x - b), norm((A + B) * x - b)))
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
x_wc = evaluate(x)

# Plot residuals:
2 changes: 1 addition & 1 deletion docs/examples_literate/general_examples/svm.jl
@@ -34,7 +34,7 @@ neg_data = rand(MvNormal([-1.0, 2.0], 1.0), M);

#-

function svm(pos_data, neg_data, solver=SCSSolver(verbose=0))
function svm(pos_data, neg_data, solver=SCS.Optimizer(verbose=0))
## Create variables for the separating hyperplane w'*x = b.
w = Variable(n)
b = Variable()
@@ -34,8 +34,8 @@ beta_vals = zeros(length(beta), TRIALS);
for i = 1:TRIALS
lambda = lambda_vals[i];
problem = minimize(loss/m + lambda*reg);
solve!(problem, ECOSSolver(verbose=0));
## solve!(problem, SCSSolver(verbose=0,linear_solver=SCS.Direct, eps=1e-3))
solve!(problem, ECOS.Optimizer(verbose=0));
## solve!(problem, SCS.Optimizer(verbose=0,linear_solver=SCS.Direct, eps=1e-3))
train_error[i] = sum(float(sign.(X*beta_true .+ offset) .!= sign.(evaluate(X*beta - v))))/m;
test_error[i] = sum(float(sign.(X_test*beta_true .+ offset) .!= sign.(evaluate(X_test*beta - v))))/TEST;
beta_vals[:, i] = evaluate(beta);
@@ -18,7 +18,7 @@ x = Variable(n);
for i=1:length(gammas)
cost = sumsquares(A*x - b) + gammas[i]*norm(x,1);
problem = minimize(cost, [norm(x, Inf) <= 1]);
solve!(problem, SCSSolver(verbose=0));
solve!(problem, SCS.Optimizer(verbose=0));
x_values[:,i] = evaluate(x);
end

@@ -18,7 +18,7 @@ w = Variable(n);
ret = dot(r, w);
risk = sum(quadform(w, Sigma_nom));
problem = minimize(risk, [sum(w) == 1, ret >= 0.1, norm(w, 1) <= 2])
solve!(problem, SCSSolver(verbose=0));
solve!(problem, SCS.Optimizer(verbose=0));
wval = vec(evaluate(w))

#-
@@ -31,7 +31,7 @@ problem = maximize(risk, [Sigma == Sigma_nom + Delta,
diag(Delta) == 0,
abs(Delta) <= 0.2,
Delta == Delta']);
solve!(problem, SCSSolver(verbose=0));
solve!(problem, SCS.Optimizer(verbose=0));
println("standard deviation = ", round(sqrt(wval' * Sigma_nom * wval), sigdigits=2));
println("worst-case standard deviation = ", round(sqrt(evaluate(risk)), sigdigits=2));
println("worst-case Delta = ");
4 changes: 2 additions & 2 deletions docs/examples_literate/mixed_integer/binary_knapsack.jl
@@ -21,8 +21,8 @@ n = length(w)

#-

using Convex, GLPKMathProgInterface
using Convex, GLPK
x = Variable(n, :Bin)
problem = maximize(dot(p, x), dot(w, x) <= C)
solve!(problem, GLPKSolverMIP())
solve!(problem, GLPK.Optimizer())
evaluate(x)
4 changes: 2 additions & 2 deletions docs/examples_literate/mixed_integer/n_queens.jl
@@ -1,6 +1,6 @@
# # N queens

using Convex, GLPKMathProgInterface, LinearAlgebra, SparseArrays, Test
using Convex, GLPK, LinearAlgebra, SparseArrays, Test
aux(str) = joinpath(@__DIR__, "aux", str) # path to auxiliary files
include(aux("antidiag.jl"))

@@ -16,7 +16,7 @@ constr += Constraint[sum(diag(x, k)) <= 1 for k = -n+2:n-2]
## Exactly one queen per row and one queen per column
constr += Constraint[sum(x, dims=1) == 1, sum(x, dims=2) == 1]
p = satisfy(constr)
solve!(p, GLPKSolverMIP())
solve!(p, GLPK.Optimizer())

# Let us test the results:
for k = -n+2:n-2
4 changes: 2 additions & 2 deletions docs/examples_literate/mixed_integer/section_allocation.jl
@@ -9,7 +9,7 @@
# The goal will be to get an allocation matrix $X$, where $X_{ij} = 1$ if
# student $i$ is assigned to section $j$ and $0$ otherwise.

using Convex, GLPKMathProgInterface
using Convex, GLPK
aux(str) = joinpath(@__DIR__, "aux", str) # path to auxiliary files

# Load our preference matrix, `P`
@@ -30,5 +30,5 @@ constraints = [sum(X, dims=2) == 1, sum(X, dims=1) <= 10, sum(X, dims=1) >= 6]
# students since the ranking of the first choice is 1.
p = minimize(vec(X)' * vec(P), constraints)

solve!(p, GLPKSolverMIP())
solve!(p, GLPK.Optimizer())
p.optval
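As the comments above describe, $X_{ij} = 1$ when student $i$ is assigned to section $j$, so each student's section can be read off row by row after solving. A small post-processing sketch (assumes the solved `X` from the script above; the `argmax` readout is illustrative and not part of this diff):

```julia
# Recover each student's assigned section from the solved allocation matrix.
Xval = evaluate(X)                                          # numeric matrix after solve!
assignments = [argmax(Xval[i, :]) for i in 1:size(Xval, 1)]
```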
@@ -33,7 +33,7 @@ Z = ComplexVariable(n,n)
objective = 0.5*real(tr(Z+Z'))
constraint = [P Z;Z' Q] ⪰ 0
problem = maximize(objective,constraint)
solve!(problem, SCSSolver(verbose=0))
solve!(problem, SCS.Optimizer(verbose=0))
computed_fidelity = evaluate(objective)

#-
@@ -69,7 +69,7 @@ objective = inner_product(U,M)
c1 = diag(U) == 1
c2 = U in :SDP
p = minimize(objective,c1,c2)
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
U.value

#-
@@ -55,7 +55,7 @@ function get_visibility(K)
constraints += t*K[3] + (1-t)*noise[3] == P[2][2] + P[4][2] + P[6][1]
constraints += t*K[4] + (1-t)*noise[4] == P[3][2] + P[5][2] + P[6][2]
p = maximize(t, constraints)
solve!(p, SCSSolver(verbose=0))
solve!(p, SCS.Optimizer(verbose=0))
return p.optval
end

@@ -25,7 +25,7 @@ c3 = real(W[1,1]) == 1.06^2;
push!(c1, c2)
push!(c1, c3)
p = maximize(objective, c1);
solve!(p, SCSSolver(verbose = 0))
solve!(p, SCS.Optimizer(verbose = 0))
p.optval
#15.125857662600703
evaluate(objective)
@@ -56,7 +56,7 @@ p = minimize( risk,
w_lower <= w,
w <= w_upper )

solve!(p, SCSSolver()) #use SCSSolver(verbose = false) to suppress printing
solve!(p, SCS.Optimizer()) #use SCS.Optimizer(verbose = false) to suppress printing

#-

@@ -52,7 +52,7 @@ for i = 1:N
λ = λ_vals[i]
p = minimize( λ*risk - (1-λ)*ret,
sum(w) == 1 )
solve!(p, SCSSolver(verbose = false))
solve!(p, SCS.Optimizer(verbose = false))
MeanVarA[i,:]= [evaluate(ret),evaluate(risk)[1]] #risk is a 1x1 matrix
end

@@ -68,7 +68,7 @@ for i = 1:N
sum(w) == 1,
w_lower <= w, #w[i] is bounded
w <= w_upper )
solve!(p, SCSSolver(verbose = false))
solve!(p, SCS.Optimizer(verbose = false))
MeanVarB[i,:]= [evaluate(ret),evaluate(risk)[1]]
end

@@ -95,7 +95,7 @@ for i = 1:N
p = minimize( λ*risk - (1-λ)*ret,
sum(w) == 1,
(norm(w, 1)-1) <= Lmax)
solve!(p, SCSSolver(verbose = false))
solve!(p, SCS.Optimizer(verbose = false))
MeanVarC[i,:]= [evaluate(ret),evaluate(risk)[1]]
end
