From 06ef05f5d56122ed30919a784b817f69b7139787 Mon Sep 17 00:00:00 2001 From: Eric <5846501+ericphanson@users.noreply.github.com> Date: Thu, 28 Nov 2019 19:16:28 +0000 Subject: [PATCH 1/3] Update to Documenter 0.24, small fixes --- docs/Project.toml | 3 ++ .../general_examples/basic_usage.jl | 14 ++--- .../general_examples/logistic_regression.jl | 25 +++++---- .../general_examples/max_entropy.jl | 6 +-- .../general_examples/optimal_advertising.jl | 2 +- .../general_examples/robust_approx_fitting.jl | 6 +-- .../examples_literate/general_examples/svm.jl | 2 +- .../mixed_integer/binary_knapsack.jl | 4 +- .../mixed_integer/n_queens.jl | 2 +- .../Fidelity in Quantum Information Theory.jl | 4 +- .../phase_recovery_using_MaxCut.jl | 51 +++++++++++++------ .../portfolio_optimization.jl | 10 ++-- .../portfolio_optimization2.jl | 4 +- .../supplemental_material/paper_examples.jl | 10 ++-- .../time_series/time_series.jl | 4 +- docs/make.jl | 2 +- docs/src/credits.md | 46 ++++++++--------- docs/src/quick_tutorial.md | 10 ++-- docs/src/types.md | 12 ++--- 19 files changed, 121 insertions(+), 96 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index 998360988..1712b9532 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -17,3 +17,6 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[compat] +Documenter = "0.24" diff --git a/docs/examples_literate/general_examples/basic_usage.jl b/docs/examples_literate/general_examples/basic_usage.jl index 193089ded..da5c89191 100644 --- a/docs/examples_literate/general_examples/basic_usage.jl +++ b/docs/examples_literate/general_examples/basic_usage.jl @@ -14,8 +14,8 @@ solver = SCSSolver(verbose=0) # # $$ # \begin{array}{ll} -# \mbox{maximize} & c^T x \\ -# \mbox{subject to} & A x \leq b\\ +# \text{maximize} & c^T x \\ +# \text{subject to} & A x \leq b\\ # & x \geq 1 \\ # & x \leq 10 \\ # & x_2 \leq 5 \\ @@ -41,8 +41,8 @@ println(evaluate(x[1] + x[4] - x[2])) # # $$ # \begin{array}{ll} -# \mbox{minimize} & \| X \|_F + y \\ -# \mbox{subject to} & 2 X \leq 1\\ +# \text{minimize} & \| X \|_F + y \\ +# \text{subject to} & 2 X \leq 1\\ # & X' + y \geq 1 \\ # & X \geq 0 \\ # & y \geq 0 \\ @@ -63,7 +63,7 @@ p.optval # # $$ # \begin{array}{ll} -# \mbox{satisfy} & \| x \|_2 \leq 100 \\ +# \text{satisfy} & \| x \|_2 \leq 100 \\ # & e^{x_1} \leq 5 \\ # & x_2 \geq 7 \\ # & \sqrt{x_3 x_4} \geq x_2 @@ -98,8 +98,8 @@ y.value # # $$ # \begin{array}{ll} -# \mbox{minimize} & \sum_{i=1}^n x_i \\ -# \mbox{subject to} & x \in \mathbb{Z}^n \\ +# \text{minimize} & \sum_{i=1}^n x_i \\ +# \text{subject to} & x \in \mathbb{Z}^n \\ # & x \geq 0.5 \\ # \end{array} # $$ diff --git a/docs/examples_literate/general_examples/logistic_regression.jl b/docs/examples_literate/general_examples/logistic_regression.jl index 97b3c0b5f..b40ae1074 100644 --- a/docs/examples_literate/general_examples/logistic_regression.jl +++ b/docs/examples_literate/general_examples/logistic_regression.jl @@ -5,28 +5,27 @@ using RDatasets using Convex using SCS -#- -## we'll use iris data -## predict whether the iris species is versicolor using the sepal length and width and petal length and width -iris = dataset("datasets", "iris") -## outcome variable: +1 for versicolor, -1 otherwise +# This is an example logistic regression using `RDatasets`'s iris data. 
+# Our goal is to predict whether the iris species is versicolor
+# using the sepal length and width and petal length and width.
+iris = dataset("datasets", "iris");
+iris[1:10,:]
+
+# We'll define `Y` as the outcome variable: +1 for versicolor, -1 otherwise.
 Y = [species == "versicolor" ? 1.0 : -1.0 for species in iris.Species]
 
-## create data matrix with one column for each feature (first column corresponds to offset)
-X = hcat(ones(size(iris, 1)), iris.SepalLength, iris.SepalWidth, iris.PetalLength, iris.PetalWidth);
 
-#-
+# We'll create our data matrix with one column for each feature
+# (first column corresponds to offset).
+X = hcat(ones(size(iris, 1)), iris.SepalLength, iris.SepalWidth, iris.PetalLength, iris.PetalWidth);
 
-## solve the logistic regression problem
+# Now to soolve the logistic regression problem.
 n, p = size(X)
 beta = Variable(p)
 problem = minimize(logisticloss(-Y.*(X*beta)))
-
 solve!(problem, SCSSolver(verbose=false))
 
-#-
-
-## let's see how well the model fits
+# Let's see how well the model fits.
 using Plots
 logistic(x::Real) = inv(exp(-x) + one(x))
 perm = sortperm(vec(X*beta.value))
diff --git a/docs/examples_literate/general_examples/max_entropy.jl b/docs/examples_literate/general_examples/max_entropy.jl
index f824c473d..c7a5e3e21 100644
--- a/docs/examples_literate/general_examples/max_entropy.jl
+++ b/docs/examples_literate/general_examples/max_entropy.jl
@@ -4,8 +4,8 @@
 #
 # $$
 # \begin{array}{ll}
-# \mbox{maximize} & -\sum_{i=1}^n x_i \log x_i \\
-# \mbox{subject to} & \mathbf{1}' x = 1 \\
+# \text{maximize} & -\sum_{i=1}^n x_i \log x_i \\
+# \text{subject to} & \mathbf{1}' x = 1 \\
 # & Ax \leq b
 # \end{array}
 # $$
@@ -23,7 +23,7 @@ b = rand(m, 1);
 x = Variable(n);
 problem = maximize(entropy(x), sum(x) == 1, A * x <= b)
-solve!(problem, SCSSolver(verbose=0))
+solve!(problem, SCSSolver(verbose=false))
 problem.optval
 
 #-
diff --git a/docs/examples_literate/general_examples/optimal_advertising.jl b/docs/examples_literate/general_examples/optimal_advertising.jl
index ad436f273..a47af4f37 100644
--- a/docs/examples_literate/general_examples/optimal_advertising.jl
+++ b/docs/examples_literate/general_examples/optimal_advertising.jl
@@ -2,7 +2,7 @@
 
 # This example is taken from .
 
-# Setup
+# Setup:
 #
 # * We have $m$ adverts and $n$ timeslots
 # * The total traffic in time slot $t$ is $T_t$
diff --git a/docs/examples_literate/general_examples/robust_approx_fitting.jl b/docs/examples_literate/general_examples/robust_approx_fitting.jl
index 51b0f58d4..01ca3fd64 100644
--- a/docs/examples_literate/general_examples/robust_approx_fitting.jl
+++ b/docs/examples_literate/general_examples/robust_approx_fitting.jl
@@ -6,7 +6,7 @@
 #
 # Adapted for Convex.jl by Karanveer Mohan and David Zeng - 26/05/14
 # Original cvx code and plots here:
-# http://web.cvxr.com/cvx/examples/cvxbook/Ch06_approx_fitting/html/fig6_15.html
+# <http://web.cvxr.com/cvx/examples/cvxbook/Ch06_approx_fitting/html/fig6_15.html>
 #
 # Consider the least-squares problem:
 # minimize $\|(A + tB)x - b\|_2$
@@ -20,7 +20,7 @@
 # (reduces to minimizing $\mathbb{E} \|(A+tB)x-b\|^2 = \|A*x-b\|^2 + x^TPx$
 # where $P = \mathbb{E}(t^2) B^TB = (1/3) B^TB$ )
 # 3. worst-case robust approximation:
-#    minimize $sup_{-1\leq u\leq 1} \|(A+tB)x - b\|_2$
+#    minimize $\sup_{-1\leq t\leq 1} \|(A+tB)x - b\|_2$
 # (reduces to minimizing $\max\{\|(A-B)x - b\|_2, \|(A+B)x - b\|_2\}$ ).
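For concreteness, formulation 2 above could be written as the following sketch. This is an illustration, not the file's verbatim code (which follows in the hunks below); the data and variable names are made up, and it assumes the same pre-MathOptInterface `SCSSolver` API used throughout these examples.

```julia
# Sketch of formulation 2 (stochastic robust approximation), with made-up
# data; `quadform` supplies the x'Px regularizer derived above.
using Convex, SCS, LinearAlgebra

m, n = 20, 10
A = randn(m, n); B = randn(m, n); b = randn(m)

P = (1 / 3) * B' * B              # P = E(t^2) B'B, with t uniform on [-1, 1]
x = Variable(n)
p = minimize(sumsquares(A * x - b) + quadform(x, P))
solve!(p, SCSSolver(verbose=0))
x_tik = evaluate(x)
```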
 #
 using Convex, LinearAlgebra, SCS
@@ -57,7 +57,7 @@ p = minimize(max(norm((A - B) * x - b), norm((A + B) * x - b)))
 solve!(p, SCSSolver(verbose=0))
 x_wc = evaluate(x)
 
-# plot residuals
+# Plot residuals:
 parvals = range(-2, stop=2, length=100);
 
 errvals(x) = [ norm((A + parvals[k] * B) * x - b) for k = eachindex(parvals)]
diff --git a/docs/examples_literate/general_examples/svm.jl b/docs/examples_literate/general_examples/svm.jl
index ac3889191..7d40068f8 100644
--- a/docs/examples_literate/general_examples/svm.jl
+++ b/docs/examples_literate/general_examples/svm.jl
@@ -8,7 +8,7 @@
 #
 # $$
 # \begin{array}{ll}
-# \mbox{minimize} & \|w\|^2 + C * (\sum_{i=1}^N \text{max} \{1 + b - w^T x_i, 0\} + \sum_{i=1}^M \text{max} \{1 - b + w^T y_i, 0\})
+# \text{minimize} & \|w\|^2 + C * (\sum_{i=1}^N \max \{1 + b - w^T x_i, 0\} + \sum_{i=1}^M \max \{1 - b + w^T y_i, 0\})
 # \end{array},
 # $$
 #
diff --git a/docs/examples_literate/mixed_integer/binary_knapsack.jl b/docs/examples_literate/mixed_integer/binary_knapsack.jl
index f39c494c7..10cd88137 100644
--- a/docs/examples_literate/mixed_integer/binary_knapsack.jl
+++ b/docs/examples_literate/mixed_integer/binary_knapsack.jl
@@ -5,8 +5,8 @@
 #
 # $$
 # \begin{array}{ll}
-# \mbox{maximize} & x' p \\
-# \mbox{subject to} & x \in \{0, 1\} \\
+# \text{maximize} & x' p \\
+# \text{subject to} & x \in \{0, 1\}^n \\
 # & w' x \leq C \\
 # \end{array}
 # $$
diff --git a/docs/examples_literate/mixed_integer/n_queens.jl b/docs/examples_literate/mixed_integer/n_queens.jl
index d2d60b98b..3f1d0cfbe 100644
--- a/docs/examples_literate/mixed_integer/n_queens.jl
+++ b/docs/examples_literate/mixed_integer/n_queens.jl
@@ -5,7 +5,7 @@ aux(str) = joinpath(@__DIR__, "aux", str) # path to auxiliary files
 include(aux("antidiag.jl"))
 
 n = 8
-# We encode the locations of the queens with a matrix of binary random variables
+# We encode the locations of the queens with a matrix of binary variables.
 x = Variable((n, n), :Bin)
 
 # Now we impose the constraints: at most one queen on any anti-diagonal, at most one queen on any diagonal, and we must have exactly one queen per row and per column. 
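For concreteness, the constraint set just described could be sketched as follows. This is an illustration rather than the file's verbatim code; it assumes the `antidiag` helper included from `aux/antidiag.jl` above and a mixed-integer-capable solver such as GLPK's `GLPKSolverMIP`.

```julia
# Illustrative sketch of the n-queens constraints. `antidiag` is assumed
# to come from the aux/antidiag.jl helper included in the example; it
# mirrors LinearAlgebra.diag but walks the anti-diagonals.
using Convex, LinearAlgebra, GLPKMathProgInterface

n = 8
x = Variable((n, n), :Bin)    # x[i, j] == 1 iff a queen sits on square (i, j)

constraints = Constraint[sum(x[i, :]) == 1 for i in 1:n]         # one queen per row
append!(constraints, Constraint[sum(x[:, j]) == 1 for j in 1:n]) # one queen per column
for k in -(n - 2):(n - 2)
    push!(constraints, sum(diag(x, k)) <= 1)      # at most one queen per diagonal
    push!(constraints, sum(antidiag(x, k)) <= 1)  # at most one queen per anti-diagonal
end

p = satisfy(constraints)
solve!(p, GLPKSolverMIP())
evaluate(x)    # a 0/1 matrix placing the eight queens
```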
diff --git a/docs/examples_literate/optimization_with_complex_variables/Fidelity in Quantum Information Theory.jl b/docs/examples_literate/optimization_with_complex_variables/Fidelity in Quantum Information Theory.jl
index 96d1bbb76..62f78dd96 100644
--- a/docs/examples_literate/optimization_with_complex_variables/Fidelity in Quantum Information Theory.jl
+++ b/docs/examples_literate/optimization_with_complex_variables/Fidelity in Quantum Information Theory.jl
@@ -11,8 +11,8 @@
 #
 # $$
 # \begin{array}{ll}
-# \mbox{maximize} & \frac{1}{2}\text{tr}(Z+Z^\dagger) \\
-# \mbox{subject to} &\\
+# \text{maximize} & \frac{1}{2}\text{tr}(Z+Z^\dagger) \\
+# \text{subject to} &\\
 # & \left[\begin{array}{cc}P&Z\\{Z}^{\dagger}&Q\end{array}\right] \succeq 0\\
 # & Z \in \mathbf {C}^{n \times n}\\
 # \end{array}
diff --git a/docs/examples_literate/optimization_with_complex_variables/phase_recovery_using_MaxCut.jl b/docs/examples_literate/optimization_with_complex_variables/phase_recovery_using_MaxCut.jl
index 881af08fb..706c8a45c 100644
--- a/docs/examples_literate/optimization_with_complex_variables/phase_recovery_using_MaxCut.jl
+++ b/docs/examples_literate/optimization_with_complex_variables/phase_recovery_using_MaxCut.jl
@@ -1,34 +1,55 @@
 # # Phase recovery using MaxCut
-# In this example, we relax the phase retrieval problem similar to the classical [MaxCut](http://www-math.mit.edu/~goemans/PAPERS/maxcut-jacm.pdf) semidefinite program and recover the phase of the signal given the magnitude of the linear measurements.
+#
+# In this example, we relax the phase retrieval problem similar to the classical
+# [MaxCut](http://www-math.mit.edu/~goemans/PAPERS/maxcut-jacm.pdf) semidefinite
+# program and recover the phase of the signal given the magnitude of the linear
+# measurements.
 #
-# Phase recovery has wide applications such as in X-ray and crystallography imaging, diffraction imaging or microscopy and audio signal processing. In all these applications, the detectors cannot measure the phase of the incoming wave and only record its amplitude i.e complex measurements of a signal $x \in \mathbb{C}^p$ are obtained from a linear injective operator A, **but we can only measure the magnitude vector Ax, not the phase fo Ax**.
+# Phase recovery has wide applications such as in X-ray and crystallography
+# imaging, diffraction imaging or microscopy and audio signal processing. In all
+# these applications, the detectors cannot measure the phase of the incoming wave
+# and only record its amplitude, i.e., complex measurements of a signal
+# $x \in \mathbb{C}^p$ are obtained from a linear injective operator $A$, **but we
+# can only measure the magnitude vector $Ax$, not the phase of $Ax$**.
 #
 # Recovering the phase of $Ax$ from $|Ax|$ is a **nonconvex optimization problem**. Using results from [this paper](https://arxiv.org/abs/1206.0102), the problem can be relaxed to a (complex) semidefinite program (complex SDP).
 #
 # The original reprsentation of the problem is as follows:
 #
-# >>>> find $x$
+# $$
+# \begin{array}{ll}
+# \text{find} & x \in \mathbb{C}^p \\
+# \text{subject to} & |Ax| = b
+# \end{array}
+# $$
 #
-# >>>> such that $|Ax| = b$
-#
-# >>>> where $x \in \mathbb{C}^p$, $A \in \mathbb{C}^{n \times p}$ and $b \in \mathbb{R}^n$.
-
-#-
+# where $A \in \mathbb{C}^{n \times p}$ and $b \in \mathbb{R}^n$.
 
-# In this example, **the problem is to find the phase of Ax given the value |Ax|**. Given a linear operator $A$ and a vector $b= |Ax|$ of measured amplitudes, in the noiseless case, we can write Ax = diag(b)u where $u \in \mathbb{C}^n$ is a phase vector, satisfying |$\mathbb{u}_i$| = 1 for i = 1,. . . , n.
+# In this example, **the problem is to find the phase of $Ax$ given the value $|Ax|$**.
+# Given a linear operator $A$ and a vector $b= |Ax|$ of measured amplitudes,
+# in the noiseless case, we can write $Ax = \text{diag}(b)u$ where
+# $u \in \mathbb{C}^n$ is a phase vector, satisfying
+# $|u_i| = 1$ for $i = 1,\ldots, n$.
 #
 # We relax this problem as Complex Semidefinite Programming.
 #
 # ### Relaxed Problem similar to [MaxCut](http://www-math.mit.edu/~goemans/PAPERS/maxcut-jacm.pdf)
 #
-# Define the positive semidefinite hermitian matrix $M = \text{diag}(b) (I - A A^*) \text{diag}(b)$. The problem is:
+# Define the positive semidefinite Hermitian matrix
+# $M = \text{diag}(b) (I - A A^*) \text{diag}(b)$. The problem is:
 #
-# minimize < U,M >
-# subject to
-# diag(U) = 1
-# U in :HermitianSemiDefinite
+# $$
+# \begin{array}{ll}
+# \text{minimize} & \langle U, M \rangle \\
+# \text{subject to} & \text{diag}(U) = 1\\
+# & U \succeq 0
+# \end{array}
+# $$
 #
-# Here the variable $U$ must be hermitian ($U \in \mathbb{H}_n $), and we have a solution to the phase recovery problem if $U = u u^*$ has rank one. Otherwise, the leading singular vector of $U$ can be used to approximate the solution.
+# Here the variable $U$ must be Hermitian ($U \in \mathbb{H}_n$),
+# and we have a solution to the phase recovery problem if $U = u u^*$
+# has rank one. Otherwise, the leading singular vector of $U$ can be used
+# to approximate the solution.
 
 using Convex, SCS, LinearAlgebra
 if VERSION < v"1.2.0-DEV.0"
diff --git a/docs/examples_literate/portfolio_optimization/portfolio_optimization.jl b/docs/examples_literate/portfolio_optimization/portfolio_optimization.jl
index 2b63f4fa0..3fb5a90de 100644
--- a/docs/examples_literate/portfolio_optimization/portfolio_optimization.jl
+++ b/docs/examples_literate/portfolio_optimization/portfolio_optimization.jl
@@ -1,17 +1,17 @@
 # # Portfolio Optimization
 #
-# In this problem, we will find the portfolio allocation that minimizes risk while achieving a given expected return $R_\mbox{target}$.
+# In this problem, we will find the portfolio allocation that minimizes risk while achieving a given expected return $R_\text{target}$.
 #
-# Suppose that we know the mean returns $\mu \in \mathbf{R}^n$ and the covariance $\Sigma \in \mathbf{R}^{n \times n}$ of the $n$ assets. We would like to find a portfolio allocation $w \in \mathbf{R}^n$, $\sum_i w_i = 1$, minimizing the *risk* of the portfolio, which we measure as the variance $w^T \Sigma w$ of the portfolio. The requirement that the portfolio allocation achieve the target expected return can be expressed as $w^T \mu >= R_\mbox{target}$. We suppose further that our portfolio allocation must comply with some lower and upper bounds on the allocation, $w_\mbox{lower} \leq w \leq w_\mbox{upper}$.
+# Suppose that we know the mean returns $\mu \in \mathbf{R}^n$ and the covariance $\Sigma \in \mathbf{R}^{n \times n}$ of the $n$ assets. We would like to find a portfolio allocation $w \in \mathbf{R}^n$, $\sum_i w_i = 1$, minimizing the *risk* of the portfolio, which we measure as the variance $w^T \Sigma w$ of the portfolio. The requirement that the portfolio allocation achieve the target expected return can be expressed as $w^T \mu \geq R_\text{target}$. We suppose further that our portfolio allocation must comply with some lower and upper bounds on the allocation, $w_\text{lower} \leq w \leq w_\text{upper}$.
 #
 # This problem can be written as
 #
 # $$
 # \begin{array}{ll}
-# \mbox{minimize} & w^T \Sigma w \\
-# \mbox{subject to} & w^T \mu >= R_\mbox{target} \\
+# \text{minimize} & w^T \Sigma w \\
+# \text{subject to} & w^T \mu \geq R_\text{target} \\
 # & \sum_i w_i = 1 \\
-# & w_\mbox{lower} \leq w \leq w_\mbox{upper}
+# & w_\text{lower} \leq w \leq w_\text{upper}
 # \end{array}
 # $$
 #
diff --git a/docs/examples_literate/portfolio_optimization/portfolio_optimization2.jl b/docs/examples_literate/portfolio_optimization/portfolio_optimization2.jl
index 6a9944744..177204771 100644
--- a/docs/examples_literate/portfolio_optimization/portfolio_optimization2.jl
+++ b/docs/examples_literate/portfolio_optimization/portfolio_optimization2.jl
@@ -8,8 +8,8 @@
 #
 # $$
 # \begin{array}{ll}
-# \mbox{minimize} & \lambda*w^T \Sigma w - (1-\lambda)*w^T \mu \\
-# \mbox{subject to} & \sum_i w_i = 1
+# \text{minimize} & \lambda*w^T \Sigma w - (1-\lambda)*w^T \mu \\
+# \text{subject to} & \sum_i w_i = 1
 # \end{array}
 # $$
 #
diff --git a/docs/examples_literate/supplemental_material/paper_examples.jl b/docs/examples_literate/supplemental_material/paper_examples.jl
index f83335f8f..3a9f5ae16 100644
--- a/docs/examples_literate/supplemental_material/paper_examples.jl
+++ b/docs/examples_literate/supplemental_material/paper_examples.jl
@@ -13,7 +13,7 @@ e = 0;
 end
 p = minimize(e, x>=1);
 end
-@time solve!(p, ECOSSolver())
+@time solve!(p, ECOSSolver(verbose=0))
 
 # Indexing.
 println("Indexing example")
@@ -26,7 +26,7 @@ e = 0;
 end
 p = minimize(e, x >= ones(1000, 1));
 end
-@time solve!(p, ECOSSolver())
+@time solve!(p, ECOSSolver(verbose=0))
 
 # Matrix constraints.
 println("Matrix constraint example")
@@ -37,7 +37,7 @@ b = randn(p, n);
 @time begin
 p = minimize(norm(vec(X)), A * X == b);
 end
-@time solve!(p, ECOSSolver())
+@time solve!(p, ECOSSolver(verbose=0))
 
 # Transpose.
 println("Transpose example")
@@ -46,12 +46,12 @@ A = randn(5, 5);
 @time begin
 p = minimize(norm2(X - A), X' == X);
 end
-@time solve!(p, ECOSSolver())
+@time solve!(p, ECOSSolver(verbose=0))
 
 n = 3
 A = randn(n, n);
 #@time begin
 X = Variable(n, n);
 p = minimize(norm(vec(X' - A)), X[1,1] == 1);
-    solve!(p, ECOSSolver())
+    solve!(p, ECOSSolver(verbose=0))
 #end
diff --git a/docs/examples_literate/time_series/time_series.jl b/docs/examples_literate/time_series/time_series.jl
index 7c6e21573..85b834901 100644
--- a/docs/examples_literate/time_series/time_series.jl
+++ b/docs/examples_literate/time_series/time_series.jl
@@ -60,13 +60,13 @@ root_mean_square_error = sqrt(sum( x -> x^2, residuals) / length(residuals))
 
 # We now make the hypothesis that the residual temperature on a given day is some linear combination of the previous $5$ days. Such a model is called autoregressive. We are essentially trying to fit the residuals as a function of other parts of the data itself. 
We want to find a vector of coefficients $a$ such that # # $$ -# \mbox{r}(i) \approx \sum_{j = 1}^5 a_j \mbox{r}(i - j) +# \text{r}(i) \approx \sum_{j = 1}^5 a_j \text{r}(i - j) # $$ # # This can be done by simply minimizing the following sum of squares objective # # $$ -# \sum_{i = 6}^n \left(\mbox{r}(i) - \sum_{j = 1}^5 a_j \mbox{r}(i - j)\right)^2 +# \sum_{i = 6}^n \left(\text{r}(i) - \sum_{j = 1}^5 a_j \text{r}(i - j)\right)^2 # $$ # # The following Convex code solves this problem and plots our autoregressive model against the actual residual temperatures: diff --git a/docs/make.jl b/docs/make.jl index 90a1546b8..df6a0b089 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -149,7 +149,7 @@ makedocs(; repo = "https://github.com/JuliaOpt/Convex.jl/blob/{commit}{path}#L{line}", sitename = "Convex.jl") -deploydocs(repo = "github.com/JuliaOpt/Convex.jl.git") +deploydocs(repo = "github.com/JuliaOpt/Convex.jl.git", push_preview = true) # restore the environmental variable `GKSwstype`. ENV["GKSwstype"] = previous_GKSwstype; diff --git a/docs/src/credits.md b/docs/src/credits.md index ff1ee06d4..8a821d007 100644 --- a/docs/src/credits.md +++ b/docs/src/credits.md @@ -1,30 +1,30 @@ Credits ======= -Currently, Convex.jl is developed and maintained by: +Convex.jl was developed and maintained by: -> - [Jenny Hong](http://www.stanford.edu/~jyunhong/) -> - [Karanveer Mohan](http://www.stanford.edu/~kvmohan/) -> - [Madeleine Udell](http://www.stanford.edu/~udell/) -> - [David Zeng](http://www.stanford.edu/~dzeng0/) +- [Jenny Hong](http://www.stanford.edu/~jyunhong/) +- [Karanveer Mohan](http://www.stanford.edu/~kvmohan/) +- [Madeleine Udell](http://www.stanford.edu/~udell/) +- [David Zeng](http://www.stanford.edu/~dzeng0/) The Convex.jl developers also thank: -> - the [JuliaOpt](http://www.juliaopt.org/) team: [Iain -> Dunning](http://iaindunning.com/), [Joey -> Huchette](http://www.mit.edu/~huchette/) and [Miles -> Lubin](http://www.mit.edu/~mlubin/) -> - [Stephen Boyd](http://www.stanford.edu/~boyd/), co-author of the -> book [Convex -> Optimization](http://www.stanford.edu/~boyd/books.html) -> - [Steven Diamond](http://www.stanford.edu/~stevend2/), developer of -> [CVXPY](https://github.com/cvxgrp/cvxpy) and of a [DCP tutorial -> website](http://dcp.stanford.edu/) to teach disciplined convex -> programming. -> - [Michael Grant](http://www.cvxr.com/bio), developer of -> [CVX](http://www.cvxr.com). -> - [John Duchi](http://www.stanford.edu/~jduchi) and Hongseok -> Namkoong for developing the [representation of power cones in -> terms of SOCP -> constraints](https://github.com/JuliaOpt/Convex.jl/raw/master/docs/supplementary/rational_to_socp.pdf) -> used in this package. +- the [JuliaOpt](http://www.juliaopt.org/) team: [Iain + Dunning](http://iaindunning.com/), [Joey + Huchette](http://www.mit.edu/~huchette/) and [Miles + Lubin](http://www.mit.edu/~mlubin/) +- [Stephen Boyd](http://www.stanford.edu/~boyd/), co-author of the + book [Convex + Optimization](http://www.stanford.edu/~boyd/books.html) +- [Steven Diamond](http://www.stanford.edu/~stevend2/), developer of + [CVXPY](https://github.com/cvxgrp/cvxpy) and of a [DCP tutorial + website](http://dcp.stanford.edu/) to teach disciplined convex + programming. +- [Michael Grant](http://www.cvxr.com/bio), developer of + [CVX](http://www.cvxr.com). 
+- [John Duchi](http://www.stanford.edu/~jduchi) and Hongseok + Namkoong for developing the [representation of power cones in + terms of SOCP + constraints](https://github.com/JuliaOpt/Convex.jl/raw/master/docs/supplementary/rational_to_socp.pdf) + used in this package. diff --git a/docs/src/quick_tutorial.md b/docs/src/quick_tutorial.md index 643870bfe..faa5fd96b 100644 --- a/docs/src/quick_tutorial.md +++ b/docs/src/quick_tutorial.md @@ -3,12 +3,14 @@ Quick Tutorial Consider a constrained least squares problem -$$\begin{aligned} +$$ +\begin{aligned} \begin{array}{ll} -\mbox{minimize} & \|Ax - b\|_2^2 \\ -\mbox{subject to} & x \geq 0 +\text{minimize} & \|Ax - b\|_2^2 \\ +\text{subject to} & x \geq 0 \end{array} -\end{aligned}$$ +\end{aligned} +$$ with variable $x\in \mathbf{R}^{n}$, and problem data $A \in \mathbf{R}^{m \times n}$, $b \in \mathbf{R}^{m}$. diff --git a/docs/src/types.md b/docs/src/types.md index cd79b8c1a..a53d663cf 100644 --- a/docs/src/types.md +++ b/docs/src/types.md @@ -26,12 +26,12 @@ x = Variable(4, 6) Variables may also be declared as having special properties, such as being -> - (entrywise) positive: `x = Variable(4, Positive())` -> - (entrywise) negative: `x = Variable(4, Negative())` -> - integral: `x = Variable(4, :Int)` -> - binary: `x = Variable(4, :Bin)` -> - (for a matrix) being symmetric, with nonnegative eigenvalues (ie, -> positive semidefinite): `z = Semidefinite(4)` +- (entrywise) positive: `x = Variable(4, Positive())` +- (entrywise) negative: `x = Variable(4, Negative())` +- integral: `x = Variable(4, :Int)` +- binary: `x = Variable(4, :Bin)` +- (for a matrix) being symmetric, with nonnegative eigenvalues (ie, + positive semidefinite): `z = Semidefinite(4)` Constants --------- From 501a12719161dabc5f4bccbde32e64a3691933bc Mon Sep 17 00:00:00 2001 From: Eric <5846501+ericphanson@users.noreply.github.com> Date: Thu, 28 Nov 2019 19:17:33 +0000 Subject: [PATCH 2/3] Fix typo --- docs/examples_literate/general_examples/logistic_regression.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples_literate/general_examples/logistic_regression.jl b/docs/examples_literate/general_examples/logistic_regression.jl index b40ae1074..d1caba41c 100644 --- a/docs/examples_literate/general_examples/logistic_regression.jl +++ b/docs/examples_literate/general_examples/logistic_regression.jl @@ -19,7 +19,7 @@ Y = [species == "versicolor" ? 1.0 : -1.0 for species in iris.Species] # (first column corresponds to offset). X = hcat(ones(size(iris, 1)), iris.SepalLength, iris.SepalWidth, iris.PetalLength, iris.PetalWidth); -# Now to soolve the logistic regression problem. +# Now to solve the logistic regression problem. 
n, p = size(X) beta = Variable(p) problem = minimize(logisticloss(-Y.*(X*beta))) From 717e42fc7f346af5b19984c7aea6591343c1153f Mon Sep 17 00:00:00 2001 From: Eric <5846501+ericphanson@users.noreply.github.com> Date: Fri, 29 Nov 2019 21:16:28 +0000 Subject: [PATCH 3/3] Fix math blocks, set verbose=false to work around Documenter#1174 --- docs/src/complex-domain_optimization.md | 2 +- docs/src/operations.md | 84 ++++++++++++------------- docs/src/quick_tutorial.md | 6 +- 3 files changed, 46 insertions(+), 46 deletions(-) diff --git a/docs/src/complex-domain_optimization.md b/docs/src/complex-domain_optimization.md index c6f1e728d..97c34541d 100644 --- a/docs/src/complex-domain_optimization.md +++ b/docs/src/complex-domain_optimization.md @@ -106,7 +106,7 @@ constraints = [partialtrace(ρ, 1, [2; 2]) == [1 0; 0 0] tr(ρ) == 1 ρ in :SDP] p = satisfy(constraints) -solve!(p, SCSSolver()) +solve!(p, SCSSolver(verbose=false)) p.status ``` diff --git a/docs/src/operations.md b/docs/src/operations.md index fedc7a52e..2ff68f9b8 100644 --- a/docs/src/operations.md +++ b/docs/src/operations.md @@ -32,38 +32,38 @@ LP solver. | operation | description | vexity | slope | notes | | ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | --------------------------------------------------------------------------------------------------------- | ------------------------------ | | `x+y` or `x.+y` | addition | affine | increasing | none | -| `x-y` or `x.-y` | subtraction | affine | increasing in \\(x\\) decreasing in \\(y\\) | none none | -| `x*y` | multiplication | affine | increasing if constant term \\(\\ge 0\\) decreasing if constant term \\(\\le 0\\) not monotonic otherwise | PR: one argument is constant | -| `x/y` | division | affine | increasing | PR: \\(y\\) is scalar constant | +| `x-y` or `x.-y` | subtraction | affine | increasing in $x$ decreasing in $y$ | none none | +| `x*y` | multiplication | affine | increasing if constant term $\\ge 0$ decreasing if constant term $\\le 0$ not monotonic otherwise | PR: one argument is constant | +| `x/y` | division | affine | increasing | PR: $y$ is scalar constant | | `dot(*)(x, y)` | elementwise multiplication | affine | increasing | PR: one argument is constant | | `dot(/)(x, y)` | elementwise division | affine | increasing | PR: one argument is constant | | `x[1:4, 2:3]` | indexing and slicing | affine | increasing | none | -| `diag(x, k)` | \\(k\\)-th diagonal of a matrix | affine | increasing | none | -| `diagm(x)` | construct diagonal matrix | affine | increasing | PR: \\(x\\) is a vector | +| `diag(x, k)` | $k$-th diagonal of a matrix | affine | increasing | none | +| `diagm(x)` | construct diagonal matrix | affine | increasing | PR: $x$ is a vector | | `x'` | transpose | affine | increasing | none | | `vec(x)` | vector representation | affine | increasing | none | -| `dot(x,y)` | \\(\\sum\_i x\_i y\_i\\) | affine | increasing | PR: one argument is constant | +| `dot(x,y)` | $\\sum\_i x\_i y\_i$ | affine | increasing | PR: one argument is constant | | `kron(x,y)` | Kronecker product | affine | increasing | PR: one argument is constant | | `vecdot(x,y)` | `dot(vec(x),vec(y))` | affine | increasing | PR: one argument is constant | -| `sum(x)` | \\(\\sum\_{ij} x\_{ij}\\) | affine | increasing | none | -| `sum(x, k)` | sum elements across dimension \\(k\\) | affine | increasing | none | -| `sumlargest(x, 
k)` | sum of \\(k\\) largest elements of \\(x\\) | convex | increasing | none | -| `sumsmallest(x, k)` | sum of \\(k\\) smallest elements of \\(x\\) | concave | increasing | none | +| `sum(x)` | $\\sum\_{ij} x\_{ij}$ | affine | increasing | none | +| `sum(x, k)` | sum elements across dimension $k$ | affine | increasing | none | +| `sumlargest(x, k)` | sum of $k$ largest elements of $x$ | convex | increasing | none | +| `sumsmallest(x, k)` | sum of $k$ smallest elements of $x$ | concave | increasing | none | | `dotsort(a, b)` | `dot(sort(a),sort(b))` | convex | increasing | PR: one argument is constant | -| `reshape(x, m, n)` | reshape into \\(m \\times n\\) | affine | increasing | none | -| `minimum(x)` | \\(\\min(x)\\) | concave | increasing | none | -| `maximum(x)` | \\(\\max(x)\\) | convex | increasing | none | +| `reshape(x, m, n)` | reshape into $m \\times n$ | affine | increasing | none | +| `minimum(x)` | $\\min(x)$ | concave | increasing | none | +| `maximum(x)` | $\\max(x)$ | convex | increasing | none | | `[x y]` or `[x; y]` `hcat(x, y)` or `vcat(x, y)` | stacking | affine | increasing | none | -| `tr(x)` | \\(\\mathrm{tr} \\left(X \\right)\\) | affine | increasing | none | +| `tr(x)` | $\\mathrm{tr} \\left(X \\right)$ | affine | increasing | none | | `partialtrace(x,sys,dims)` | Partial trace | affine | increasing | none | | `partialtranspose(x,sys,dims)` | Partial transpose | affine | increasing | none | -| `conv(h,x)` | \\(h \\in \\mathbb{R}^m\\) \\(x \\in \\mathbb{R}^m\\) \\(h\*x \\in \\mathbb{R}^{m+n-1}\\) entry \\(i\\) is given by \\(\\sum\_{j=1}^m h\_jx\_{i-j}\\) | affine | increasing if \\(h\\ge 0\\) decreasing if \\(h\\le 0\\) not monotonic otherwise | PR: \\(h\\) is constant | -| `min(x,y)` | \\(\\min(x,y)\\) | concave | increasing | none | -| `max(x,y)` | \\(\\max(x,y)\\) | convex | increasing | none | -| `pos(x)` | \\(\\max(x,0)\\) | convex | increasing | none | -| `neg(x)` | \\(\\max(-x,0)\\) | convex | decreasing | none | -| `invpos(x)` | \\(1/x\\) | convex | decreasing | IC: \\(x\>0\\) | -| `abs(x)` | \\(\\left\|x\\right\|\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | none | +| `conv(h,x)` | $h \\in \\mathbb{R}^m$ $x \\in \\mathbb{R}^m$ $h\*x \\in \\mathbb{R}^{m+n-1}$ entry $i$ is given by $\\sum\_{j=1}^m h\_jx\_{i-j}$ | affine | increasing if $h\\ge 0$ decreasing if $h\\le 0$ not monotonic otherwise | PR: $h$ is constant | +| `min(x,y)` | $\\min(x,y)$ | concave | increasing | none | +| `max(x,y)` | $\\max(x,y)$ | convex | increasing | none | +| `pos(x)` | $\\max(x,0)$ | convex | increasing | none | +| `neg(x)` | $\\max(-x,0)$ | convex | decreasing | none | +| `invpos(x)` | $1/x$ | convex | decreasing | IC: $x\>0$ | +| `abs(x)` | $\\left\|x\\right\|$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | none | Second-Order Cone Representable Functions ----------------------------------------- @@ -75,16 +75,16 @@ any solver that can solve both LPs and SOCPs can solve the problem. 
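As a quick usage illustration ahead of the table, here is a sketch of Huber regression, an SOCP-representable problem, using the same `SCSSolver` API as the rest of these docs; the data and names below are made up.

```julia
# Huber regression sketch: `huber` is SOCP-representable, so SCS (or any
# solver handling SOCPs) can solve it. The data below is made up.
using Convex, SCS

n = 100
X = [ones(n) randn(n)]                 # intercept plus one feature
y = X * [1.0, 2.0] .+ 0.1 .* randn(n)  # noisy line

beta = Variable(2)
problem = minimize(sum(huber(X * beta - y, 1.0)))
solve!(problem, SCSSolver(verbose=false))
evaluate(beta)    # should be close to [1.0, 2.0]
```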
| operation | description | vexity | slope | notes | | ------------------- | ----------------------------------------------------------------------------------- | ----------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -| `norm(x, p)` | \\((\\sum x\_i^p)^{1/p}\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | PR: `p >= 1` | -| `vecnorm(x, p)` | \\((\\sum x\_{ij}^p)^{1/p}\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | PR: `p >= 1` | -| `quadform(x, P)` | \\(x^T P x\\) | convex in \\(x\\) affine in \\(P\\) | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) increasing in \\(P\\) | PR: either \\(x\\) or \\(P\\) must be constant; if \\(x\\) is not constant, then \\(P\\) must be symmetric and positive semidefinite | -| `quadoverlin(x, y)` | \\(x^T x/y\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) decreasing in \\(y\\) | IC: \\(y \> 0\\) | -| `sumsquares(x)` | \\(\\sum x\_i^2\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | none | -| `sqrt(x)` | \\(\\sqrt{x}\\) | concave | decreasing | IC: \\(x\>0\\) | -| `square(x), x^2` | \\(x^2\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | PR : \\(x\\) is scalar | -| `dot(^)(x,2)` | \\(x.^2\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | elementwise | -| `geomean(x, y)` | \\(\\sqrt{xy}\\) | concave | increasing | IC: \\(x\\ge0\\), \\(y\\ge0\\) | -| `huber(x, M=1)` | \\(\\begin{cases} x^2 &\|x\| \\leq M \\\\ 2M\|x\| - M^2 &\|x\| \> M \\end{cases}\\) | convex | increasing on \\(x \\ge 0\\) decreasing on \\(x \\le 0\\) | PR: \\(M\>=1\\) | +| `norm(x, p)` | $(\\sum x\_i^p)^{1/p}$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | PR: `p >= 1` | +| `vecnorm(x, p)` | $(\\sum x\_{ij}^p)^{1/p}$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | PR: `p >= 1` | +| `quadform(x, P)` | $x^T P x$ | convex in $x$ affine in $P$ | increasing on $x \\ge 0$ decreasing on $x \\le 0$ increasing in $P$ | PR: either $x$ or $P$ must be constant; if $x$ is not constant, then $P$ must be symmetric and positive semidefinite | +| `quadoverlin(x, y)` | $x^T x/y$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ decreasing in $y$ | IC: $y \> 0$ | +| `sumsquares(x)` | $\\sum x\_i^2$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | none | +| `sqrt(x)` | $\\sqrt{x}$ | concave | decreasing | IC: $x\>0$ | +| `square(x), x^2` | $x^2$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | PR : $x$ is scalar | +| `dot(^)(x,2)` | $x.^2$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | elementwise | +| `geomean(x, y)` | $\\sqrt{xy}$ | concave | increasing | IC: $x\\ge0$, $y\\ge0$ | +| `huber(x, M=1)` | $\\begin{cases} x^2 &\|x\| \\leq M \\\\ 2M\|x\| - M^2 &\|x\| \> M \\end{cases}$ | convex | increasing on $x \\ge 0$ decreasing on $x \\le 0$ | PR: $M\>=1$ | Exponential Cone Representable Functions @@ -95,11 +95,11 @@ exponential cone solver (SCS). 
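For instance, a small sketch in the same vein as the maximum-entropy example earlier in these docs:

```julia
# Entropy maximization sketch: `entropy` requires an exponential-cone
# solver such as SCS. The bound on x[1] is arbitrary, for illustration.
using Convex, SCS

x = Variable(4)
problem = maximize(entropy(x), sum(x) == 1, x[1] <= 0.1)
solve!(problem, SCSSolver(verbose=false))
evaluate(x)
```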
| operation | description | vexity | slope | notes | | ----------------- | ------------------------------------------ | ------- | ------------- | -------------- | -| `logsumexp(x)` | \\(\\log(\\sum\_i \\exp(x\_i))\\) | convex | increasing | none | -| `exp(x)` | \\(\\exp(x)\\) | convex | increasing | none | -| `log(x)` | \\(\\log(x)\\) | concave | increasing | IC: \\(x\>0\\) | -| `entropy(x)` | \\(\\sum\_{ij} -x\_{ij} \\log (x\_{ij})\\) | concave | not monotonic | IC: \\(x\>0\\) | -| `logisticloss(x)` | \\(\\log(1 + \\exp(x\_i))\\) | convex | increasing | none | +| `logsumexp(x)` | $\\log(\\sum\_i \\exp(x\_i))$ | convex | increasing | none | +| `exp(x)` | $\\exp(x)$ | convex | increasing | none | +| `log(x)` | $\\log(x)$ | concave | increasing | IC: $x\>0$ | +| `entropy(x)` | $\\sum\_{ij} -x\_{ij} \\log (x\_{ij})$ | concave | not monotonic | IC: $x\>0$ | +| `logisticloss(x)` | $\\log(1 + \\exp(x\_i))$ | convex | increasing | none | Semidefinite Program Representable Functions -------------------------------------------- @@ -109,11 +109,11 @@ solver (including SCS and Mosek). | operation | description | vexity | slope | notes | | ------------------ | --------------------------------- | ------- | ------------- | ------------------------------ | -| `nuclearnorm(x)` | sum of singular values of \\(x\\) | convex | not monotonic | none | -| `operatornorm(x)` | max of singular values of \\(x\\) | convex | not monotonic | none | -| `lambdamax(x)` | max eigenvalue of \\(x\\) | convex | not monotonic | none | -| `lambdamin(x)` | min eigenvalue of \\(x\\) | concave | not monotonic | none | -| `matrixfrac(x, P)` | \\(x^TP^{-1}x\\) | convex | not monotonic | IC: P is positive semidefinite | +| `nuclearnorm(x)` | sum of singular values of $x$ | convex | not monotonic | none | +| `operatornorm(x)` | max of singular values of $x$ | convex | not monotonic | none | +| `lambdamax(x)` | max eigenvalue of $x$ | convex | not monotonic | none | +| `lambdamin(x)` | min eigenvalue of $x$ | concave | not monotonic | none | +| `matrixfrac(x, P)` | $x^TP^{-1}x$ | convex | not monotonic | IC: P is positive semidefinite | Exponential + SDP representable Functions ----------------------------------------- @@ -124,7 +124,7 @@ constraints simultaneously (SCS). | operation | description | vexity | slope | notes | | ----------- | ----------------------------- | ------- | ---------- | ------------------------------ | -| `logdet(x)` | log of determinant of \\(x\\) | concave | increasing | IC: x is positive semidefinite | +| `logdet(x)` | log of determinant of $x$ | concave | increasing | IC: x is positive semidefinite | Promotions ---------- diff --git a/docs/src/quick_tutorial.md b/docs/src/quick_tutorial.md index faa5fd96b..d12be116f 100644 --- a/docs/src/quick_tutorial.md +++ b/docs/src/quick_tutorial.md @@ -3,14 +3,14 @@ Quick Tutorial Consider a constrained least squares problem -$$ +```math \begin{aligned} \begin{array}{ll} \text{minimize} & \|Ax - b\|_2^2 \\ \text{subject to} & x \geq 0 \end{array} \end{aligned} -$$ +``` with variable $x\in \mathbf{R}^{n}$, and problem data $A \in \mathbf{R}^{m \times n}$, $b \in \mathbf{R}^{m}$. @@ -33,7 +33,7 @@ x = Variable(n) problem = minimize(sumsquares(A * x - b), [x >= 0]) # Solve the problem by calling solve! -solve!(problem, SCSSolver()) +solve!(problem, SCSSolver(verbose=false)) # Check the status of the problem problem.status # :Optimal, :Infeasible, :Unbounded etc.