diff --git a/src/OptimizationProblems.jl b/src/OptimizationProblems.jl
index 1d0bf7b7..5bfb6c43 100644
--- a/src/OptimizationProblems.jl
+++ b/src/OptimizationProblems.jl
@@ -2,8 +2,12 @@ module OptimizationProblems
 
 using JuMP
 
+include("arglina.jl")
+include("arglinb.jl")
+include("arglinc.jl")
 include("arwhead.jl")
 include("bdqrtic.jl")
+include("brownden.jl")
 include("broydn7d.jl")
 include("brybnd.jl")
 include("chainwoo.jl")
@@ -30,6 +34,7 @@ include("freuroth.jl")
 include("genhumps.jl")
 include("indef_mod.jl")
 include("liarwhd.jl")
+include("meyer3.jl")
 include("morebv.jl")
 include("ncb20.jl")
 include("ncb20b.jl")
@@ -37,6 +42,7 @@ include("noncvxu2.jl")
 include("noncvxun.jl")
 include("nondia.jl")
 include("nondquar.jl")
+include("palmer1c.jl")
 include("penalty2.jl")
 include("penalty3.jl")
 include("powellsg.jl")
diff --git a/src/arglina.jl b/src/arglina.jl
new file mode 100644
index 00000000..53267bf5
--- /dev/null
+++ b/src/arglina.jl
@@ -0,0 +1,31 @@
+# Linear function - full rank
+#
+# Source: Problem 32 in
+# J.J. More', B.S. Garbow and K.E. Hillstrom,
+# "Testing Unconstrained Optimization Software",
+# ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981.
+#
+# See also Buckley#80 (with different N and M)
+#
+# classification SUR2-AN-V-0
+
+export arglina
+
+"Linear function with `n` parameters and `m` observations - full rank"
+function arglina(n::Int=100, m::Int=200)
+
+  m < n && warn("arglina: must have m ≥ n")
+  m = max(m, n)
+
+  nlp = Model()
+
+  @variable(nlp, x[j=1:n], start=1.0)
+
+  @NLobjective(
+    nlp,
+    Min,
+    sum((x[i] - 2/m * sum(x[j] for j = 1:n) - 1)^2 for i = 1:n) + sum((-2/m * sum(x[j] for j = 1:n) - 1)^2 for i = n+1:m)
+  )
+
+  return nlp
+end
diff --git a/src/arglinb.jl b/src/arglinb.jl
new file mode 100644
index 00000000..511159eb
--- /dev/null
+++ b/src/arglinb.jl
@@ -0,0 +1,31 @@
+# Linear function - rank 1
+#
+# Source: Problem 33 in
+# J.J. More', B.S. Garbow and K.E. Hillstrom,
+# "Testing Unconstrained Optimization Software",
+# ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981.
+#
+# See also Buckley#93 (with different N and M)
+#
+# classification SUR2-AN-V-0
+
+export arglinb
+
+"Linear function with `n` parameters and `m` observations - rank 1"
+function arglinb(n::Int=10, m::Int=20)
+
+  m < n && warn("arglinb: must have m ≥ n")
+  m = max(m, n)
+
+  nlp = Model()
+
+  @variable(nlp, x[j=1:n], start=1.0)
+
+  @NLobjective(
+    nlp,
+    Min,
+    sum((i * sum(j * x[j] for j = 1:n) - 1)^2 for i = 1:m)
+  )
+
+  return nlp
+end
diff --git a/src/arglinc.jl b/src/arglinc.jl
new file mode 100644
index 00000000..58f1f64c
--- /dev/null
+++ b/src/arglinc.jl
@@ -0,0 +1,31 @@
+# Linear function - rank 1, zero columns and rows
+#
+# Source: Problem 34 in
+# J.J. More', B.S. Garbow and K.E. Hillstrom,
+# "Testing Unconstrained Optimization Software",
+# ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981.
+#
+# See also Buckley#101 (with different N and M)
+#
+# classification SUR2-AN-V-0
+
+export arglinc
+
+"Linear function with `n` parameters and `m` observations - rank 1, zero columns and rows"
+function arglinc(n::Int=10, m::Int=20)
+
+  m < n && warn("arglinc: must have m ≥ n")
+  m = max(m, n)
+
+  nlp = Model()
+
+  @variable(nlp, x[j=1:n], start=1.0)
+
+  @NLobjective(
+    nlp,
+    Min,
+    2 + sum(((i-1) * sum(j * x[j] for j = 2:n-1) - 1)^2 for i = 2:m-1)
+  )
+
+  return nlp
+end
diff --git a/src/brownden.jl b/src/brownden.jl
new file mode 100644
index 00000000..4be19ad6
--- /dev/null
+++ b/src/brownden.jl
@@ -0,0 +1,35 @@
+# Brown and Dennis function
+#
+# Source: Problem 16 in
+# J.J. More', B.S. Garbow and K.E. Hillstrom,
+# "Testing Unconstrained Optimization Software",
+# ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981.
+#
+# See also Buckley#30
+#
+# classification SUR2-AN-4-0
+
+export brownden
+
+"Brown and Dennis function"
+function brownden(m::Int=20)
+
+  m < 4 && warn("brownden: must have m ≥ 4")
+  m = max(m, 4)
+
+  nlp = Model()
+
+  x0 = [25.0, 5, -5, -1]
+
+  @variable(nlp, x[j=1:4], start=x0[j])
+
+  t = Float64[i/5 for i = 1:m]
+
+  @NLobjective(
+    nlp,
+    Min,
+    sum(((x[1] + t[i] * x[2] - exp(t[i]))^2 + (x[3] + x[4] * sin(t[i]) - cos(t[i]))^2)^2 for i = 1:m)
+  )
+
+  return nlp
+end
diff --git a/src/meyer3.jl b/src/meyer3.jl
new file mode 100644
index 00000000..8ca57b5f
--- /dev/null
+++ b/src/meyer3.jl
@@ -0,0 +1,36 @@
+# Meyer function
+#
+# Source: Problem 10 in
+# J.J. More', B.S. Garbow and K.E. Hillstrom,
+# "Testing Unconstrained Optimization Software",
+# ACM Transactions on Mathematical Software, vol. 7(1), pp. 17-41, 1981.
+#
+# See also Buckley #29 (p. 73).
+#
+# classification SUR2-RN-3-0
+
+export meyer3
+
+"Meyer function"
+function meyer3()
+
+  nlp = Model()
+
+  x0 = [0.02, 4000, 250]
+
+  @variable(nlp, x[j=1:3], start=x0[j])
+
+  y = [34780, 28610, 23650, 19630, 16370, 13720, 11540, 9744, 8261, 7030, 6005,
+       5147, 4427, 3820, 3307, 2872]
+
+  t = 45 + 5 * (1:16)
+
+  @NLobjective(
+    nlp,
+    Min,
+    sum((x[1] * exp(x[2]/(t[i] + x[3])) - y[i])^2 for i = 1:16)
+  )
+
+  return nlp
+
+end
diff --git a/src/palmer1c.jl b/src/palmer1c.jl
new file mode 100644
index 00000000..83ebf6d0
--- /dev/null
+++ b/src/palmer1c.jl
@@ -0,0 +1,43 @@
+# A linear least squares problem
+# arising from chemical kinetics.
+#
+# model: H-N=N=N TZVP+MP2
+# fitting Y to A0 + A2 X**2 + A4 X**4 + A6 X**6 + A8 X**8 +
+#              A10 X**10 + A12 X**12 + A14 X**14
+#
+# Source:
+# M. Palmer, Edinburgh, private communication.
+#
+# classification QUR2-RN-8-0
+
+export palmer1c
+
+"A linear least squares problem arising from chemical kinetics."
+function palmer1c()
+
+  nlp = Model()
+
+  @variable(nlp, x[j=1:8], start=1.0)
+
+  X = [-1.788963, -1.745329, -1.658063, -1.570796, -1.483530, -1.396263,
+       -1.308997, -1.218612, -1.134464, -1.047198, -0.872665, -0.698132, -0.523599,
+       -0.349066, -0.174533, 0.0000000, 1.788963, 1.745329, 1.658063, 1.570796,
+       1.483530, 1.396263, 1.308997, 1.218612, 1.134464, 1.047198, 0.872665,
+       0.698132, 0.523599, 0.349066, 0.174533, -1.8762289, -1.8325957, 1.8762289,
+       1.8325957]
+
+  Y = [ 78.596218, 65.77963, 43.96947, 27.038816, 14.6126, 6.2614, 1.538330,
+       0.000000, 1.188045, 4.6841, 16.9321, 33.6988, 52.3664, 70.1630, 83.4221,
+       88.3995, 78.596218, 65.77963, 43.96947, 27.038816, 14.6126, 6.2614, 1.538330,
+       0.000000, 1.188045, 4.6841, 16.9321, 33.6988, 52.3664, 70.1630, 83.4221,
+       108.18086, 92.733676, 108.18086, 92.733676]
+
+  @NLobjective(
+    nlp,
+    Min,
+    sum((Y[i] - (x[1] + x[2] * X[i]^2 + x[3] * X[i]^4 + x[4] * X[i]^6 + x[5] * X[i]^8 + x[6] * X[i]^10 + x[7] * X[i]^12 + x[8] * X[i]^14))^2 for i = 1:35)
+  )
+
+  return nlp
+
+end
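
Each of the six new files follows the same pattern: the residuals of the corresponding More, Garbow and Hillstrom (or Palmer) problem are written out explicitly as a sum of squares inside @NLobjective, and the problems with a dimension check clamp m to its admissible minimum after issuing a warning. For reference, here is a minimal usage sketch (not part of the diff) showing how one of the new problems could be instantiated and solved. It assumes the Ipopt.jl solver package is installed and uses the same MathProgBase-era JuMP calls (setsolver, solve, getobjectivevalue, getvalue) that match the generator syntax in these models; any other NLP-capable solver would work the same way.

    # Usage sketch: build and solve one of the new problems.
    # Assumes Ipopt.jl is available; swap in any NLP solver you prefer.
    using OptimizationProblems
    using JuMP
    using Ipopt

    nlp = brownden()                            # Brown and Dennis with the default m = 20
    setsolver(nlp, IpoptSolver(print_level=0))  # attach the solver
    status = solve(nlp)                         # returns a status symbol, e.g. :Optimal

    println("status    = ", status)
    println("objective = ", getobjectivevalue(nlp))
    println("solution  = ", getvalue(nlp[:x]))  # the variable registered as x in the model

The same three lines (construct, setsolver, solve) apply to arglina, arglinb, arglinc, meyer3 and palmer1c, with the problem-specific dimension arguments where available.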