diff --git a/testopt/mgh.go b/testopt/mgh.go
new file mode 100644
index 0000000..50329e8
--- /dev/null
+++ b/testopt/mgh.go
@@ -0,0 +1,896 @@
+package testopt
+
+import "math"
+
+// Beale implements the Beale function.
+//
+// Standard starting points:
+//   Easy: [1, 1]
+//   Hard: [1, 4]
+//
+// References:
+//  - Beale, E.: On an Iterative Method for Finding a Local Minimum of a
+//    Function of More than One Variable. Technical Report 25, Statistical
+//    Techniques Research Group, Princeton University (1958)
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type Beale struct{}
+
+func (Beale) F(x []float64) float64 {
+	f1 := 1.5 - x[0]*(1-x[1])
+	f2 := 2.25 - x[0]*(1-x[1]*x[1])
+	f3 := 2.625 - x[0]*(1-x[1]*x[1]*x[1])
+	return f1*f1 + f2*f2 + f3*f3
+}
+
+func (Beale) Df(x, grad []float64) {
+	t1 := 1 - x[1]
+	t2 := 1 - x[1]*x[1]
+	t3 := 1 - x[1]*x[1]*x[1]
+
+	f1 := 1.5 - x[0]*t1
+	f2 := 2.25 - x[0]*t2
+	f3 := 2.625 - x[0]*t3
+
+	grad[0] = -2 * (f1*t1 + f2*t2 + f3*t3)
+	grad[1] = 2 * x[0] * (f1 + 2*f2*x[1] + 3*f3*x[1]*x[1])
+}
+
+func (Beale) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{3, 0.5},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BiggsEXP2 implements the Biggs EXP2 function.
+//
+// Standard starting point:
+//   [1, 2]
+//
+// Reference:
+//   Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
+//   of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
+type BiggsEXP2 struct{}
+
+func (BiggsEXP2) F(x []float64) (sum float64) {
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y
+		sum += f * f
+	}
+	return sum
+}
+
+func (BiggsEXP2) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := math.Exp(-x[0]*z) - 5*math.Exp(-x[1]*z) - y
+
+		dfdx0 := -z * math.Exp(-x[0]*z)
+		dfdx1 := 5 * z * math.Exp(-x[1]*z)
+
+		grad[0] += 2 * f * dfdx0
+		grad[1] += 2 * f * dfdx1
+	}
+}
+
+func (BiggsEXP2) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BiggsEXP3 implements the Biggs EXP3 function.
+//
+// Standard starting point:
+//   [1, 2, 1]
+//
+// Reference:
+//   Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
+//   of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
+type BiggsEXP3 struct{}
+
+func (BiggsEXP3) F(x []float64) (sum float64) {
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y
+		sum += f * f
+	}
+	return sum
+}
+
+func (BiggsEXP3) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := math.Exp(-x[0]*z) - x[2]*math.Exp(-x[1]*z) - y
+
+		dfdx0 := -z * math.Exp(-x[0]*z)
+		dfdx1 := x[2] * z * math.Exp(-x[1]*z)
+		dfdx2 := -math.Exp(-x[1] * z)
+
+		grad[0] += 2 * f * dfdx0
+		grad[1] += 2 * f * dfdx1
+		grad[2] += 2 * f * dfdx2
+	}
+}
+
+func (BiggsEXP3) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10, 5},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BiggsEXP4 implements the Biggs EXP4 function.
+//
+// Standard starting point:
+//   [1, 2, 1, 1]
+//
+// Reference:
+//   Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
+//   of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
+type BiggsEXP4 struct{}
+
+func (BiggsEXP4) F(x []float64) (sum float64) {
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y
+		sum += f * f
+	}
+	return sum
+}
+
+func (BiggsEXP4) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+	for i := 1; i <= 10; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) - y
+
+		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
+		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
+		dfdx2 := math.Exp(-x[0] * z)
+		dfdx3 := -math.Exp(-x[1] * z)
+
+		grad[0] += 2 * f * dfdx0
+		grad[1] += 2 * f * dfdx1
+		grad[2] += 2 * f * dfdx2
+		grad[3] += 2 * f * dfdx3
+	}
+}
+
+func (BiggsEXP4) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10, 1, 5},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BiggsEXP5 implements the Biggs EXP5 function.
+//
+// Standard starting point:
+//   [1, 2, 1, 1, 1]
+//
+// Reference:
+//   Biggs, M.C.: Minimization algorithms making use of non-quadratic properties
+//   of the objective function. IMA J Appl Math 8 (1971), 315-327; doi:10.1093/imamat/8.3.315
+type BiggsEXP5 struct{}
+
+func (BiggsEXP5) F(x []float64) (sum float64) {
+	for i := 1; i <= 11; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y
+		sum += f * f
+	}
+	return sum
+}
+
+func (BiggsEXP5) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+	for i := 1; i <= 11; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + 3*math.Exp(-x[4]*z) - y
+
+		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
+		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
+		dfdx2 := math.Exp(-x[0] * z)
+		dfdx3 := -math.Exp(-x[1] * z)
+		dfdx4 := -3 * z * math.Exp(-x[4]*z)
+
+		grad[0] += 2 * f * dfdx0
+		grad[1] += 2 * f * dfdx1
+		grad[2] += 2 * f * dfdx2
+		grad[3] += 2 * f * dfdx3
+		grad[4] += 2 * f * dfdx4
+	}
+}
+
+func (BiggsEXP5) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10, 1, 5, 4},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BiggsEXP6 implements the Biggs EXP6 function.
+//
+// Standard starting point:
+//   [1, 2, 1, 1, 1, 1]
+//
+// References:
+//  - Biggs, M.C.: Minimization algorithms making use of non-quadratic
+//    properties of the objective function. IMA J Appl Math 8 (1971), 315-327;
+//    doi:10.1093/imamat/8.3.315
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type BiggsEXP6 struct{}
+
+func (BiggsEXP6) F(x []float64) (sum float64) {
+	for i := 1; i <= 13; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y
+		sum += f * f
+	}
+	return sum
+}
+
+func (BiggsEXP6) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+	for i := 1; i <= 13; i++ {
+		z := float64(i) / 10
+		y := math.Exp(-z) - 5*math.Exp(-10*z) + 3*math.Exp(-4*z)
+		f := x[2]*math.Exp(-x[0]*z) - x[3]*math.Exp(-x[1]*z) + x[5]*math.Exp(-x[4]*z) - y
+
+		dfdx0 := -z * x[2] * math.Exp(-x[0]*z)
+		dfdx1 := z * x[3] * math.Exp(-x[1]*z)
+		dfdx2 := math.Exp(-x[0] * z)
+		dfdx3 := -math.Exp(-x[1] * z)
+		dfdx4 := -z * x[5] * math.Exp(-x[4]*z)
+		dfdx5 := math.Exp(-x[4] * z)
+
+		grad[0] += 2 * f * dfdx0
+		grad[1] += 2 * f * dfdx1
+		grad[2] += 2 * f * dfdx2
+		grad[3] += 2 * f * dfdx3
+		grad[4] += 2 * f * dfdx4
+		grad[5] += 2 * f * dfdx5
+	}
+}
+
+func (BiggsEXP6) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10, 1, 5, 4, 3},
+			F:      0,
+			Global: true,
+		},
+		{
+			F:      0.005655649925,
+			Global: false,
+		},
+	}
+}
+
+// Box3D implements Box's three-dimensional function.
+//
+// Standard starting point:
+//   [0, 10, 20]
+//
+// References:
+//  - Box, M.J.: A comparison of several current optimization methods, and the
+//    use of transformations in constrained problems. Comput J 9 (1966), 67-77
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type Box3D struct{}
+
+func (Box3D) F(x []float64) (sum float64) {
+	for i := 1; i <= 10; i++ {
+		c := -float64(i) / 10
+		y := math.Exp(c) - math.Exp(10*c)
+		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
+		sum += f * f
+	}
+	return sum
+}
+
+func (Box3D) Df(x, grad []float64) {
+	grad[0] = 0
+	grad[1] = 0
+	grad[2] = 0
+	for i := 1; i <= 10; i++ {
+		c := -float64(i) / 10
+		y := math.Exp(c) - math.Exp(10*c)
+		f := math.Exp(c*x[0]) - math.Exp(c*x[1]) - x[2]*y
+
+		grad[0] += 2 * f * c * math.Exp(c*x[0])
+		grad[1] += -2 * f * c * math.Exp(c*x[1])
+		grad[2] += -2 * f * y
+	}
+}
+
+func (Box3D) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 10, 1},
+			F:      0,
+			Global: true,
+		},
+		{
+			X:      []float64{10, 1, -1},
+			F:      0,
+			Global: true,
+		},
+		// In addition, F vanishes at every point of the form {a, a, 0},
+		// so Box3D has a continuum of global minimizers that cannot be
+		// enumerated here.
+	}
+}
+
+// BrownBadlyScaled implements Brown's badly scaled function.
+//
+// Standard starting point:
+//   [1, 1]
+//
+// Reference:
+//   More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//   optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type BrownBadlyScaled struct{}
+
+func (BrownBadlyScaled) F(x []float64) float64 {
+	f1 := x[0] - 1e6
+	f2 := x[1] - 2e-6
+	f3 := x[0]*x[1] - 2
+	return f1*f1 + f2*f2 + f3*f3
+}
+
+func (BrownBadlyScaled) Df(x, grad []float64) {
+	f1 := x[0] - 1e6
+	f2 := x[1] - 2e-6
+	f3 := x[0]*x[1] - 2
+
+	grad[0] = 2*f1 + 2*f3*x[1]
+	grad[1] = 2*f2 + 2*f3*x[0]
+}
+
+func (BrownBadlyScaled) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1e6, 2e-6},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// BrownAndDennis implements the Brown and Dennis function.
+//
+// Standard starting point:
+//   [25, 5, -5, -1]
+//
+// References:
+//  - Brown, K.M., Dennis, J.E.: New computational algorithms for minimizing a
+//    sum of squares of nonlinear functions. Research Report Number 71-6, Yale
+//    University (1971)
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type BrownAndDennis struct{}
+
+func (BrownAndDennis) F(x []float64) (sum float64) {
+	for i := 0; i < 20; i++ {
+		c := float64(i+1) / 5
+		d1 := x[0] + c*x[1] - math.Exp(c)
+		d2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
+		f := d1*d1 + d2*d2
+		sum += f * f
+	}
+	return sum
+}
+
+func (BrownAndDennis) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+
+	for i := 0; i < 20; i++ {
+		c := float64(i+1) / 5
+		d1 := x[0] + c*x[1] - math.Exp(c)
+		d2 := x[2] + x[3]*math.Sin(c) - math.Cos(c)
+		f := d1*d1 + d2*d2
+		grad[0] += 4 * f * d1
+		grad[1] += 4 * f * d1 * c
+		grad[2] += 4 * f * d2
+		grad[3] += 4 * f * d2 * math.Sin(c)
+	}
+}
+
+func (BrownAndDennis) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X: []float64{-11.594439904133422,
+				13.203630051570688,
+				-0.4034394885659164,
+				0.23677877176276796,
+			},
+			F:      85822.20162635628,
+			Global: true,
+		},
+	}
+}
+
+// ExtendedPowellSingular implements the extended Powell singular function.
+// Its Hessian matrix is singular at the minimizer.
+//
+// Standard starting point:
+//   [3, -1, 0, 3, 3, -1, 0, 3, ..., 3, -1, 0, 3]
+//
+// References:
+//  - Spedicato, E.: Computational experience with quasi-Newton algorithms for
+//    minimization problems of moderately large size. Towards Global
+//    Optimization 2 (1978), 209-219
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type ExtendedPowellSingular struct{}
+
+func (ExtendedPowellSingular) F(x []float64) (sum float64) {
+	dim := len(x)
+	if dim%4 != 0 {
+		panic("dimension of the problem must be a multiple of 4")
+	}
+
+	for i := 0; i < dim; i += 4 {
+		f1 := x[i] + 10*x[i+1]
+		f2 := x[i+2] - x[i+3]
+		t := x[i+1] - 2*x[i+2]
+		f3 := t * t
+		t = x[i] - x[i+3]
+		f4 := t * t
+		sum += f1*f1 + 5*f2*f2 + f3*f3 + 10*f4*f4
+	}
+	return sum
+}
+
+func (ExtendedPowellSingular) Df(x, grad []float64) {
+	dim := len(x)
+	if dim%4 != 0 {
+		panic("dimension of the problem must be a multiple of 4")
+	}
+
+	for i := 0; i < dim; i += 4 {
+		f1 := x[i] + 10*x[i+1]
+		f2 := x[i+2] - x[i+3]
+		t1 := x[i+1] - 2*x[i+2]
+		f3 := t1 * t1
+		t2 := x[i] - x[i+3]
+		f4 := t2 * t2
+
+		grad[i] = 2*f1 + 40*f4*t2
+		grad[i+1] = 20*f1 + 4*f3*t1
+		grad[i+2] = 10*f2 - 8*f3*t1
+		grad[i+3] = -10*f2 - 40*f4*t2
+	}
+}
+
+func (ExtendedPowellSingular) Minima(dim int) []Minimum {
+	if dim%4 != 0 {
+		panic("dimension of the problem must be a multiple of 4")
+	}
+
+	// make already zeroes the slice, which is the global minimizer.
+	x := make([]float64, dim)
+	return []Minimum{
+		{
+			X:      x,
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// Gaussian implements the Gaussian function.
+//
+// Standard starting point:
+//   [0.4, 1, 0]
+//
+// Reference:
+//   More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained optimization
+//   software. ACM Trans Math Softw 7 (1981), 17-41
+type Gaussian struct{}
+
+// y returns the i-th data value fitted by the Gaussian model.
+func (Gaussian) y(i int) (yi float64) {
+	switch i {
+	case 1, 15:
+		yi = 0.0009
+	case 2, 14:
+		yi = 0.0044
+	case 3, 13:
+		yi = 0.0175
+	case 4, 12:
+		yi = 0.0540
+	case 5, 11:
+		yi = 0.1295
+	case 6, 10:
+		yi = 0.2420
+	case 7, 9:
+		yi = 0.3521
+	case 8:
+		yi = 0.3989
+	}
+	return yi
+}
+
+func (g Gaussian) F(x []float64) (sum float64) {
+	for i := 1; i <= 15; i++ {
+		c := 0.5 * float64(8-i)
+		b := c - x[2]
+		d := b * b
+		e := math.Exp(-0.5 * x[1] * d)
+		f := x[0]*e - g.y(i)
+		sum += f * f
+	}
+	return sum
+}
+
+func (g Gaussian) Df(x, grad []float64) {
+	grad[0] = 0
+	grad[1] = 0
+	grad[2] = 0
+	for i := 1; i <= 15; i++ {
+		c := 0.5 * float64(8-i)
+		b := c - x[2]
+		d := b * b
+		e := math.Exp(-0.5 * x[1] * d)
+		f := x[0]*e - g.y(i)
+
+		grad[0] += 2 * f * e
+		// The factor 2 from differentiating f² cancels the 0.5 in the
+		// exponent of e.
+		grad[1] -= f * e * d * x[0]
+		grad[2] += 2 * f * e * x[0] * x[1] * b
+	}
+}
+
+func (Gaussian) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{0.3989561, 1.0000191, 0},
+			F:      1.12793e-8,
+			Global: true,
+		},
+	}
+}
+
+// GulfResearchAndDevelopment implements the Gulf Research and Development function.
+//
+// Standard starting point:
+//   [5, 2.5, 0.15]
+//
+// References:
+//  - Cox, R.A.: Comparison of the performance of seven optimization algorithms
+//    on twelve unconstrained minimization problems. Ref. 1335CNO4, Gulf
+//    Research and Development Company, Pittsburg (1969)
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type GulfResearchAndDevelopment struct{}
+
+func (GulfResearchAndDevelopment) F(x []float64) (sum float64) {
+	for i := 1; i <= 99; i++ {
+		c := float64(i) / 100
+		yi := 25 + math.Pow(-50*math.Log(c), 2.0/3.0)
+		d := math.Abs(yi - x[1])
+		e := math.Pow(d, x[2]) / x[0]
+		f := math.Exp(-e) - c
+
+		sum += f * f
+	}
+	return sum
+}
+
+func (GulfResearchAndDevelopment) Df(x, grad []float64) {
+	for i := range grad {
+		grad[i] = 0
+	}
+
+	for i := 1; i <= 99; i++ {
+		c := float64(i) / 100
+		yi := 25 + math.Pow(-50*math.Log(c), 2.0/3.0)
+		r := yi - x[1]
+		d := math.Abs(r)
+		e := math.Pow(d, x[2]) / x[0]
+		f := math.Exp(-e) - c
+
+		grad[0] += 2 * f * math.Exp(-e) * e / x[0]
+		// Dividing by the signed difference r rather than by d = |r|
+		// carries the sign from differentiating |yi - x[1]| with
+		// respect to x[1].
+		grad[1] += 2 * f * math.Exp(-e) * e * x[2] / r
+		grad[2] -= 2 * f * math.Exp(-e) * e * math.Log(d)
+	}
+}
+
+func (GulfResearchAndDevelopment) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{50, 25, 1.5},
+			F:      0,
+			Global: true,
+		},
+		{
+			X:      []float64{99.89537833835886, 60.61453903025014, 9.16124389236433},
+			F:      32.8345,
+			Global: false,
+		},
+		{
+			X:      []float64{201.662589489426, 60.616331504682, 10.224891158489},
+			F:      32.8345,
+			Global: false,
+		},
+	}
+}
+
+// HelicalValley implements the helical valley function of Fletcher and Powell.
+//
+// Standard starting point:
+//   [-1, 0, 0]
+//
+// References:
+//  - Fletcher, R., Powell, M.J.D.: A rapidly convergent descent method for
+//    minimization. Comput J 6 (1963), 163-168
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type HelicalValley struct{}
+
+func (HelicalValley) F(x []float64) float64 {
+	theta := 0.5 * math.Atan2(x[1], x[0]) / math.Pi
+	r := math.Hypot(x[0], x[1])
+
+	f1 := 10 * (x[2] - 10*theta)
+	f2 := 10 * (r - 1)
+	f3 := x[2]
+
+	return f1*f1 + f2*f2 + f3*f3
+}
+
+func (HelicalValley) Df(x, grad []float64) {
+	theta := 0.5 * math.Atan2(x[1], x[0]) / math.Pi
+	r := math.Hypot(x[0], x[1])
+	s := x[2] - 10*theta
+	t := 5 * s / r / r / math.Pi
+
+	grad[0] = 200 * (x[0] - x[0]/r + x[1]*t)
+	grad[1] = 200 * (x[1] - x[1]/r - x[0]*t)
+	grad[2] = 2 * (x[2] + 100*s)
+}
+
+func (HelicalValley) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1, 0, 0},
+			F:      0,
+			Global: true,
+		},
+	}
+}
+
+// PenaltyI implements the first penalty function by Gill, Murray and Pitfield.
+//
+// Standard starting point:
+//   [1, ..., n]
+//
+// References:
+//  - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
+//    quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
+//    National Phys Lab (1972), 82-83
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type PenaltyI struct{}
+
+func (PenaltyI) F(x []float64) (sum float64) {
+	for i := 0; i < len(x); i++ {
+		t := x[i] - 1
+		sum += t * t
+	}
+	sum *= 1e-5
+
+	var s float64
+	for i := 0; i < len(x); i++ {
+		s += x[i] * x[i]
+	}
+	sum += (s - 0.25) * (s - 0.25)
+
+	return sum
+}
+
+func (PenaltyI) Df(x, grad []float64) {
+	s := -0.25
+	for i := 0; i < len(x); i++ {
+		s += x[i] * x[i]
+	}
+
+	for i := 0; i < len(grad); i++ {
+		grad[i] = 2 * (2*s*x[i] + 1e-5*(x[i]-1))
+	}
+}
+
+func (PenaltyI) Minima(dim int) []Minimum {
+	switch dim {
+	case 4:
+		return []Minimum{
+			{
+				X:      []float64{0.2500075, 0.2500075, 0.2500075, 0.2500075},
+				F:      2.2499775e-5,
+				Global: true,
+			},
+		}
+	case 10:
+		return []Minimum{
+			{
+				X: []float64{0.158122301, 0.158122301, 0.158122301, 0.158122301,
+					0.158122301, 0.158122301, 0.158122301, 0.158122301, 0.158122301,
+					0.158122301},
+				F:      7.0876515e-5,
+				Global: true,
+			},
+		}
+	default:
+		return nil
+	}
+}
+
+// PenaltyII implements the second penalty function by Gill, Murray and Pitfield.
+//
+// Standard starting point:
+//   [0.5, ..., 0.5]
+//
+// References:
+//  - Gill, P.E., Murray, W., Pitfield, R.A.: The implementation of two revised
+//    quasi-Newton algorithms for unconstrained optimization. Report NAC 11,
+//    National Phys Lab (1972), 82-83
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type PenaltyII struct{}
+
+func (PenaltyII) F(x []float64) (sum float64) {
+	dim := len(x)
+
+	s := -1.0
+	for i := 0; i < dim; i++ {
+		s += float64(dim-i) * x[i] * x[i]
+	}
+	for i := 1; i < dim; i++ {
+		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
+		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
+		sum += f * f
+	}
+	for i := 1; i < dim; i++ {
+		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
+		sum += f * f
+	}
+	sum *= 1e-5
+
+	sum += (x[0] - 0.2) * (x[0] - 0.2)
+	sum += s * s
+
+	return sum
+}
+
+func (PenaltyII) Df(x, grad []float64) {
+	dim := len(x)
+
+	s := -1.0
+	for i := 0; i < dim; i++ {
+		s += float64(dim-i) * x[i] * x[i]
+	}
+
+	for i := 0; i < dim; i++ {
+		grad[i] = 4 * s * float64(dim-i) * x[i]
+	}
+	for i := 1; i < dim; i++ {
+		yi := math.Exp(float64(i+1)/10) + math.Exp(float64(i)/10)
+		f := math.Exp(x[i]/10) + math.Exp(x[i-1]/10) - yi
+		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
+		grad[i-1] += 1e-5 * f * math.Exp(x[i-1]/10) / 5
+	}
+	for i := 1; i < dim; i++ {
+		f := math.Exp(x[i]/10) - math.Exp(-1.0/10)
+		grad[i] += 1e-5 * f * math.Exp(x[i]/10) / 5
+	}
+	grad[0] += 2 * (x[0] - 0.2)
+}
+
+func (PenaltyII) Minima(dim int) []Minimum {
+	switch dim {
+	case 4:
+		return []Minimum{
+			{
+				X:      []float64{0.19999933335, 0.19131670128566283, 0.4801014860897, 0.5188454026659},
+				F:      9.37629e-6,
+				Global: true,
+			},
+		}
+	case 10:
+		return []Minimum{
+			{
+				X: []float64{0.19998360520892217, 0.010350644318663525,
+					0.01960493546891094, 0.03208906550305253, 0.04993267593895693,
+					0.07651399534454084, 0.11862407118600789, 0.1921448731780023,
+					0.3473205862372022, 0.36916437893066273},
+				F:      2.936605e-4,
+				Global: true,
+			},
+		}
+	default:
+		return nil
+	}
+}
+
+// PowellBadlyScaled implements Powell's badly scaled function.
+//
+// Standard starting point:
+//   [0, 1]
+//
+// References:
+//  - Powell, M.J.D.: A Hybrid Method for Nonlinear Equations. Numerical
+//    Methods for Nonlinear Algebraic Equations, P. Rabinowitz (ed.), Gordon
+//    and Breach (1970)
+//  - More, J., Garbow, B.S., Hillstrom, K.E.: Testing unconstrained
+//    optimization software. ACM Trans Math Softw 7 (1981), 17-41
+type PowellBadlyScaled struct{}
+
+func (PowellBadlyScaled) F(x []float64) float64 {
+	f1 := 1e4*x[0]*x[1] - 1
+	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
+	return f1*f1 + f2*f2
+}
+
+func (PowellBadlyScaled) Df(x, grad []float64) {
+	f1 := 1e4*x[0]*x[1] - 1
+	f2 := math.Exp(-x[0]) + math.Exp(-x[1]) - 1.0001
+
+	grad[0] = 2 * (1e4*f1*x[1] - f2*math.Exp(-x[0]))
+	grad[1] = 2 * (1e4*f1*x[0] - f2*math.Exp(-x[1]))
+}
+
+func (PowellBadlyScaled) Minima(_ int) []Minimum {
+	return []Minimum{
+		{
+			X:      []float64{1.09815933e-5, 9.10614674},
+			F:      0,
+			Global: true,
+		},
+	}
+}
diff --git a/testopt/mgh_test.go b/testopt/mgh_test.go
new file mode 100644
index 0000000..6ba42a3
--- /dev/null
+++ b/testopt/mgh_test.go
@@ -0,0 +1,354 @@
+package testopt
+
+import (
+	"math"
+	"reflect"
+	"testing"
+
+	"github.com/gonum/floats"
+)
+
+type funcTest struct {
+	x []float64
+
+	wantF    float64
+	wantGrad []float64
+}
+
+const (
+	defaultTol     = 1e-8
+	defaultGradTol = 1e-4
+)
+
+func testFunction(f Function, tests []funcTest, t *testing.T) {
+	for i, test := range tests {
+		F := f.F(test.x)
+		if !floats.EqualWithinAbs(F, test.wantF, defaultTol) {
+			t.Errorf("Test %d: %v function value incorrect. Want: %v, Got: %v",
+				i, reflect.TypeOf(f), test.wantF, F)
+		}
+
+		if test.wantGrad != nil {
+			grad := make([]float64, len(test.wantGrad))
+			f.Df(test.x, grad)
+
+			dist := floats.Distance(grad, test.wantGrad, math.Inf(1))
+			if dist > defaultGradTol {
+				t.Errorf("Test %d: %v function gradient incorrect. |grad - wantGrad|_∞ = %v",
+					i, reflect.TypeOf(f), dist)
+			}
+		}
+	}
+}
+
+func TestBeale(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 1},
+			wantF:    14.203125,
+			wantGrad: []float64{0, 27.75},
+		},
+		{
+			x:        []float64{1, 4},
+			wantF:    4624.453125,
+			wantGrad: []float64{8813.25, 6585},
+		},
+		{
+			x:        []float64{3, 0.5},
+			wantF:    0,
+			wantGrad: []float64{0, 0},
+		},
+	}
+	testFunction(Beale{}, tests, t)
+}
+
+func TestBiggsEXP2(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 10},
+			wantF:    0,
+			wantGrad: []float64{0, 0},
+		},
+	}
+	testFunction(BiggsEXP2{}, tests, t)
+}
+
+func TestBiggsEXP3(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 10, 5},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0},
+		},
+	}
+	testFunction(BiggsEXP3{}, tests, t)
+}
+
+func TestBiggsEXP4(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 10, 1, 5},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0, 0},
+		},
+	}
+	testFunction(BiggsEXP4{}, tests, t)
+}
+
+func TestBiggsEXP5(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 10, 1, 5, 4},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0, 0, 0},
+		},
+	}
+	testFunction(BiggsEXP5{}, tests, t)
+}
+
+func TestBiggsEXP6(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:     []float64{1, 2, 1, 1, 1, 1},
+			wantF: 0.77907007565597,
+			wantGrad: []float64{-0.149371887533426,
+				-0.183163468182936,
+				-1.483958013575642,
+				1.428277503849742,
+				-0.149371887533426,
+				-1.483958013575642},
+		},
+		{
+			x:        []float64{1, 10, 1, 5, 4, 3},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0, 0, 0, 0},
+		},
+	}
+	testFunction(BiggsEXP6{}, tests, t)
+}
+
+func TestBox3D(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{0, 10, 20},
+			wantF:    1031.15381060940,
+			wantGrad: []float64{98.22343149849218, -2.11937420675874, 112.38817362220350},
+		},
+		{
+			x:        []float64{1, 10, 1},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0},
+		},
+		{
+			x:        []float64{10, 1, -1},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0},
+		},
+	}
+	testFunction(Box3D{}, tests, t)
+}
+
+func TestBrownBadlyScaled(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 1},
+			wantF:    999998000003,
+			wantGrad: []float64{-2.00000000000000e+06, -3.99999999967093e-06},
+		},
+		{
+			x:        []float64{1e6, 2e-6},
+			wantF:    0,
+			wantGrad: []float64{0, 0},
+		},
+	}
+	testFunction(BrownBadlyScaled{}, tests, t)
+}
+
+func TestBrownAndDennis(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:     []float64{25, 5, -5, -1},
+			wantF: 7926693.33699744,
+			wantGrad: []float64{1149322.836365895,
+				1779291.674339785,
+				-254579.585463521,
+				-173400.429253115},
+		},
+		{
+			x: []float64{-11.594439904133422,
+				13.203630051570688,
+				-0.4034394885659164,
+				0.23677877176276796},
+			wantF:    85822.20162635628,
+			wantGrad: []float64{0, 0, 0, 0},
+		},
+	}
+	testFunction(BrownAndDennis{}, tests, t)
+}
+
+func TestExtendedPowellSingular(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{3, -1, 0, 3},
+			wantF:    95,
+			wantGrad: []float64{-14, -144, -22, 30},
+		},
+		{
+			x:        []float64{3, -1, 0, 3, 3, -1, 0, 3},
+			wantF:    190,
+			wantGrad: []float64{-14, -144, -22, 30, -14, -144, -22, 30},
+		},
+		{
+			x:        []float64{0, 0, 0, 0},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0, 0},
+		},
+		{
+			x:        []float64{0, 0, 0, 0, 0, 0, 0, 0},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0, 0, 0, 0, 0, 0},
+		},
+	}
+	testFunction(ExtendedPowellSingular{}, tests, t)
+}
+
+func TestGaussian(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{0.4, 1, 0},
+			wantF:    3.88810699116688e-06,
+			wantGrad: []float64{7.41428466839991e-03, -7.44126392165149e-04, -5.30189685421989e-20},
+		},
+		{
+			x:        []float64{0.3989561, 1.0000191, 0},
+			wantF:    1.12793e-08,
+			wantGrad: []float64{0, 0, 0},
+		},
+	}
+	testFunction(Gaussian{}, tests, t)
+}
+
+func TestGulfResearchAndDevelopment(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{5, 2.5, 0.15},
+			wantF:    12.1107058255695,
+			wantGrad: []float64{2.0879783574289799, 0.0345792619697154, -39.6766801029386400},
+		},
+		{
+			x:        []float64{50, 25, 1.5},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0},
+		},
+		{
+			x:        []float64{99.89537833835886, 60.61453903025014, 9.16124389236433},
+			wantF:    32.8345,
+			wantGrad: []float64{0, 0, 0},
+		},
+		{
+			x:        []float64{201.662589489426, 60.616331504682, 10.224891158489},
+			wantF:    32.8345,
+			wantGrad: []float64{0, 0, 0},
+		},
+	}
+	testFunction(GulfResearchAndDevelopment{}, tests, t)
+}
+
+func TestHelicalValley(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{-1, 0, 0},
+			wantF:    2500,
+			wantGrad: []float64{0, -1.59154943091895e+03, -1e+03},
+		},
+		{
+			x:        []float64{1, 0, 0},
+			wantF:    0,
+			wantGrad: []float64{0, 0, 0},
+		},
+	}
+	testFunction(HelicalValley{}, tests, t)
+}
+
+func TestPenaltyI(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:        []float64{1, 2, 3, 4},
+			wantF:    885.06264,
+			wantGrad: []float64{119, 238.00002, 357.00004, 476.00006},
+		},
+		{
+			x:     []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+			wantF: 148032.56535,
+			wantGrad: []float64{1539, 3078.00002, 4617.00004, 6156.00006,
+				7695.00008, 9234.0001, 10773.00012, 12312.00014, 13851.00016,
+				15390.00018},
+		},
+		{
+			x:        []float64{0.2500075, 0.2500075, 0.2500075, 0.2500075},
+			wantF:    2.2499775e-5,
+			wantGrad: []float64{0, 0, 0, 0},
+		},
+		{
+			x: []float64{0.158122301, 0.158122301, 0.158122301, 0.158122301,
+				0.158122301, 0.158122301, 0.158122301, 0.158122301, 0.158122301,
+				0.158122301},
+			wantF:    7.0876515e-5,
+			wantGrad: []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		},
+	}
+	testFunction(PenaltyI{}, tests, t)
+}
+
+func TestPenaltyII(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:     []float64{0.5, 0.5, 0.5, 0.5},
+			wantF: 2.34000880546302,
+			wantGrad: []float64{12.59999952896435,
+				8.99999885134508,
+				5.99999776830493,
+				2.99999875380719},
+		},
+		{
+			x:     []float64{0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5},
+			wantF: 162.652776565967,
+			wantGrad: []float64{255.5999995289644, 229.4999988513451,
+				203.9999977683049, 178.4999965713605, 152.9999952485322,
+				127.4999937865809, 101.9999921708749, 76.4999903852436,
+				50.9999884118158, 25.4999938418451},
+		},
+		{
+			x: []float64{0.19999933335, 0.19131670128566283,
+				0.4801014860897, 0.5188454026659},
+			wantF:    9.37629e-06,
+			wantGrad: []float64{0, 0, 0, 0},
+		},
+		{
+			x: []float64{0.19998360520892217, 0.010350644318663525,
+				0.01960493546891094, 0.03208906550305253, 0.04993267593895693,
+				0.07651399534454084, 0.11862407118600789, 0.1921448731780023,
+				0.3473205862372022, 0.36916437893066273},
+			wantF:    2.936605e-4,
+			wantGrad: []float64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		},
+	}
+	testFunction(PenaltyII{}, tests, t)
+}
+
+func TestPowellBadlyScaled(t *testing.T) {
+	tests := []funcTest{
+		{
+			x:     []float64{0, 1},
+			wantF: 1.13526171734838,
+			wantGrad: []float64{-2.00007355588823e+04,
+				-2.70596990584991e-01},
+		},
+		{
+			x:        []float64{1.09815933e-5, 9.10614674},
+			wantF:    0,
+			wantGrad: []float64{0, 0},
+		},
+	}
+	testFunction(PowellBadlyScaled{}, tests, t)
+}
diff --git a/testopt/testopt.go b/testopt/testopt.go
new file mode 100644
index 0000000..02ef4ef
--- /dev/null
+++ b/testopt/testopt.go
@@ -0,0 +1,27 @@
+// Copyright ©2015 The gonum Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testopt provides objective functions for testing optimization algorithms.
+package testopt
+
+// Minimum represents a known minimum of an objective function.
+type Minimum struct {
+	// X is the location of the minimum. If unknown, X is nil.
+	X []float64
+	// F is the value of the objective function at X.
+	F float64
+	// Global indicates whether the minimum is global or only local.
+	Global bool
+}
+
+// Function represents an objective function that can also provide information
+// about its known minima.
+type Function interface {
+	// F returns the value of the objective function at x.
+	F(x []float64) float64
+	// Df stores the gradient of the objective function at x into gradient.
+	Df(x, gradient []float64)
+	// Minima returns the known minima for the given problem dimension.
+	Minima(dim int) []Minimum
+}
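
As a quick cross-check of the hand-derived gradients above, each Df can be compared against a central finite-difference approximation of F. A minimal sketch of such a check, assuming the package lives at the import path github.com/gonum/testopt; the import path, helper name, step size, and tolerance are illustrative, not part of this diff:

package testopt_test

import (
	"math"
	"testing"

	"github.com/gonum/testopt" // assumed import path for the package in this diff
)

// checkGradient compares the analytic gradient from Df against a central
// finite-difference approximation of F at x.
func checkGradient(t *testing.T, f testopt.Function, x []float64) {
	const h = 1e-6   // central-difference step (illustrative)
	const tol = 1e-4 // allowed discrepancy (illustrative)

	grad := make([]float64, len(x))
	f.Df(x, grad)

	xh := make([]float64, len(x))
	for i := range x {
		copy(xh, x)
		xh[i] = x[i] + h
		fp := f.F(xh)
		xh[i] = x[i] - h
		fm := f.F(xh)
		fd := (fp - fm) / (2 * h) // approximates grad[i]

		if math.Abs(fd-grad[i]) > tol*math.Max(1, math.Abs(fd)) {
			t.Errorf("grad[%d]: analytic %v, finite difference %v", i, grad[i], fd)
		}
	}
}

func TestBealeGradientFD(t *testing.T) {
	// The hard standard starting point for Beale.
	checkGradient(t, testopt.Beale{}, []float64{1, 4})
}

The same helper applies to every type in mgh.go, which is the main benefit of routing all functions through the Function interface.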