1 parent 70d5612 commit 8153cb8b17330a306a7aa0e42d7977c2ab370d0d @lvv committed Jun 5, 2009
Showing with 94 additions and 23 deletions.
  1. +10 −6 doc/Makefile
  2. +19 −2 doc/index.txt
  3. +42 −9 line_search.h
  4. +5 −2 note
  5. +18 −4 object_function.h
@@ -1,11 +1,6 @@
-example:
-
-example-gsl: LDFLAGS=-lgsl
-
-example-newuoa: LDFLAGS=-llopti -lgfortran
-
+###################################################### HTML
PREFIX ?= /usr/local
WEB_DESTDIR ?= /tmp/localweb
@@ -27,3 +22,12 @@ localweb: index.html *.gif example*.cc
clean:
rm -f *.html /tmp/localweb
git clean -df
+
+###################################################### EXAMPLES
+
+example:
+
+example-gsl: LDFLAGS=-lgsl
+
+example-newuoa: LDFLAGS=-llopti -lgfortran
+
@@ -343,7 +343,7 @@ include::../../volnitsky.com/project/howto-submit-patch.txt[]
- [[[2]]] R. Hooke and T. A. Jeeves, 'Direct Search Solution of Numerical and Statistical Problems', Journal of the ACM, Vol. 8, April 1961, pp. 212-229
- [[[3]]] M. J. D. Powell. 'The NEWUOA software for unconstrained optimization with derivatives'. DAMTP Report 2004/NA05, University of Cambridge, 2004.
- [[[4]]] Frank Vanden Berghen, Hugues Bersini, 'CONDOR, a new parallel, constrained extension of Powell's UOBYQA algorithm: Experimental results and comparison with the DFO algorithm',
-Journal of Computational and Applied Mathematics, Elsevier, Volume 181, Issue 1, September 2005, Pages 157-175
+ Journal of Computational and Applied Mathematics, Elsevier, Volume 181, Issue 1, September 2005, Pages 157-175
- [[[5]]] 'Intel Math Kernel Library Reference Manual'. Optimization Solver Routines: www.intel.com/software/products/mkl/docs/WebHelp/osr/osr_Intro.html
- [[[6]]] Stephen Boyd, Stanford Engineering Open Courseware, 'Convex Optimization' - see.stanford.edu/see/lecturelist.aspx?coll=2db7ced4-39d1-4fdb-90e8-364129597c87
  * Backtracking Line Search, Armijo-Goldstein condition (ee364a lecture 15, 18min)
@@ -359,5 +359,22 @@ Journal of Computational and Applied Mathematics, Elsevier, Volume 181, Issue 1,
* Quasi-Newton Methods (p406, 11.2.3)
- [[[9]]] S.D. Conte, Carl de Boor 'Elementary Numerical Analysis - An Algorithmic Approach', McGraw-Hill 1980, 3rd ed.
* Steepest Descent (p211 5.1)
-
+- [[[10]]] W.H., Teukolsky S.A., Vetterling W.T., Flannery B.P. 'Numerical Recipes - The Art of Scientific Computing'
+ 3nd ed, Cambridge University Press 2007(1988), ISBN-13 978-0-511-33555-6, ISBN-13 978-0-521-88068-8
+ * Simplex Method (p502 10.5)
+ * Direction Set (Powell's) method (p509 10.7)
+ * Quasi-Newton (BFGS) (p521 10.9)
+
+- [[[11]]] Quarteroni A., Sacco R., Saleri F. 'Numerical Mathematics', 2nd ed., Springer 2007
+ * The Hooke and Jeeves Method (p300 7.2.1)
+ * Descent Methods (good overview, Newton, quasi-Newton, Gradient, Conjugate Gradient method) (p306 7.2.2)
+ * Newton (p313 7.2.6)
+ * quasi-Newton (p313 7.2.7)
+
+- [[[12]]] Schwartz R. 'Biological Modeling and Simulation. A Survey of Practical Models, Algorithms, and Numerical Methods', MIT Press 2008, ISBN 0262195844
+ * The Levenberg–Marquardt Method (p90 5.6.2)
+
+- [[[13]]] Zarowski C.J. 'An Introduction to Numerical Analysis for Electrical and Computer Engineers', Wiley 2004
+  * Backtracking Line Search (p353)
+  * Newton's algorithm with backtracking (p353 8.4)
// vim: set filetype=asciidoc nu:
@@ -13,8 +13,8 @@
using lvv::array;
// ALGORITHM FROM [[[6]]] Stephen Boyd, "Backtracking Line Search, Armijo-Goldstein condition" (ee364a lecture 15, 18min)
- // α ∈ (0, 0.5)
- // β ∈ (0, 1.0) ≈0.5
+	// α ∈ (0, 0.5);	Zarowski: (0.1, 0.3)
+	// β ∈ (0, 1.0) ≈0.5;	Zarowski: (0.1, 0.5)
// t0 ≈ 1;
//
// t=t0;
@@ -33,20 +33,53 @@
// gnuplot:
// set grid; set yrange[-1:4]; f(x)=(x-1)**2 +1; df(x)=2*(x-1); x0=0; a=0.3; ;dx=-df(x0)/abs(df(x0)); plot [-1:2] f(x0+dx*x), f(x0)+dx*df(x0)*x, f(x0) +a*dx*df(x0)*x
- // a=0.5; x0=2.9; set grid; e=2.71828; f(x)=e**(1-x)+x; df(x)=1-e**(1-x); f_mod_low(x)=f(x0)+df(x0)*(x-x0); f_mod_high(x)=f(x0)+a*df(x0)*(x-x0); plot [-1:4] [-5:5] f(x) w p , df(x), f_mod_low(x), f_mod_high(x)
+ // a=.5; x0=3; set grid; e=2.71828; f(x)=e**(1-x)+x; df(x)=1-e**(1-x); f_mod_low(x)=f(x0)+df(x0)*(x-x0); f_mod_high(x)=f(x0)+a*df(x0)*(x-x0); plot [-1.8:12] [-2:12] f(x) w p , df(x), f_mod_low(x), f_mod_high(x)
+ //
+	// for a quadratic f(x) = x², the exact line-search step is -f'(x)/2:
+	// f'(x) = 2x, so x - f'(x)/2 = 0 reaches the minimizer in one step
+	// plot [0:4] x**2, 2*x
+ //
namespace lopti {
-struct line_search_backtracking {
- int eval_cnt;
- objective
- line_search_backtracking (){};
- find
+template<typename V>
+struct line_search_backtracking_t {
+	typedef typename V::value_type	T;	// scalar type of vector V
+	objective_p_t	objective_v;
+ const T alpha;
+ const T beta;
+ const T t0;
+ line_search_backtracking_t (
+ objective_p_t objective_v,
+ const T alpha = 0.5,
+ const T beta = 0.5,
+ const T t0 = 1.
+ ) :
+ objective_v (objective_v),
+ alpha (alpha),
+ beta (beta),
+ t0 (t0)
+ {};
+
+
+	V	find (const V& X0, const V& DX)	{	// return by value: X is local, V&& would dangle
+ T t = t0;
+ T f0 = objective_v->eval0 (X0);
+ V G0 = objective_v->eval1 (X0);
+ V X;
+
+ for (int i = 1; i< 50; i++) {
+ X = X0 + t * DX;
+ T f = objective_v->eval0(X);
+ if ( f < f0 + alpha * t * dot(G0,DX)) {
+ return X;
+ }
+ t = beta * t;
+ }
+		return X;	// iteration limit reached without satisfying the Armijo condition; return last (smallest-step) trial point
+	}
+};
-
} // namespace lopti
#endif // LOPTI_H
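
For reference, a self-contained sketch of the same Armijo backtracking rule on a toy quadratic, with std::vector standing in for lvv::array and hard-coded f/grad standing in for objective_v->eval0/eval1 (all names below are illustrative, not part of the commit):

	// Standalone illustration of the backtracking loop in find() above (C++11).
	#include <cstdio>
	#include <vector>
	using std::vector;

	static double         f    (const vector<double>& X) { return (X[0]-1)*(X[0]-1) + (X[1]-2)*(X[1]-2); }
	static vector<double> grad (const vector<double>& X) { return { 2*(X[0]-1), 2*(X[1]-2) }; }
	static double         dot  (const vector<double>& A, const vector<double>& B) { return A[0]*B[0] + A[1]*B[1]; }

	int main () {
		const double alpha = 0.5,  beta = 0.5;		// Boyd: α ∈ (0,0.5), β ∈ (0,1)
		vector<double> X0 = { 0.0, 0.0 };
		vector<double> G0 = grad (X0);
		vector<double> DX = { -G0[0], -G0[1] };		// steepest-descent direction
		double f0 = f (X0),  t = 1.0;			// t0 = 1

		for (int i = 0;  i < 50;  ++i) {
			vector<double> X = { X0[0] + t*DX[0],  X0[1] + t*DX[1] };
			if (f (X) < f0 + alpha * t * dot (G0, DX)) {	// Armijo condition
				printf ("accepted t=%g  X=(%g, %g)  f=%g\n", t, X[0], X[1], f (X));
				return 0;
			}
			t *= beta;				// shrink step and retry
		}
		printf ("no acceptable step within 50 reductions\n");
		return 1;
	}

With X0 = (0,0) this accepts t = 0.25 and lands at X = (0.5, 1), matching a hand check of the condition f(X) < f0 + α·t·∇f·ΔX.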
View
@@ -1,9 +1,12 @@
Steepest Descent -- Cauchy 1847
- Boyd Lecture 15, 18min
- Armijo-Goldstain
+
+	Armijo-Goldstein
+	Boyd Lecture 15, 18min
		".. this condition sometimes is called Armijo, or .. I really
		wish it would be called Armijo-Goldstein, really Goldstein,
		he is from Moscow, I am almost certain this is where it came from"
+ Goldstein A. A. and Price J. B. (1967) An Effective Algorithm for Minimization.
+	Numer. Math. 10: 184–189
IMPLEMENTATIONS
BRENT Algorithms for Minimization Without Derivatives
@@ -95,23 +95,37 @@ struct objective1: objective0<V> { // Lopti Object FuncTor
V G = (*wrapped_objective_v).eval1(X);
return G;
}
-}
+ }
///////////////////////////////////////////////////////////////////////////////////////// OF: ROSENBROCK
template<typename V> struct rosenbrock : objective0<V> { OBJECTIVE_TYPES; OBJECTIVE_MEMBERS; CLONER(rosenbrock)
rosenbrock () : objective0<V>("rosenbrock") { V const X_answ = {{ 1.0, 1.0 }}; known_optimum(X_answ); };
+ // unset view; set surface; set isosamples 150,150; set contour base; set cntrparam levels 20; splot [-3:4] [-2:8] log10 (100*(y-x**2)**2 + (1-x)**2)
+ // set view map ; unset surface; set grid ; set samples 500,500; set contour base; set cntrparam levels 20; splot [-3:4] [-2:8] log10 (100*(y-x**2)**2 + (1-x)**2)
T operator() (V& X) { iter_++; return 100 * pow2(X[1+B]-pow2(X[0+B])) + pow2(1-X[0+B]); };
T	eval0	(V& X)		{ return operator() (X); };
V	eval1	(V& X)	{	// return by value: G is local, so returning V&& would dangle
V G;
- G[0+B] = -400 * X[0+B] * (X[1+B]-pow2(X[0+B])) ;
- G[1+B] = 200 * (X[1+B]-pow2(X[0+B])) ;
- };
+ G[0+B] = -400 * X[0+B] * (X[1+B] - pow2(X[0+B])) - 2*(1-X[0+B]) ;
+ G[1+B] = 200 * (X[1+B] - pow2(X[0+B])) ;
// (%o3) rb(x0,x1):=100*(x1-x0^2)^2+(1-x0)^2
// (%i5) diff(rb(x0,x1),x0);
// (%o5) -400*x0*(x1-x0^2)-2*(1-x0)
// (%i6) diff(rb(x0,x1),x1);
// (%o6) 200*(x1-x0^2)
+ return G;
+ };
+
+	/*	typedef matrix<T,V::sz,V::sz>	M;	// hypothetical matrix type, not yet in lvv
+
+	M	eval2	(V& X)	{			// Rosenbrock Hessian (sketch, kept commented out)
+		M H;
+		H[1][1] = 1200*pow2(X[0+B]) - 400*X[1+B] + 2;	H[1][2] = -400*X[0+B];
+		H[2][1] = -400*X[0+B];				H[2][2] = 200;
+		return H;
+	}; */
};
template<typename V> typename V::value_type plain_fn_rosenbrock (V& X) { const int B = V::ibg; return 100 * pow2(X[1+B]-pow2(X[0+B])) + pow2(1-X[0+B]); };
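
The gradient fix above (the previously missing -2*(1-x0) term in G[0+B], cf. the maxima output in the comments) is easy to validate with a central-difference check. A minimal standalone sketch, with plain double[2] standing in for lvv::array (names below are illustrative):

	// Central-difference check of the corrected Rosenbrock gradient.
	#include <cstdio>

	static double rb (const double X[2]) { double d = X[1] - X[0]*X[0];  return 100*d*d + (1-X[0])*(1-X[0]); }

	static void rb_grad (const double X[2], double G[2]) {	// analytic gradient, as in eval1() above
		G[0] = -400*X[0]*(X[1] - X[0]*X[0]) - 2*(1 - X[0]);
		G[1] =  200*(X[1] - X[0]*X[0]);
	}

	int main () {
		const double h = 1e-6;
		double X[2] = { -1.2, 1.0 };		// classic Rosenbrock start point
		double G[2];
		rb_grad (X, G);

		for (int k = 0;  k < 2;  ++k) {		// central difference along each coordinate
			double Xp[2] = { X[0], X[1] },  Xm[2] = { X[0], X[1] };
			Xp[k] += h;   Xm[k] -= h;
			double g_fd = (rb (Xp) - rb (Xm)) / (2*h);
			printf ("G[%d]: analytic=%-10g numeric=%-10g diff=%g\n", k, G[k], g_fd, G[k] - g_fd);
		}
		return 0;
	}

At (-1.2, 1) both columns should agree to about 1e-5: G[0] = -215.6, G[1] = -88. Dropping the -2*(1-x0) term, as the old code did, shows up immediately as a 4.4 discrepancy in G[0].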
