/**
* Created by acastillo on 8/5/15.
*/
var math = require("mathjs");
var numeric = require('numeric');
math.import(numeric, {wrap: true, silent: true});
var DEBUG = false;
/** Levenberg-Marquardt curve-fitting: minimize sum of weighted squared residuals
---------- INPUT VARIABLES -----------
func = function of the independent variables, 't', and the n parameters, 'p',
returning the simulated model: y_hat = func(t,p,c)
p = n-vector of initial guess of parameter values
t = m-vectors or matrix of independent variables (used as arg to func)
y_dat = m-vectors or matrix of data to be fit by func(t,p)
weight = weighting vector for least squares fit ( weight >= 0 ) ...
inverse of the standard measurement errors
Default: sqrt(d.o.f. / ( y_dat' * y_dat ))
dp = fractional increment of 'p' for numerical derivatives
dp(j)>0 central differences calculated
dp(j)<0 one sided 'backwards' differences calculated
dp(j)=0 sets corresponding partials to zero; i.e. holds p(j) fixed
Default: 0.001;
p_min = n-vector of lower bounds for parameter values
p_max = n-vector of upper bounds for parameter values
c = an optional matrix of values passed to func(t,p,c)
opts = vector of algorithmic parameters
parameter defaults meaning
opts(1) = prnt 3 >1 intermediate results; >2 plots
opts(2) = MaxIter 10*Npar maximum number of iterations
opts(3) = epsilon_1 1e-3 convergence tolerance for gradient
opts(4) = epsilon_2 1e-3 convergence tolerance for parameters
opts(5) = epsilon_3 1e-3 convergence tolerance for Chi-square
opts(6) = epsilon_4 1e-2 determines acceptance of a L-M step
opts(7) = lambda_0 1e-2 initial value of L-M parameter
opts(8) = lambda_UP_fac 11 factor for increasing lambda
opts(9) = lambda_DN_fac 9 factor for decreasing lambda
opts(10) = Update_Type 1 1: Levenberg-Marquardt lambda update
2: Quadratic update
3: Nielsen's lambda update equations
---------- OUTPUT VARIABLES -----------
p = least-squares optimal estimate of the parameter values
X2 = Chi squared criteria
sigma_p = asymptotic standard error of the parameters
sigma_y = asymptotic standard error of the curve-fit
corr = correlation matrix of the parameters
R_sq = R-squared coefficient of multiple determination
cvg_hst = convergence history
Henri Gavin, Dept. Civil & Environ. Engineering, Duke Univ. 22 Sep 2013
modified from: http://octave.sourceforge.net/optim/function/leasqr.html
using references by
Press, et al., Numerical Recipes, Cambridge Univ. Press, 1992, Chapter 15.
Sam Roweis http://www.cs.toronto.edu/~roweis/notes/lm.pdf
Manolis Lourakis http://www.ics.forth.gr/~lourakis/levmar/levmar.pdf
Hans Nielson http://www2.imm.dtu.dk/~hbn/publ/TR9905.ps
Mathworks optimization toolbox reference manual
K. Madsen, H.B., Nielsen, and O. Tingleff
http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
*/
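/* Usage sketch (illustrative, not part of the library): fit y = a*exp(-t/tau)
   to noisy data. The model function, data and values below are hypothetical;
   optimize() expects p, t and y_dat as column vectors, i.e. arrays of
   one-element arrays, and the model must return the same shape as y_dat.

   var LM = require('./LM');
   function expDecay(t, p, c) { // y_hat = p0 * exp(-t/p1)
     var y = new Array(t.length);
     for (var i = 0; i < t.length; i++) {
       y[i] = [p[0][0] * Math.exp(-t[i][0] / p[1][0])];
     }
     return y;
   }
   var t = [], y_dat = [];
   for (var i = 0; i < 25; i++) {
     t.push([i]);
     y_dat.push([5 * Math.exp(-i / 7) + 0.05 * (Math.random() - 0.5)]);
   }
   var p0 = [[3], [10]]; // initial guess for [amplitude, time constant]
   var p_fit = LM.optimize(expDecay, p0, t, y_dat); // weight, dp, bounds, opts take defaults
   console.log(p_fit); // expected to approach [[5], [7]]
*/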
var LM = {
optimize: function(func,p,t,y_dat,weight,dp,p_min,p_max,c,opts){
var tensor_parameter = 0; // set to 1 if the parameter is a tensor
var iteration = 0; // iteration counter
//func_calls = 0; // running count of function evaluations
if((typeof p[0])!="object"){
for(var i=0;i< p.length;i++){
p[i]=[p[i]];
}
}
//p = p(:); y_dat = y_dat(:); // make column vectors
var eps = Math.pow(2, -52); // machine epsilon ('^' is XOR in JS, not a power operator)
var Npar = p.length;//length(p); // number of parameters
var Npnt = y_dat.length;//length(y_dat); // number of data points
var p_old = math.zeros(Npar,1); // previous set of parameters
var y_old = math.zeros(Npnt,1); // previous model, y_old = y_hat(t;p_old)
var X2 = 1e-2/eps; // a really big initial Chi-sq value
var X2_old = 1e-2/eps; // a really big initial Chi-sq value
var J = math.zeros(Npnt,Npar);
/*var J = new Array(Npnt);//zeros(Npnt,Npar); // Jacobian matrix
for(var i=0;i<Npnt;i++){
J[i] = new Array(Npar);
}*/
if (t.length != y_dat.length) {
console.log('lm.m error: the length of t must equal the length of y_dat');
var length_t = t.length;
var length_y_dat = y_dat.length;
var X2 = 0, corr = 0, sigma_p = 0, sigma_y = 0, R_sq = 0, cvg_hist = 0;
if (!tensor_parameter) {
return;
}
}
weight = weight || [Math.sqrt((Npnt-Npar+1)/(math.multiply(math.transpose(y_dat),y_dat)))]; // default scalar weight, wrapped so weight[0] exists below
dp = dp || 0.001;
p_min = p_min || math.multiply(math.abs(p), -100); // math.abs handles matrices; Math.abs does not
p_max = p_max || math.multiply(math.abs(p), 100);
c = c || 1;
// Algorithmic Parameters
//prnt MaxIter eps1 eps2 eps3 eps4 lam0 lamUP lamDN UpdateType
opts = opts ||[ 3,10*Npar, 1e-3, 1e-3, 1e-3, 1e-2, 1e-2, 11, 9, 1 ];
var prnt = opts[0]; // >1 intermediate results; >2 plots
var MaxIter = opts[1]; // maximum number of iterations
var epsilon_1 = opts[2]; // convergence tolerance for gradient
var epsilon_2 = opts[3]; // convergence tolerance for parameter
var epsilon_3 = opts[4]; // convergence tolerance for Chi-square
var epsilon_4 = opts[5]; // determines acceptance of a L-M step
var lambda_0 = opts[6]; // initial value of damping parameter, lambda
var lambda_UP_fac = opts[7]; // factor for increasing lambda
var lambda_DN_fac = opts[8]; // factor for decreasing lambda
var Update_Type = opts[9]; // 1: Levenberg-Marquardt lambda update
// 2: Quadratic update
// 3: Nielsen's lambda update equations
if ( tensor_parameter && prnt == 3 ) prnt = 2;
//plotcmd='figure(11); plot(t(:,1),y_dat,''og'',t(:,1),y_hat,''-b''); axis tight; drawnow ';
//p_min=p_min(:); p_max=p_max(:); // make column vectors
if(!dp.length || dp.length == 1){
var dp_array = new Array(Npar);
for(var i=0;i<Npar;i++)
dp_array[i]=[dp];
dp=dp_array;
}
// indices of the parameters to be fit
var idx = [];
for(var i=0;i<dp.length;i++){
if(dp[i]!=0){
idx.push(i);
}
}
var Nfit = idx.length; // number of parameters to fit
var stop = false; // termination flag
var weight_sq = null;
//console.log(weight);
if ( !weight.length || weight.length < Npnt ) {
// squared weighting vector from a scalar (or single-element) weight
//weight_sq = ( weight(1)*ones(Npnt,1) ).^2;
var w0 = weight.length ? weight[0] : weight;
var tmp = math.multiply(math.ones(Npnt,1), w0);
weight_sq = math.dotMultiply(tmp,tmp);
}
else{
//weight_sq = (weight(:)).^2;
weight_sq = math.dotMultiply(weight,weight);
}
// initialize Jacobian with finite difference calculation
//console.log("J "+weight_sq);
var result = this.lm_matx(func,t,p_old,y_old,1,J,p,y_dat,weight_sq,dp,c,iteration);
var JtWJ = result.JtWJ,JtWdy=result.JtWdy,X2=result.Chi_sq,y_hat=result.y_hat,J=result.J;
//[JtWJ,JtWdy,X2,y_hat,J] = this.lm_matx(func,t,p_old,y_old,1,J,p,y_dat,weight_sq,dp,c);
//console.log(JtWJ);
if ( math.max(math.abs(JtWdy)) < epsilon_1 ){ // math.max/math.abs handle matrices; Math.max/Math.abs do not
console.log(' *** Your Initial Guess is Extremely Close to Optimal ***');
console.log(' *** epsilon_1 = ', epsilon_1);
stop = true;
}
var lambda, nu;
switch(Update_Type){
case 1: // Marquardt: initial lambda
lambda = lambda_0;
break;
default: // Quadratic and Nielsen
lambda = lambda_0 * math.max(math.diag(JtWJ));
nu = 2;
}
//console.log(X2);
X2_old = X2; // previous value of X2
//console.log(MaxIter+" "+Npar);
var cvg_hst = math.ones(MaxIter,Npar+3); // initialize convergence history
var h = null;
while ( !stop && iteration <= MaxIter ) { // --- Main Loop
iteration = iteration + 1;
// incremental change in parameters
switch(Update_Type){
case 1: // Marquardt
//h = ( JtWJ + lambda * math.diag(math.diag(JtWJ)) ) \ JtWdy;
//h = math.multiply(math.inv(JtWdy),math.add(JtWJ,math.multiply(lambda,math.diag(math.diag(Npar)))));
h = math.solve(math.add(JtWJ,math.multiply(lambda,math.diag(math.diag(JtWJ)))),JtWdy);
break;
default: // Quadratic and Nielsen
//h = ( JtWJ + lambda * math.eye(Npar) ) \ JtWdy;
h = math.solve(math.add(JtWJ,math.multiply(lambda,math.eye(Npar))),JtWdy);
}
for(var k=0;k< h.length;k++){
h[k]=[h[k]];
}
//console.log("h "+h);
//h=math.matrix(h);
// big = max(abs(h./p)) > 2;
//this is a big step
// --- Are parameters [p+h] much better than [p] ?
var hidx = new Array(idx.length);
for(var k=0;k<idx.length;k++){
hidx[k]=h[idx[k]];
}
var p_try = math.add(p, hidx);// update the [idx] elements
//console.log(p_try);
for(var k=0;k<p_try.length;k++){
p_try[k][0]=Math.min(Math.max(p_min[k][0],p_try[k][0]),p_max[k][0]);
}
// p_try = Math.min(Math.max(p_min,p_try),p_max); // apply constraints
var delta_y = math.subtract(y_dat, func(t,p_try,c)); // residual error using p_try
//func_calls = func_calls + 1;
//X2_try = delta_y' * ( delta_y .* weight_sq ); // Chi-squared error criteria
var X2_try = math.multiply(math.transpose(delta_y),math.dotMultiply(delta_y,weight_sq));
if ( Update_Type == 2 ){ // Quadratic
// One step of quadratic line update in the h direction for minimum X2
//var alpha = JtWdy'*h / ( (X2_try - X2)/2 + 2*JtWdy'*h ) ;
var JtWdy_th = math.multiply(math.transpose(JtWdy),h);
// alpha = JtWdy'*h / ( (X2_try - X2)/2 + 2*JtWdy'*h )
var alpha = math.multiply(JtWdy_th, math.inv(math.add(math.multiply(math.subtract(X2_try, X2), 1/2), math.multiply(JtWdy_th, 2))));
h = math.multiply(alpha, h);
for(var k=0;k<idx.length;k++){
hidx[k]=h[idx[k]];
}
p_try = math.add(p, hidx); // update only [idx] elements
for(var k=0;k<p_try.length;k++){ // apply constraints element-wise
p_try[k][0]=Math.min(Math.max(p_min[k][0],p_try[k][0]),p_max[k][0]);
}
delta_y = math.subtract(y_dat, func(t,p_try,c)); // residual error using p_try
// func_calls = func_calls + 1;
//X2_try = delta_y' * ( delta_y .* weight_sq ); // Chi-squared error criteria
X2_try = math.multiply(math.transpose(delta_y), math.dotMultiply(delta_y, weight_sq));
}
//rho = (X2 - X2_try) / ( 2*h' * (lambda * h + JtWdy) ); // Nielsen
var sub = math.multiply(math.multiply(math.transpose(h),2),math.add(math.multiply(lambda, h),JtWdy));
//console.log(X2+" X "+X2_try);
var rho = math.multiply(math.subtract(X2, X2_try),math.inv(sub));
//console.log("rho "+rho);
if ( rho > epsilon_4 ) { // it IS significantly better
var dX2 = math.subtract(X2, X2_old);
X2_old = X2;
p_old = p;
y_old = y_hat;
p = p_try; // accept p_try
result = this.lm_matx(func, t, p_old, y_old, dX2, J, p, y_dat, weight_sq, dp, c, iteration);
JtWJ = result.JtWJ, JtWdy = result.JtWdy, X2 = result.Chi_sq, y_hat = result.y_hat, J = result.J;
// decrease lambda ==> Gauss-Newton method
switch (Update_Type) {
case 1: // Levenberg
lambda = Math.max(lambda / lambda_DN_fac, 1.e-7);
break;
case 2: // Quadratic
lambda = Math.max(lambda / (1 + alpha), 1.e-7);
break;
case 3: // Nielsen
lambda = math.multiply(Math.max(1 / 3, 1 - Math.pow(2 * rho - 1, 3)), lambda); // '^' is XOR, not a power
nu = 2;
break;
}
}
else { // it IS NOT better
X2 = X2_old; // do not accept p_try
if (iteration%(2 * Npar)==0) { // rank-1 update of Jacobian
result = this.lm_matx(func, t, p_old, y_old, -1, J, p, y_dat, weight_sq, dp, c, iteration);
JtWJ = result.JtWJ,JtWdy=result.JtWdy,dX2=result.Chi_sq,y_hat=result.y_hat,J=result.J;
}
// increase lambda ==> gradient descent method
switch (Update_Type) {
case 1: // Levenberg
lambda = Math.min(lambda * lambda_UP_fac, 1.e7);
break;
case 2: // Quadratic
lambda = lambda + math.abs((X2_try - X2) / 2 / alpha);
break;
case 3: // Nielsen
lambda = lambda * nu;
nu = 2 * nu;
break;
}
if (DEBUG) {
console.log('>' + iteration + ' | chi_sq=' + X2 + ' | lambda=' + lambda);
}
}
// cvg_hst(iteration,:) = [ func_calls p' X2/2 lambda ]; // convergence history (not ported)
// convergence tests, ported from the commented MATLAB original
if ( math.max(math.abs(JtWdy)) < epsilon_1 && iteration > 2 ) {
console.log(' **** Convergence in r.h.s. ("JtWdy") ****');
console.log(' **** epsilon_1 = ' + epsilon_1);
stop = true;
}
if ( math.max(math.abs(math.dotDivide(h, p))) < epsilon_2 && iteration > 2 ) {
console.log(' **** Convergence in Parameters ****');
console.log(' **** epsilon_2 = ' + epsilon_2);
stop = true;
}
if ( X2 / (Npnt - Npar + 1) < epsilon_3 && iteration > 2 ) {
console.log(' **** Convergence in Chi-square ****');
console.log(' **** epsilon_3 = ' + epsilon_3);
stop = true;
}
if ( iteration == MaxIter ) {
console.log(' !! Maximum Number of Iterations Reached Without Convergence !!');
stop = true;
}
}// --- End of Main Loop
// --- convergence achieved, find covariance and confidence intervals
// equal weights for parameter error analysis
weight_sq = math.multiply(math.multiply(math.transpose(delta_y),delta_y), math.ones(Npnt,1));
weight_sq = math.map(weight_sq,function(value){ // math.map returns a new matrix; assign the result
return (Npnt-Nfit+1)/value;
});
result = this.lm_matx(func,t,p_old,y_old,-1,J,p,y_dat,weight_sq,dp,c,iteration);
JtWJ = result.JtWJ,JtWdy=result.JtWdy,X2=result.Chi_sq,y_hat=result.y_hat,J=result.J;
/*if nargout > 2 // standard error of parameters
covar = inv(JtWJ);
sigma_p = sqrt(diag(covar));
end
if nargout > 3 // standard error of the fit
// sigma_y = sqrt(diag(J * covar * J')); // slower version of below
sigma_y = zeros(Npnt,1);
for i=1:Npnt
sigma_y(i) = J(i,:) * covar * J(i,:)';
end
sigma_y = sqrt(sigma_y);
end
if nargout > 4 // parameter correlation matrix
corr = covar ./ [sigma_p*sigma_p'];
end
if nargout > 5 // coefficient of multiple determination
R_sq = corrcoef([y_dat y_hat]);
R_sq = R_sq(1,2).^2;
end
if nargout > 6 // convergence history
cvg_hst = cvg_hst(1:iteration,:);
end*/
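/* A possible JS port of the first two blocks above (a sketch only; not
   wired into the return value). math.inv and math.diag are already used
   elsewhere in this file, and mathjs applies math.sqrt element-wise:
   var covar = math.inv(JtWJ); // covariance matrix of the parameters
   var sigma_p = math.sqrt(math.diag(covar)); // standard error of each parameter
*/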
// endfunction # ---------------------------------------------------------- LM
return p; // only the fitted parameters are returned; X2, sigma_p, sigma_y, corr, R_sq remain in the commented block above
},
lm_FD_J:function(func,t,p,y,dp,c) {
// J = lm_FD_J(func,t,p,y,{dp},{c})
//
// partial derivatives (Jacobian) dy/dp for use with lm.m
// computed via Finite Differences
// Requires n or 2n function evaluations, n = number of nonzero values of dp
// -------- INPUT VARIABLES ---------
// func = function of independent variables, 't', and parameters, 'p',
// returning the simulated model: y_hat = func(t,p,c)
// t = m-vector of independent variables (used as arg to func)
// p = n-vector of current parameter values
// y = func(t,p,c) m-vector initialised by user before each call to lm_FD_J
// dp = fractional increment of p for numerical derivatives
// dp(j)>0 central differences calculated
// dp(j)<0 one sided differences calculated
// dp(j)=0 sets corresponding partials to zero; i.e. holds p(j) fixed
// Default: 0.001;
// c = optional vector of constants passed to y_hat = func(t,p,c)
//---------- OUTPUT VARIABLES -------
// J = Jacobian Matrix J(i,j)=dy(i)/dp(j) i=1:m; j=1:n
// Henri Gavin, Dept. Civil & Environ. Engineering, Duke Univ. November 2005
// modified from: ftp://fly.cnuce.cnr.it/pub/software/octave/leasqr/
// Press, et al., Numerical Recipes, Cambridge Univ. Press, 1992, Chapter 15.
var m = y.length; // number of data points
var n = p.length; // number of parameters
if (!dp) { // default fractional increment as a plain column array (matches dp[j][0] indexing below)
dp = new Array(n);
for (var j0 = 0; j0 < n; j0++) dp[j0] = [0.001];
}
var ps = JSON.parse(JSON.stringify(p));
//var ps = $.extend(true, [], p);
var J = new Array(m), del =new Array(n); // initialize Jacobian to Zero
for(var k=0;k<m;k++)
J[k]=new Array(n);
for (var j = 0;j < n; j++) {
//console.log(j+" "+dp[j]+" "+p[j]+" "+ps[j]+" "+del[j]);
del[j] = math.multiply(dp[j], math.add(1, math.abs(p[j]))); // parameter perturbation
p[j] = math.add(ps[j], del[j]); // perturb parameter p(j)
//console.log(j+" "+dp[j]+" "+p[j]+" "+ps[j]+" "+del[j]);
if (del[j] != 0){
var y1 = func(t, p, c); // model at perturbed p(j)
//func_calls = func_calls + 1;
if (dp[j][0] < 0) { // backwards difference
//J(:,j) = math.dotDivide(math.subtract(y1, y),del[j]);//. / del[j];
//console.log(del[j]);
//console.log(y);
var column = math.dotDivide(math.subtract(y1, y),del[j]);
for(var k=0;k< m;k++){
J[k][j]=column[k][0];
}
//console.log(column);
}
else{
p[j] = ps[j] - del[j];
//J(:,j) = (y1 - feval(func, t, p, c)). / (2. * del[j]);
var column = math.dotDivide(math.subtract(y1,func(t,p,c)),2*del[j]);
for(var k=0;k< m;k++){
J[k][j]=column[k][0];
}
} // central difference, additional func call
}
p[j] = ps[j]; // restore p(j)
}
//console.log("lm_FD_J: "+ JSON.stringify(J));
return J;
},
// endfunction # -------------------------------------------------- LM_FD_J
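/* Worked example of the central difference above (hand-sized, hypothetical):
   for the model y = p0*t^2 at t = 2 with p0 = 3 and dp = 0.001,
   del = 0.001*(1+|3|) = 0.004, and the Jacobian entry is
   J = ( y(p0+del) - y(p0-del) ) / (2*del) = ( 3.004*4 - 2.996*4 ) / 0.008 = 4,
   which matches dy/dp0 = t^2 = 4 exactly, since central differences are exact
   for models linear in the perturbed parameter. */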
lm_Broyden_J: function(p_old,y_old,J,p,y){
// J = lm_Broyden_J(p_old,y_old,J,p,y)
// carry out a rank-1 update to the Jacobian matrix using Broyden's equation
//---------- INPUT VARIABLES -------
// p_old = previous set of parameters
// y_old = model evaluation at previous set of parameters, y_hat(t;p_old)
// J = current version of the Jacobian matrix
// p = current set of parameters
// y = model evaluation at current set of parameters, y_hat(t;p)
//---------- OUTPUT VARIABLES -------
// J = rank-1 update to Jacobian Matrix J(i,j)=dy(i)/dp(j) i=1:m; j=1:n
//console.log(p+" X "+ p_old)
var h = math.subtract(p, p_old);
//console.log("hhh "+h);
var h_t = math.transpose(h);
var hh = math.multiply(h_t,h);
var hhh = math.map(h_t,function(value){
return value / hh;
});
//J = J + ( y - y_old - J*h )*h' / (h'*h); // Broyden rank-1 update eq'n
J = math.add(J, math.multiply(math.subtract(y, math.add(y_old,math.multiply(J,h))),hhh));
return J;
// endfunction # ---------------------------------------------- LM_Broyden_J
},
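/* Sanity check for the update above (a property of Broyden's formula, not
   extra library code): after the assignment, J*h equals y - y_old exactly,
   i.e. the updated Jacobian satisfies the secant condition
   J_new*(p - p_old) = y_hat(t;p) - y_hat(t;p_old). */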
lm_matx : function (func,t,p_old,y_old,dX2,J,p,y_dat,weight_sq,dp,c,iteration){
// [JtWJ,JtWdy,Chi_sq,y_hat,J] = this.lm_matx(func,t,p_old,y_old,dX2,J,p,y_dat,weight_sq,{dp},{c},iteration)
//
// Evaluate the linearized fitting matrix, JtWJ, and vector JtWdy,
// and calculate the Chi-squared error function, Chi_sq
// Used by Levenberg-Marquardt algorithm, lm.m
// -------- INPUT VARIABLES ---------
// func = function of the independent variables, 't', and the parameters, 'p',
// returning the simulated model: y_hat = func(t,p,c)
// t = m-vectors or matrix of independent variables (used as arg to func)
// p_old = n-vector of previous parameter values
// y_old = m-vector of previous model ... y_old = y_hat(t;p_old);
// dX2 = previous change in Chi-squared criteria
// J = m-by-n Jacobian of model, y_hat, with respect to parameters, p
// p = n-vector of current parameter values
// y_dat = m-vector of data to be fit by func(t,p,c)
// weight_sq = square of the weighting vector for least squares fit ...
// inverse of the standard measurement errors
// dp = fractional increment of 'p' for numerical derivatives
// dp(j)>0 central differences calculated
// dp(j)<0 one sided differences calculated
// dp(j)=0 sets corresponding partials to zero; i.e. holds p(j) fixed
// Default: 0.001;
// c = optional vector of constants passed to y_hat = func(t,p,c)
//---------- OUTPUT VARIABLES -------
// JtWJ = linearized Hessian matrix (inverse of covariance matrix)
// JtWdy = linearized fitting vector
// Chi_sq = Chi-squared criteria: weighted sum of the squared residuals WSSR
// y_hat = model evaluated with parameters 'p'
// J = m-by-n Jacobian of model, y_hat, with respect to parameters, p
// Henri Gavin, Dept. Civil & Environ. Engineering, Duke Univ. November 2005
// modified from: ftp://fly.cnuce.cnr.it/pub/software/octave/leasqr/
// Press, et al., Numerical Recipes, Cambridge Univ. Press, 1992, Chapter 15.
var Npnt = y_dat.length; // number of data points
var Npar = p.length; // number of parameters
dp = dp || 0.001;
var y_hat = func(t,p,c); // evaluate model using parameters 'p'
//console.log(p);
//func_calls = func_calls + 1;
if ( (iteration%(2*Npar))==0 || dX2 > 0 ) {
//console.log("Par");
J = this.lm_FD_J(func, t, p, y_hat, dp, c); // finite difference
}
else{
//console.log("ImPar");
J = this.lm_Broyden_J(p_old, y_old, J, p, y_hat); // rank-1 update
}
//console.log("J "+J);
var delta_y = math.subtract(y_dat, y_hat); // residual error between model and data
//console.log(delta_y[0][0]);
//console.log(weight_sq);
//var Chi_sq = delta_y' * ( delta_y .* weight_sq ); // Chi-squared error criteria
var Chi_sq = math.multiply(math.transpose(delta_y),math.dotMultiply(delta_y,weight_sq));
//JtWJ = J' * ( J .* ( weight_sq * ones(1,Npar) ) );
var Jt = math.transpose(J);
//console.log("J "+J);
//console.log(weight_sq);
var JtWJ = math.multiply(Jt,math.dotMultiply(J,math.multiply(weight_sq,math.ones(1,Npar))));
//console.log(math.multiply(weight_sq,math.ones(1,Npar)));
//JtWdy = J' * ( weight_sq .* delta_y );
var JtWdy = math.multiply(Jt,math.dotMultiply(weight_sq,delta_y));
return {JtWJ:JtWJ,JtWdy:JtWdy,Chi_sq:Chi_sq,y_hat:y_hat,J:J};
// endfunction # ------------------------------------------------------ LM_MATX
}
};
module.exports = LM;