Skip to content

Commit

Permalink
Merge pull request #148 from PiaLJP/feature/correlated_combined_fit
Browse files Browse the repository at this point in the history
Feature/correlated combined fit
  • Loading branch information
fjosw authored Feb 3, 2023
2 parents ae68e5b + b0b5d88 commit 3ca7958
Show file tree
Hide file tree
Showing 2 changed files with 128 additions and 26 deletions.
99 changes: 73 additions & 26 deletions pyerrors/fits.py
Original file line number Diff line number Diff line change
Expand Up @@ -705,9 +705,6 @@ def chisqfunc_compact(d):

def _combined_fit(x, y, func, silent=False, **kwargs):

if kwargs.get('correlated_fit') is True:
raise Exception("Correlated fit has not been implemented yet")

output = Fit_result()
output.fit_function = func

Expand Down Expand Up @@ -735,6 +732,9 @@ def _combined_fit(x, y, func, silent=False, **kwargs):
if len(x_all.shape) > 2:
raise Exception('Unknown format for x values')

if np.any(np.asarray(dy_f) <= 0.0):
raise Exception('No y errors available, run the gamma method first.')

# number of fit parameters
n_parms_ls = []
for key in key_ls:
Expand Down Expand Up @@ -766,6 +766,22 @@ def _combined_fit(x, y, func, silent=False, **kwargs):
else:
x0 = [0.1] * n_parms

if kwargs.get('correlated_fit') is True:
    # Generalized least squares: build the whitening matrix
    # chol_inv = L^{-1} @ W from the Cholesky factorization corr = L L^T and
    # W = diag(1 / dy_f), so that chisq = || chol_inv @ (y - model) ||^2.
    corr = covariance(y_all, correlation=True, **kwargs)
    covdiag = np.diag(1 / np.asarray(dy_f))
    condn = np.linalg.cond(corr)
    if condn > 0.1 / np.finfo(float).eps:
        raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
    if condn > 1e13:
        # Fix: the original message mixed str.format braces with %-style
        # formatting ("{%1.2e}" % condn), which printed literal braces
        # around the number. Use an f-string like the raise above.
        warnings.warn(f"Correlation matrix may be ill-conditioned, condition number: {condn:1.2e}", RuntimeWarning)
    chol = np.linalg.cholesky(corr)
    chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)

    def chisqfunc_corr(p):
        # Correlated chi^2 of the combined fit for parameter vector p:
        # residuals over all keys are whitened with chol_inv before squaring.
        model = np.concatenate([np.array(func[key](p, np.asarray(x[key]))) for key in key_ls])
        chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
        return chisq

def chisqfunc(p):
func_list = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
model = anp.array([func_list[i](p, x_all[i]) for i in range(len(x_all))])
Expand All @@ -782,30 +798,46 @@ def chisqfunc(p):
if 'tol' in kwargs:
tolerance = kwargs.get('tol')
fit_result = iminuit.minimize(chisqfunc, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef
if kwargs.get('correlated_fit') is True:
fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=tolerance)
output.iterations = fit_result.nfev
else:
tolerance = 1e-12
if 'tol' in kwargs:
tolerance = kwargs.get('tol')
fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=tolerance)
if kwargs.get('correlated_fit') is True:
fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=tolerance)
output.iterations = fit_result.nit

chisquare = fit_result.fun

else:
if kwargs.get('correlated_fit') is True:
def chisqfunc_residuals_corr(p):
    # Whitened residual vector for the Levenberg-Marquardt driver of the
    # correlated fit; scipy.optimize.least_squares squares and sums these.
    parts = [np.array(func[key](p, np.asarray(x[key]))) for key in key_ls]
    model = np.concatenate(parts)
    return anp.dot(chol_inv, y_f - model)

def chisqfunc_residuals(p):
    # Residual vector scaled by the diagonal errors, for the
    # Levenberg-Marquardt driver of the uncorrelated fit.
    parts = [np.array(func[key](p, np.asarray(x[key]))) for key in key_ls]
    model = np.concatenate(parts)
    return (y_f - model) / dy_f

if 'tol' in kwargs:
print('tol cannot be set for Levenberg-Marquardt')

fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
if kwargs.get('correlated_fit') is True:
fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)

chisquare = np.sum(fit_result.fun ** 2)
assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
output.iterations = fit_result.nfev
if kwargs.get('correlated_fit') is True:
assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14)
else:
assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)

output.message = fit_result.message
output.iterations = fit_result.nfev

if not fit_result.success:
raise Exception('The minimization procedure did not converge.')
Expand All @@ -818,17 +850,12 @@ def chisqfunc_residuals(p):
else:
output.chisquare_by_dof = float('nan')

output.message = fit_result.message
if not silent:
print(fit_result.message)
print('chisquare/d.o.f.:', output.chisquare_by_dof)
print('fit parameters', fit_result.x)

def chisqfunc_compact(d):
    # Chi^2 as a function of the stacked vector d = (fit parameters, y values);
    # its Hessian is used to propagate the y uncertainties through the fit.
    params, y_vals = d[:n_parms], d[n_parms:]
    flat_funcs = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
    model = anp.array([f(params, xi) for f, xi in zip(flat_funcs, x_all)])
    return anp.sum(((y_vals - model) / dy_f) ** 2)

def prepare_hat_matrix():
hat_vector = []
for key in key_ls:
Expand All @@ -838,16 +865,43 @@ def prepare_hat_matrix():
hat_vector = [item for sublist in hat_vector for item in sublist]
return hat_vector

fitp = fit_result.x
if kwargs.get('expected_chisquare') is True:
if kwargs.get('correlated_fit') is not True:
W = np.diag(1 / np.asarray(dy_f))
cov = covariance(y_all)
hat_vector = prepare_hat_matrix()
A = W @ hat_vector # hat_vector = 'jacobian(func)(fit_result.x, x)'
P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
if not silent:
print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)

fitp = fit_result.x
if np.any(np.asarray(dy_f) <= 0.0):
raise Exception('No y errors available, run the gamma method first.')

try:
hess = hessian(chisqfunc)(fitp)
if kwargs.get('correlated_fit') is True:
hess = hessian(chisqfunc_corr)(fitp)
else:
hess = hessian(chisqfunc)(fitp)
except TypeError:
raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

if kwargs.get('correlated_fit') is True:
    def chisqfunc_compact(d):
        # Correlated variant of the stacked chi^2, d = (fit parameters,
        # y values): residuals are whitened with chol_inv before squaring.
        params, y_vals = d[:n_parms], d[n_parms:]
        flat_funcs = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
        model = anp.array([f(params, xi) for f, xi in zip(flat_funcs, x_all)])
        return anp.sum(anp.dot(chol_inv, y_vals - model) ** 2)
else:
    def chisqfunc_compact(d):
        # Uncorrelated variant: residuals are scaled by the diagonal errors.
        params, y_vals = d[:n_parms], d[n_parms:]
        flat_funcs = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
        model = anp.array([f(params, xi) for f, xi in zip(flat_funcs, x_all)])
        return anp.sum(((y_vals - model) / dy_f) ** 2)

jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f)))

# Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
Expand All @@ -856,24 +910,17 @@ def prepare_hat_matrix():
except np.linalg.LinAlgError:
raise Exception("Cannot invert hessian matrix.")

if kwargs.get('expected_chisquare') is True:
if kwargs.get('correlated_fit') is not True:
W = np.diag(1 / np.asarray(dy_f))
cov = covariance(y_all)
hat_vector = prepare_hat_matrix()
A = W @ hat_vector # hat_vector = 'jacobian(func)(fit_result.x, x)'
P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
if not silent:
print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)

result = []
for i in range(n_parms):
result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all), man_grad=list(deriv_y[i])))

output.fit_parameters = result

if kwargs.get('correlated_fit') is True:
n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
output.dof, n_cov - output.dof)

return output


Expand Down
55 changes: 55 additions & 0 deletions tests/fits_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -703,6 +703,8 @@ def func_valid(a,x):
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
with pytest.raises(Exception):
pe.least_squares({'a':xvals}, {'b':yvals}, {'a':func_valid})
with pytest.raises(Exception):
pe.least_squares({'a':xvals}, {'a':yvals}, {'a':func_valid})

def test_combined_fit_no_autograd():

Expand Down Expand Up @@ -833,6 +835,59 @@ def func_auto_b(a,x):
assert(no_order_x_y[0] == order[0])
assert(no_order_x_y[1] == order[1])

def test_correlated_combined_fit_vs_correlated_standard_fit():
    """A correlated combined fit over two keys must agree with the equivalent
    correlated standard fit on the concatenated data, for every minimizer."""
    x_const = {'a': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'b': np.arange(10, 20)}
    y_const = {'a': [pe.Obs([np.random.normal(1, val, 1000)], ['ensemble1'])
                     for val in [0.25, 0.3, 0.01, 0.2, 0.5, 1.3, 0.26, 0.4, 0.1, 1.0]],
               'b': [pe.Obs([np.random.normal(1, val, 1000)], ['ensemble1'])
                     for val in [0.5, 1.12, 0.26, 0.25, 0.3, 0.01, 0.2, 1.0, 0.38, 0.1]]}
    for key in y_const.keys():
        [item.gamma_method() for item in y_const[key]]
    y_const_ls = np.concatenate([np.array(o) for o in y_const.values()])
    x_const_ls = np.arange(0, 20)

    def func_const(a, x):
        return 0 * x + a[0]

    funcs_const = {"a": func_const, "b": func_const}
    for method_kw in ['Levenberg-Marquardt', 'migrad', 'Powell', 'Nelder-Mead']:
        res = []
        res.append(pe.fits.least_squares(x_const, y_const, funcs_const, method=method_kw, correlated_fit=True))
        res.append(pe.fits.least_squares(x_const_ls, y_const_ls, func_const, method=method_kw, correlated_fit=True))
        # Bug fix: gamma_method was referenced without calling it
        # ("item.gamma_method"), so error propagation on the fit results
        # was silently never triggered.
        [item.gamma_method() for item in res]
        assert np.isclose(0.0, (res[0].chisquare_by_dof - res[1].chisquare_by_dof), 1e-14, 1e-8)
        assert np.isclose(0.0, (res[0].p_value - res[1].p_value), 1e-14, 1e-8)
        assert np.isclose(0.0, (res[0].t2_p_value - res[1].t2_p_value), 1e-14, 1e-8)
        assert (res[0][0] - res[1][0]).is_zero(atol=1e-8)

def test_combined_fit_hotelling_t():
    """The Hotelling t^2 p-value of a correlated combined fit should never
    fall below the plain chi^2 p-value."""
    xvals_b = np.arange(0, 6)
    xvals_a = np.arange(0, 8)

    # True curves used to generate synthetic data for the two keys.
    def true_curve_a(x):
        return 0.3 * np.exp(0.5 * x)

    def true_curve_b(x):
        return 0.3 * np.exp(0.8 * x)

    # Fit functions sharing the amplitude a[0] but with separate exponents.
    def fit_func_a(a, x):
        return a[0] * anp.exp(a[1] * x)

    def fit_func_b(a, x):
        return a[0] * anp.exp(a[2] * x)

    funcs = {'a': fit_func_a, 'b': fit_func_b}
    xs = {'a': xvals_a, 'b': xvals_b}
    yobs_a = [pe.Obs([np.random.normal(item, item * 1.5, 1000)], ['ensemble1']) for item in true_curve_a(xvals_a)]
    yobs_b = [pe.Obs([np.random.normal(item, item * 1.4, 1000)], ['ensemble1']) for item in true_curve_b(xvals_b)]
    ys = {'a': yobs_a, 'b': yobs_b}

    for key in funcs:
        for obs in ys[key]:
            obs.gamma_method()
    ft = pe.fits.least_squares(xs, ys, funcs, correlated_fit=True)
    assert ft.t2_p_value >= ft.p_value

def fit_general(x, y, func, silent=False, **kwargs):
"""Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
Expand Down

0 comments on commit 3ca7958

Please sign in to comment.