Corrected likelihood: Forgot non-constant K term when K=K(theta)
See article draft
fzeiser authored and fzeiser committed Mar 19, 2020
1 parent 9edb485 commit c8c0046
Showing 3 changed files with 34 additions and 27 deletions.
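
The gist of the correction: for Gaussian errors the likelihood is L = prod_i K_i * exp(-0.5*((y_i - mu_i(theta))/sigma_i(theta))**2) with normalization K_i = 1/(sqrt(2*pi)*sigma_i(theta)), so ln L = -0.5*chi2 - sum_i ln sigma_i(theta) + const. The sum over ln sigma_i may be dropped only when the sigma_i do not depend on the fit parameters theta; in the nld normalization below they do (the transform rescales the uncertainties), so the term has to be kept. A minimal sketch of the corrected form (generic, not ompy code):

import numpy as np

def gaussian_lnlike(y, mu, sigma):
    """ln L up to a theta-independent constant. The -sum(ln sigma)
    term comes from K = 1/(sqrt(2*pi)*sigma) and is non-constant
    whenever sigma depends on the fit parameters."""
    chi2 = np.sum(((y - mu) / sigma) ** 2)
    return -0.5 * chi2 - np.sum(np.log(sigma))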
ompy/normalizer_gsf.py (3 additions, 3 deletions)
@@ -545,13 +545,13 @@ def update(scale_low, shift_low, scale_high, shift_high):
             scale_high=self.model_high.scale_high,
             shift_high=self.model_high.shift_high)

-    def errfn(self):
-        """ Weighted chi2 """
+    def lnlike(self):
+        """ log likelihood """
         Gg = self.norm_pars.Gg[0]
         sigma_Gg = self.norm_pars.Gg[1]
         chi2 = (self.Gg_before_norm() - Gg)/(sigma_Gg)
         chi2 = chi2**2
-        return chi2
+        return -0.5*chi2

     def self_if_none(self, *args, **kwargs):
         """ wrapper for lib.self_if_none """
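
Here sigma_Gg is a fixed input from norm_pars, not a function of the fit parameters, so its ln(sigma) term is an additive constant and -0.5*chi2 already is the log likelihood up to that constant. A toy illustration (hypothetical numbers):

import numpy as np

sigma_Gg = 5.0                         # fixed input, not a fit parameter

def lnlike_Gg(Gg_model, Gg=60.0):
    chi2 = ((Gg_model - Gg) / sigma_Gg) ** 2
    # -np.log(sigma_Gg) would shift lnlike by the same constant for
    # every Gg_model, so it cannot move the maximum and may be dropped
    return -0.5 * chi2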
ompy/normalizer_nld.py (18 additions, 12 deletions)
@@ -252,9 +252,11 @@ def initial_guess(self, limit_low: Optional[Tuple[float, float]] = None,
         rel_uncertainty = self.norm_pars.D0[1]/self.norm_pars.D0[0]
         nldSn = np.array([nldSn, nldSn * rel_uncertainty])

+        def neglnlike(*args, **kwargs):
+            return - self.lnlike(*args, **kwargs)
         args = (nld_low, nld_high, discrete, self.model, self.norm_pars.Sn[0],
                 nldSn)
-        res = differential_evolution(self.errfn, bounds=bounds, args=args)
+        res = differential_evolution(neglnlike, bounds=bounds, args=args)

         LOG.info("DE results:\n%s", tt.to_string([res.x.tolist()],
                  header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]']))
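
scipy's differential_evolution is a minimizer, so the maximum-likelihood point is found by minimizing the negated log likelihood; that is the purpose of the neglnlike wrapper above. A self-contained toy version (assumed model and data, not the ompy call):

import numpy as np
from scipy.optimize import differential_evolution

def lnlike(x, E, y, sigma):
    mu = x[0] * np.exp(x[1] * E)       # toy model: A * exp(alpha * E)
    chi2 = np.sum(((y - mu) / sigma) ** 2)
    return -0.5 * chi2 - np.sum(np.log(sigma))

E = np.linspace(0, 2, 20)
y = 1.3 * np.exp(0.4 * E)              # synthetic, noise-free data
sigma = 0.05 * y

res = differential_evolution(lambda x, *args: -lnlike(x, *args),
                             bounds=[(0.1, 5), (-1, 1)],
                             args=(E, y, sigma))
print(res.x)                           # close to [1.3, 0.4]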
@@ -271,7 +273,7 @@ def optimize(self, num: int, args,
         Args:
             num (int): Loop number
-            args_nld (Iterable): Additional arguments for the nld errfn
+            args_nld (Iterable): Additional arguments for the nld lnlike
             guess (Dict[str, float]): The initial guess of the parameters

         Returns:
@@ -326,9 +328,7 @@ def prior(cube, ndim, nparams):
                 LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])

         def loglike(cube, ndim, nparams):
-            chi2 = self.errfn(cube, *args)
-            loglikelihood = -0.5 * chi2
-            return loglikelihood
+            return self.lnlike(cube, *args)

         self.multinest_path.mkdir(exist_ok=True)
         path = self.multinest_path / f"nld_norm_{num}_"
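
The prior/loglike pair follows pymultinest's callback convention: prior maps the unit hypercube onto the parameter ranges in place, and loglike returns ln L for one point. A skeletal, self-contained toy run (placeholder ranges and likelihood, not the ones used here):

import pymultinest

def prior(cube, ndim, nparams):
    cube[0] = cube[0] * 10             # e.g. A uniform in [0, 10]
    cube[1] = cube[1] * 4 - 2          # e.g. alpha uniform in [-2, 2]

def loglike(cube, ndim, nparams):
    # toy log likelihood; the real one is self.lnlike(cube, *args)
    return -0.5 * ((cube[0] - 1.0) ** 2 + (cube[1] - 0.5) ** 2)

# the output directory must exist, cf. the mkdir call above
pymultinest.run(loglike, prior, n_dims=2,
                outputfiles_basename="multinest/toy_",
                resume=False, verbose=True)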
@@ -502,11 +502,13 @@ def save(self, path: Optional[Union[str, Path]] = None,
         #self.res.save(path / f"nld_{num}")

     @staticmethod
-    def errfn(x: Tuple[float, float, float, float], nld_low: Vector,
-              nld_high: Vector, discrete: Vector,
-              model: Callable[..., ndarray],
-              Sn, nldSn) -> float:
-        """ Compute the χ² of the normalization fitting
+    def lnlike(x: Tuple[float, float, float, float], nld_low: Vector,
+               nld_high: Vector, discrete: Vector,
+               model: Callable[..., ndarray],
+               Sn, nldSn) -> float:
+        """ Compute log likelihood of the normalization fitting
+
+        This is the result up to a constant, which is irrelevant for the
+        maximization

         Args:
             x: The arguments ordered as A, alpha, T and Eshift
@@ -518,8 +520,9 @@ def errfn(x: Tuple[float, float, float, float], nld_low: Vector,
             model: The model to use when fitting the upper region.
                 Must support the keyword arguments
                 ``model(E=..., T=..., Eshift=...) -> ndarray``
+
         Returns:
-            chi2 (float): The χ² value
+            lnlike: log likelihood
         """
         A, alpha, T, Eshift = x[:4]  # slicing needed for multinest?
         transformed_low = nld_low.transform(A, alpha, inplace=False)
@@ -534,7 +537,10 @@ def errfn(x: Tuple[float, float, float, float], nld_low: Vector,
         nldSn_model = model(E=Sn, T=T, Eshift=Eshift)
         err_nldSn = ((nldSn[0] - nldSn_model)/nldSn[1])**2

-        return err_low + err_high + err_nldSn
+        ln_stds = (np.log(transformed_low.std).sum()
+                   + np.log(transformed_high.std).sum())
+
+        return -0.5*(err_low + err_high + err_nldSn + ln_stds)

     @staticmethod
     def const_temperature(E: ndarray, T: float, Eshift: float) -> ndarray:
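
The new ln_stds term is exactly the non-constant K(theta) from the commit message: the transform rescales the uncertainties roughly as std -> A*exp(alpha*E)*std, so sum(ln std) depends on A and alpha and is not a droppable constant. A toy check under that assumed scaling:

import numpy as np

E = np.array([1.0, 2.0, 3.0])
std = np.array([0.1, 0.2, 0.3])

def ln_stds(A, alpha):
    # uncertainties after the transform, assumed to scale like the values
    return np.sum(np.log(A * np.exp(alpha * E) * std))

print(ln_stds(1.0, 0.0) - ln_stds(2.0, 0.5))   # nonzero: varies with theta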
ompy/normalizer_simultan.py (13 additions, 12 deletions)
@@ -197,7 +197,7 @@ def optimize(self, num: int,
         Args:
             num (int): Loop number
-            args_nld (Iterable): Additional arguments for the nld errfn
+            args_nld (Iterable): Additional arguments for the nld lnlike
             guess (Dict[str, float]): The initial guess of the parameters

         Returns:
@@ -261,11 +261,9 @@ def prior(cube, ndim, nparams):
                 LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])

         def loglike(cube, ndim, nparams):
-            chi2 = self.errfn(cube, args_nld=args_nld)
-            loglikelihood = -0.5 * chi2
-            return loglikelihood
+            return self.lnlike(cube, args_nld=args_nld)

-        # parameters are changed in the errfn
+        # parameters are changed in the lnlike
         norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars)

         self.multinest_path.mkdir(exist_ok=True)
@@ -314,24 +312,27 @@ def loglike(cube, ndim, nparams):

         return popt, samples

-    def errfn(self, x: Tuple[float, float, float, float, float],
-              args_nld: Iterable) -> float:
-        """Compute the χ² of the normalization fitting
+    def lnlike(self, x: Tuple[float, float, float, float, float],
+               args_nld: Iterable) -> float:
+        """Compute log likelihood of the normalization fitting
+
+        This is the result up to a constant, which is irrelevant for the
+        maximization

         Args:
             x (Tuple[float, float, float, float, float]): The arguments
                 ordered as A, alpha, T and Eshift, B
-            args_nld (TYPE): Additional arguments for the nld errfn
+            args_nld (TYPE): Additional arguments for the nld lnlike

         Returns:
-            chi2 (float): The χ² value
+            lnlike: log likelihood
         """
         A, alpha, T, Eshift, B = x[:5]  # slicing needed for multinest?

         normalizer_gsf = self.normalizer_gsf
         normalizer_nld = self.normalizer_nld

-        err_nld = normalizer_nld.errfn(x[:4], *args_nld)
+        err_nld = normalizer_nld.lnlike(x[:4], *args_nld)

         nld = normalizer_nld.nld.transform(A, alpha, inplace=False)
         nld_model = lambda E: normalizer_nld.model(E, T=T, Eshift=Eshift)  # noqa
@@ -346,7 +347,7 @@ def errfn(self, x: Tuple[float, float, float, float, float],
                                              inplace=False)
         normalizer_gsf._gsf_low, normalizer_gsf._gsf_high = \
             normalizer_gsf.extrapolate()
-        err_gsf = normalizer_gsf.errfn()
+        err_gsf = normalizer_gsf.lnlike()
         return err_nld + err_gsf

     def plot(self, ax: Optional[Any] = None, add_label: bool = True,
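
The final return relies on the nld and gsf data sets being independent, so the joint likelihood factorizes and the log likelihoods add. Schematically (toy Gaussian terms, not the ompy objects):

import numpy as np

def gauss_lnlike(y, mu, sigma):
    return np.sum(-0.5 * ((y - mu) / sigma) ** 2 - np.log(sigma))

# ln L_joint = ln L_nld + ln L_gsf, the pattern of err_nld + err_gsf above
lnlike_joint = (gauss_lnlike(np.array([1.0, 1.2]), 1.1, 0.1)
                + gauss_lnlike(np.array([60.0]), 58.0, 5.0))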
