Merge pull request #8 from sibirrer/sne_sampling
Sne sampling
sibirrer committed Sep 13, 2020
2 parents b0b6388 + fe9f6b4 commit 3fdb30a
Showing 21 changed files with 418 additions and 114 deletions.
15 changes: 13 additions & 2 deletions hierarc/Likelihood/LensLikelihood/base_lens_likelihood.py
@@ -2,7 +2,7 @@


LIKELIHOOD_TYPES = ['DdtGaussian', 'DdtDdKDE', 'DdtDdGaussian', 'DsDdsGaussian', 'DdtLogNorm', 'IFUKinCov', 'DdtHist',
'DdtHistKDE', 'DdtHistKin', 'DdtGaussKin']
'DdtHistKDE', 'DdtHistKin', 'DdtGaussKin', 'Mag', 'TDMag']


class LensLikelihoodBase(object):
@@ -53,6 +53,12 @@ def __init__(self, z_lens, z_source, likelihood_type, name='name', **kwargs_like
elif likelihood_type == 'DdtGaussKin':
from hierarc.Likelihood.LensLikelihood.ddt_gauss_kin_likelihood import DdtGaussKinLikelihood
self._lens_type = DdtGaussKinLikelihood(z_lens, z_source, **kwargs_likelihood)
elif likelihood_type == 'Mag':
from hierarc.Likelihood.LensLikelihood.mag_likelihood import MagnificationLikelihood
self._lens_type = MagnificationLikelihood(**kwargs_likelihood)
elif likelihood_type == 'TDMag':
from hierarc.Likelihood.LensLikelihood.td_mag_likelihood import TDMagLikelihood
self._lens_type = TDMagLikelihood(**kwargs_likelihood)
else:
raise ValueError('likelihood_type %s not supported! Supported are %s.' % (likelihood_type, LIKELIHOOD_TYPES))

@@ -64,7 +70,7 @@ def num_data(self):
"""
return self._lens_type.num_data

def log_likelihood(self, ddt, dd, aniso_scaling=None, sigma_v_sys_error=None):
def log_likelihood(self, ddt, dd, aniso_scaling=None, sigma_v_sys_error=None, mu_intrinsic=None):
"""
:param ddt: time-delay distance [physical Mpc]
@@ -73,6 +79,7 @@ def log_likelihood(self, ddt, dd, aniso_scaling=None, sigma_v_sys_error=None):
dimensionless quantity J (proportional to sigma_v^2) of the anisotropy model in the sampling relative to the
anisotropy model used to derive the prediction and covariance matrix in the init of this class.
:param sigma_v_sys_error: unaccounted uncertainty in the velocity dispersion measurement
:param mu_intrinsic: float, intrinsic source brightness
:return: natural logarithm of the likelihood of the data given the model
"""
if self.likelihood_type in ['DdtGaussian', 'DdtLogNorm', 'DdtHist', 'DdtHistKDE']:
@@ -82,6 +89,10 @@ def log_likelihood(self, ddt, dd, aniso_scaling=None, sigma_v_sys_error=None):
elif self.likelihood_type in ['DdtHistKin', 'IFUKinCov', 'DdtGaussKin']:
return self._lens_type.log_likelihood(ddt, dd, aniso_scaling=aniso_scaling,
sigma_v_sys_error=sigma_v_sys_error)
elif self.likelihood_type in ['Mag']:
return self._lens_type.log_likelihood(mu_intrinsic=mu_intrinsic)
elif self.likelihood_type in ['TDMag']:
return self._lens_type.log_likelihood(ddt=ddt, mu_intrinsic=mu_intrinsic)
else:
raise ValueError('likelihood type %s not fully supported.' % self.likelihood_type)

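A minimal sketch of how the new 'Mag' likelihood type might be wired up through this dispatch; the keyword values below are hypothetical and follow the MagnificationLikelihood constructor added in this pull request:

import numpy as np
from hierarc.Likelihood.LensLikelihood.base_lens_likelihood import LensLikelihoodBase

# hypothetical two-image system: measured amplitudes with their covariance, and the
# model-predicted magnifications with their covariance
kwargs_likelihood = {'amp_measured': np.array([12., 8.]),
                     'cov_amp_measured': np.diag([1.0, 0.8]),
                     'mag_model': np.array([6., 4.]),
                     'cov_model': np.diag([0.5, 0.3])}
lens = LensLikelihoodBase(z_lens=0.5, z_source=2.0, likelihood_type='Mag', **kwargs_likelihood)
# ddt and dd are ignored by the 'Mag' type; mu_intrinsic is the unlensed source amplitude
log_l = lens.log_likelihood(ddt=None, dd=None, mu_intrinsic=2.0)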
1 change: 0 additions & 1 deletion hierarc/Likelihood/LensLikelihood/ddt_hist_likelihood.py
@@ -36,7 +36,6 @@ def __init__(self, z_lens, z_source, ddt_samples, kde_kernel=None, ddt_weights=N
# ignore potential zero weights, sklearn does not like them
kde_bins = [b for v, b in zip(vals, bins) if v > 0]
kde_weights = [v for v in vals if v > 0]
print(np.shape(kde_weights), np.shape(kde_bins))
self._kde = gaussian_kde(dataset=kde_bins, weights=kde_weights[:])
self.num_data = 1
self._sigma = np.std(ddt_samples)
62 changes: 62 additions & 0 deletions hierarc/Likelihood/LensLikelihood/mag_likelihood.py
@@ -0,0 +1,62 @@
import numpy as np


class MagnificationLikelihood(object):
"""
likelihood of an unlensed apparent source magnification given a measurement of the magnified brightness.
This can, for example, be applied to lensed SNe Ia at the population level
"""
def __init__(self, amp_measured, cov_amp_measured, mag_model, cov_model):
"""
:param amp_measured: array, amplitudes of measured fluxes of image positions
:param cov_amp_measured: 2d array, error covariance matrix of the measured amplitudes
:param mag_model: mean magnification of the model prediction
:param cov_model: 2d array (image amplitudes); model lensing magnification covariances
"""

self._data_vector = amp_measured
self._cov_amp_measured = np.array(cov_amp_measured)
# check that the sizes of the covariance matrices match
n_tot = len(self._data_vector)
assert n_tot == len(cov_model)
self._cov_data = self._cov_amp_measured
self._model_tot = np.array(mag_model)
self._cov_model = cov_model
self.num_data = n_tot

def log_likelihood(self, mu_intrinsic):
"""
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: log likelihood of the measured magnified images given the source brightness
"""
model_vector, cov_tot = self._scale_model(mu_intrinsic)
# invert matrix
try:
cov_tot_inv = np.linalg.inv(cov_tot)
except np.linalg.LinAlgError:
return -np.inf
# difference to data vector
delta = self._data_vector - model_vector
# evaluate likelihood
lnlikelihood = -delta.dot(cov_tot_inv.dot(delta)) / 2.
sign_det, lndet = np.linalg.slogdet(cov_tot)
lnlikelihood -= 1 / 2. * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood

def _scale_model(self, mu_intrinsic):
"""
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: model prediction vector and total (data + model) covariance matrix
"""
# compute model-predicted magnified image amplitudes
model_vector = mu_intrinsic * self._model_tot
# scale model covariance matrix with mu_intrinsic (in quadrature)
cov_model = self._cov_model * mu_intrinsic ** 2
# combine data and model covariance matrix
cov_tot = self._cov_data + cov_model
return model_vector, cov_tot

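Standalone, the class compares the measured amplitudes to mu_intrinsic times the model magnifications under a multivariate Gaussian whose covariance is the sum of the data covariance and the mu_intrinsic**2-scaled model covariance. A minimal sketch with hypothetical numbers:

import numpy as np
from hierarc.Likelihood.LensLikelihood.mag_likelihood import MagnificationLikelihood

likelihood = MagnificationLikelihood(amp_measured=np.array([10., 6., 4., 3.]),
                                     cov_amp_measured=np.diag([1., 1., 0.5, 0.5]),
                                     mag_model=np.array([5., 3., 2., 1.5]),
                                     cov_model=np.diag([0.4, 0.3, 0.2, 0.2]))
# scan the intrinsic (unlensed) source amplitude
for mu_intrinsic in [1.5, 2.0, 2.5]:
    print(mu_intrinsic, likelihood.log_likelihood(mu_intrinsic=mu_intrinsic))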
81 changes: 81 additions & 0 deletions hierarc/Likelihood/LensLikelihood/td_mag_likelihood.py
@@ -0,0 +1,81 @@
import numpy as np
from lenstronomy.Util import constants as const


class TDMagLikelihood(object):
"""
joint likelihood of relative time delays and image magnifications
"""
def __init__(self, time_delay_measured, cov_td_measured, amp_measured, cov_amp_measured,
fermat_diff, mag_model, cov_model):
"""
:param time_delay_measured: array, relative time delays (relative to the first image)
:param cov_td_measured: 2d array, error covariance matrix of time delay measurement
:param amp_measured: array, amplitudes of measured fluxes of image positions
:param cov_amp_measured: 2d array, error covariance matrix of the measured amplitudes
:param fermat_diff: mean Fermat potential differences (relative to the first image) in arcsec^2
:param mag_model: mean magnification of the model prediction
:param cov_model: 2d array (length relative time delays + image amplitudes); model fermat potential differences
and lensing magnification covariances
"""

self._data_vector = np.append(time_delay_measured, amp_measured)
self._cov_td_measured = np.array(cov_td_measured)
self._cov_amp_measured = np.array(cov_amp_measured)
# check that the sizes of the covariance matrices match
n_tot = len(self._data_vector)
self._n_td = len(time_delay_measured)
self._n_amp = len(amp_measured)
assert self._n_td == len(cov_td_measured)
assert self._n_amp == len(cov_amp_measured)
assert n_tot == len(cov_model)
# merge data covariance matrices from time delay and image amplitudes
self._cov_data = np.zeros((n_tot, n_tot))
self._cov_data[:self._n_td, :self._n_td] = self._cov_td_measured
self._cov_data[self._n_td:, self._n_td:] = self._cov_amp_measured
#self._fermat_diff = fermat_diff # in units arcsec^2
self._fermat_unit_conversion = const.Mpc / const.c / const.day_s * const.arcsec ** 2
#self._mag_model = mag_model
self._model_tot = np.append(fermat_diff, mag_model)
self._cov_model = cov_model
self.num_data = n_tot

def log_likelihood(self, ddt, mu_intrinsic):
"""
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: log likelihood of the measured time delays and magnified images given ddt and the source brightness
"""
model_vector, cov_tot = self._model_cov(ddt, mu_intrinsic)
# invert matrix
try:
cov_tot_inv = np.linalg.inv(cov_tot)
except np.linalg.LinAlgError:
return -np.inf
# difference to data vector
delta = self._data_vector - model_vector
# evaluate likelihood
lnlikelihood = -delta.dot(cov_tot_inv.dot(delta)) / 2.
sign_det, lndet = np.linalg.slogdet(cov_tot)
lnlikelihood -= 1 / 2. * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood

def _model_cov(self, ddt, mu_intrinsic):
"""
:param ddt: time-delay distance (physical Mpc)
:param mu_intrinsic: intrinsic brightness of the source (already incorporating the inverse MST transform)
:return: model prediction vector and total (data + model) covariance matrix
"""
# compute model predicted magnified image amplitude and time delay
model_scale = np.append(ddt * self._fermat_unit_conversion * np.ones(self._n_td),
mu_intrinsic * np.ones(self._n_amp))
model_vector = model_scale * self._model_tot
# scale model covariance matrix with model_scale vector (in quadrature)
cov_model = model_scale * (self._cov_model * model_scale).T
# combine data and model covariance matrix
cov_tot = self._cov_data + cov_model
return model_vector, cov_tot
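The unit conversion const.Mpc / const.c / const.day_s * const.arcsec ** 2 turns ddt * fermat_diff (time-delay distance in Mpc times Fermat potential differences in arcsec^2) into predicted time delays in days, so the model vector mixes days and linear flux amplitudes. A minimal sketch with hypothetical numbers for a quadruply imaged source with three relative delays:

import numpy as np
from hierarc.Likelihood.LensLikelihood.td_mag_likelihood import TDMagLikelihood

likelihood = TDMagLikelihood(time_delay_measured=np.array([10., 30., 45.]),  # days
                             cov_td_measured=np.diag([1., 1.5, 2.]) ** 2,
                             amp_measured=np.array([12., 8., 6., 5.]),
                             cov_amp_measured=np.diag([1., 0.8, 0.6, 0.5]),
                             fermat_diff=np.array([0.1, 0.3, 0.45]),  # arcsec^2
                             mag_model=np.array([6., 4., 3., 2.5]),
                             cov_model=np.zeros((7, 7)))  # no model scatter in this sketch
log_l = likelihood.log_likelihood(ddt=4000., mu_intrinsic=2.0)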
16 changes: 13 additions & 3 deletions hierarc/Likelihood/cosmo_likelihood.py
@@ -13,6 +13,7 @@ def __init__(self, kwargs_likelihood_list, cosmology, kwargs_bounds, ppn_samplin
lambda_mst_sampling=False, lambda_mst_distribution='delta', anisotropy_sampling=False,
kappa_ext_sampling=False, kappa_ext_distribution='NONE', alpha_lambda_sampling=False,
lambda_ifu_sampling=False, lambda_ifu_distribution='NONE', sigma_v_systematics=False,
sne_sampling=False, sne_distribution='GAUSSIAN',
log_scatter=False,
anisotropy_model='OM', anisotropy_distribution='NONE', custom_prior=None, interpolate_cosmo=True,
num_redshift_interp=100, cosmo_fixed=None):
@@ -41,6 +42,11 @@ def __init__(self, kwargs_likelihood_list, cosmology, kwargs_bounds, ppn_samplin
:param anisotropy_distribution: string, distribution of the anisotropy parameters
:param sigma_v_systematics: bool, if True, samples parameters describing systematic uncertainties in the velocity
dispersion measurement
:param sne_sampling: boolean, if True, samples/queries the unlensed SNe magnitude distribution
(apparent, not intrinsic, magnitudes)
:param sne_distribution: string, apparent non-lensed brightness distribution (in linear space).
Currently supports:
'GAUSSIAN': Gaussian distribution
:param log_scatter: boolean, if True, samples the Gaussian scatter amplitude in log space (and thus flat prior in log)
:param custom_prior: None or a definition that takes the keywords from the CosmoParam conventions and returns a
log likelihood value (e.g. prior)
Expand All @@ -56,6 +62,8 @@ def __init__(self, kwargs_likelihood_list, cosmology, kwargs_bounds, ppn_samplin
lambda_ifu_sampling=lambda_ifu_sampling,
lambda_ifu_distribution=lambda_ifu_distribution,
alpha_lambda_sampling=alpha_lambda_sampling,
sne_sampling=sne_sampling,
sne_distribution=sne_distribution,
sigma_v_systematics=sigma_v_systematics,
kappa_ext_sampling=kappa_ext_sampling, kappa_ext_distribution=kappa_ext_distribution,
anisotropy_sampling=anisotropy_sampling, anisotropy_model=anisotropy_model,
@@ -85,7 +93,7 @@ def likelihood(self, args):
if args[i] < self._lower_limit[i] or args[i] > self._upper_limit[i]:
return -np.inf

kwargs_cosmo, kwargs_lens, kwargs_kin = self.param.args2kwargs(args)
kwargs_cosmo, kwargs_lens, kwargs_kin, kwargs_source = self.param.args2kwargs(args)
if self._cosmology == "oLCDM":
# assert we are not in a crazy cosmological situation that prevents computing the angular distance integral
h0, ok, om = kwargs_cosmo['h0'], kwargs_cosmo['ok'], kwargs_cosmo['om']
@@ -97,7 +105,8 @@
if 1.0 - om - ok <= 0:
return -np.inf
cosmo = self.cosmo_instance(kwargs_cosmo)
logL = self._likelihoodLensSample.log_likelihood(cosmo=cosmo, kwargs_lens=kwargs_lens, kwargs_kin=kwargs_kin)
logL = self._likelihoodLensSample.log_likelihood(cosmo=cosmo, kwargs_lens=kwargs_lens, kwargs_kin=kwargs_kin,
kwargs_source=kwargs_source)

if self._prior_add is True:
logL += self._custom_prior(kwargs_cosmo, kwargs_lens, kwargs_kin)
@@ -116,7 +125,8 @@ def cosmo_instance(self, kwargs_cosmo):
else:
if self._interpolate_cosmo is True:
if not hasattr(self, '_cosmo_fixed_interp'):
self._cosmo_fixed_interp = CosmoInterp(cosmo=self._cosmo_fixed, z_stop=self._z_max, num_interp=self._num_redshift_interp)
self._cosmo_fixed_interp = CosmoInterp(cosmo=self._cosmo_fixed, z_stop=self._z_max,
num_interp=self._num_redshift_interp)
cosmo = self._cosmo_fixed_interp
else:
cosmo = self._cosmo_fixed
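On the sampling side, the two new keyword arguments switch on the apparent (unlensed) SNe brightness parameters, which are then handed to each lens likelihood through kwargs_source. A rough sketch of the options added here; the commented-out constructor call assumes kwargs_likelihood_list, cosmology and kwargs_bounds are set up as before (their contents are not part of this diff):

# SNe-sampling options added by this pull request
kwargs_sne = {'sne_sampling': True,            # sample the apparent (unlensed) SNe brightness distribution
              'sne_distribution': 'GAUSSIAN'}  # Gaussian apparent-brightness distribution (linear space)
# cosmo_likelihood = CosmoLikelihood(kwargs_likelihood_list, cosmology, kwargs_bounds, **kwargs_sne)
# log_l = cosmo_likelihood.likelihood(args)  # args ordering is defined by the CosmoParam conventions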
