Commit
add cray-cray option to scale K variance with P, e
adrn committed Oct 20, 2019
1 parent b5cce0d commit 6950ada
Showing 4 changed files with 100 additions and 12 deletions.
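
The new scale_K_prior_with_P option replaces the fixed Gaussian prior variance
on the velocity semi-amplitude K with one that scales with the sampled period
and eccentricity, Lambda[0, 0] = K0**2 / (1 - e**2) * (P / 365 day)**(-2/3),
with K0 hard-coded to 100 m/s for now (see the TODOs below). This is the
scaling expected for K at fixed companion mass, K ~ P**(-1/3) / sqrt(1 - e**2).
A minimal Python sketch of the relation (the helper name and printed values
are illustrative, not part of the package):

def k_prior_variance(P_day, ecc, K0=100.):
    """Prior variance on K, in (m/s)**2, for P in days."""
    # Var(K) picks up the square of each factor in K ~ P**(-1/3) / sqrt(1 - e**2)
    return K0**2 / (1 - ecc**2) * (P_day / 365.)**(-2/3)

print(k_prior_variance(365., 0.))  # 10000.0: a 100 m/s standard deviation at P = 1 yr
print(k_prior_variance(8., 0.5))   # ~1.7e5: much broader K prior at short P / high e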
41 changes: 37 additions & 4 deletions thejoker/sampler/fast_likelihood.pyx
@@ -7,6 +7,7 @@
 # cython: language_level=3
 
 # Third-party
+import astropy.units as u
 import numpy as np
 cimport numpy as np
 np.import_array()
@@ -278,7 +279,7 @@ cpdef batch_marginal_ln_likelihood(double[:,::1] chunk,
 
         # inverse variance array with jitter included
         double[::1] jitter_ivar = np.zeros(data.ivar.value.shape)
-        double[:, ::1] Lambda = np.ascontiguousarray(joker_params.linear_par_Lambda)
+        double[:, ::1] Lambda = np.zeros((n_pars, n_pars))
         double[::1] mu = np.ascontiguousarray(joker_params.linear_par_mu)
 
         # design matrix
@@ -302,12 +303,24 @@ cpdef batch_marginal_ln_likelihood(double[:,::1] chunk,
         # Needed for temporary storage in likelihood_worker:
         double[:, ::1] Btmp = np.zeros((n_times, n_times), dtype=np.float64)
         double[:, ::1] Atmp = np.zeros((n_pars, n_pars), dtype=np.float64)
-        double[:, ::1] Linv = np.linalg.inv(Lambda)
+        double[:, ::1] Linv = np.zeros((n_pars, n_pars), dtype=np.float64)
         int[::1] npar_ipiv = np.zeros(n_pars, dtype=np.int32)
         int[::1] ntime_ipiv = np.zeros(n_times, dtype=np.int32)
         double[::1] npar_work = np.zeros(n_pars, dtype=np.float64)
         double[::1] ntime_work = np.zeros(n_times, dtype=np.float64)
 
+        double K0
+        int scale_K_prior_with_P = joker_params.scale_K_prior_with_P
+
+    if scale_K_prior_with_P > 0:
+        # TODO: allow customizing K0!
+        K0 = (100 * u.m/u.s).to_value(joker_params._jitter_unit)
+        Lambda[1:, 1:] = np.array(joker_params.linear_par_Lambda)
+        Linv[1:, 1:] = np.linalg.inv(joker_params.linear_par_Lambda)
+    else:
+        Lambda = np.ascontiguousarray(joker_params.linear_par_Lambda)
+        Linv = np.linalg.inv(Lambda)
+
     if joker_params._fixed_jitter:
         _fixed_jitter = 1
         jitter = joker_params.jitter.to(data.rv.unit).value
@@ -325,6 +338,10 @@ cpdef batch_marginal_ln_likelihood(double[:,::1] chunk,
         # Note: jitter must be in same units as the data RV's / ivar!
         get_ivar(ivar, jitter, jitter_ivar)
 
+        if scale_K_prior_with_P > 0:
+            Lambda[0, 0] = K0**2 / (1 - chunk[n, 2]**2) * (chunk[n, 0] / 365.)**(-2/3.)
+            Linv[0, 0] = 1 / Lambda[0, 0]
+
         # compute things needed for the ln(likelihood)
         ll[n] = likelihood_worker(rv, jitter_ivar, M_T, mu, Lambda, Linv,
                                   0, B, b, Ainv, a, Btmp, Atmp,
@@ -363,7 +380,7 @@ cpdef batch_get_posterior_samples(double[:,::1] chunk,
 
         # inverse variance array with jitter included
         double[::1] jitter_ivar = np.zeros(data.ivar.value.shape)
-        double[:, ::1] Lambda = np.ascontiguousarray(joker_params.linear_par_Lambda)
+        double[:, ::1] Lambda = np.zeros((n_pars, n_pars))
         double[::1] mu = np.ascontiguousarray(joker_params.linear_par_mu)
 
         # transpose of design matrix
@@ -384,7 +401,7 @@ cpdef batch_get_posterior_samples(double[:,::1] chunk,
         # Needed for temporary storage in likelihood_worker:
         double[:, ::1] Btmp = np.zeros((n_times, n_times), dtype=np.float64)
         double[:, ::1] Atmp = np.zeros((n_pars, n_pars), dtype=np.float64)
-        double[:, ::1] Linv = np.linalg.inv(Lambda)
+        double[:, ::1] Linv = np.zeros((n_pars, n_pars), dtype=np.float64)
         int[::1] npar_ipiv = np.zeros(n_pars, dtype=np.int32)
         int[::1] ntime_ipiv = np.zeros(n_times, dtype=np.int32)
         double[::1] npar_work = np.zeros(n_pars, dtype=np.float64)
@@ -394,6 +411,18 @@ cpdef batch_get_posterior_samples(double[:,::1] chunk,
                                         joker_params.num_params + int(return_logprobs)))
         double[::1] linear_pars
 
+        double K0
+        int scale_K_prior_with_P = joker_params.scale_K_prior_with_P
+
+    if scale_K_prior_with_P > 0:
+        # TODO: allow customizing K0!
+        K0 = (100 * u.m/u.s).to_value(joker_params._jitter_unit)
+        Lambda[1:, 1:] = np.array(joker_params.linear_par_Lambda)
+        Linv[1:, 1:] = np.linalg.inv(joker_params.linear_par_Lambda)
+    else:
+        Lambda = np.ascontiguousarray(joker_params.linear_par_Lambda)
+        Linv = np.linalg.inv(Lambda)
+
     for n in range(n_samples):
         pars[n, 0] = chunk[n, 0]  # P
         pars[n, 1] = chunk[n, 1]  # M0
@@ -407,6 +436,10 @@ cpdef batch_get_posterior_samples(double[:,::1] chunk,
         # jitter must be in same units as the data RV's / ivar!
         get_ivar(ivar, chunk[n, 4], jitter_ivar)
 
+        if scale_K_prior_with_P > 0:
+            Lambda[0, 0] = K0**2 / (1 - chunk[n, 2]**2) * (chunk[n, 0] / 365.)**(-2/3)
+            Linv[0, 0] = 1 / Lambda[0, 0]
+
         # compute things needed for the ln(likelihood)
         ll = likelihood_worker(rv, jitter_ivar, M_T, mu, Lambda, Linv,
                                1, B, b, Ainv, a, Btmp, Atmp,
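
Both Cython routines above now preallocate Lambda and Linv once and update only
the (0, 0) element inside the per-sample loop. Because the K term of the prior
is independent of the trend-parameter block, Lambda stays block-diagonal, so
the scalar update Linv[0, 0] = 1 / Lambda[0, 0] is exact and no per-sample
matrix inversion is needed. A NumPy sketch of that update pattern (names follow
the diff; the sample values are made up):

import numpy as np

n_pars = 2                             # K plus one constant term (poly_trend=1)
Lambda = np.zeros((n_pars, n_pars))
Linv = np.zeros((n_pars, n_pars))

# One-time setup: the user-supplied prior covers only the trend block.
linear_par_Lambda = np.diag([1e2])**2
Lambda[1:, 1:] = linear_par_Lambda
Linv[1:, 1:] = np.linalg.inv(linear_par_Lambda)

# Per-sample update: only the K element changes.
K0, P, e = 100., 42.3, 0.1             # hypothetical sample, P in days
Lambda[0, 0] = K0**2 / (1 - e**2) * (P / 365.)**(-2/3)
Linv[0, 0] = 1 / Lambda[0, 0]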
13 changes: 12 additions & 1 deletion thejoker/sampler/likelihood.py
@@ -186,12 +186,23 @@ def marginal_ln_likelihood(nonlinear_p, data, joker_params, make_aAinv=False):
 
     M = design_matrix(nonlinear_p, data, joker_params)
 
+    if joker_params.scale_K_prior_with_P:
+        # TODO: allow customizing K0!
+        K0 = (100 * u.m/u.s).to_value(joker_params._jitter_unit)
+        Lambda = np.zeros((joker_params._n_linear, joker_params._n_linear))
+        P = nonlinear_p[0]
+        e = nonlinear_p[2]
+        Lambda[0, 0] = K0**2 / (1 - e**2) * (P / 365.)**(-2/3)
+        Lambda[1:, 1:] = joker_params.linear_par_Lambda
+    else:
+        Lambda = joker_params.linear_par_Lambda
+
     # jitter must be in same units as the data RV's / ivar!
     s = nonlinear_p[4]
     ivar = get_ivar(data, s)
     marg_ll, *_ = likelihood_worker(data.rv.value, ivar, M,
                                     joker_params.linear_par_mu,
-                                    joker_params.linear_par_Lambda,
+                                    Lambda,
                                     make_aAinv=make_aAinv)
 
     return marg_ll
17 changes: 10 additions & 7 deletions thejoker/sampler/params.py
@@ -71,7 +71,7 @@ class JokerParams:
     @u.quantity_input(P_min=u.day, P_max=u.day)
     def __init__(self, P_min, P_max,
                  linear_par_Lambda=None, linear_par_mu=None,
-                 scale_K_prior_with_P=True,
+                 scale_K_prior_with_P=False,
                  jitter=None, jitter_unit=None,
                  poly_trend=1,
                  anomaly_tol=1E-10, anomaly_maxiter=128):
@@ -90,11 +90,11 @@ def __init__(self, P_min, P_max,
         self.anomaly_maxiter = int(anomaly_maxiter)
 
         # K + the linear trend parameters
-        _n_linear = 1 + self.poly_trend
+        self._n_linear = 1 + self.poly_trend
 
         # TODO: ignoring units / assuming units are same as data here
         if linear_par_mu is None:
-            linear_par_mu = np.zeros(_n_linear)
+            linear_par_mu = np.zeros(self._n_linear)
 
         if linear_par_Lambda is None:
             msg = (
@@ -115,13 +115,16 @@ def __init__(self, P_min, P_max,
         self.linear_par_Lambda = np.array(linear_par_Lambda)
         self.linear_par_mu = np.array(linear_par_mu)
 
-        if self.linear_par_Lambda.shape != (_n_linear, _n_linear):
+        self.scale_K_prior_with_P = scale_K_prior_with_P
+        _check_size = self._n_linear - int(self.scale_K_prior_with_P)
+
+        if self.linear_par_Lambda.shape != (_check_size, _check_size):
             raise ValueError("Linear parameter prior variance must have shape "
-                             "({0}, {0})".format(_n_linear))
+                             "({0}, {0})".format(_check_size))
 
-        if self.linear_par_mu.shape != (_n_linear, ):
+        if self.linear_par_mu.shape != (self._n_linear, ):
             raise ValueError("Linear parameter prior mean must have shape "
-                             "({0}, )".format(_n_linear))
+                             "({0}, )".format(self._n_linear))
 
         # validate the input jitter specification
         if jitter is None:
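
Note the consequence of the new shape check: with scale_K_prior_with_P=True,
the K row and column of Lambda are filled in per sample, so linear_par_Lambda
covers only the remaining linear parameters and is one dimension smaller,
while linear_par_mu keeps its full length. A usage sketch mirroring the new
test below (assuming the default poly_trend=1, i.e. linear parameters K and v0):

import astropy.units as u
import numpy as np
from thejoker.sampler.params import JokerParams

# With the scaled K prior, linear_par_Lambda is the 1x1 prior variance
# for v0 alone; the K element is set per sample from P and e.
params = JokerParams(P_min=8*u.day, P_max=32768*u.day,
                     jitter=0*u.m/u.s,
                     scale_K_prior_with_P=True,
                     linear_par_Lambda=np.diag([1e2])**2)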
41 changes: 41 additions & 0 deletions thejoker/sampler/tests/test_fast_likelihood.py
@@ -52,6 +52,47 @@ def test_against_py():
     assert np.allclose(np.array(cy_ll), py_ll)
 
 
+def test_against_py_scale_varK():
+    joker_params = JokerParams(P_min=8*u.day, P_max=32768*u.day,
+                               jitter=0*u.m/u.s,
+                               scale_K_prior_with_P=True,
+                               linear_par_Lambda=np.diag([1e2])**2)
+    joker = TheJoker(joker_params)
+
+    # t = np.random.uniform(0, 250, 16) + 56831.324
+    t = np.random.uniform(0, 250, 3) + 56831.324
+    t.sort()
+
+    rv = np.cos(t)
+    rv_err = np.random.uniform(0.1, 0.2, t.size)
+
+    data = RVData(t=t, rv=rv*u.km/u.s, stddev=rv_err*u.km/u.s)
+
+    samples = joker.sample_prior(size=16384)
+
+    chunk = []
+    for k in samples:
+        chunk.append(np.array(samples[k]))
+
+    chunk = np.ascontiguousarray(np.vstack(chunk).T)
+
+    t0 = time.time()
+    n_chunk = len(chunk)
+    py_ll = np.zeros(n_chunk)
+    for i in range(n_chunk):
+        try:
+            py_ll[i] = marginal_ln_likelihood(chunk[i], data, joker_params)
+        except Exception as e:
+            py_ll[i] = np.nan
+    print("Python:", time.time() - t0)
+
+    t0 = time.time()
+    cy_ll = batch_marginal_ln_likelihood(chunk, data, joker_params)
+    print("Cython:", time.time() - t0)
+
+    assert np.allclose(np.array(cy_ll), py_ll)
+
+
 def test_likelihood_helpers():
     np.random.seed(42)
 
