Merge pull request #126 from sblunt/bugfixes
Fix three bugs
sblunt committed Sep 9, 2019
2 parents 82559ee + 418a32b commit a44a802
Showing 7 changed files with 59 additions and 47 deletions.
4 changes: 1 addition & 3 deletions orbitize/driver.py
@@ -57,9 +57,7 @@ def __init__(self, input_data, sampler_str,
plx, mass_err=mass_err, plx_err=plx_err, **system_kwargs
)

# Initialize Sampler object, which stores information about
# the likelihood function & the algorithm used to generate
# orbits, and has System object as an attribute.
# Initialize Sampler object, which has System object as an attribute.
if mcmc_kwargs is not None and sampler_str == 'MCMC':
kwargs = mcmc_kwargs
else:
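
For context, a minimal sketch of how `mcmc_kwargs` reaches the sampler through this code path. The file name and hyperparameter values are illustrative placeholders (mirroring the updated tests below), not part of this commit:

from orbitize.driver import Driver

# Hypothetical input file; any orbitize-formatted astrometry table works here.
my_driver = Driver(
    'test_val.csv', 'MCMC',
    1,       # number of secondary bodies
    1.0,     # stellar mass
    0.01,    # parallax
    mcmc_kwargs={'num_temps': 2, 'num_walkers': 100, 'num_threads': 1}
)

# Because sampler_str == 'MCMC', the kwargs above were forwarded to the MCMC sampler.
my_driver.sampler.run_sampler(100)
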
1 change: 1 addition & 0 deletions orbitize/lnlike.py
@@ -28,6 +28,7 @@ def chi2_lnlike(data, errors, model, seppa_indices):
this function should be an array of dimension 8 x 2 x 10,000.
"""

if np.ndim(model) == 3:
# move M dimension to the primary axis, so that numpy knows to iterate over it
model = np.rollaxis(model, 2, 0) # now MxNobsx2 in dimensions
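
To illustrate the axis handling above, a small standalone numpy sketch using the shapes named in the docstring (the arrays are random placeholders):

import numpy as np

n_obs, n_orbits = 8, 10000
model = np.random.rand(n_obs, 2, n_orbits)  # Nobs x 2 x M, as in the docstring

# np.rollaxis moves the orbit axis (M) to the front, so iterating over the
# result yields one (Nobs x 2) model array per orbit.
rolled = np.rollaxis(model, 2, 0)
print(rolled.shape)  # (10000, 8, 2), i.e. M x Nobs x 2
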
15 changes: 7 additions & 8 deletions orbitize/results.py
@@ -61,7 +61,6 @@ def __init__(self, sampler_name=None, post=None, lnlike=None, tau_ref_epoch=None
self.tau_ref_epoch = tau_ref_epoch
self.labels=labels


def add_samples(self, orbital_params, lnlikes, labels):
"""
Add accepted orbits and their likelihoods to the results
@@ -74,7 +73,7 @@ def add_samples(self, orbital_params, lnlikes, labels):
Written: Henry Ngo, 2018
"""
# If no existing results then it is easy
if self.post is None and self.lnlike is None:
if self.post is None:
self.post = orbital_params
self.lnlike = lnlikes
self.labels = labels
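
A hedged sketch of the branch this check guards: a freshly constructed Results object has post set to None, so the first add_samples call stores the arrays directly and later calls append to them. The parameter array, likelihoods, and labels below are placeholders, not taken from the test suite:

import numpy as np
from orbitize import results

params = np.random.rand(5, 2)   # 5 hypothetical samples of 2 parameters
lnlikes = np.random.rand(5)

res = results.Results()                                    # post and lnlike start as None
res.add_samples(params, lnlikes, labels=['sma1', 'ecc1'])  # first call: stores directly
res.add_samples(params, lnlikes, labels=['sma1', 'ecc1'])  # second call: appends
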
@@ -96,11 +95,11 @@ def save_results(self, filename):
Args:
filename (string): filepath to save to
Save the ``sampler_name``, ``post``, and ``lnlike``
attributes from the ``results.Results`` object.
Save attributes from the ``results.Results`` object.
``sampler_name`` and ``tau_ref_epoch`` are attributes of the root group.
``post``, ``lnlike``, and ``parameter_labels`` are datasets that are members of the root group.
``post``, ``lnlike``, and ``parameter_labels`` are datasets
that are members of the root group.
Written: Henry Ngo, 2018
"""
@@ -110,7 +109,7 @@ def save_results(self, filename):
hf.attrs['tau_ref_epoch'] = self.tau_ref_epoch
# Now add post and lnlike from the results object as datasets
hf.create_dataset('post', data=self.post)
if self.lnlike is not None: # This property doesn't exist for OFTI
if self.lnlike is not None:
hf.create_dataset('lnlike', data=self.lnlike)
if self.labels is not None:
hf['col_names'] = np.array(self.labels).astype('S')
@@ -171,12 +170,12 @@ def load_results(self, filename, append=False):


# Now append post and lnlike
self.add_samples(post,lnlike, labels=self.labels)
self.add_samples(post, lnlike, self.labels)
else:
# Only proceed if object is completely empty
if self.sampler_name is None and self.post is None and self.lnlike is None and self.tau_ref_epoch is None:
self._set_sampler_name(sampler_name)
self.add_samples(post,lnlike, labels=self.labels)
self.add_samples(post, lnlike, self.labels)
self.tau_ref_epoch = tau_ref_epoch
self.labels = labels
else:
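
A self-contained sketch of the save/load round trip these methods implement. The sample values, labels, output filename, and tau_ref_epoch are placeholders for illustration:

import numpy as np
from orbitize import results

res = results.Results(sampler_name='MCMC', tau_ref_epoch=58849)
res.add_samples(np.random.rand(5, 2), np.random.rand(5), labels=['sma1', 'ecc1'])

res.save_results('my_posterior.hdf5')   # writes post, lnlike, and col_names to HDF5

loaded = results.Results()              # must be completely empty unless append=True
loaded.load_results('my_posterior.hdf5')
print(loaded.post.shape, loaded.labels)
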
12 changes: 7 additions & 5 deletions orbitize/sampler.py
@@ -309,10 +309,8 @@ def run_sampler(self, total_orbits, num_samples=10000, num_cores=None):
Runs OFTI in parallel on multiple cores until we get the number of total accepted orbits we want.
Args:
total_orbits (int): total number of accepted orbits desired by user
num_samples (int): number of orbits to prepare for OFTI to run
rejection sampling on. Defaults to 10000.
num_cores (int): the number of cores to run OFTI on. Defaults to
number of cores available.
Return:
@@ -614,11 +612,15 @@ def run_sampler(self, total_orbits, burn_steps=0, thin=1):

if self.use_pt:
self.post = sampler.flatchain[0,:,:]
self.lnlikes = sampler.logprobability[0,:,:].flatten() # should also be picking out the lowest temperature logps
self.lnlikes_alltemps = sampler.logprobability
self.lnlikes = sampler.loglikelihood[0,:,:].flatten() # should also be picking out the lowest temperature logps
self.lnlikes_alltemps = sampler.loglikelihood
else:
self.post = sampler.flatchain
self.lnlikes = sampler.lnprobability
self.lnlikes = sampler.flatlnprobability

# convert posterior probability (returned by sampler objects) to likelihood (required by orbitize.results.Results)
for i, orb in enumerate(self.post):
self.lnlikes[i] -= orbitize.priors.all_lnpriors(orb,self.priors)

# include fixed parameters in posterior
self.post = self._fill_in_fixed_params(self.post)
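
The conversion loop above uses the identity ln(posterior) = ln(likelihood) + ln(prior) (up to a normalization constant), so subtracting the log-prior at each sample recovers the log-likelihood that results.Results expects. A toy numeric check, independent of orbitize:

import numpy as np

lnprior = -1.2                        # log prior density at some sample (made-up value)
lnlike = -43.7                        # log likelihood at the same sample (made-up value)
lnpost = lnlike + lnprior             # what an MCMC sampler reports, up to a constant

recovered = lnpost - lnprior          # the subtraction performed in the loop above
assert np.isclose(recovered, lnlike)
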
7 changes: 2 additions & 5 deletions orbitize/system.py
@@ -91,10 +91,6 @@ def __init__(self, num_secondary_bodies, data_table, stellar_mass,
np.intersect1d(self.body_indices[body_num], seppa_indices)
)

if (len(radec_indices[0]) + len(seppa_indices[0]) == len(self.data_table)) and (restrict_angle_ranges is None):
print("No RV in data table: We are restricting the longitude of ascending node to [0,pi]")
restrict_angle_ranges = True

if restrict_angle_ranges:
angle_upperlim = np.pi
else:
@@ -232,7 +228,8 @@ def convert_data_table_radec2seppa(self,body_num=1):
dec_err = self.data_table['quant2_err'][i]
# Convert to sep/PA
sep, pa = radec2seppa(ra,dec)
sep_err, pa_err = radec2seppa(ra_err,dec_err)
sep_err = 0.5*(ra_err+dec_err)
pa_err = np.degrees(sep_err/sep)
# Update data_table
self.data_table['quant1'][i]=sep
self.data_table['quant1_err'][i]=sep_err
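
A toy check of the corrected error conversion: the separation error is taken as the mean of the RA and Dec errors, and the PA error is that linear error divided by the separation, converted from radians to degrees. The measurement values below are made up:

import numpy as np
from orbitize.system import radec2seppa

ra, dec = 100.0, 100.0               # placeholder offsets [mas]
ra_err, dec_err = 2.0, 4.0           # placeholder uncertainties [mas]

sep, pa = radec2seppa(ra, dec)       # sep ~ 141.4 mas, pa = 45 deg
sep_err = 0.5 * (ra_err + dec_err)   # 3.0 mas
pa_err = np.degrees(sep_err / sep)   # ~ 1.2 deg
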
8 changes: 8 additions & 0 deletions tests/test_OFTI.py
@@ -10,6 +10,9 @@
import orbitize.sampler as sampler
import orbitize.driver
import orbitize.priors as priors
from orbitize.lnlike import chi2_lnlike
from orbitize.kepler import calc_orbit
import orbitize.system


testdir = os.path.dirname(os.path.abspath(__file__))
@@ -84,6 +87,11 @@ def test_run_sampler():
print()
print(orbits[0])

# test that lnlikes being saved are correct
returned_lnlike_test = s.results.lnlike[0]
computed_lnlike_test = s._logl(orbits[0])
assert returned_lnlike_test == pytest.approx(computed_lnlike_test, abs=0.01)

print()
idx = s.system.param_idx
sma = np.median([x[idx['sma1']] for x in orbits])
59 changes: 33 additions & 26 deletions tests/test_mcmc.py
@@ -1,4 +1,7 @@
import pytest

import os
from orbitize.driver import Driver
import orbitize.sampler as sampler
import orbitize.system as system
import orbitize.read_input as read_input
@@ -7,53 +10,57 @@ def test_pt_mcmc_runs(num_threads=1):
"""
Tests the PTMCMC sampler by making sure it even runs
"""

# use the test_csv dir
testdir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(testdir, 'test_val.csv')
data_table = read_input.read_file(input_file)
# Manually set 'object' column of data table
data_table['object'] = 1

# construct the system
orbit = system.System(1, data_table, 1, 0.01)

# construct sampler
n_temps=2
n_walkers=100
mcmc = sampler.MCMC(orbit, n_temps, n_walkers, num_threads=num_threads)
myDriver = Driver(input_file, 'MCMC', 1, 1, 0.01,
mcmc_kwargs={'num_temps':2, 'num_threads':num_threads, 'num_walkers':100}
)

# run it a little (tests 0 burn-in steps)
mcmc.run_sampler(100)
myDriver.sampler.run_sampler(100)

# run it a little more
mcmc.run_sampler(1000, 1)
myDriver.sampler.run_sampler(1000, burn_steps=1)

# run it a little more (tests adding to results object)
mcmc.run_sampler(1000, 1)
myDriver.sampler.run_sampler(1000, burn_steps=1)

# test that lnlikes being saved are correct
returned_lnlike_test = myDriver.sampler.results.lnlike[0]
computed_lnlike_test = myDriver.sampler._logl(myDriver.sampler.results.post[0])

assert returned_lnlike_test == pytest.approx(computed_lnlike_test, abs=0.01)

def test_ensemble_mcmc_runs(num_threads=1):
"""
Tests the EnsembleMCMC sampler by making sure it even runs
"""

# use the test_csv dir
testdir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(testdir, 'test_val.csv')
data_table = read_input.read_file(input_file)
# Manually set 'object' column of data table
data_table['object'] = 1

# construct the system
orbit = system.System(1, data_table, 1, 0.01)

# construct sampler
n_walkers=100
mcmc = sampler.MCMC(orbit, 0, n_walkers, num_threads=num_threads)

myDriver = Driver(input_file, 'MCMC', 1, 1, 0.01,
mcmc_kwargs={'num_temps':1, 'num_threads':num_threads, 'num_walkers':100}
)

# run it a little (tests 0 burn-in steps)
mcmc.run_sampler(100)
myDriver.sampler.run_sampler(100)

# run it a little more
mcmc.run_sampler(1000, burn_steps=1)
myDriver.sampler.run_sampler(1000, burn_steps=1)

# run it a little more (tests adding to results object)
mcmc.run_sampler(1000, burn_steps=1)
myDriver.sampler.run_sampler(1000, burn_steps=1)

# test that lnlikes being saved are correct
returned_lnlike_test = myDriver.sampler.results.lnlike[0]
computed_lnlike_test = myDriver.sampler._logl(myDriver.sampler.results.post[0])

assert returned_lnlike_test == pytest.approx(computed_lnlike_test, abs=0.01)

if __name__ == "__main__":
test_pt_mcmc_runs(num_threads=1)
