Merge branch 'clock-search' of github.com:aarchiba/PINT into clock-search
aarchiba committed May 26, 2022
2 parents 9d4d4ec + 706371a commit 9d98345
Showing 7 changed files with 145 additions and 33 deletions.
15 changes: 5 additions & 10 deletions .github/workflows/ci_test.yml
@@ -25,10 +25,13 @@ jobs:
include:
- os: ubuntu-latest
python: '3.8'
tox_env: 'py38-test-cov'
tox_env: 'ephemeris_connection'
- os: ubuntu-latest
python: '3.8'
tox_env: 'ephemeris_connection'
tox_env: 'black'
- os: ubuntu-latest
python: '3.8'
tox_env: 'py38-test-cov'
- os: ubuntu-latest
python: '3.8'
tox_env: 'notebooks'
@@ -38,7 +41,6 @@ jobs:
- os: ubuntu-latest
python: '3.8'
tox_env: 'py38-test-alldeps-cov'
use_remote_data: true
- os: macos-latest
python: '3.7'
tox_env: 'py37-test'
@@ -48,9 +50,6 @@
- os: ubuntu-latest
python: '3.7'
tox_env: 'oldestdeps'
- os: ubuntu-latest
python: '3.8'
tox_env: 'black'
# - os: ubuntu-latest
# python: '3.8'
# tox_env: 'codestyle'
@@ -79,11 +78,7 @@ jobs:
python -c "import setuptools; print(f'setuptools {setuptools.__version__}')"
python -c "import tox; print(f'tox {tox.__version__}')"
- name: Run tests
if: "! matrix.use_remote_data"
run: tox -e ${{ matrix.tox_env }}
- name: Run tests with remote data
if: "matrix.use_remote_data"
run: tox -e ${{ matrix.tox_env }} -- --remote-data=any
- name: Upload coverage to codecov
if: "endsWith(matrix.tox_env, '-cov')"
uses: codecov/codecov-action@v2
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@ and this project, at least loosely, adheres to [Semantic Versioning](https://sem
- Added the ability to query clock correction files or observatories for their last corrected MJD
- Added an example showing how to check the status of your clock corrections
### Fixed
- WLS fitters no longer ignore EFAC/EQUAD (bug #1226)
### Changed
- Clock correction files that are entirely missing are handled the same way as TOAs past the end of a clock correction file
- Observatory objects can now note that their clock correction files include a bogus "99999" (or similar) entry at the end
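The changelog's note about querying observatories for their last corrected MJD suggests usage along the following lines. This is a hypothetical sketch only: the observatory-side API is not part of this diff, and the method name `last_clock_correction_mjd` is an assumption.

```python
# Hypothetical sketch of the clock-correction status check described in the
# changelog; `last_clock_correction_mjd` is an assumed method name, not shown
# in this diff.
from pint.observatory import Observatory

gbt = Observatory.get("gbt")
print(f"GBT clock corrections end at MJD {gbt.last_clock_correction_mjd()}")
```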
6 changes: 4 additions & 2 deletions check_ephemeris_connection.py
@@ -1,6 +1,8 @@
import urllib.request
import pint.solar_system_ephemerides

#pint.solar_system_ephemerides.load_kernel("de440")

urllib.request.urlopen("https://data.nanograv.org/static/data/ephem/de440.bsp").read()
urllib.request.urlopen("https://data.nanograv.org/static/data/ephem/de405.bsp").read()

for e in ["de440", "de405", "de421", "de430", "de430t", "de434", "de436t", "de436", "de436t"]:
pint.solar_system_ephemerides.load_kernel(e)
20 changes: 9 additions & 11 deletions src/pint/fitter.py
@@ -55,7 +55,7 @@
To automatically select a fitter based on the properties of the data and model::
>>> fitter = Fitter.auto(toas, model)
"""
import collections
import copy
@@ -1336,7 +1336,7 @@ def step(self):
toas=self.fitter.toas, incfrozen=False, incoffset=True
)
# Get residuals and TOA uncertainties in seconds
Nvec = self.fitter.toas.get_errors().to(u.s).value
Nvec = self.model.scaled_toa_uncertainty(self.fitter.toas).to(u.s).value
scaled_resids = self.resids.time_resids.to(u.s).value / Nvec

# "Whiten" design matrix and residuals by dividing by uncertainties
@@ -1350,7 +1350,6 @@ def step(self):
# fast converge fitting.
# M[:,1:] -= M[:,1:].mean(axis=0)
fac = np.sqrt((M**2).mean(axis=0))
# fac[0] = 1.0
fac[fac == 0] = 1.0
M /= fac
# Singular value decomp of design matrix:
@@ -1371,6 +1370,7 @@ def step(self):
# threshold = np.finfo(float).eps * max(M.shape)
threshold = 1e-14 * max(M.shape)

log.trace(f"Singular values for fit are {s}")
bad = np.where(s <= threshold * s[0])[0]
s[bad] = np.inf
for c in bad:
@@ -1523,6 +1523,7 @@ def step(self):

else:
phiinv /= norm**2
# Why are we scaling residuals by the *square* of the uncertainty?
Nvec = (
self.model.scaled_toa_uncertainty(self.fitter.toas).to(u.s).value ** 2
)
@@ -1708,7 +1709,8 @@ def M_params_units_norm(self):

# normalize the design matrix
norm = np.sqrt(np.sum(M**2, axis=0))
ntmpar = len(self.model.free_params)
# The fixed offset is an unlisted parameter
ntmpar = len(self.model.free_params) + 1
if M.shape[1] > ntmpar:
norm[ntmpar:] = 1
for c in np.where(norm == 0)[0]:
@@ -1770,14 +1772,10 @@ def mtcm_mtcy_mtcmplain(self):
[
self.model.scaled_toa_uncertainty(self.fitter.toas).to_value(
u.s
)
if hasattr(self.model, "scaled_toa_uncertainty")
else self.resids.toa.get_errors().to_value(u.s),
),
self.model.scaled_dm_uncertainty(self.fitter.toas).to_value(
u.pc / u.cm**3
)
if hasattr(self.model, "scaled_dm_uncertainty")
else self.resids.dm.get_dm_errors().to_value(u.pc / u.cm**3),
),
]
)
** 2
@@ -2046,7 +2044,7 @@ def fit_toas(self, maxiter=1, threshold=None, debug=False):
# Get residuals and TOA uncertainties in seconds
self.update_resids()
residuals = self.resids.time_resids.to(u.s).value
Nvec = self.toas.get_errors().to(u.s).value
Nvec = self.model.scaled_toa_uncertainty(self.toas).to(u.s).value

# "Whiten" design matrix and residuals by dividing by uncertainties
M = M / Nvec.reshape((-1, 1))
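The heart of the EFAC/EQUAD fix above is that the WLS step now whitens the design matrix and residuals with the model's scaled TOA uncertainties (`scaled_toa_uncertainty`) rather than the raw TOA errors. The following is a minimal standalone sketch of that whitened, column-normalized SVD step, not PINT's actual implementation; `sigma` is assumed to already include the EFAC/EQUAD scaling.

```python
import numpy as np


def wls_step(M, resids, sigma, threshold=1e-14):
    """One whitened WLS step in the spirit of the code above (a sketch only).

    M      -- (n_toa, n_param) design matrix
    resids -- (n_toa,) time residuals in seconds
    sigma  -- (n_toa,) scaled TOA uncertainties in seconds (EFAC/EQUAD applied)
    """
    # Whiten the design matrix and residuals by the scaled uncertainties
    Mw = M / sigma[:, None]
    rw = resids / sigma
    # Normalize each column to improve the conditioning of the SVD
    fac = np.sqrt((Mw**2).mean(axis=0))
    fac[fac == 0] = 1.0
    Mw = Mw / fac
    # SVD solve, discarding singular values below the threshold
    U, s, Vt = np.linalg.svd(Mw, full_matrices=False)
    s_inv = np.where(s <= threshold * max(M.shape) * s[0], 0.0, 1.0 / s)
    # Undo the column normalization to recover the parameter step
    return (Vt.T @ (s_inv * (U.T @ rw))) / fac
```

With the raw errors in `Nvec`, EFAC/EQUAD had no effect on this weighting, which is the bug (#1226) noted in the changelog.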
7 changes: 5 additions & 2 deletions src/pint/logging.py
@@ -90,15 +90,18 @@ def warn(message, *args, **kwargs):
action, msg, cat, mod, ln = filter
if (
(msg is not None)
and (msg.match(message) and len(args) == 0)
and (msg.match(str(message)) and len(args) == 0)
and action == "ignore"
):
return
if (
(cat is not None)
and (
(len(args) > 0 and isinstance(args[0], type))
and ((msg is None or msg.match(message)) and issubclass(args[0], cat))
and (
(msg is None or msg.match(str(message)))
and issubclass(args[0], cat)
)
)
and action == "ignore"
):
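The `str(message)` coercion above matters because `warnings.warn` accepts either a string or a `Warning` instance, while the compiled filter patterns only match strings. A small standalone illustration (not PINT code):

```python
import re

pattern = re.compile("deprecated")

plain = "this feature is deprecated"
wrapped = DeprecationWarning("this feature is deprecated")

# pattern.match(wrapped) would raise TypeError ("expected string or bytes-like
# object"), which is why the filter code coerces the message with str() first.
print(pattern.match(str(plain)) is not None)    # True
print(pattern.match(str(wrapped)) is not None)  # True
```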
18 changes: 10 additions & 8 deletions src/pint/solar_system_ephemerides.py
@@ -1,18 +1,16 @@
"""Solar system ephemeris downloading and setting support."""
import os
from urllib.parse import urljoin

from astropy.utils.data import download_file
import astropy.coordinates
import astropy.units as u
import numpy as np
from astropy.utils.data import download_file
from urllib.parse import urljoin
from loguru import logger as log

import pint.config
from pint.utils import PosVel


__all__ = ["objPosVel_wrt_SSB", "get_tdb_tt_ephem_geocenter"]

ephemeris_mirrors = [
@@ -22,6 +20,8 @@
"https://data.nanograv.org/static/data/ephem/",
"ftp://ssd.jpl.nasa.gov/pub/eph/planets/bsp/",
"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/",
# DE440 is here, officially
"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/",
]

jpl_obj_code = {
@@ -51,6 +51,7 @@ def _load_kernel_link(ephem, link=None):
astropy.coordinates.solar_system_ephemeris.set(
download_file(mirrors[0], cache=True, sources=mirrors)
)
log.info(f"Set solar system ephemeris to {ephem} from download")


def _load_kernel_local(ephem, path):
@@ -121,17 +122,18 @@ def load_kernel(ephem, path=None, link=None):
return
except OSError:
log.info(
"Failed to load local solar system ephemeris kernel {}, falling back on astropy".format(
path
)
f"Failed to load local solar system ephemeris kernel {path}, falling back on astropy"
)
# Links are just suggestions, try just plain loading
# Astropy may download something here, not from nanograv
try:
astropy.coordinates.solar_system_ephemeris.set(ephem)
log.info("Set solar system ephemeris to {}".format(ephem))
log.info(f"Set solar system ephemeris to {ephem} through astropy")
return
except ValueError:
except (ValueError, OSError):
# Just means it wasn't a standard astropy ephemeris
# or astropy can't access it (because astropy doesn't know about
# the nanograv mirrors)
pass
# If this raises an exception our last hope is gone so let it propagate
_load_kernel_link(ephem, link=link)
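For context, `load_kernel` is what `check_ephemeris_connection.py` above exercises: it tries an explicit local path first, then astropy's own ephemeris handling, and finally the mirror list, which now includes the official NAIF location for DE440. A minimal usage sketch:

```python
import pint.solar_system_ephemerides

# Falls back from a local path (if given) to astropy, then to the mirrors above;
# catching (ValueError, OSError) lets the mirror download act as the last resort.
pint.solar_system_ephemerides.load_kernel("de440")
```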
111 changes: 111 additions & 0 deletions tests/test_fitter_compare.py
@@ -2,6 +2,9 @@
import os
import unittest
from os.path import join
from io import StringIO
import copy
import numpy as np

import astropy.units as u
import pytest
@@ -87,3 +90,111 @@ def test_compare_downhill_wb(full_cov, wb):
pass

assert abs(wb.resids.chi2 - dwb.resids.chi2) < 0.01


@pytest.fixture
def m_t():
model = get_model(
StringIO(
"""
PSR J1234+5678
ELAT 0
ELONG 0
F0 1 1
F1 0 1
DM 10
PEPOCH 56000
EFAC mjd 55000 56000 1
EFAC mjd 56000 57000 2
"""
)
)
toas = make_fake_toas_uniform(
55000, 57000, 20, model=model, add_noise=True, dm=10 * u.pc / u.cm**3
)
return model, toas


@pytest.mark.parametrize(
"fitter",
[
WLSFitter,
DownhillWLSFitter,
GLSFitter,
DownhillGLSFitter,
WidebandTOAFitter,
WidebandDownhillFitter,
],
)
def test_step_different_with_efacs(fitter, m_t):
m, t = m_t
f = fitter(t, m)
try:
f.fit_toas(maxiter=1)
except MaxiterReached:
pass
m2 = copy.deepcopy(m)
m2.EFAC1.value = 1
m2.EFAC2.value = 1
f2 = fitter(t, m2)
try:
f2.fit_toas(maxiter=1)
except MaxiterReached:
pass
for p in m.free_params:
assert getattr(f.model, p).value != getattr(f2.model, p).value


@pytest.mark.parametrize(
"fitter",
[
GLSFitter,
DownhillGLSFitter,
WidebandTOAFitter,
WidebandDownhillFitter,
],
)
def test_step_different_with_efacs_full_cov(fitter, m_t):
m, t = m_t
f = fitter(t, m)
try:
f.fit_toas(maxiter=1, full_cov=True)
except MaxiterReached:
pass
m2 = copy.deepcopy(m)
m2.EFAC1.value = 1
m2.EFAC2.value = 1
f2 = fitter(t, m2)
try:
f2.fit_toas(maxiter=1, full_cov=True)
except MaxiterReached:
pass
for p in m.free_params:
assert getattr(f.model, p).value != getattr(f2.model, p).value


@pytest.mark.parametrize(
"fitter1, fitter2",
[
(WLSFitter, DownhillWLSFitter),
(GLSFitter, DownhillGLSFitter),
(WidebandTOAFitter, WidebandDownhillFitter),
],
)
def test_downhill_same_step(fitter1, fitter2, m_t):
m, t = m_t
f1 = fitter1(t, m)
f2 = fitter2(t, m)
try:
f1.fit_toas(maxiter=1)
except MaxiterReached:
pass
try:
f2.fit_toas(maxiter=1)
except MaxiterReached:
pass
for p in m.free_params:
assert np.isclose(
getattr(f1.model, p).value - getattr(f1.model_init, p).value,
getattr(f2.model, p).value - getattr(f2.model_init, p).value,
)
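The `m_t` fixture gives the second half of the fake TOAs an EFAC of 2, so a fitter that honors the scaled uncertainties must down-weight those TOAs and arrive at different parameter values than a fit with both EFACs set to 1, which is what the tests above assert. A toy weighted-mean example of the same effect (illustrative only, not PINT code):

```python
import numpy as np

# Two data blocks with identical values but different EFACs: doubling the
# uncertainty of the second block shifts the weighted estimate.
y = np.array([1.0, 1.0, 3.0, 3.0])
sigma_flat = np.ones(4)                      # all EFACs = 1
sigma_efac = np.array([1.0, 1.0, 2.0, 2.0])  # EFAC = 2 on the second block


def wmean(y, sigma):
    w = 1.0 / sigma**2
    return (w * y).sum() / w.sum()


print(wmean(y, sigma_flat))  # 2.0
print(wmean(y, sigma_efac))  # 1.4
```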
