diff --git a/docs/development/index.rst b/docs/development/index.rst index 92e548a1a6c..159070b2b4a 100644 --- a/docs/development/index.rst +++ b/docs/development/index.rst @@ -58,6 +58,21 @@ Here's some issues where this was discussed: * https://github.com/radio-astro-tools/spectral-cube/issues/110 * https://github.com/astropy/astropy/pull/2855#issuecomment-52610106 + +.. _development-result_object: + +Functions returning several values +---------------------------------- + +Functions that return more than a single value should not return a list +or dictionary of values, but rather a Python Bunch result object. A Bunch +is similar to a dict, except that it also allows attribute access to the result +values. This is the same approach as e.g. `~scipy.optimize.OptimizeResult`. +An example of how Bunches are used in gammapy is given by the `~gammapy.detect.TSMapResult` +class. + + + .. _development-python2and3: Python 2 and 3 support diff --git a/gammapy/detect/__init__.py b/gammapy/detect/__init__.py index 37c8d1f541b..89c1a786d45 100644 --- a/gammapy/detect/__init__.py +++ b/gammapy/detect/__init__.py @@ -5,4 +5,4 @@ from .iterfind import * from .matched_filter import * from .sex import * -from .test_statistic import * +from .test_statistics import * diff --git a/gammapy/detect/test_statistic.py b/gammapy/detect/test_statistic.py deleted file mode 100644 index 795969bb053..00000000000 --- a/gammapy/detect/test_statistic.py +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed under a 3-clause BSD style license - see LICENSE.rst -"""Functions to compute TS maps - -This is in the exploratory phase, we are trying to get a fast tool for a large map. -Here we compare different ways to split the map into parts and different optimizers. - -Reference : Stewart (2009) "Maximum-likelihood detection of sources among Poissonian noise" - Appendix A: Cash amplitude fitting - http://adsabs.harvard.edu/abs/2009A%26A...495..989S - -TODO: - -- try different optimizers -- give good fit start values -- PSF-convolved Gauss-source kernels -- Use multiple CPUs with multiprocessing. -- check that Li & Ma significance maps match sqrt(ts) maps for kernel with weights 0 / 1 only -- implement On / Off and On Likelihood fitting -- implement optimized linear filter from Stewart paper -- implement down-sampling for large kernels or generally for speed -- implement possibility to only compute part of the TS image -- understand negative amplitudes!??? -- speed profiling: - - - expect speed constant with image size - - expect speed inversely proportional to number of pixels in the kernel - - expect speedup proportional to number of cores - -- accuracy profiling: - - - want accuracy of TS = 0.1 for all regimes; no need to waste cycles on higher accuracy - - don't care about accuracy for TS < 1 - -- check distribution against expected chi2(ndf) distribution -- HGPS survey sensitiviy calculation (maybe needs cluster computing?) -""" -from __future__ import print_function, division -import numpy as np -from .. import stats -from ..image import process_image_pixels - -__all__ = ['ts_image', - 'TSMapCalculator', - ] - - -def fit_amplitude(counts, background, kernel, start_value): - """Fit amplitude. - - TODO: document.
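To make the result-object convention from the docs section above concrete, here is a minimal sketch of how a Bunch-based result behaves. The class body mirrors `gammapy/extern/bunch.py` introduced later in this diff (itself modeled on `scipy.optimize.OptimizeResult`); the `fit_result` values are made up for illustration.

```python
# Minimal sketch of the Bunch result-object pattern recommended above.
# The class mirrors gammapy/extern/bunch.py; the example values are hypothetical.

class Bunch(dict):
    """Dictionary with attribute access for result objects."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__


fit_result = Bunch(ts=25.3, amplitude=1.2e-12, niter=7)
print(fit_result.ts)            # attribute access: 25.3
print(fit_result['amplitude'])  # plain dict access still works
fit_result.converged = True     # attribute assignment adds a new key
```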
- """ - out = dict() - - def stat(amplitude): - return stats.cash(counts, background + amplitude * kernel) - - from iminuit import Minuit - minuit = Minuit(stat, pedantic=False, print_level=0, - amplitude=start_value) - minuit.migrad() - # import IPython; IPython.embed(); 1/0 - out['amplitude'] = minuit.values['amplitude'] - out['ncalls'] = minuit.ncalls - return out - - -def ts_center(images, kernel): - """Compute TS for one position. - - The shapes of the images and the kernel must match. - - TODO: document - """ - counts = np.asanyarray(images['counts']) - background = np.asanyarray(images['background']) - kernel = kernel / kernel.sum() - - assert counts.shape == kernel.shape - assert background.shape == kernel.shape - - C0 = stats.cash(counts, background) - out = fit_amplitude(counts, background, kernel) - C1 = stats.cash(counts, background + out['amplitude'] * kernel) - # Cash is a negative log likelihood statistic, - # thus the minus in the TS formula here - out['ts'] = - 2 * (C1 - C0) - return out - - -def ts_image(images, kernel, extra_info=False): - """Compute TS image. - - TODO: document - """ - out = dict() - out['ts'] = np.zeros_like(images['counts'], dtype='float64') - out['ncalls'] = np.zeros_like(images['counts'], dtype='uint16') - process_image_pixels(images, kernel, out, ts_center) - if extra_info: - return out - else: - return out['ts'] - - -class TSMapCalculator(object): - """TS Map calculator class.""" - - def __init__(self, images, kernel, optimizer='migrad', guess_method='estimate'): - self.images = images - - # Check kernel shape - k0, k1 = kernel.shape - if (k0 % 2 == 0) or (k1 % 2 == 0): - raise ValueError('Kernel shape must have odd dimensions') - self.kernel = kernel - - self.optimizer = optimizer - self.guess_method = guess_method - self.out_shape = self.images['counts'].shape - - def run(self): - out = dict() - out['ts'] = np.zeros_like(self.out_shape, dtype='float64') - out['ncalls'] = np.zeros_like(self.out_shape, dtype='uint16') - - """ - # TODO: finish implementation - method = ts_method() - process_image_full(images, kernel, out, compute_ts) - self.out = out - """ diff --git a/gammapy/detect/test_statistics.py b/gammapy/detect/test_statistics.py new file mode 100644 index 00000000000..5be5d426fa5 --- /dev/null +++ b/gammapy/detect/test_statistics.py @@ -0,0 +1,386 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +"""Functions to compute TS maps + +This is in the exploratory phase; we are trying to get a fast tool for a large map. +Here we compare different ways to split the map into parts and different optimizers. + +Reference : Stewart (2009) "Maximum-likelihood detection of sources among Poissonian noise" + Appendix A: Cash amplitude fitting + http://adsabs.harvard.edu/abs/2009A%26A...495..989S +""" +from __future__ import print_function, division +import logging +import warnings +from itertools import product +from functools import partial +from multiprocessing import Pool, cpu_count + + +import numpy as np +from astropy.convolution import Tophat2DKernel +from astropy.io.fits import ImageHDU + + +from ..stats import cash +from ..image import measure_containment_radius +from ..extern.zeros import newton +from ..extern.bunch import Bunch + +__all__ = ['compute_ts_map', 'TSMapResult'] + + +FLUX_FACTOR = 1E-12 +MAXNITER = 20 +CONTAINMENT = 0.8 + + +class TSMapResult(Bunch): + """ + Represents the TS map computation result. + + Attributes + ---------- + ts : ndarray + Estimated TS map.
+ amplitude : ndarray + Estimated best fit flux amplitude map. + niter : ndarray + Number of iterations per pixel. + runtime : float + Time needed to compute TS map. + """ + + +def f_cash_root(x, counts, background, model): + """ + Function to find root of. Described in Appendix A, Stewart (2009). + + Parameters + ---------- + x : float + Model amplitude. + counts : array + Count map slice, where model is defined. + background : array + Background map slice, where model is defined. + model : array + Source template (multiplied with exposure). + """ + return (model - (counts * model) / (background + x * FLUX_FACTOR * model)).sum() + + +def f_cash(x, counts, background, model): + """ + Cash fit statistics wrapper for TS map computation. + + Parameters + ---------- + x : float + Model amplitude. + counts : array + Count map slice, where model is defined. + background : array + Background map slice, where model is defined. + model : array + Source template (multiplied with exposure). + """ + with np.errstate(invalid='ignore', divide='ignore'): + return cash(counts, background + x * FLUX_FACTOR * model).sum() + + +def compute_ts_map(counts, background, exposure, kernel, flux=None, + method='root', optimizer='Brent', parallel=True, + threshold=None, debug=False): + """ + Compute TS map using different methods. + + Parameters + ---------- + counts : array + Count map + background : array + Background map + exposure : array + Exposure map + kernel : `astropy.convolution.Kernel2D` + Source model kernel. + flux : array (None) + Flux map used as a starting value for the amplitude fit. + method : str ('root') + The following options are available: + * ``'root'`` (default) + Fit amplitude by finding roots of the derivative of + the fit statistic. Described in Appendix A in Stewart (2009). + * ``'fit scipy'`` + Use `scipy.optimize.minimize_scalar` for fitting. + * ``'fit minuit'`` + Use minuit for fitting. + optimizer : str ('Brent') + Which optimizing algorithm to use from scipy. See + `scipy.optimize.minimize_scalar` for options. + parallel : bool (True) + Whether to use multiple cores for parallel processing. + threshold : float (None) + If the TS value corresponding to the initial flux estimate is not above + this threshold, the optimizing step is omitted to save computing time. + debug : bool (False) + Run function in debug mode, which additionally returns the fitted flux + and number of iterations maps (see section `Returns`). + + Returns + ------- + TS : `TSMapResult` + `TSMapResult` object.
+ """ + from scipy.ndimage.morphology import binary_erosion + from time import time + t_0 = time() + + assert counts.shape == background.shape + assert counts.shape == exposure.shape + + if flux is None: + from scipy.ndimage import convolve + radius = _flux_correlation_radius(kernel) + tophat = Tophat2DKernel(radius, mode='oversample') * np.pi * radius ** 2 + logging.info('Using correlation radius of {0:.1f} pix to estimate initial flux.'.format(radius)) + flux = (counts - background) / exposure / FLUX_FACTOR + flux = convolve(flux, tophat.array) / CONTAINMENT + else: + assert counts.shape == flux.shape + + TS = np.zeros(counts.shape) + + x_min, x_max = kernel.shape[1] // 2, counts.shape[1] - kernel.shape[1] // 2 + y_min, y_max = kernel.shape[0] // 2, counts.shape[0] - kernel.shape[0] // 2 + positions = product(range(x_min, x_max), range(y_min, y_max)) + + # Positions where exposure == 0 or flux <= 0 are not processed + mask = binary_erosion(exposure > 0, np.ones(kernel.shape)) + positions = [(i, j) for i, j in positions if mask[j][i] and flux[j][i] > 0] + wrap = partial(_ts_value, counts=counts, exposure=exposure, + background=background, kernel=kernel, flux=flux, + method=method, optimizer=optimizer, threshold=threshold, + debug=debug) + + if parallel: + logging.info('Using {0} cores to compute TS map.'.format(cpu_count())) + pool = Pool() + results = pool.map(wrap, positions) + pool.close() + pool.join() + else: + results = list(map(wrap, positions)) + + # Set TS values at given positions + i, j = zip(*positions) + if debug: + amplitudes = np.zeros(TS.shape) + niter = np.zeros(TS.shape) + TS[j, i] = [_[0] for _ in results] + amplitudes[j, i] = [_[1] for _ in results] + niter[j, i] = [_[2] for _ in results] + return TSMapResult(ts=TS, amplitude=amplitudes * FLUX_FACTOR, + niter=niter, runtime=time() - t_0) + else: + TS[j, i] = results + return TSMapResult(ts=TS) + + +def _ts_value(position, counts, exposure, background, kernel, flux, + method, optimizer, threshold, debug): + """ + Compute TS value at a given pixel position i, j using the approach described + in Stewart (2009). + + Parameters + ---------- + position : tuple (i, j) + Pixel position. + counts : array + Count map. + background : array + Background map. + exposure : array + Exposure map. + kernel : astropy.convolution.core.Kernel2D + Source model kernel. + flux : array + Flux map. The flux value at the given pixel position is used as + starting value for the minimization. + + Returns + ------- + TS : float + TS value at the given pixel position.
+ """ + from imageutils import extract_array_2d + + # Get data slices + counts_slice = extract_array_2d(counts, kernel.shape, position).astype(float) + background_slice = extract_array_2d(background, kernel.shape, position).astype(float) + exposure_slice = extract_array_2d(exposure, kernel.shape, position).astype(float) + model = (exposure_slice * kernel._array).astype(float) + + # Compute null hypothesis statistics + flux_value = flux[position[1]][position[0]] + + with np.errstate(invalid='ignore', divide='ignore'): + C_0 = cash(counts_slice, background_slice).sum() + with np.errstate(invalid='ignore', divide='ignore'): + C_1 = cash(counts_slice, background_slice + flux_value * FLUX_FACTOR * model).sum() + + # Don't fit if the pixel significance is low + TS = C_0 - C_1 + if threshold is not None and TS < threshold: + if debug: + return TS, flux_value, 0 + else: + return TS + else: + if method == 'fit minuit': + amplitude, niter = _fit_amplitude_minuit(counts_slice, background_slice, + model, flux_value) + elif method == 'fit scipy': + amplitude, niter = _fit_amplitude_scipy(counts_slice, background_slice, + model, flux_value) + elif method == 'root': + amplitude, niter = _root_amplitude(counts_slice, background_slice, + model, flux_value) + if niter > MAXNITER: + logging.warn('Exceeded maximum number of function evaluations!') + if debug: + return np.nan, amplitude, niter + else: + return np.nan + + with np.errstate(invalid='ignore', divide='ignore'): + C_1 = cash(counts_slice, background_slice + amplitude * FLUX_FACTOR * model).sum() + + # Compute and return TS value + if debug: + return C_0 - C_1, amplitude, niter + else: + return C_0 - C_1 + + +def _root_amplitude(counts, background, model, flux): + """ + Fit amplitude by finding roots. See Appendix A, Stewart (2009). + + Parameters + ---------- + counts : array + Slice of count map. + background : array + Slice of background map. + model : array + Model template to fit. + flux : float + Starting value for the fit. + + Returns + ------- + amplitude : float + Fitted flux amplitude. + niter : int + Number of function evaluations needed for the fit. + """ + args = (counts, background, model) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + return newton(f_cash_root, flux, args=args, maxiter=MAXNITER, tol=0.1) + except RuntimeError: + # Where the root finding fails, NaN is returned as the amplitude + return np.nan, MAXNITER + + +def _fit_amplitude_scipy(counts, background, model, flux, optimizer='Brent'): + """ + Fit amplitude using scipy.optimize. + + Parameters + ---------- + counts : array + Slice of count map. + background : array + Slice of background map. + model : array + Model template to fit. + flux : float + Starting value for the fit. + + Returns + ------- + amplitude : float + Fitted flux amplitude. + niter : int + Number of function evaluations needed for the fit. + """ + from scipy.optimize import minimize_scalar + args = (counts, background, model) + try: + bracket = (0, flux, 10 * flux) + result = minimize_scalar(f_cash, bracket=bracket, args=args, + method=optimizer, tol=10) + return result.x, result.nfev + except ValueError: + result = minimize_scalar(f_cash, args=args, method=optimizer, tol=0.1) + return result.x, result.nfev + + +def _fit_amplitude_minuit(counts, background, model, flux): + """ + Fit amplitude using minuit. + + Parameters + ---------- + counts : array + Slice of count map. + background : array + Slice of background map. + model : array + Model template to fit.
+ flux : float + Starting value for the fit. + + Returns + ------- + amplitude : float + Fitted flux amplitude. + niter : int + Number of function evaluations needed for the fit. + """ + from iminuit import Minuit + + def stat(x): + return f_cash(x, counts, background, model) + minuit = Minuit(stat, x=flux, pedantic=False, print_level=0) + minuit.migrad() + return minuit.values['x'], minuit.ncalls + + +def _flux_correlation_radius(kernel, containment=CONTAINMENT): + """ + Compute equivalent Tophat kernel radius for a given kernel instance and + containment fraction. + + Parameters + ---------- + kernel : `astropy.convolution.Kernel2D` + Kernel instance. + containment : float + Containment fraction. + + Returns + ------- + radius : float + Equivalent Tophat kernel radius. + """ + kernel_image = ImageHDU(kernel.array) + y, x = kernel.center + r_c = measure_containment_radius(kernel_image, x, y, containment) + # Containment radius of Tophat kernel is given by r_c_tophat = r_0 * sqrt(C) + # by setting r_c = r_c_tophat we can estimate the equivalent containment radius r_0 + return r_c / np.sqrt(containment) diff --git a/gammapy/detect/tests/test_test_statistic.py b/gammapy/detect/tests/test_test_statistic.py deleted file mode 100644 index b34044994bd..00000000000 --- a/gammapy/detect/tests/test_test_statistic.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under a 3-clause BSD style license - see LICENSE.rst -from __future__ import print_function, division -from time import time -import numpy as np -from astropy.tests.helper import pytest -from ...detect import ts_image, TSMapCalculator - - -def make_test_images(shape=(100, 200)): - np.random.seed(0) - background = 10. * np.ones(shape) - excess = 1 - images = dict() - images['background'] = background + excess - images['counts'] = np.random.poisson(images['background']) - return images - - -def make_test_kernel(shape=(11, 11)): - kernel = np.ones(shape=shape) - return kernel - - -@pytest.mark.xfail -def test_simple(): - """A simple test case""" - images = make_test_images(shape=(50, 70)) - kernel = make_test_kernel(shape=(11, 11)) - ts_results = ts_image(images, kernel) - images['ts'] = ts_results['ts'] - images['ncalls'] = ts_results['ncalls'] - print('mean ncalls: {0}'.format(images['ncalls'].mean())) - images['kernel'] = kernel - from astropy.io import fits - hdu_list = fits.HDUList() - for name, data in images.items(): - hdu = fits.ImageHDU(data=data, name=name) - hdu_list.append(hdu) - hdu_list.writeto('images.fits', clobber=True) - - -@pytest.mark.xfail -def test_optimizers(): - """Compare speed for a few different optimizers""" - optimizers = ['migrad', 'fmin'] - start_values = ['last', 'estimate'] - - images = make_test_images(shape=(30, 50)) - kernel = make_test_kernel(shape=(5, 5)) - - for optimizer in optimizers: - for start_value in start_values: - tsmc = TSMapCalculator(images, kernel, - optmizier=optimizer, - start_value=start_value) - tsmc.run() - tsmc.report() - - -@pytest.mark.xfail -def test_speed(): - image_sizes = [100, 200] - kernel_sizes = [1, 11, 21] - for image_size in image_sizes: - for kernel_size in kernel_sizes: - images = make_test_images(shape=(image_size, image_size)) - kernel = make_test_kernel(shape=(kernel_size, kernel_size)) - t = time() - ts = ts_image(images, kernel) - t = time() - t - # Compute speed = 1e3 image pixels per second (kpps) - kpix = 1e-3 * images.values()[0].size - speed = kpix / t - print('image: {image_size:5d}, kernel: {kernel_size:5d}, speed (kpps): 
{speed:10.1f}' - ''.format(**locals())) - -""" -Not what I expected: speed doesn't go down with number of pixels in the kernel -image: 100, kernel: 1, speed (kpps): 13.6 -image: 100, kernel: 11, speed (kpps): 10.2 -image: 100, kernel: 21, speed (kpps): 8.5 -image: 100, kernel: 31, speed (kpps): 6.8 -image: 200, kernel: 1, speed (kpps): 13.7 -image: 200, kernel: 11, speed (kpps): 10.2 -image: 200, kernel: 21, speed (kpps): 8.3 -image: 200, kernel: 31, speed (kpps): 6.5 -image: 300, kernel: 1, speed (kpps): 13.7 -image: 300, kernel: 11, speed (kpps): 10.2 -image: 300, kernel: 21, speed (kpps): 8.2 -image: 300, kernel: 31, speed (kpps): 6.5 -""" diff --git a/gammapy/detect/tests/test_test_statistics.py b/gammapy/detect/tests/test_test_statistics.py new file mode 100644 index 00000000000..a4c35293dd7 --- /dev/null +++ b/gammapy/detect/tests/test_test_statistics.py @@ -0,0 +1,38 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +from __future__ import print_function, division + +import numpy as np +from numpy.testing.utils import assert_allclose + +from astropy.tests.helper import pytest +from astropy.convolution import Gaussian2DKernel + + +from ...detect import compute_ts_map +from ...datasets import load_poisson_stats_image +from ...image.utils import upsample_2N, downsample_2N + +try: + import scipy + HAS_SCIPY = True +except ImportError: + HAS_SCIPY = False + + +@pytest.mark.skipif('not HAS_SCIPY') +def test_compute_ts_map(): + """Minimal test of compute_ts_map""" + data = load_poisson_stats_image(extra_info=True) + kernel = Gaussian2DKernel(2.5) + data['exposure'] = np.ones(data['counts'].shape) * 1E12 + for _, func in zip(['counts', 'background', 'exposure'], [np.nansum, np.nansum, np.mean]): + data[_] = downsample_2N(data[_], 2, func) + + result = compute_ts_map(data['counts'], data['background'], data['exposure'], + kernel, debug=True) + for name, order in zip(['ts', 'amplitude', 'niter'], [2, 5, 0]): + result[name] = upsample_2N(result[name], 2, order=order) + assert_allclose([[99], [99]], np.where(result.ts == result.ts.max())) + assert_allclose(1705.840212274973, result.ts[99, 99]) + assert_allclose(3, result.niter[99, 99]) + assert_allclose(1.0227934338735763e-09, result.amplitude[99, 99]) diff --git a/gammapy/extern/bunch.py b/gammapy/extern/bunch.py new file mode 100644 index 00000000000..0e88a92fde5 --- /dev/null +++ b/gammapy/extern/bunch.py @@ -0,0 +1,28 @@ +# Licensed under a 3-clause BSD style license - see LICENSE.rst +# Implementation is taken from scipy.optimize.OptimizeResult and renamed to 'Bunch' + +__all__ = ['Bunch'] + + +class Bunch(dict): + """ + Dictionary with attribute access for result objects. + + An example is `~gammapy.detect.TSMapResult`. + """ + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + def __repr__(self): + if self.keys(): + m = max(map(len, list(self.keys()))) + 1 + return '\n'.join([k.rjust(m) + ': ' + repr(v) + for k, v in self.items()]) + else: + return self.__class__.__name__ + "()" diff --git a/gammapy/extern/zeros.py b/gammapy/extern/zeros.py new file mode 100644 index 00000000000..e7e6f561d49 --- /dev/null +++ b/gammapy/extern/zeros.py @@ -0,0 +1,515 @@ +# This code is taken from scipy.optimize. The `newton` function is modified to +# return the root as well as the number of iterations + + +from __future__ import division, print_function, absolute_import + +import warnings + +# from .
import _zeros +from numpy import finfo, sign, sqrt + +_iter = 100 +_xtol = 1e-12 +_rtol = finfo(float).eps * 2 + +__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth'] + +CONVERGED = 'converged' +SIGNERR = 'sign error' +CONVERR = 'convergence error' +flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR} + + +class RootResults(object): + """ Represents the root finding result. + Attributes + ---------- + root : float + Estimated root location. + iterations : int + Number of iterations needed to find the root. + function_calls : int + Number of times the function was called. + converged : bool + True if the routine converged. + flag : str + Description of the cause of termination. + """ + def __init__(self, root, iterations, function_calls, flag): + self.root = root + self.iterations = iterations + self.function_calls = function_calls + self.converged = flag == 0 + try: + self.flag = flag_map[flag] + except KeyError: + self.flag = 'unknown error %d' % (flag,) + + +def results_c(full_output, r): + if full_output: + x, funcalls, iterations, flag = r + results = RootResults(root=x, + iterations=iterations, + function_calls=funcalls, + flag=flag) + return x, results + else: + return r + + +# Newton-Raphson method +def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50, + fprime2=None): + """ + Find a zero using the Newton-Raphson or secant method. + + Find a zero of the function `func` given a nearby starting point `x0`. + The Newton-Raphson method is used if the derivative `fprime` of `func` + is provided, otherwise the secant method is used. If the second order + derivate `fprime2` of `func` is provided, parabolic Halley's method + is used. + + Parameters + ---------- + func : function + The function whose zero is wanted. It must be a function of a + single variable of the form f(x,a,b,c...), where a,b,c... are extra + arguments that can be passed in the `args` parameter. + x0 : float + An initial estimate of the zero that should be somewhere near the + actual zero. + fprime : function, optional + The derivative of the function when available and convenient. If it + is None (default), then the secant method is used. + args : tuple, optional + Extra arguments to be used in the function call. + tol : float, optional + The allowable error of the zero value. + maxiter : int, optional + Maximum number of iterations. + fprime2 : function, optional + The second order derivative of the function when available and + convenient. If it is None (default), then the normal Newton-Raphson + or the secant method is used. If it is given, parabolic Halley's + method is used. + + Returns + ------- + zero : float + Estimated location where function is zero. + + See Also + -------- + brentq, brenth, ridder, bisect + fsolve : find zeroes in n dimensions. + + Notes + ----- + The convergence rate of the Newton-Raphson method is quadratic, + the Halley method is cubic, and the secant method is + sub-quadratic. This means that if the function is well behaved + the actual error in the estimated zero is approximately the square + (cube for Halley) of the requested tolerance up to roundoff + error. However, the stopping criterion used here is the step size + and there is no guarantee that a zero has been found. Consequently + the result should be verified. Safer algorithms are brentq, + brenth, ridder, and bisect, but they all require that the root + first be bracketed in an interval where the function changes + sign. 
The brentq algorithm is recommended for general use in one + dimensional problems when such an interval has been found. + + """ + if tol <= 0: + raise ValueError("tol too small (%g <= 0)" % tol) + if fprime is not None: + # Newton-Rapheson method + # Multiply by 1.0 to convert to floating point. We don't use float(x0) + # so it still works if x0 is complex. + p0 = 1.0 * x0 + fder2 = 0 + for iter in range(maxiter): + myargs = (p0,) + args + fder = fprime(*myargs) + if fder == 0: + msg = "derivative was zero." + warnings.warn(msg, RuntimeWarning) + return p0, iter + fval = func(*myargs) + if fprime2 is not None: + fder2 = fprime2(*myargs) + if fder2 == 0: + # Newton step + p = p0 - fval / fder + else: + # Parabolic Halley's method + discr = fder ** 2 - 2 * fval * fder2 + if discr < 0: + p = p0 - fder / fder2 + else: + p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr)) + if abs(p - p0) < tol: + return p, iter + p0 = p + else: + # Secant method + p0 = x0 + if x0 >= 0: + p1 = x0*(1 + 1e-4) + 1e-4 + else: + p1 = x0*(1 + 1e-4) - 1e-4 + q0 = func(*((p0,) + args)) + q1 = func(*((p1,) + args)) + for iter in range(maxiter): + if q1 == q0: + if p1 != p0: + msg = "Tolerance of %s reached" % (p1 - p0) + warnings.warn(msg, RuntimeWarning) + return (p1 + p0)/2.0, iter + else: + p = p1 - q1*(p1 - p0)/(q1 - q0) + if abs(p - p1) < tol: + return p, iter + p0 = p1 + q0 = q1 + p1 = p + q1 = func(*((p1,) + args)) + msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p) + raise RuntimeError(msg) + + +def bisect(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find root of a function within an interval. + + Basic bisection routine to find a zero of the function `f` between the + arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs. + Slow but sure. + + Parameters + ---------- + f : function + Python function returning a number. `f` must be continuous, and + f(a) and f(b) must have opposite signs. + a : number + One end of the bracketing interval [a,b]. + b : number + The other end of the bracketing interval [a,b]. + xtol : number, optional + The routine converges when a root is known to lie within `xtol` of the + value return. Should be >= 0. The routine modifies this to take into + account the relative precision of doubles. + rtol : number, optional + The routine converges when a root is known to lie within `rtol` times + the value returned of the value returned. Should be >= 0. Defaults to + ``np.finfo(float).eps * 2``. + maxiter : number, optional + if convergence is not achieved in `maxiter` iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where x is the root, and r is + a `RootResults` object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : RootResults (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. 
+ + See Also + -------- + brentq, brenth, bisect, newton + fixed_point : scalar fixed-point finder + fsolve : n-dimensional root-finding + + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._bisect(f,a,b,xtol,rtol,maxiter,args,full_output,disp) + return results_c(full_output, r) + + +def ridder(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in an interval. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : number + One end of the bracketing interval [a,b]. + b : number + The other end of the bracketing interval [a,b]. + xtol : number, optional + The routine converges when a root is known to lie within xtol of the + value return. Should be >= 0. The routine modifies this to take into + account the relative precision of doubles. + rtol : number, optional + The routine converges when a root is known to lie within `rtol` times + the value returned of the value returned. Should be >= 0. Defaults to + ``np.finfo(float).eps * 2``. + maxiter : number, optional + if convergence is not achieved in maxiter iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a RootResults object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : RootResults (present if ``full_output = True``) + Object containing information about the convergence. + In particular, ``r.converged`` is True if the routine converged. + + See Also + -------- + brentq, brenth, bisect, newton : one-dimensional root-finding + fixed_point : scalar fixed-point finder + + Notes + ----- + Uses [Ridders1979]_ method to find a zero of the function `f` between the + arguments `a` and `b`. Ridders' method is faster than bisection, but not + generally as fast as the Brent rountines. [Ridders1979]_ provides the + classic description and source of the algorithm. A description can also be + found in any recent edition of Numerical Recipes. + + The routine used here diverges slightly from standard presentations in + order to be a bit more careful of tolerance. + + References + ---------- + .. [Ridders1979] + Ridders, C. F. J. "A New Algorithm for Computing a + Single Root of a Real Continuous Function." + IEEE Trans. Circuits Systems 26, 979-980, 1979. + + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._ridder(f,a,b,xtol,rtol,maxiter,args,full_output,disp) + return results_c(full_output, r) + + +def brentq(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """ + Find a root of a function in given interval. + + Return float, a zero of `f` between `a` and `b`. `f` must be a continuous + function, and [a,b] must be a sign changing interval. 
+ + Description: + Uses the classic Brent (1973) method to find a zero of the function `f` on + the sign changing interval [a , b]. Generally considered the best of the + rootfinding routines here. It is a safe version of the secant method that + uses inverse quadratic extrapolation. Brent's method combines root + bracketing, interval bisection, and inverse quadratic interpolation. It is + sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973) + claims convergence is guaranteed for functions computable within [a,b]. + + [Brent1973]_ provides the classic description of the algorithm. Another + description can be found in a recent edition of Numerical Recipes, including + [PressEtal1992]_. Another description is at + http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to + understand the algorithm just by reading our code. Our code diverges a bit + from standard presentations: we choose a different formula for the + extrapolation step. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : number + One end of the bracketing interval [a,b]. + b : number + The other end of the bracketing interval [a,b]. + xtol : number, optional + The routine converges when a root is known to lie within xtol of the + value return. Should be >= 0. The routine modifies this to take into + account the relative precision of doubles. + rtol : number, optional + The routine converges when a root is known to lie within `rtol` times + the value returned of the value returned. Should be >= 0. Defaults to + ``np.finfo(float).eps * 2``. + maxiter : number, optional + if convergence is not achieved in maxiter iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a RootResults object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : RootResults (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. + + See Also + -------- + multivariate local optimizers + `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg` + nonlinear least squares minimizer + `leastsq` + constrained multivariate optimizers + `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` + global optimizers + `anneal`, `basinhopping`, `brute`, `differential_evolution` + local scalar minimizers + `fminbound`, `brent`, `golden`, `bracket` + n-dimensional root-finding + `fsolve` + one-dimensional root-finding + `brentq`, `brenth`, `ridder`, `bisect`, `newton` + scalar fixed-point finder + `fixed_point` + + Notes + ----- + `f` must be continuous. f(a) and f(b) must have opposite signs. + + + References + ---------- + .. [Brent1973] + Brent, R. P., + *Algorithms for Minimization Without Derivatives*. + Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4. + + .. [PressEtal1992] + Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T. + *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed. + Cambridge, England: Cambridge University Press, pp. 352-355, 1992. + Section 9.3: "Van Wijngaarden-Dekker-Brent Method." 
+ + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._brentq(f,a,b,xtol,rtol,maxiter,args,full_output,disp) + return results_c(full_output, r) + + +def brenth(f, a, b, args=(), + xtol=_xtol, rtol=_rtol, maxiter=_iter, + full_output=False, disp=True): + """Find root of f in [a,b]. + + A variation on the classic Brent routine to find a zero of the function f + between the arguments a and b that uses hyperbolic extrapolation instead of + inverse quadratic extrapolation. There was a paper back in the 1980's ... + f(a) and f(b) cannot have the same signs. Generally on a par with the + brent routine, but not as heavily tested. It is a safe version of the + secant method that uses hyperbolic extrapolation. The version here is by + Chuck Harris. + + Parameters + ---------- + f : function + Python function returning a number. f must be continuous, and f(a) and + f(b) must have opposite signs. + a : number + One end of the bracketing interval [a,b]. + b : number + The other end of the bracketing interval [a,b]. + xtol : number, optional + The routine converges when a root is known to lie within xtol of the + value return. Should be >= 0. The routine modifies this to take into + account the relative precision of doubles. + rtol : number, optional + The routine converges when a root is known to lie within `rtol` times + the value returned of the value returned. Should be >= 0. Defaults to + ``np.finfo(float).eps * 2``. + maxiter : number, optional + if convergence is not achieved in maxiter iterations, an error is + raised. Must be >= 0. + args : tuple, optional + containing extra arguments for the function `f`. + `f` is called by ``apply(f, (x)+args)``. + full_output : bool, optional + If `full_output` is False, the root is returned. If `full_output` is + True, the return value is ``(x, r)``, where `x` is the root, and `r` is + a RootResults object. + disp : bool, optional + If True, raise RuntimeError if the algorithm didn't converge. + + Returns + ------- + x0 : float + Zero of `f` between `a` and `b`. + r : RootResults (present if ``full_output = True``) + Object containing information about the convergence. In particular, + ``r.converged`` is True if the routine converged. 
+ + See Also + -------- + fmin, fmin_powell, fmin_cg, + fmin_bfgs, fmin_ncg : multivariate local optimizers + + leastsq : nonlinear least squares minimizer + + fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers + + anneal, brute : global optimizers + + fminbound, brent, golden, bracket : local scalar minimizers + + fsolve : n-dimensional root-finding + + brentq, brenth, ridder, bisect, newton : one-dimensional root-finding + + fixed_point : scalar fixed-point finder + + """ + if not isinstance(args, tuple): + args = (args,) + if xtol <= 0: + raise ValueError("xtol too small (%g <= 0)" % xtol) + if rtol < _rtol: + raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol)) + r = _zeros._brenth(f,a, b, xtol, rtol, maxiter, args, full_output, disp) + return results_c(full_output, r) diff --git a/gammapy/image/tests/test_measure.py b/gammapy/image/tests/test_measure.py index 65ebef01619..57d6b9bf713 100644 --- a/gammapy/image/tests/test_measure.py +++ b/gammapy/image/tests/test_measure.py @@ -1,14 +1,22 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import print_function, division + import numpy as np from numpy.testing import assert_equal, assert_allclose, assert_almost_equal + from astropy.tests.helper import pytest +from astropy.io import fits +from astropy.modeling.models import Gaussian2D + from ...image import (measure_labeled_regions, make_empty_image, lookup, measure_containment_radius, measure_image_moments, measure_containment, - measure_curve_of_growth) + measure_curve_of_growth, + coordinates) + +BINSZ = 0.02 try: import scipy @@ -27,16 +35,7 @@ def generate_example_image(): return image -def generate_gaussian_image(): - """ - Generate some greyscale image to run the detection on. - """ - from astropy.io import fits - from astropy.modeling.models import Gaussian2D - from ...image import coordinates - - BINSZ = 0.02 - image = fits.ImageHDU(data=np.zeros((201, 201))) +def set_header(image): image.header['SIMPLE'] = 'T' image.header['BITPIX'] = -64 image.header['NAXIS'] = 2 @@ -52,6 +51,15 @@ def generate_gaussian_image(): image.header['CDELT2'] = BINSZ image.header['CUNIT1'] = 'deg' image.header['CUNIT2'] = 'deg' + return image + + +def generate_gaussian_image(): + """ + Generate some greyscale image to run the detection on. + """ + image = fits.ImageHDU(data=np.zeros((201, 201))) + image = set_header(image) GLON, GLAT = coordinates(image, lon_sym=True) sigma = 0.2 source = Gaussian2D(1. / (2 * np.pi * (sigma / BINSZ) ** 2), 0, 0, sigma, sigma) @@ -65,7 +73,6 @@ def test_measure(): labels = np.zeros_like(image, dtype=int) labels[10:20, 20:30] = 1 results = measure_labeled_regions(image, labels) - # TODO: check output! 
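Unlike `scipy.optimize.newton`, the vendored `newton` above returns a `(root, n_iterations)` tuple, which is what lets `_root_amplitude` fill the `niter` map. A small sketch of how the `'root'` method uses it, with a toy 5x5 counts/background/model; the data values are illustrative, not the gammapy test data:

```python
# Sketch: solve d(Cash)/dx = 0 with the vendored newton, which returns
# (root, niter) instead of just the root. The toy maps below are made up;
# in compute_ts_map the slices come from the input counts/background maps.
import numpy as np
from gammapy.extern.zeros import newton

FLUX_FACTOR = 1E-12

def f_cash_root(x, counts, background, model):
    # Derivative of the Cash statistic with respect to the amplitude x
    return (model - (counts * model) / (background + x * FLUX_FACTOR * model)).sum()

np.random.seed(0)
model = np.ones((5, 5))
background = 10. * np.ones((5, 5))
counts = np.random.poisson(background + 5. * model)  # true amplitude: 5 / FLUX_FACTOR

amplitude, niter = newton(f_cash_root, 5E12, args=(counts, background, model),
                          maxiter=20, tol=0.1)
print(amplitude * FLUX_FACTOR, niter)  # fitted flux close to 5E-12
```

Since no `fprime` is passed, this exercises the secant branch of `newton`, the same way `_root_amplitude` calls it.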
diff --git a/gammapy/image/utils.py b/gammapy/image/utils.py index 71ecbeffbb5..efffd19519e 100644 --- a/gammapy/image/utils.py +++ b/gammapy/image/utils.py @@ -16,26 +16,29 @@ 'binary_disk', 'binary_opening_circle', 'binary_ring', + 'block_reduce_hdu', 'contains', 'coordinates', 'cube_to_image', 'cube_to_spec', 'crop_image', + 'dict_to_hdulist', 'disk_correlate', + 'downsample_2N', 'exclusion_distance', 'image_groupby', 'images_to_cube', + 'lon_lat_rectangle_mask', 'make_empty_image', 'make_header', 'paste_cutout_into_image', 'process_image_pixels', - 'block_reduce_hdu', 'ring_correlate', 'separation', 'solid_angle', 'threshold', + 'upsample_2N', 'wcs_histogram2d', - 'lon_lat_rectangle_mask', ] @@ -143,6 +146,83 @@ def ring_correlate(image, r_in, r_out, mode='constant'): return convolve(image, structure, mode=mode) +def downsample_2N(image, factor, method=np.nansum, shape=None): + """ + Down sample image by a power of two. + + The image is down sampled using `skimage.measure.block_reduce`. Only + down sampling factors that are a power of two are allowed. The image is + padded to a given size using the 'reflect' method before the down sampling + is done. + + Parameters + ---------- + image : ndarray + Image to be down sampled. + factor : int + Down sampling factor, must be a power of two. + method : np.ufunc (np.nansum) + Function used to combine the image blocks. + shape : tuple (None) + If shape is specified, the image is padded prior to the down sampling + symmetrically in x and y direction to the given shape. + + Returns + ------- + image : ndarray + Down sampled image. + """ + from skimage.measure import block_reduce + if not np.log2(factor).is_integer(): + raise ValueError('Downsampling factor must be power of 2.') + factor = int(factor) + + if shape is not None: + x_pad = (shape[1] - image.shape[1]) // 2 + y_pad = (shape[0] - image.shape[0]) // 2 + image = np.pad(image, ((y_pad, y_pad), (x_pad, x_pad)), mode='reflect') + return block_reduce(image, (factor, factor), method) + + +def upsample_2N(image, factor, order=3, shape=None): + """ + Up sample image by a power of two. + + The image is up sampled using `scipy.ndimage.zoom`. Only + up sampling factors that are a power of two are allowed. The image is + cropped to a given size. + + Parameters + ---------- + image : ndarray + Image to be up sampled. + factor : int + Up sampling factor, must be a power of two. + order : int (3) + Order of the spline interpolation used by `scipy.ndimage.zoom`. + shape : tuple (None) + If shape is specified, the image is cropped after the up sampling + symmetrically in x and y direction to the given shape. + + Returns + ------- + image : ndarray + Up sampled image. + """ + from scipy.ndimage import zoom + if not np.log2(factor).is_integer(): + raise ValueError('Up sampling factor must be power of 2.') + factor = int(factor) + + if shape is not None: + x_crop = (factor * image.shape[1] - shape[1]) // 2 + y_crop = (factor * image.shape[0] - shape[0]) // 2 + # Sample up result and crop to original size + return zoom(image, factor, order=order)[y_crop:-y_crop, x_crop:-x_crop] + else: + return zoom(image, factor, order=order) + + def exclusion_distance(exclusion): """Distance to nearest exclusion region. @@ -233,7 +313,7 @@ def coordinates(image, world=True, lon_sym=True, radians=False, system=None): Parameters ---------- - image : `~astropy.io.fits.ImageHDU` + image : `~astropy.io.fits.ImageHDU` or ndarray Input image world : bool Use world coordinates (or pixel coordinates)?
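A quick round-trip sketch of the `downsample_2N` / `upsample_2N` pair added above, as used later by the TS-map script to trade resolution for speed. It assumes scikit-image and scipy are installed; the image and shapes are arbitrary illustration values:

```python
# Sketch: pad-and-downsample by a factor 4, then zoom back up and crop.
import numpy as np
from gammapy.image.utils import downsample_2N, upsample_2N

image = np.random.random((100, 100))

# Pad symmetrically to 128 x 128, then reduce 4 x 4 blocks with np.nansum
small = downsample_2N(image, factor=4, method=np.nansum, shape=(128, 128))

# Zoom up by 4 (spline order 3) and crop back to the original 100 x 100
restored = upsample_2N(small, factor=4, order=3, shape=(100, 100))
print(small.shape, restored.shape)  # (32, 32) (100, 100)
```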
@@ -254,10 +334,11 @@ def coordinates(image, world=True, lon_sym=True, radians=False, system=None): >>> dist = np.sqrt(lon ** 2 + lat ** 2) """ # Create arrays of pixel coordinates - y, x = np.indices(image.data.shape, dtype='int32') + 1 - if not world: + y, x = np.indices(image.shape, dtype='int32') + 1 return x, y + else: + y, x = np.indices(image.data.shape, dtype='int32') + 1 wcs = WCS(image.header) lon, lat = wcs.wcs_pix2world(x, y, 1) @@ -272,6 +353,29 @@ return lon, lat +def dict_to_hdulist(image_dict, header): + """ + Take a dictionary of image data and a header to create an HDUList. + + Parameters + ---------- + image_dict : dict + Dictionary of input data. The keys are used as FITS extension names. + Image data are the corresponding values. + header : `astropy.io.fits.Header` + Header to be used for all images. + + Returns + ------- + hdu_list : `astropy.io.fits.HDUList` + HDU list of input dictionary. + """ + hdu_list = fits.HDUList() + for name, image in image_dict.items(): + hdu_list.append(fits.ImageHDU(image, header, name.upper())) + return hdu_list + + def separation(image, center, world=True, radians=False): """Compute distance image from a given center point. @@ -975,3 +1079,4 @@ def lon_lat_rectangle_mask(lons, lats, lon_min=None, lon_max=None, lat_mask = mask_lat_min & mask_lat_max return lon_mask & lat_mask + diff --git a/gammapy/irf/psf_core.py b/gammapy/irf/psf_core.py index cda56871e64..cdf8c10898a 100644 --- a/gammapy/irf/psf_core.py +++ b/gammapy/irf/psf_core.py @@ -324,7 +324,7 @@ def _get_psf(self, index): return psf -def multi_gauss_psf_kernel(psf_parameters, **kwargs): +def multi_gauss_psf_kernel(psf_parameters, BINSZ=0.02, NEW_BINSZ=0.02, **kwargs): """Create multi-Gauss PSF kernel. The Gaussian PSF components are specified via the @@ -335,10 +335,14 @@ ---------- psf_parameters : dict PSF parameters + BINSZ : float (0.02) + Pixel size used for the given parameters in deg. + NEW_BINSZ : float (0.02) + New pixel size in deg. Used to change the resolution of the PSF. Returns ------- - psf_kernel : `astropy.convolution.Kernel` + psf_kernel : `astropy.convolution.Kernel2D` PSF kernel Examples -------- @@ -353,13 +357,11 @@ for ii in range(1, 4): # Convert sigma and amplitude pars = psf_parameters['psf{0}'.format(ii)] - sigma = fwhm_to_sigma * pars['fwhm'] + sigma = fwhm_to_sigma * pars['fwhm'] * BINSZ / NEW_BINSZ ampl = 2 * np.pi * sigma ** 2 * pars['ampl'] if psf is None: psf = float(ampl) * Gaussian2DKernel(sigma, **kwargs) else: psf += float(ampl) * Gaussian2DKernel(sigma, **kwargs) - psf.normalize() - - return psf diff --git a/gammapy/morphology/shapes.py b/gammapy/morphology/shapes.py index 20f96cfb608..57679cc331b 100644 --- a/gammapy/morphology/shapes.py +++ b/gammapy/morphology/shapes.py @@ -1,15 +1,11 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst -"""Morphological models for astrophysical sources. - -This was written before I used sherpa and is independent. -Might be useful to keep around anyways. """ +Morphological models for astrophysical gamma-ray sources.
+""" + from __future__ import print_function, division import numpy as np -from astropy.modeling import (Parameter, - ModelDefinitionError, - Fittable2DModel, - ) +from astropy.modeling import Parameter, ModelDefinitionError, Fittable2DModel from astropy.modeling.models import Gaussian2D __all__ = ['morph_types', 'Shell2D', 'Sphere2D', 'Delta2D'] @@ -79,7 +75,8 @@ class Shell2D(Fittable2DModel): r_in = Parameter('r_in') width = Parameter('width') - def __init__(self, amplitude, x_0, y_0, r_in, width=None, r_out=None, normed=True, **constraints): + def __init__(self, amplitude, x_0, y_0, r_in, width=None, r_out=None, + normed=True, **constraints): if r_out is not None: width = r_out - r_in if r_out is None and width is None: @@ -88,7 +85,7 @@ def __init__(self, amplitude, x_0, y_0, r_in, width=None, r_out=None, normed=Tru if not normed: self.eval = self.eval_peak_norm super(Shell2D, self).__init__(amplitude=amplitude, x_0=x_0, - y_0=y_0, r_in=r_in, width=width, **constraints) + y_0=y_0, r_in=r_in, width=width, **constraints) @staticmethod def eval(x, y, amplitude, x_0, y_0, r_in, width): @@ -102,8 +99,8 @@ def eval(x, y, amplitude, x_0, y_0, r_in, width): # Note: for r > r_out 'np.select' fills automatically zeros! with np.errstate(invalid='ignore'): values = np.select([rr <= rr_in, rr <= rr_out], - [np.sqrt(rr_out - rr) - np.sqrt(rr_in - rr), - np.sqrt(rr_out - rr)]) + [np.sqrt(rr_out - rr) - np.sqrt(rr_in - rr), + np.sqrt(rr_out - rr)]) return amplitude * values / (2 * np.pi / 3 * (rr_out * (r_in + width) - rr_in * r_in)) @@ -119,8 +116,8 @@ def eval_peak_norm(x, y, amplitude, x_0, y_0, r_in, width): # Note: for r > r_out 'np.select' fills automatically zeros! with np.errstate(invalid='ignore'): values = np.select([rr <= rr_in, rr <= rr_out], - [np.sqrt(rr_out - rr) - np.sqrt(rr_in - rr), - np.sqrt(rr_out - rr)]) + [np.sqrt(rr_out - rr) - np.sqrt(rr_in - rr), + np.sqrt(rr_out - rr)]) return amplitude * values / np.sqrt(rr_out - rr_in) @@ -184,7 +181,7 @@ def __init__(self, amplitude, x_0, y_0, r_0, normed=True, **constraints): if not normed: self.eval = self.eval_peak_norm super(Sphere2D, self).__init__(amplitude=amplitude, x_0=x_0, - y_0=y_0, r_0=r_0, **constraints) + y_0=y_0, r_0=r_0, **constraints) @staticmethod def eval(x, y, amplitude, x_0, y_0, r_0): @@ -253,7 +250,7 @@ class Delta2D(Fittable2DModel): def __init__(self, amplitude, x_0, y_0, **constraints): super(Delta2D, self).__init__(amplitude=amplitude, x_0=x_0, - y_0=y_0, **constraints) + y_0=y_0, **constraints) @staticmethod def eval(x, y, amplitude, x_0, y_0): diff --git a/gammapy/morphology/tests/test_gauss.py b/gammapy/morphology/tests/test_gauss.py index ddd331c87af..4057dab9640 100644 --- a/gammapy/morphology/tests/test_gauss.py +++ b/gammapy/morphology/tests/test_gauss.py @@ -1,11 +1,17 @@ # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import print_function, division import unittest + import numpy as np from numpy.testing import assert_equal, assert_almost_equal, assert_allclose + from astropy.tests.helper import pytest from astropy.modeling.models import Gaussian2D from astropy.convolution import discretize_model +from astropy.io import fits +from astropy.wcs import WCS + +from ...image.tests.test_measure import set_header, BINSZ from ...image import measure_image_moments from ...morphology import Gauss2DPDF, MultiGauss2D, gaussian_sum_moments @@ -124,8 +130,7 @@ def test_gauss_convolve(self): def test_gaussian_sum_moments(): """Check analytical against numerical solution. 
""" - - # We define three components with different flux, position and size + # We define three components with different flux, position and size in pixel coordinates F_1, F_2, F_3 = 100, 200, 300 sigma_1, sigma_2, sigma_3 = 15, 10, 5 x_1, x_2, x_3 = 100, 120, 70 @@ -140,19 +145,24 @@ def A(F, sigma): f_2 = Gaussian2D(A(F_2, sigma_2), x_2, y_2, sigma_2, sigma_2) f_3 = Gaussian2D(A(F_3, sigma_3), x_3, y_3, sigma_3, sigma_3) - F_1_image = discretize_model(f_1, (0, 200), (0, 200)) - F_2_image = discretize_model(f_2, (0, 200), (0, 200)) - F_3_image = discretize_model(f_3, (0, 200), (0, 200)) + F_1_image = discretize_model(f_1, (0, 201), (0, 201)) + F_2_image = discretize_model(f_2, (0, 201), (0, 201)) + F_3_image = discretize_model(f_3, (0, 201), (0, 201)) + image = set_header(fits.ImageHDU(F_1_image + F_2_image + F_3_image)) + moments_num = measure_image_moments(image) - moments_num = measure_image_moments(F_1_image + F_2_image + F_3_image) + wcs = WCS(image.header) # Compute analytical values cov_matrix = np.zeros((12, 12)) F = [F_1, F_2, F_3] - sigma = [sigma_1, sigma_2, sigma_3] - x = [x_1, x_2, x_3] - y = [y_1, y_2, y_3] + sigma = np.array([sigma_1, sigma_2, sigma_3]) * BINSZ + x, y = wcs.wcs_pix2world([x_1, x_2, x_3], [y_1, y_2, y_3], 0) + + # Fix longitude range, is there a wcs option for this? + x = np.where(x > 180, x - 360, x) - moments_ana, uncertainties = gaussian_sum_moments(F, sigma, x, y, cov_matrix) + moments_ana, uncertainties = gaussian_sum_moments(F, sigma, x, y, cov_matrix, shift=0) assert_allclose(moments_ana, moments_num, 1e-6) assert_allclose(uncertainties, 0) + diff --git a/scripts/gammapy-ts-image b/scripts/gammapy-ts-image new file mode 100755 index 00000000000..71e751156f5 --- /dev/null +++ b/scripts/gammapy-ts-image @@ -0,0 +1,120 @@ +#!/usr/bin/env python +# Licensed under a 3-clause BSD style license - see LICENSE.rst +""" +Compute source model residual images. + +The input `data` fits file must contain the following HDU extensions: + +* 'On' -- Counts image +* 'Background' -- Background image +* 'Diffuse' -- Diffuse model image +* 'ExpGammaMap' -- Exposure image +""" + +# Parse command line arguments + +from gammapy.utils.scripts import argparse, GammapyFormatter +parser = argparse.ArgumentParser(description=__doc__, + formatter_class=GammapyFormatter) +parser.add_argument('data', type=str, + help='Input data FITS file name') +parser.add_argument('--psf', type=str, default='psf.json', + help='JSON file containing PSF information. ') +parser.add_argument('--kernel', type=str, default='Gaussian2DKernel', + help='Which kernel to use for TS calculation.') +parser.add_argument('--scale', type=float, default=0, + help='Scale of the kernel in deg.') +parser.add_argument('--downsample', type=str, default='auto', + help="Downsample factor of the data to obtain optimal performance. " + "Must be power of 2. Can be 'auto' to choose the downsample " + "factor automatically depending on the scale.") +parser.add_argument('--residual', action='store_true', + help="Whether to compute a residual TS image. 
If a residual TS image is computed " "an excess model has to be provided using the '--model' parameter.") +parser.add_argument('--model', type=str, + help='Input excess model FITS file name') +parser.add_argument('--threshold', type=float, default=None, + help="Minimal initial TS value (before fitting) required for the fit to be performed at all.") +parser.add_argument('--clobber', action='store_true', + help='Clobber output files?') +args = parser.parse_args() + +# Execute script +import json +import logging +logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s') +from time import time +from collections import OrderedDict + +import numpy as np + +from astropy.io import fits +from astropy.convolution import convolve, Gaussian2DKernel +from gammapy.detect import compute_ts_map +from gammapy.irf import multi_gauss_psf_kernel +from gammapy.image import dict_to_hdulist, upsample_2N, downsample_2N + + +# Read data +logging.info('Reading {0}'.format(args.data)) +maps = fits.open(args.data) + +if args.residual: + logging.info('Reading {0}'.format(args.model)) + data = fits.getdata(args.model) + header = fits.getheader(args.model) + maps.append(fits.ImageHDU(data, header, 'OnModel')) +logging.info('Computing (residual) TS map for scale {0} deg'.format(args.scale)) + +# Sample down if required +downsampled = False +if args.downsample == 'auto': + args.downsample = np.select([args.scale < 0.1, args.scale < 0.2, args.scale < 0.4], [1, 2, 4], 8) + logging.info('Using down sampling factor of {0}'.format(args.downsample)) +factor = int(args.downsample) +if factor == 1: + logging.info('No down sampling used.') +else: + funcs = [np.nansum, np.mean, np.nansum, np.nansum, np.nansum] + for map_, func in zip(maps, funcs): + map_.data = downsample_2N(map_.data, factor, func, shape=(512, 9472)) + downsampled = True + +# Set up PSF and source kernel +BINSZ = 0.02 * factor +psf_parameters = json.load(open(args.psf)) +kernel = multi_gauss_psf_kernel(psf_parameters, NEW_BINSZ=BINSZ, mode='oversample', x_size=21) +if args.scale > 0: + sigma = args.scale / BINSZ + source_kernel = Gaussian2DKernel(sigma, x_size=21) + kernel = convolve(source_kernel, kernel) + +# Compute TS map +if args.residual: + background = maps['Background'].data + maps['DIFFUSE'].data + maps['OnModel'].data +else: + background = maps['Background'].data + maps['Diffuse'].data +ts_results = compute_ts_map(maps['On'].data, background, maps['ExpGammaMap'].data, + kernel, debug=True, threshold=args.threshold) +logging.info('TS map computation took {0:.1f} s'.format(ts_results.runtime)) + +# Write results to file +if args.residual: + filename = 'residual_ts_map_{0:.2f}.fits'.format(args.scale) +else: + filename = 'ts_map_{0:.2f}.fits'.format(args.scale) + +# Use an OrderedDict to have a well defined order of the FITS extensions +results = OrderedDict() +for name, order in zip(['ts', 'amplitude', 'niter'], [3, 3, 0]): + if name == 'ts': + # We don't allow negative TS values, so clip at zero + ts_results[name] = np.clip(ts_results[name], 0, np.inf) + + if downsampled: + results[name] = upsample_2N(ts_results[name], factor, order=order, shape=(500, 9400)) + else: + results[name] = ts_results[name] + +logging.info('Writing {0}'.format(filename)) +dict_to_hdulist(results, maps['ExpGammaMap'].header).writeto(filename, clobber=args.clobber)
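Finally, a hypothetical end-to-end call showing the core pipeline that the `gammapy-ts-image` script above wraps. The input file name, FITS extension names and kernel width are illustrative assumptions, not fixed conventions:

```python
# Sketch of what the script does internally: assemble the background,
# compute the TS map with a source kernel, and write the result maps.
import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel
from gammapy.detect import compute_ts_map
from gammapy.image import dict_to_hdulist

maps = fits.open('input_maps.fits')  # hypothetical input file
background = maps['Background'].data + maps['Diffuse'].data
kernel = Gaussian2DKernel(5)  # assumed Gaussian source morphology, sigma = 5 pix

result = compute_ts_map(maps['On'].data, background, maps['ExpGammaMap'].data,
                        kernel, debug=True)
print('Runtime: {0:.1f} s'.format(result.runtime))

images = dict(ts=np.clip(result.ts, 0, np.inf),  # negative TS values clipped
              amplitude=result.amplitude, niter=result.niter)
dict_to_hdulist(images, maps['ExpGammaMap'].header).writeto('ts_map.fits',
                                                            clobber=True)
```

The same computation is available from the command line via the new script, e.g. `gammapy-ts-image data.fits --scale 0.1`.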