Skip to content

Commit

Permalink
Merge pull request #230 from cdeil/setuptools-entrypoints
Browse files Browse the repository at this point in the history
Move scripts to gammapy/scripts and use setuptools entry_points
  • Loading branch information
cdeil committed Jan 17, 2015
2 parents 4afe85a + 014d0fd commit b854958
Show file tree
Hide file tree
Showing 72 changed files with 1,707 additions and 1,488 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Expand Up @@ -18,6 +18,9 @@ htmlcov
# Sphinx
_build

# Pytest
.cache

# Packages/installer info
*.egg
*.egg-info
Expand Down
17 changes: 10 additions & 7 deletions scripts/gammapy-sherpa-ts-image → dev/sherpa/sherpa_ts_image.py 100755 → 100644
@@ -1,9 +1,13 @@
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""Compute TS image with Sherpa.
TODO: describe what is done.
"""

# TODO: clean up or remove!

# Parse command line arguments

from gammapy.utils.scripts import argparse, GammapyFormatter
Expand Down Expand Up @@ -38,9 +42,8 @@
'This can be useful to get a quick look at a significance map without '
'bothering fimgbin on the input images. By default significance is '
'computed for all pixels')
parser.add_argument('-c', '--clobber',
action='store_true', default=False,
help='Overwrite output files?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing output file?')
args = parser.parse_args()

import logging
Expand All @@ -63,7 +66,7 @@
# time computing the significance image but not being able to save it
# ---------------------------------------------------------

if (args.clobber == False and isfile(args.significance_image)):
if (args.overwrite == False and isfile(args.significance_image)):
logging.error('Output file exists: {0}'.format(args.significance_image))
from sys import exit
exit(-1)
Expand Down Expand Up @@ -184,7 +187,7 @@
significance = np.sign(test_source.ampl.val) * np.sqrt(np.abs(L0 - L1))

except FitErr as e:
print e
print(e)
significance = np.nan

# Print and remember values
Expand All @@ -205,5 +208,5 @@
# Save the TS map to file
# ---------------------------------------------------------
logging.info('Writing significance_image: {0}'.format(args.significance_image))
save_data('significance', args.significance_image, clobber=args.clobber)
save_data('significance', args.significance_image, clobber=args.overwrite)

9 changes: 5 additions & 4 deletions scripts/gammapy-ts-image → dev/ts_image.py 100755 → 100644
@@ -1,5 +1,6 @@
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""
Compute source model residual images.
Expand Down Expand Up @@ -35,8 +36,8 @@
help='Input excess model FITS file name')
parser.add_argument('--threshold', type=float, default=None,
help="Minimal required initial (before fitting) TS value, that the fit is done at all.")
parser.add_argument('--clobber', action='store_true',
help='Clobber output files?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing output file?')
args = parser.parse_args()

# Execute script
Expand Down Expand Up @@ -120,4 +121,4 @@
results[name] = ts_results[name]

logging.info('Writing {0}'.format(filename))
dict_to_hdulist(results, maps['ExpGammaMap'].header).writeto(filename, clobber=args.clobber)
dict_to_hdulist(results, maps['ExpGammaMap'].header).writeto(filename, clobber=args.overwrite)
25 changes: 0 additions & 25 deletions docs/scripts/gammapy-info.rst

This file was deleted.

46 changes: 28 additions & 18 deletions docs/scripts/index.rst
@@ -1,26 +1,36 @@
.. _scripts:

Command line tools
==================
***************************************
Command line tools (`gammapy.scripts`)
***************************************

The ``gammapy/scripts`` folder contains command line tools
that are also installed in the ``bin`` directory if you install Gammapy
and are thus available to you if that is on your ``$PATH``.
.. currentmodule:: gammapy.scripts

TODO: explain better how to find out where the scripts were installed,
i.e. what the user should add to their ``$PATH``.
Introduction
============

.. note:: For now the full list of scripts is only available here:
https://github.com/gammapy/gammapy/tree/master/scripts

We plan to integrate that into the online Sphinx docs ...
please help if you know how to do this:
https://github.com/gammapy/gammapy/issues/24
Currently the `gammapy.scripts` sub-package contains the Gammapy command line tools.

Here's an example of a script listed by hand ... this should be auto-generated
by ``build_sphinx``:
This might change though ... see the notes here:
https://github.com/gammapy/gammapy/pull/230

.. toctree::
:maxdepth: 1
To be able to use them you have to install Gammapy:

gammapy-info
.. code-block:: bash

    $ pip install --user .
For Gammapy development we recommend you run this command so that you can edit
Gammapy and the tools and don't have to re-install after every change.

.. code-block:: bash

    $ pip install --user --editable .
Reference/API
=============

.. automodapi:: gammapy.scripts
:no-inheritance-diagram:
10 changes: 4 additions & 6 deletions gammapy/data/spectral_cube.py
Expand Up @@ -419,19 +419,17 @@ def to_fits(self):

return hdu_list

def writeto(self, filename, clobber=False):
def writeto(self, filename, overwrite=False):
"""Writes SpectralCube to fits file.
Parameters
----------
filename : string
Name of output file (.fits)
clobber : bool
True: overwrites existing files of same name.
False: returns error if a file exists of the same name in the
output directory.
overwrite : bool
Overwrite existing output file?
"""
self.to_fits.writeto(filename, clobber)
self.to_fits.writeto(filename, overwrite)

def __repr__(self):
# Copied from `spectral-cube` package
Expand Down
4 changes: 2 additions & 2 deletions gammapy/detect/cwt.py
Expand Up @@ -183,10 +183,10 @@ def max_scale_image(self):
maximum = np.argmax(self.transform, 0)
return self.scale_array[maximum] * (self.support.sum(0) > 0)

def save_filter(self, filename, clobber=False):
def save_filter(self, filename, overwrite=False):
"""Save filter to file."""
hdu = fits.PrimaryHDU(self.filter, self.header)
hdu.writeto(filename, clobber=clobber)
hdu.writeto(filename, clobber=overwrite)
fits.append(filename, self.approx, self.header)
fits.append(filename, self.filter + self.approx, self.header)
fits.append(filename, self.max_scale_image(), self.header)
39 changes: 4 additions & 35 deletions gammapy/detect/iterfind.py
Expand Up @@ -39,7 +39,6 @@
from ..image import disk_correlate

__all__ = ['IterativeSourceDetector',
'run_detection',
]


Expand Down Expand Up @@ -73,7 +72,7 @@ class IterativeSourceDetector(object):
"""

def __init__(self, maps, scales, max_sources=10, significance_threshold=5,
max_ncall=300, debug_output_folder='', clobber=False):
max_ncall=300, debug_output_folder='', overwrite=False):
self.maps = maps
# Note: FITS convention is to start counting pixels at 1
y, x = np.indices(maps['counts'].shape, dtype=np.int32) + 1
Expand All @@ -88,7 +87,7 @@ def __init__(self, maps, scales, max_sources=10, significance_threshold=5,
self.significance_threshold = significance_threshold
self.max_ncall = max_ncall
self.debug_output_folder = debug_output_folder
self.clobber = clobber
self.overwrite = overwrite

self.sources_guess = []
self.sources = []
Expand Down Expand Up @@ -116,14 +115,14 @@ def run(self):
for name in ['background']:
filename = '{0}/{1}.fits'.format(debug_folder, name)
logging.info('Writing {0}'.format(filename))
fits.writeto(filename, self.iter_maps[name], clobber=self.clobber)
fits.writeto(filename, self.iter_maps[name], clobber=self.overwrite)

# Save per iteration and scale maps
for name in ['significance']:
for scale in self.scales:
filename = '{0}/{1}_{2}.fits'.format(debug_folder, name, scale)
logging.info('Writing {0}'.format(filename))
fits.writeto(filename, self.iter_maps[name][scale], clobber=self.clobber)
fits.writeto(filename, self.iter_maps[name][scale], clobber=self.overwrite)

self.find_peaks()
# TODO: debug output to JSON here and for later steps
Expand Down Expand Up @@ -362,33 +361,3 @@ def save_json(self, filename):
# TypeError: 1.2617354e-10 is not JSON serializable
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4)


def run_detection(args):
    """Run iterative source detection.

    Parameters
    ----------
    args : dict
        Expected keys: ``'counts'``, ``'background'``, ``'exposure'``
        (input FITS file names), ``'scales'`` (iterable of scales,
        presumably in deg -- TODO confirm against callers),
        ``'debug_output_folder'`` and ``'output_regions'``.
    """
    # Load data
    # Read each input map from its FITS file into a plain array.
    maps = dict()
    for mapname in ['counts', 'background', 'exposure']:
        filename = args[mapname]
        logging.info('Reading {0} map: {1}'.format(mapname, filename))
        maps[mapname] = fits.getdata(filename)

    # Compute scales in pixel coordinates
    # CDELT1 is the pixel size along axis 1; np.abs handles the usual
    # negative CDELT1 of sky images. Assumes square pixels -- TODO confirm.
    DEG_PER_PIX = np.abs(fits.getval(args['counts'], 'CDELT1'))
    scales_deg = args['scales']
    scales_pix = np.array(scales_deg) / DEG_PER_PIX
    logging.info('Number of scales: {0}'.format(len(scales_deg)))
    logging.info('DEG_PER_PIX: {0}'.format(DEG_PER_PIX))
    logging.info('Scales in deg: {0}'.format(scales_deg))
    logging.info('Scales in pix: {0}'.format(scales_pix))

    # Run the iterative source detection
    # NOTE(review): clobber is hard-coded to True here, so any
    # user-supplied overwrite setting is ignored.
    detector = IterativeSourceDetector(maps=maps, scales=scales_pix,
                                       debug_output_folder=args['debug_output_folder'],
                                       clobber=True)
    detector.run()

    # Save the results
    # detector.save_fits(args['output_fits'])
    detector.save_regions(args['output_regions'])
    # detector.save_json('detect.json')
3 changes: 2 additions & 1 deletion gammapy/obs/run.py
@@ -1,6 +1,7 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Run and RunList class"""
from __future__ import print_function, division
from __future__ import (absolute_import, division, print_function,
unicode_literals)

__all__ = ['Run', 'RunList']

Expand Down
35 changes: 35 additions & 0 deletions gammapy/scripts/__init__.py
@@ -0,0 +1,35 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Gammapy command line tools.
TODO: document how this works and how users should write their own
command line tools.
"""

from .background_cube import *
from .bin_cube import *
from .bin_image import *
from .check import *
from .coordinate_images import *
from .cwt import *
from .derived_images import *
from .detect import *
from .find_runs import *
from .image_decompose_a_trous import *
from .info import *
from .irf_info import *
from .irf_root_to_fits import *
from .iterative_source_detect import *
from .look_up_image import *
from .model_image import *
from .pfmap import *
from .pfsim import *
from .pfspec import *
from .reflected_regions import *
from .residual_images import *
from .root_to_fits import *
from .sherpa_like import *
from .sherpa_model_image import *
from .significance_image import *
from .simulate_source_catalog import *
# from .ts_image import *
from .xspec import *
38 changes: 38 additions & 0 deletions gammapy/scripts/background_cube.py
@@ -0,0 +1,38 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser

__all__ = ['background_cube']


def main(args=None):
    """Command-line entry point: parse arguments and call ``background_cube``.

    Parameters
    ----------
    args : list of str, optional
        Argument list; ``None`` means use ``sys.argv`` (argparse default).
    """
    parser = get_parser(background_cube)
    # All positional arguments are plain string file names.
    positional = [
        ('run_list', 'Input run list file name'),
        ('exclusion_list', 'Input exclusion list file name'),
        ('reference_file', 'Input FITS reference cube file name'),
        ('out_file', 'Output FITS counts cube file name'),
    ]
    for arg_name, help_text in positional:
        parser.add_argument(arg_name, type=str, help=help_text)
    parser.add_argument('--overwrite', action='store_true',
                        help='Overwrite existing output file?')
    parsed = parser.parse_args(args)
    # Forward the parsed namespace as keyword arguments.
    background_cube(**vars(parsed))


def background_cube(run_list,
                    exclusion_list,
                    reference_file,
                    out_file,
                    overwrite):
    """Create a background model cube from off runs.

    Not implemented yet: this stub configures logging and then raises
    ``NotImplementedError``.

    Parameters
    ----------
    run_list : str
        Input run list file name.
    exclusion_list : str
        Input exclusion list file name.
    reference_file : str
        Input FITS reference cube file name.
    out_file : str
        Output FITS counts cube file name.
    overwrite : bool
        Overwrite existing output file?
    """
    # Set up logging so the (future) implementation reports progress.
    import logging
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')

    # TODO: implement
    raise NotImplementedError

0 comments on commit b854958

Please sign in to comment.