diff --git a/.mailmap b/.mailmap index 419f9cd92a..e7d368b8d1 100644 --- a/.mailmap +++ b/.mailmap @@ -29,3 +29,5 @@ Basile Pinsard bpinsard Nguyen, Ly lxn2 Ben Cipollini Ben Cipollini Chris Markiewicz Christopher J. Markiewicz +Chris Markiewicz Christopher J. Markiewicz +Chris Markiewicz Chris Johnson diff --git a/.travis.yml b/.travis.yml index a7aca4375b..55f268ab81 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,11 +8,13 @@ env: global: - DEPENDS="numpy scipy matplotlib h5py" - PYDICOM=1 + - INSTALL_TYPE="setup" python: - 2.6 - 3.2 - 3.3 - 3.4 + - 3.5 matrix: include: - python: 2.7 @@ -26,20 +28,37 @@ matrix: - python: 2.7 env: - DEPENDS="numpy==1.5.1 pydicom==0.9.7" + # pydicom 1.0 (currently unreleased) + - python: 2.7 + env: + - PYDICOM="v1.0" # Documentation doctests - python: 2.7 env: - DOC_DOC_TEST=1 + - python: 2.7 + env: + - INSTALL_TYPE=sdist + - python: 2.7 + env: + - INSTALL_TYPE=wheel + - python: 2.7 + env: + - INSTALL_TYPE=requirements before_install: + - source tools/travis_tools.sh - virtualenv --python=python venv - source venv/bin/activate - python --version # just to check - - pip install nose # always - - pip install --no-index -f http://travis-wheels.scikit-image.org $DEPENDS + - pip install -U pip # upgrade to latest pip to find 3.5 wheels + - retry pip install nose # always + - wheelhouse_pip_install $DEPENDS # pydicom <= 0.9.8 doesn't install on python 3 - if [ "${TRAVIS_PYTHON_VERSION:0:1}" == "2" ]; then if [ "$PYDICOM" == "1" ]; then pip install pydicom; + elif [ "$PYDICOM" == "v1.0" ]; then + pip install git+https://github.com/darcymason/pydicom.git@43f278444d5cb2e4648135d3edcd430c363c6975; fi fi - if [ "${COVERAGE}" == "1" ]; then @@ -51,7 +70,21 @@ before_install: fi # command to install dependencies install: - - python setup.py install + - | + if [ "$INSTALL_TYPE" == "setup" ]; then + python setup.py install + elif [ "$INSTALL_TYPE" == "sdist" ]; then + python setup_egg.py egg_info # check egg_info while we're here + python setup_egg.py sdist + wheelhouse_pip_install dist/*.tar.gz + elif [ "$INSTALL_TYPE" == "wheel" ]; then + pip install wheel + python setup_egg.py bdist_wheel + wheelhouse_pip_install dist/*.whl + elif [ "$INSTALL_TYPE" == "requirements" ]; then + wheelhouse_pip_install -r requirements.txt + python setup.py install + fi # Point to nibabel data directory - export NIBABEL_DATA_DIR="$PWD/nibabel-data" # command to run tests, e.g. python setup.py test diff --git a/Changelog b/Changelog index 8bd1a39839..bb54df572b 100644 --- a/Changelog +++ b/Changelog @@ -24,6 +24,25 @@ and Stephan Gerhard (SG). References like "pr/298" refer to github pull request numbers. +* 2.0.2 (Monday 23 November 2015) + + * Fix for integer overflow on large images (pr/325) (MB); + * Fix for Freesurfer nifti files with unusual dimensions (pr/332) (Chris + Markiewicz); + * Fix typos on benchmarks and tests (pr/336, pr/340, pr/347) (Chris + Markiewicz); + * Fix Windows install script (pr/339) (MB); + * Support for Python 3.5 (pr/363) (MB) and numpy 1.10 (pr/358) (Chris + Markiewicz); + * Update pydicom imports to permit version 1.0 (pr/379) (Chris Markiewicz); + * Workaround for Python 3.5.0 gzip regression (pr/383) (Ben Cipollini). + * tripwire.TripWire object now raises subclass of AttributeError when trying + to get an attribute, rather than a direct subclass of Exception. This + prevents Python 3.5 triggering the tripwire when doing inspection prior to + running doctests. 
+  * Minor API change for tripwire.TripWire object; code that checked for
+    AttributeError will now also catch TripWireError.
+
 * 2.0.1 (Saturday 27 June 2015)
 
   Contributions from Ben Cipollini, Chris Markiewicz, Alexandre Gramfort,
diff --git a/MANIFEST.in b/MANIFEST.in
index 1248441c9a..11bf20b7c2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,5 @@
 include AUTHOR COPYING Makefile* MANIFEST.in setup* README.*
-include Changelog TODO
+include Changelog TODO requirements.txt
 recursive-include doc *
 recursive-include bin *
 recursive-include tools *
diff --git a/doc/source/devel/advanced_testing.rst b/doc/source/devel/advanced_testing.rst
new file mode 100644
index 0000000000..1b61c58ccd
--- /dev/null
+++ b/doc/source/devel/advanced_testing.rst
@@ -0,0 +1,32 @@
+.. -*- mode: rst -*-
+.. ex: set sts=4 ts=4 sw=4 et tw=79:
+  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
+  #
+  #   See COPYING file distributed along with the NiBabel package for the
+  #   copyright and license terms.
+  #
+  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
+
+.. _advanced_testing:
+
+****************
+Advanced Testing
+****************
+
+Setup
+-----
+
+Before running advanced tests, please update all submodules of nibabel by running ``git submodule update --init``.
+
+
+Long-running tests
+------------------
+
+Long-running tests are not enabled by default, and can be resource-intensive. To run these tests:
+
+* Set environment variable ``NIPY_EXTRA_TESTS=slow``
+* Run ``nosetests``.
+
+Note that some tests may require a machine with >4GB of RAM.
+
+.. include:: ../links_names.txt
diff --git a/doc/source/devel/index.rst b/doc/source/devel/index.rst
index 0f11891dd1..659061ed9d 100644
--- a/doc/source/devel/index.rst
+++ b/doc/source/devel/index.rst
@@ -14,3 +14,4 @@ Developer documentation page
    add_image_format
    devdiscuss
    make_release
+   advanced_testing
diff --git a/doc/source/devel/make_release.rst b/doc/source/devel/make_release.rst
index 9fda1044e7..5d46d72f55 100644
--- a/doc/source/devel/make_release.rst
+++ b/doc/source/devel/make_release.rst
@@ -130,11 +130,9 @@ Release checklist
 * Check on different platforms, particularly windows and PPC. Look at the
   `nipy buildbot`_ automated test runs for this.
 
-* Check the documentation doctests::
+* Check the documentation doctests (forcing Python 2)::
 
-    cd doc
-    make doctest
-    cd ..
+    make -C doc doctest SPHINXBUILD="python $(which sphinx-build)"
 
   This should also be tested by `nibabel on travis`_.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index b064922107..4c34b2c72f 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -47,7 +47,7 @@ contributed code and discussion (in rough order of appearance):
 * Bago Amirbekian
 * Brendan Moloney
 * Félix C. Morency
-* Chris Johnson
+* Chris Markiewicz
 * JB Poline
 * Nolan Nichols
 * Nguyen, Ly
@@ -61,8 +61,7 @@ contributed code and discussion (in rough order of appearance):
 * Demian Wassermann
 * Philippe Gervais
 * Justin Lecher
-* Ben Cippolini
-* Chris Markiewicz
+* `Ben Cipollini`_
 * Clemens C. C. Bauer
 
 License reprise
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
index c34acaa766..25bc89ec5d 100644
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -114,7 +114,11 @@ Just install the modules by invoking::
 
 If sudo is not configured (or even installed) you might have to use ``su``
 instead.
 
-Now fire up Python and try importing the module to see if everything is fine.
+
+Validating your install
+-----------------------
+
+For a basic test of your installation, fire up Python and try importing the module to see if everything is fine.
 It should look something like this::
 
     Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35)
@@ -123,4 +127,9 @@ It should look something like this::
     >>> import nibabel
     >>>
 
+
+To run the nibabel test suite, from the terminal run ``nosetests nibabel`` or ``python -c "import nibabel; nibabel.test()"``.
+
+To run an extended test suite that validates ``nibabel`` for long-running and resource-intensive cases, please see :ref:`advanced_testing`.
+
 .. include:: links_names.txt
diff --git a/doc/source/links_names.txt b/doc/source/links_names.txt
index ba1f4b4e73..1cab6fd63f 100644
--- a/doc/source/links_names.txt
+++ b/doc/source/links_names.txt
@@ -5,7 +5,7 @@
    and name substitutions.  It may be included in many files,
    therefore it should only contain link targets and name
    substitutions.  Try grepping for "^\.\. _" to find plausible
-   candidates for this list.  
+   candidates for this list.
 
 .. NOTE: reST targets are
@@ -233,6 +233,7 @@
 .. _Michael Hanke: http://mih.voxindeserto.de
 .. _Gaël Varoquaux: http://gael-varoquaux.info/
 .. _Stephan Gerhard: http://www.unidesign.ch
+.. _Ben Cipollini: http://bcipolli.github.io/
 
 .. Substitutions
 .. |emdash| unicode:: U+02014
diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py
index 26be0c948f..159fb3ae93 100644
--- a/nibabel/benchmarks/bench_array_to_file.py
+++ b/nibabel/benchmarks/bench_array_to_file.py
@@ -50,7 +50,7 @@ def bench_array_to_file():
     mtime = measure('array_to_file(arr, BytesIO(), np.int16)', repeat)
     print('%30s %6.2f' % ('Save float64 to int16, infs', mtime))
     # Int16 input, float output
-    arr = np.random.random_integers(low=-1000,high=-1000, size=img_shape)
+    arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)
     arr = arr.astype(np.int16)
     mtime = measure('array_to_file(arr, BytesIO(), np.float32)', repeat)
     print('%30s %6.2f' % ('Save Int16 to float32', mtime))
diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py
index 298345fa41..d10797dab5 100644
--- a/nibabel/benchmarks/bench_finite_range.py
+++ b/nibabel/benchmarks/bench_finite_range.py
@@ -42,7 +42,7 @@ def bench_finite_range():
     mtime = measure('finite_range(arr)', repeat)
     print('%30s %6.2f' % ('float64 many infs', mtime))
     # Int16 input, float output
-    arr = np.random.random_integers(low=-1000,high=-1000, size=img_shape)
+    arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)
     arr = arr.astype(np.int16)
     mtime = measure('finite_range(arr)', repeat)
     print('%30s %6.2f' % ('int16', mtime))
diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py
index b2e140854e..a2a987cf0e 100644
--- a/nibabel/benchmarks/bench_load_save.py
+++ b/nibabel/benchmarks/bench_load_save.py
@@ -56,7 +56,7 @@ def bench_load_save():
     mtime = measure('img.from_file_map(img.file_map)', repeat)
     print('%30s %6.2f' % ('Load from int16, NaNs', mtime))
     # Int16 input, float output
-    arr = np.random.random_integers(low=-1000,high=-1000, size=img_shape)
+    arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)
     arr = arr.astype(np.int16)
     img = Nifti1Image(arr, np.eye(4))
     sio = BytesIO()
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py
index 8e2e37f508..9152d314c4 100644
--- a/nibabel/fileslice.py
+++ b/nibabel/fileslice.py
@@ -764,9 +764,15 @@ def strided_scalar(shape, scalar=0.):
     strided_arr : array
         Array of shape `shape` for which all values == `scalar`, built by
         setting all strides of `strided_arr` to 0, so the scalar is broadcast
-        out to the full array `shape`.
+        out to the full array `shape`. `strided_arr` is flagged as not
+        `writeable`.
+
+        The array is set read-only to avoid a numpy error when broadcasting -
+        see https://github.com/numpy/numpy/issues/6491
    """
    shape = tuple(shape)
    scalar = np.array(scalar)
    strides = [0] * len(shape)
-    return np.lib.stride_tricks.as_strided(scalar, shape, strides)
+    strided_scalar = np.lib.stride_tricks.as_strided(scalar, shape, strides)
+    strided_scalar.flags.writeable = False
+    return strided_scalar
diff --git a/nibabel/info.py b/nibabel/info.py
index eb1d8d0c56..77d92513af 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -9,8 +9,8 @@
 _version_major = 2
 _version_minor = 0
 _version_micro = 2
-_version_extra = 'dev'
-#_version_extra = ''
+#_version_extra = 'dev'
+_version_extra = ''
 
 # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
 __version__ = "%s.%s.%s%s" % (_version_major,
diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py
index 7822159b19..3d68c9a76c 100644
--- a/nibabel/nicom/dicomwrappers.py
+++ b/nibabel/nicom/dicomwrappers.py
@@ -49,10 +49,13 @@ def wrapper_from_file(file_like, *args, **kwargs):
     dcm_w : ``dicomwrappers.Wrapper`` or subclass
         DICOM wrapper corresponding to DICOM data type
     """
-    import dicom
+    try:
+        from dicom import read_file
+    except ImportError:
+        from pydicom.dicomio import read_file
 
     with BinOpener(file_like) as fobj:
-        dcm_data = dicom.read_file(fobj, *args, **kwargs)
+        dcm_data = read_file(fobj, *args, **kwargs)
     return wrapper_from_data(dcm_data)
diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py
index 2b93768c07..4145de1c7f 100644
--- a/nibabel/nicom/tests/test_csareader.py
+++ b/nibabel/nicom/tests/test_csareader.py
@@ -31,7 +31,10 @@ def test_csa_header_read():
     assert_true(csa.is_mosaic(hdr))
     # Get a shallow copy of the data, lacking the CSA marker
     # Need to do it this way because del appears broken in pydicom 0.9.7
-    from dicom.dataset import Dataset
+    try:
+        from dicom.dataset import Dataset
+    except ImportError:
+        from pydicom.dataset import Dataset
     data2 = Dataset()
     for element in DATA:
         if (element.tag.group, element.tag.elem) != (0x29, 0x10):
diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py
index a0ab85a98c..a4588d486f 100644
--- a/nibabel/nicom/tests/test_dicomreaders.py
+++ b/nibabel/nicom/tests/test_dicomreaders.py
@@ -40,7 +40,10 @@ def test_passing_kwds():
     # Check that we correctly pass keywords to dicom
     dwi_glob = 'siemens_dwi_*.dcm.gz'
     csa_glob = 'csa*.bin'
-    import dicom
+    try:
+        from dicom.filereader import InvalidDicomError
+    except ImportError:
+        from pydicom.filereader import InvalidDicomError
     for func in (didr.read_mosaic_dwi_dir, didr.read_mosaic_dir):
         data, aff, bs, gs = func(IO_DATA_PATH, dwi_glob)
         # This should not raise an error
@@ -49,14 +52,14 @@ def test_passing_kwds():
                             dwi_glob,
                             dicom_kwargs=dict(force=True))
         assert_array_equal(data, data2)
-        # This should raise an error in dicom.read_file
+        # This should raise an error in pydicom.dicomio.read_file
         assert_raises(TypeError,
                       func,
                       IO_DATA_PATH,
                       dwi_glob,
                       dicom_kwargs=dict(not_a_parameter=True))
     # These are invalid dicoms, so will raise an error unless force=True
-    assert_raises(dicom.filereader.InvalidDicomError,
+    assert_raises(InvalidDicomError,
                   func,
                   IO_DATA_PATH,
                   csa_glob)
diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py
index aa4db2baa0..660b87f1b3 100644
--- a/nibabel/nicom/tests/test_dicomwrappers.py
+++ b/nibabel/nicom/tests/test_dicomwrappers.py
@@ -9,12 +9,17 @@
 
 import numpy as np
 
+have_dicom = True
 try:
-    import dicom
+    import dicom as pydicom
+    read_file = pydicom.read_file
 except ImportError:
-    have_dicom = False
-else:
-    have_dicom = True
+    try:
+        import pydicom
+    except ImportError:
+        have_dicom = False
+    else:
+        from pydicom.dicomio import read_file
 
 dicom_test = np.testing.dec.skipif(not have_dicom,
                                    'could not import pydicom')
@@ -32,8 +37,8 @@
 DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz')
 DATA_FILE_PHILIPS = pjoin(IO_DATA_PATH, 'philips_mprage.dcm.gz')
 if have_dicom:
-    DATA = dicom.read_file(gzip.open(DATA_FILE))
-    DATA_PHILIPS = dicom.read_file(gzip.open(DATA_FILE_PHILIPS))
+    DATA = read_file(gzip.open(DATA_FILE))
+    DATA_PHILIPS = read_file(gzip.open(DATA_FILE_PHILIPS))
 else:
     DATA = None
     DATA_PHILIPS = None
@@ -166,7 +171,7 @@ def test_wrapper_from_data():
 
 @dicom_test
 def test_wrapper_args_kwds():
-    # Test we can pass args, kwargs to dicom.read_file
+    # Test we can pass args, kwargs to read_file
     dcm = didw.wrapper_from_file(DATA_FILE)
     data = dcm.get_data()
     # Passing in non-default arg for defer_size
@@ -177,7 +182,7 @@ def test_wrapper_args_kwds():
     assert_array_equal(data, dcm2.get_data())
     # Trying to read non-dicom file raises pydicom error, usually
     csa_fname = pjoin(IO_DATA_PATH, 'csa2_b0.bin')
-    assert_raises(dicom.filereader.InvalidDicomError,
+    assert_raises(pydicom.filereader.InvalidDicomError,
                   didw.wrapper_from_file,
                   csa_fname)
     # We can force the read, in which case rubbish returns
diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py
index 6dae2fd86d..f984fb4a70 100644
--- a/nibabel/nicom/tests/test_utils.py
+++ b/nibabel/nicom/tests/test_utils.py
@@ -28,7 +28,10 @@ def test_find_private_section_real():
     assert_equal(find_private_section(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER'),
                  None)
     # Make fake datasets
-    from dicom.dataset import Dataset
+    try:
+        from dicom.dataset import Dataset
+    except ImportError:
+        from pydicom.dataset import Dataset
     ds = Dataset({})
     ds.add_new((0x11, 0x10), 'LO', b'some section')
     assert_equal(find_private_section(ds, 0x11, 'some section'), 0x1000)
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 5cc00da705..01de7dff4f 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -695,27 +695,23 @@ def get_data_shape(self):
 
         Notes
         -----
-        Allows for freesurfer hack for large vectors described in
-        https://github.com/nipy/nibabel/issues/100 and
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?spec=svn5022&r=5022#77
+        Applies freesurfer hack for large vectors described in `issue 100`_ and
+        `save_nifti.m <save77_>`_.
 
         Allows for freesurfer hack for 7th order icosahedron surface described
-        in
-        https://github.com/nipy/nibabel/issues/309
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/load_nifti.m?r=8776#86
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?r=8776#50
+        in `issue 309`_, load_nifti.m_, and `save_nifti.m <save50_>`_.
        '''
        shape = super(Nifti1Header, self).get_data_shape()
-        # Apply freesurfer hack for vector
-        if shape == (-1, 1, 1):
+        # Apply freesurfer hack for large vectors
+        if shape[:3] == (-1, 1, 1):
             vec_len = int(self._structarr['glmin'])
             if vec_len == 0:
                 raise HeaderDataError('-1 in dim[1] but 0 in glmin; '
                                       'inconsistent freesurfer type header?')
-            return (vec_len, 1, 1)
+            return (vec_len, 1, 1) + shape[3:]
         # Apply freesurfer hack for ico7 surface
-        elif shape == (27307, 1, 6):
-            return (163842, 1, 1)
+        elif shape[:3] == (27307, 1, 6):
+            return (163842, 1, 1) + shape[3:]
         else:  # Normal case
             return shape
@@ -725,6 +721,12 @@ def set_data_shape(self, shape):
         If ``ndims == len(shape)`` then we set zooms for dimensions higher
         than ``ndims`` to 1.0
 
+        Nifti1 images can have up to seven dimensions. For FreeSurfer-variant
+        Nifti surface files, the first dimension is assumed to correspond to
+        vertices/nodes on a surface, and dimensions two and three are
+        constrained to have depth of 1. Dimensions 4-7 are constrained only by
+        type bounds.
+
         Parameters
         ----------
         shape : sequence
@@ -732,24 +734,43 @@ def set_data_shape(self, shape):
 
         Notes
         -----
-        Applies freesurfer hack for large vectors described in
-        https://github.com/nipy/nibabel/issues/100 and
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?spec=svn5022&r=5022#77
+        Applies freesurfer hack for large vectors described in `issue 100`_ and
+        `save_nifti.m <save77_>`_.
 
         Allows for freesurfer hack for 7th order icosahedron surface described
-        in
-        https://github.com/nipy/nibabel/issues/309
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/load_nifti.m?r=8776#86
-        https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?r=8776#50
+        in `issue 309`_, load_nifti.m_, and `save_nifti.m <save50_>`_.
+
+        The Nifti1 `standard header`_ allows for the following "point set"
+        definition of a surface, not currently implemented in nibabel.
+
+        ::
+
+          To signify that the vector value at each voxel is really a
+          spatial coordinate (e.g., the vertices or nodes of a surface mesh):
+           - dataset must have a 5th dimension
+           - intent_code must be NIFTI_INTENT_POINTSET
+           - dim[0] = 5
+           - dim[1] = number of points
+           - dim[2] = dim[3] = dim[4] = 1
+           - dim[5] must be the dimensionality of space (e.g., 3 => 3D space).
+           - intent_name may describe the object these points come from
+             (e.g., "pial", "gray/white" , "EEG", "MEG").
+
+        .. _issue 100: https://github.com/nipy/nibabel/issues/100
+        .. _issue 309: https://github.com/nipy/nibabel/issues/309
+        .. _save77: https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?spec=svn8776&r=8776#77
+        .. _save50: https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/save_nifti.m?spec=svn8776&r=8776#50
+        .. _load_nifti.m: https://code.google.com/p/fieldtrip/source/browse/trunk/external/freesurfer/load_nifti.m?spec=svn8776&r=8776#86
+        .. _standard header: http://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h
         '''
         hdr = self._structarr
         shape = tuple(shape)
         # Apply freesurfer hack for ico7 surface
-        if shape == (163842, 1, 1):
-            shape = (27307, 1, 6)
-        # Apply freesurfer hack for vector
-        elif (len(shape) == 3 and shape[1:] == (1, 1) and
+        if shape[:3] == (163842, 1, 1):
+            shape = (27307, 1, 6) + shape[3:]
+        # Apply freesurfer hack for large vectors
+        elif (len(shape) >= 3 and shape[1:3] == (1, 1) and
               shape[0] > np.iinfo(hdr['dim'].dtype.base).max):
             try:
                 hdr['glmin'] = shape[0]
@@ -762,7 +783,7 @@ def set_data_shape(self, shape):
                              shape[0])
             warnings.warn('Using large vector Freesurfer hack; header will '
                           'not be compatible with SPM or FSL', stacklevel=2)
-            shape = (-1, 1, 1)
+            shape = (-1, 1, 1) + shape[3:]
         super(Nifti1Header, self).set_data_shape(shape)
 
     def get_qform_quaternion(self):
@@ -801,7 +822,7 @@ def get_qform(self, coded=False):
         quat = self.get_qform_quaternion()
         R = quat2mat(quat)
         vox = hdr['pixdim'][1:4].copy()
-        if np.any(vox) < 0:
+        if np.any(vox < 0):
             raise HeaderDataError('pixdims[1,2,3] should be positive')
         qfac = hdr['pixdim'][0]
         if qfac not in (-1, 1):
diff --git a/nibabel/openers.py b/nibabel/openers.py
index ee9226ac6d..5c296bcdcf 100644
--- a/nibabel/openers.py
+++ b/nibabel/openers.py
@@ -9,19 +9,65 @@
 """ Context manager openers for various fileobject types
 """
-from os.path import splitext
-import gzip
 import bz2
+import gzip
+import sys
+from os.path import splitext
+
 
 # The largest memory chunk that gzip can use for reads
 GZIP_MAX_READ_CHUNK = 100 * 1024 * 1024  # 100Mb
 
 
+class BufferedGzipFile(gzip.GzipFile):
+    """GzipFile able to readinto buffer >= 2**32 bytes.
+
+    This class only differs from gzip.GzipFile
+    in Python 3.5.0.
+
+    This works around a known issue in Python 3.5.
+    See https://bugs.python.org/issue25626"""
+
+    # This helps avoid defining readinto in Python 2.6,
+    #   where it is undefined on gzip.GzipFile.
+    # It also helps limit the exposure to this code.
+    if sys.version_info[:3] == (3, 5, 0):
+        def __init__(self, fileish, mode='rb', compresslevel=9,
+                     buffer_size=2**32-1):
+            super(BufferedGzipFile, self).__init__(fileish, mode=mode,
+                                                   compresslevel=compresslevel)
+            self.buffer_size = buffer_size
+
+        def readinto(self, buf):
+            """Uses self.buffer_size to do a buffered read."""
+            n_bytes = len(buf)
+            if n_bytes < 2 ** 32:
+                return super(BufferedGzipFile, self).readinto(buf)
+
+            # This works around a known issue in Python 3.5.
+            # See https://bugs.python.org/issue25626
+            mv = memoryview(buf)
+            n_read = 0
+            max_read = 2 ** 32 - 1  # Max for unsigned 32-bit integer
+            while (n_read < n_bytes):
+                n_wanted = min(n_bytes - n_read, max_read)
+                n_got = super(BufferedGzipFile, self).readinto(
+                    mv[n_read:n_read + n_wanted])
+                n_read += n_got
+                if n_got != n_wanted:
+                    break
+            return n_read
+
+
 def _gzip_open(fileish, *args, **kwargs):
-    # open gzip files with faster reads on large files using larger chunks
+    gzip_file = BufferedGzipFile(fileish, *args, **kwargs)
+
+    # Speedup for #209; attribute not present in Python 3.5
+    # open gzip files with faster reads on large files using larger chunks
     # See https://github.com/nipy/nibabel/pull/210 for discussion
-    gzip_file = gzip.open(fileish, *args, **kwargs)
-    gzip_file.max_read_chunk = GZIP_MAX_READ_CHUNK
+    if hasattr(gzip_file, 'max_read_chunk'):
+        gzip_file.max_read_chunk = GZIP_MAX_READ_CHUNK
+
     return gzip_file
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index b813aaca8f..a2087ca67f 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -459,7 +459,7 @@ def get_data(self, caching='fill'):
         The default behavior for ``get_data()`` on a proxy image is to read
         the data from the proxy, and store in an internal cache. Future calls
         to ``get_data`` will return the cached array. This is the behavior
-        selected with `caching` == "fill"`.
+        selected with `caching` == "fill".
 
         Once the data has been cached and returned from an array proxy, if you
         modify the returned array, you will also modify the cached array
diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py
index 79ed08e079..7c2f0372dc 100644
--- a/nibabel/testing/__init__.py
+++ b/nibabel/testing/__init__.py
@@ -7,8 +7,8 @@
 #
 ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
 ##
 ''' Utilities for testing '''
+import os
 from os.path import dirname, abspath, join as pjoin
-from warnings import catch_warnings
 
 import numpy as np
 from warnings import catch_warnings, simplefilter
@@ -16,6 +16,7 @@
 # set path to example data
 data_path = abspath(pjoin(dirname(__file__), '..', 'tests', 'data'))
 
+from numpy.testing.decorators import skipif
 # Allow failed import of nose if not now running tests
 try:
     import nose.tools as nt
@@ -89,3 +90,12 @@ def __exit__(self, *exc_info):
             mod.__warningregistry__.clear()
             if mod in self._warnreg_copies:
                 mod.__warningregistry__.update(self._warnreg_copies[mod])
+
+
+EXTRA_SET = os.environ.get('NIPY_EXTRA_TESTS', '').split(',')
+
+
+def runif_extra_has(test_str):
+    """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str"""
+    return skipif(test_str not in EXTRA_SET,
+                  "Skip {0} tests.".format(test_str))
diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py
index 12668e56d9..7ff86c61d0 100644
--- a/nibabel/tests/test_fileslice.py
+++ b/nibabel/tests/test_fileslice.py
@@ -639,8 +639,14 @@ def test_strided_scalar():
     assert_equal(observed.shape, shape)
     assert_equal(observed.dtype, expected.dtype)
     assert_array_equal(observed.strides, 0)
-    observed[..., 0] = 99
-    assert_array_equal(observed, expected * 0 + 99)
+    # Strided scalars are set as not writeable
+    # This addresses a numpy 1.10 breakage of broadcasting a strided
+    # array without resizing (see GitHub PR #358)
+    assert_false(observed.flags.writeable)
+    def setval(x):
+        x[..., 0] = 99
+    # RuntimeError for numpy < 1.10
+    assert_raises((RuntimeError, ValueError), setval, observed)
     # Default scalar value is 0
     assert_array_equal(strided_scalar((2, 3, 4)), np.zeros((2, 3, 4)))
diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py
index 3d4db9faf4..dc5bf5e046 100644
--- a/nibabel/tests/test_nifti1.py
+++ b/nibabel/tests/test_nifti1.py
@@ -13,17 +13,17 @@
 
 import numpy as np
 
-from ..externals.six import BytesIO
-from ..casting import type_info, have_binary128
-from ..tmpdirs import InTemporaryDirectory
-from ..spatialimages import HeaderDataError
-from ..eulerangles import euler2mat
-from ..affines import from_matvec
-from .. import nifti1 as nifti1
-from ..nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image,
-                      Nifti1Pair, Nifti1Extension, Nifti1Extensions,
-                      data_type_codes, extension_codes, slice_order_codes)
-
+from nibabel import nifti1 as nifti1
+from nibabel.affines import from_matvec
+from nibabel.casting import type_info, have_binary128
+from nibabel.eulerangles import euler2mat
+from nibabel.externals.six import BytesIO
+from nibabel.nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image,
+                            Nifti1Pair, Nifti1Extension, Nifti1Extensions,
+                            data_type_codes, extension_codes,
+                            slice_order_codes)
+from nibabel.spatialimages import HeaderDataError
+from nibabel.tmpdirs import InTemporaryDirectory
 from ..freesurfer import load as mghload
 
 from .test_arraywriters import rt_err_estimate, IUINT_TYPES
@@ -35,7 +35,7 @@
 from nose.tools import (assert_true, assert_false, assert_equal,
                         assert_raises)
 
-from ..testing import data_path, suppress_warnings
+from ..testing import data_path, suppress_warnings, runif_extra_has
 
 from . import test_analyze as tana
 from . import test_spm99analyze as tspm
@@ -45,12 +45,12 @@
 
 # Example transformation matrix
-R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]] # rotation matrix
-Z = [2.0, 3.0, 4.0] # zooms
-T = [20, 30, 40] # translations
+R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]  # rotation matrix
+Z = [2.0, 3.0, 4.0]  # zooms
+T = [20, 30, 40]  # translations
 A = np.eye(4)
-A[:3,:3] = np.array(R) * Z # broadcasting does the job
-A[:3,3] = T
+A[:3, :3] = np.array(R) * Z  # broadcasting does the job
+A[:3, 3] = T
 
 
 class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin):
@@ -91,7 +91,7 @@ def test_data_scaling(self):
         hdr.set_data_shape(data.shape)
         hdr.set_data_dtype(np.float32)
         S = BytesIO()
-        # Writing to float datatype with scaling gives slope, inter as identities
+        # Writing to float dtype with scaling gives slope, intercept as (1, 0)
         hdr.data_to_fileobj(data, S, rescale=True)
         assert_array_equal(hdr.get_slope_inter(), (1, 0))
         rdata = hdr.data_from_fileobj(S)
@@ -115,13 +115,13 @@ def test_big_scaling(self):
         # Test that upcasting works for huge scalefactors
         # See tests for apply_read_scaling in test_utils
         hdr = self.header_class()
-        hdr.set_data_shape((2,1,1))
+        hdr.set_data_shape((2, 1, 1))
         hdr.set_data_dtype(np.int16)
         sio = BytesIO()
         dtt = np.float32
         # This will generate a huge scalefactor
         finf = type_info(dtt)
-        data = np.array([finf['min'], finf['max']], dtype=dtt)[:,None, None]
+        data = np.array([finf['min'], finf['max']], dtype=dtt)[:, None, None]
         hdr.data_to_fileobj(data, sio)
         data_back = hdr.data_from_fileobj(sio)
         assert_true(np.allclose(data, data_back))
@@ -132,40 +132,40 @@ def test_slope_inter(self):
         HDE = HeaderDataError
         assert_equal(hdr.get_slope_inter(), (1.0, 0.0))
         for in_tup, exp_err, out_tup, raw_values in (
-            # Null scalings
-            ((None, None), None, (None, None), (nan, nan)),
-            ((nan, None), None, (None, None), (nan, nan)),
-            ((None, nan), None, (None, None), (nan, nan)),
-            ((nan, nan), None, (None, None), (nan, nan)),
-            # Can only be one null
-            ((None, 0), HDE, (None, None), (nan, 0)),
-            ((nan, 0), HDE, (None, None), (nan, 0)),
-            ((1, None), HDE, (None, None), (1, nan)),
-            ((1, nan), HDE, (None, None), (1, nan)),
-            # Bad slope plus anything generates an error
-            ((0, 0), HDE, (None, None), (0, 0)),
-            ((0, None), HDE, (None, None), (0, nan)),
-            ((0, nan), HDE, (None, None), (0, nan)),
-            ((0, inf), HDE, (None, None), (0, inf)),
-            ((0, minf), HDE, (None, None), (0, minf)),
-            ((inf, 0), HDE, (None, None), (inf, 0)),
-            ((inf, None), HDE, (None, None), (inf, nan)),
-            ((inf, nan), HDE, (None, None), (inf, nan)),
-            ((inf, inf), HDE, (None, None), (inf, inf)),
-            ((inf, minf), HDE, (None, None), (inf, minf)),
-            ((minf, 0), HDE, (None, None), (minf, 0)),
-            ((minf, None), HDE, (None, None), (minf, nan)),
-            ((minf, nan), HDE, (None, None), (minf, nan)),
-            ((minf, inf), HDE, (None, None), (minf, inf)),
-            ((minf, minf), HDE, (None, None), (minf, minf)),
-            # Good slope and bad intercept generates error for get_slope_inter
-            ((2, None), HDE, HDE, (2, nan)),
-            ((2, nan), HDE, HDE, (2, nan)),
-            ((2, inf), HDE, HDE, (2, inf)),
-            ((2, minf), HDE, HDE, (2, minf))):
-            # Good slope and inter - you guessed it
-            ((2, 0), None, (None, None), (2, 0)),
-            ((2, 1), None, (None, None), (2, 1)),
+                # Null scalings
+                ((None, None), None, (None, None), (nan, nan)),
+                ((nan, None), None, (None, None), (nan, nan)),
+                ((None, nan), None, (None, None), (nan, nan)),
+                ((nan, nan), None, (None, None), (nan, nan)),
+                # Can only be one null
+                ((None, 0), HDE, (None, None), (nan, 0)),
+                ((nan, 0), HDE, (None, None), (nan, 0)),
+                ((1, None), HDE, (None, None), (1, nan)),
+                ((1, nan), HDE, (None, None), (1, nan)),
+                # Bad slope plus anything generates an error
+                ((0, 0), HDE, (None, None), (0, 0)),
+                ((0, None), HDE, (None, None), (0, nan)),
+                ((0, nan), HDE, (None, None), (0, nan)),
+                ((0, inf), HDE, (None, None), (0, inf)),
+                ((0, minf), HDE, (None, None), (0, minf)),
+                ((inf, 0), HDE, (None, None), (inf, 0)),
+                ((inf, None), HDE, (None, None), (inf, nan)),
+                ((inf, nan), HDE, (None, None), (inf, nan)),
+                ((inf, inf), HDE, (None, None), (inf, inf)),
+                ((inf, minf), HDE, (None, None), (inf, minf)),
+                ((minf, 0), HDE, (None, None), (minf, 0)),
+                ((minf, None), HDE, (None, None), (minf, nan)),
+                ((minf, nan), HDE, (None, None), (minf, nan)),
+                ((minf, inf), HDE, (None, None), (minf, inf)),
+                ((minf, minf), HDE, (None, None), (minf, minf)),
+                # Good slope and bad inter generates error for get_slope_inter
+                ((2, None), HDE, HDE, (2, nan)),
+                ((2, nan), HDE, HDE, (2, nan)),
+                ((2, inf), HDE, HDE, (2, inf)),
+                ((2, minf), HDE, HDE, (2, minf)),
+                # Good slope and inter - you guessed it
+                ((2, 0), None, (2, 0), (2, 0)),
+                ((2, 1), None, (2, 1), (2, 1))):
             hdr = self.header_class()
             if not exp_err is None:
                 assert_raises(exp_err, hdr.set_slope_inter, *in_tup)
@@ -180,7 +180,8 @@ def test_slope_inter(self):
             # Check set survives through checking
             hdr = self.header_class.from_header(hdr, check=True)
             assert_equal(hdr.get_slope_inter(), out_tup)
-            assert_array_equal([hdr['scl_slope'], hdr['scl_inter']], raw_values)
+            assert_array_equal([hdr['scl_slope'], hdr['scl_inter']],
+                               raw_values)
 
     def test_nifti_qsform_checks(self):
         # qfac, qform, sform checks
@@ -261,27 +262,37 @@ def test_freesurfer_large_vector_hack(self):
         hdr.set_data_shape((too_big-1, 1, 1))
         assert_equal(hdr.get_data_shape(), (too_big-1, 1, 1))
         # The freesurfer case
+        full_shape = (too_big, 1, 1, 1, 1, 1, 1)
+        for dim in range(3, 8):
+            # First element in 'dim' field is number of dimensions
+            expected_dim = np.array([dim, -1, 1, 1, 1, 1, 1, 1])
+            with suppress_warnings():
+                hdr.set_data_shape(full_shape[:dim])
+            assert_equal(hdr.get_data_shape(), full_shape[:dim])
+            assert_array_equal(hdr['dim'], expected_dim)
+            assert_equal(hdr['glmin'], too_big)
+        # Allow the fourth dimension to vary
         with suppress_warnings():
-            hdr.set_data_shape((too_big, 1, 1))
-        assert_equal(hdr.get_data_shape(), (too_big, 1, 1))
-        assert_array_equal(hdr['dim'][:4], [3, -1, 1, 1])
-        assert_equal(hdr['glmin'], too_big)
-        # This only works for the case of a 3D with -1, 1, 1
+            hdr.set_data_shape((too_big, 1, 1, 4))
+        assert_equal(hdr.get_data_shape(), (too_big, 1, 1, 4))
+        assert_array_equal(hdr['dim'][:5], np.array([4, -1, 1, 1, 4]))
+        # This only works when the first 3 dimensions are -1, 1, 1
         assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,))
-        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,1))
-        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,1,2))
-        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,2,1))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 1))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 1, 2))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 2, 1))
         assert_raises(HeaderDataError, hdr.set_data_shape, (1, too_big))
         assert_raises(HeaderDataError, hdr.set_data_shape, (1, too_big, 1))
         assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, too_big))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, too_big))
         # Outside range of glmin raises error
         far_too_big = int(np.iinfo(glmin).max) + 1
         with suppress_warnings():
             hdr.set_data_shape((far_too_big-1, 1, 1))
         assert_equal(hdr.get_data_shape(), (far_too_big-1, 1, 1))
-        assert_raises(HeaderDataError, hdr.set_data_shape, (far_too_big,1,1))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (far_too_big, 1, 1))
         # glmin of zero raises error (implausible vector length)
-        hdr.set_data_shape((-1,1,1))
+        hdr.set_data_shape((-1, 1, 1))
         hdr['glmin'] = 0
         assert_raises(HeaderDataError, hdr.get_data_shape)
         # Lists or tuples or arrays will work for setting shape
@@ -295,9 +306,22 @@ def test_freesurfer_ico7_hack(self):
         HC = self.header_class
         hdr = HC()
+        full_shape = (163842, 1, 1, 1, 1, 1, 1)
         # Test that using ico7 shape automatically uses factored dimensions
-        hdr.set_data_shape((163842, 1, 1))
-        assert_array_equal(hdr._structarr['dim'][1:4], np.array([27307, 1, 6]))
+        for dim in range(3, 8):
+            expected_dim = np.array([dim, 27307, 1, 6, 1, 1, 1, 1])
+            hdr.set_data_shape(full_shape[:dim])
+            assert_equal(hdr.get_data_shape(), full_shape[:dim])
+            assert_array_equal(hdr._structarr['dim'], expected_dim)
+        # Only works on dimensions >= 3
+        assert_raises(HeaderDataError, hdr.set_data_shape, full_shape[:1])
+        assert_raises(HeaderDataError, hdr.set_data_shape, full_shape[:2])
+        # Bad shapes
+        assert_raises(HeaderDataError, hdr.set_data_shape, (163842, 2, 1))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (163842, 1, 2))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (1, 163842, 1))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 163842))
+        assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, 163842))
         # Test consistency of data in .mgh and mri_convert produced .nii
         nitest_path = os.path.join(get_nibabel_data(), 'nitest-freesurfer')
         mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf',
@@ -320,15 +344,16 @@ def test_qform_sform(self):
         HC = self.header_class
hdr = HC() assert_array_equal(hdr.get_qform(), np.eye(4)) - empty_sform = np.zeros((4,4)) - empty_sform[-1,-1] = 1 + empty_sform = np.zeros((4, 4)) + empty_sform[-1, -1] = 1 assert_array_equal(hdr.get_sform(), empty_sform) assert_equal(hdr.get_qform(coded=True), (None, 0)) assert_equal(hdr.get_sform(coded=True), (None, 0)) - # Affine with no shears + # Affines with no shears nice_aff = np.diag([2, 3, 4, 1]) + another_aff = np.diag([3, 4, 5, 1]) # Affine with shears - nasty_aff = from_matvec(np.arange(9).reshape((3,3)), [9, 10, 11]) + nasty_aff = from_matvec(np.arange(9).reshape((3, 3)), [9, 10, 11]) fixed_aff = unshear_44(nasty_aff) for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), (hdr.set_sform, hdr.get_sform)): @@ -336,11 +361,12 @@ def test_qform_sform(self): aff, code = out_meth(coded=True) assert_array_equal(aff, nice_aff) assert_equal(code, 2) - assert_array_equal(out_meth(), nice_aff) # non coded - # Affine can also be passed if code == 0, affine will be suitably set - in_meth(nice_aff, 0) - assert_equal(out_meth(coded=True), (None, 0)) - assert_array_almost_equal(out_meth(), nice_aff) + assert_array_equal(out_meth(), nice_aff) # non coded + # Affine may be passed if code == 0, and will get set into header, + # but the returned affine with 'coded=True' will be None. + in_meth(another_aff, 0) + assert_equal(out_meth(coded=True), (None, 0)) # coded -> None + assert_array_almost_equal(out_meth(), another_aff) # else -> input # Default qform code when previous == 0 is 2 in_meth(nice_aff) aff, code = out_meth(coded=True) @@ -351,10 +377,10 @@ def test_qform_sform(self): aff, code = out_meth(coded=True) assert_equal(code, 1) # Can set code without modifying affine, by passing affine=None - assert_array_equal(aff, nice_aff) # affine same as before + assert_array_equal(aff, nice_aff) # affine same as before in_meth(None, 3) aff, code = out_meth(coded=True) - assert_array_equal(aff, nice_aff) # affine same as before + assert_array_equal(aff, nice_aff) # affine same as before assert_equal(code, 3) # affine is None on its own, or with code==0, resets code to 0 in_meth(None, 0) @@ -422,6 +448,10 @@ def test_qform(self): assert_true, ehdr['qform_code'] == xfas['scanner'] ehdr.set_qform(A, xfas['aligned']) assert_true, ehdr['qform_code'] == xfas['aligned'] + # Test pixdims[1,2,3] are checked for negatives + for dims in ((-1, 1, 1), (1, -1, 1), (1, 1, -1)): + ehdr['pixdim'][1:4] = dims + assert_raises(HeaderDataError, ehdr.get_qform) def test_sform(self): # Test roundtrip case @@ -439,13 +469,13 @@ def test_sform(self): def test_dim_info(self): ehdr = self.header_class() assert_true(ehdr.get_dim_info() == (None, None, None)) - for info in ((0,2,1), - (None, None, None), - (0,2,None), - (0,None,None), - (None,2,1), - (None, None,1), - ): + for info in ((0, 2, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): ehdr.set_dim_info(*info) assert_true(ehdr.get_dim_info() == info) @@ -467,27 +497,26 @@ def test_slice_times(self): #The following examples are from the nifti1.h documentation. 
hdr['slice_code'] = slice_order_codes['sequential increasing'] assert_equal(_print_me(hdr.get_slice_times()), - ['0.0', '0.1', '0.2', '0.3', '0.4', - '0.5', '0.6']) + ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) hdr['slice_start'] = 1 hdr['slice_end'] = 5 assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) + [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) hdr['slice_code'] = slice_order_codes['sequential decreasing'] assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) + [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) hdr['slice_code'] = slice_order_codes['alternating increasing'] assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) + [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) hdr['slice_code'] = slice_order_codes['alternating decreasing'] assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) + [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) hdr['slice_code'] = slice_order_codes['alternating increasing 2'] assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) + [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) hdr['slice_code'] = slice_order_codes['alternating decreasing 2'] assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) + [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) # test set hdr = self.header_class() hdr.set_dim_info(slice=2) @@ -495,25 +524,21 @@ def test_slice_times(self): times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None] assert_raises(HeaderDataError, hdr.set_slice_times, times) hdr.set_data_shape([1, 1, 7]) - assert_raises(HeaderDataError, - hdr.set_slice_times, - times[:-1]) # wrong length - assert_raises(HeaderDataError, - hdr.set_slice_times, - (None,) * len(times)) # all None + assert_raises(HeaderDataError, hdr.set_slice_times, + times[:-1]) # wrong length + assert_raises(HeaderDataError, hdr.set_slice_times, + (None,) * len(times)) # all None n_mid_times = times[:] n_mid_times[3] = None - assert_raises(HeaderDataError, - hdr.set_slice_times, - n_mid_times) # None in middle + assert_raises(HeaderDataError, hdr.set_slice_times, + n_mid_times) # None in middle funny_times = times[:] funny_times[3] = 0.05 - assert_raises(HeaderDataError, - hdr.set_slice_times, - funny_times) # can't get single slice duration + assert_raises(HeaderDataError, hdr.set_slice_times, + funny_times) # can't get single slice duration hdr.set_slice_times(times) assert_equal(hdr.get_value_label('slice_code'), - 'alternating decreasing') + 'alternating decreasing') assert_equal(hdr['slice_start'], 1) assert_equal(hdr['slice_end'], 5) assert_array_almost_equal(hdr['slice_duration'], 0.1) @@ -524,25 +549,18 @@ def test_intents(self): assert_equal(ehdr.get_intent(), ('t test', (10.0,), 'some score')) # invalid intent name - assert_raises(KeyError, - ehdr.set_intent, 'no intention') + assert_raises(KeyError, ehdr.set_intent, 'no intention') # too many parameters - assert_raises(HeaderDataError, - ehdr.set_intent, - 't test', (10,10)) + assert_raises(HeaderDataError, ehdr.set_intent, 't test', (10, 10)) # too few parameters - assert_raises(HeaderDataError, - ehdr.set_intent, - 'f test', (10,)) + assert_raises(HeaderDataError, ehdr.set_intent, 'f test', (10,)) # check unset parameters are set to 0, and name to '' ehdr.set_intent('t test') - assert_equal((ehdr['intent_p1'], - ehdr['intent_p2'], - ehdr['intent_p3']), (0,0,0)) + 
assert_equal((ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']), + (0, 0, 0)) assert_equal(ehdr['intent_name'], b'') ehdr.set_intent('t test', (10,)) - assert_equal((ehdr['intent_p2'], - ehdr['intent_p3']), (0,0)) + assert_equal((ehdr['intent_p2'], ehdr['intent_p3']), (0, 0)) def test_set_slice_times(self): hdr = self.header_class() @@ -610,9 +628,9 @@ def test_recoded_fields(self): hdr.set_intent('t test', (10,), name='some score') assert_equal(hdr.get_value_label('intent_code'), 't test') assert_equal(hdr.get_value_label('slice_code'), 'unknown') - hdr['slice_code'] = 4 # alternating decreasing + hdr['slice_code'] = 4 # alternating decreasing assert_equal(hdr.get_value_label('slice_code'), - 'alternating decreasing') + 'alternating decreasing') def unshear_44(affine): @@ -621,7 +639,7 @@ def unshear_44(affine): R = RZS / zooms P, S, Qs = np.linalg.svd(R) PR = np.dot(P, Qs) - return from_matvec(PR * zooms, affine[:3,3]) + return from_matvec(PR * zooms, affine[:3, 3]) class TestNifti1SingleHeader(TestNifti1PairHeader): @@ -700,20 +718,20 @@ def test_qform_cycle(self): # Qform load save cycle img_klass = self.image_class # None affine - img = img_klass(np.zeros((2,3,4)), None) + img = img_klass(np.zeros((2, 3, 4)), None) hdr_back = self._qform_rt(img).header assert_equal(hdr_back['qform_code'], 3) assert_equal(hdr_back['sform_code'], 4) # Try non-None affine - img = img_klass(np.zeros((2,3,4)), np.eye(4)) + img = img_klass(np.zeros((2, 3, 4)), np.eye(4)) hdr_back = self._qform_rt(img).header assert_equal(hdr_back['qform_code'], 3) assert_equal(hdr_back['sform_code'], 4) # Modify affine in-place - does it hold? - img.affine[0,0] = 9 + img.affine[0, 0] = 9 img.to_file_map() img_back = img.from_file_map(img.file_map) - exp_aff = np.diag([9,1,1,1]) + exp_aff = np.diag([9, 1, 1, 1]) assert_array_equal(img_back.affine, exp_aff) hdr_back = img.header assert_array_equal(hdr_back.get_sform(), exp_aff) @@ -721,10 +739,10 @@ def test_qform_cycle(self): def test_header_update_affine(self): # Test that updating occurs only if affine is not allclose - img = self.image_class(np.zeros((2,3,4)), np.eye(4)) + img = self.image_class(np.zeros((2, 3, 4)), np.eye(4)) hdr = img.header aff = img.affine - aff[:] = np.diag([1.1, 1.1, 1.1, 1]) # inexact floats + aff[:] = np.diag([1.1, 1.1, 1.1, 1]) # inexact floats hdr.set_qform(aff, 2) hdr.set_sform(aff, 2) img.update_header() @@ -732,7 +750,8 @@ def test_header_update_affine(self): assert_equal(hdr['qform_code'], 2) def test_set_qform(self): - img = self.image_class(np.zeros((2,3,4)), np.diag([2.2, 3.3, 4.3, 1])) + img = self.image_class(np.zeros((2, 3, 4)), + np.diag([2.2, 3.3, 4.3, 1])) hdr = img.header new_affine = np.diag([1.1, 1.1, 1.1, 1]) # Affine is same as sform (best affine) @@ -779,7 +798,7 @@ def test_set_qform(self): assert_raises(TypeError, img.get_qform, strange=True) # updating None affine, None header does not work, because None header # results in setting the sform to default - img = self.image_class(np.zeros((2,3,4)), None) + img = self.image_class(np.zeros((2, 3, 4)), None) new_affine = np.eye(4) img.set_qform(new_affine, 2) assert_array_almost_equal(img.affine, img.header.get_best_affine()) @@ -789,7 +808,7 @@ def test_set_qform(self): def test_set_sform(self): orig_aff = np.diag([2.2, 3.3, 4.3, 1]) - img = self.image_class(np.zeros((2,3,4)), orig_aff) + img = self.image_class(np.zeros((2, 3, 4)), orig_aff) hdr = img.header new_affine = np.diag([1.1, 1.1, 1.1, 1]) qform_affine = np.diag([1.2, 1.2, 1.2, 1]) @@ -841,14 +860,14 @@ def 
test_set_sform(self): # Unexpected keyword raises error assert_raises(TypeError, img.get_sform, strange=True) # updating None affine should also work - img = self.image_class(np.zeros((2,3,4)), None) + img = self.image_class(np.zeros((2, 3, 4)), None) new_affine = np.eye(4) img.set_sform(new_affine, 2) assert_array_almost_equal(img.affine, new_affine) def test_hdr_diff(self): # Check an offset beyond data does not raise an error - img = self.image_class(np.zeros((2,3,4)), np.eye(4)) + img = self.image_class(np.zeros((2, 3, 4)), np.eye(4)) ext = dict(img.files_types)['image'] hdr_len = len(img.header.binaryblock) img.header['vox_offset'] = hdr_len + 400 @@ -876,7 +895,8 @@ def test_load_save(self): assert_true(isinstance(img3, img.__class__)) assert_array_equal(img3.get_data(), data) assert_equal(img3.header, img.header) - assert_true(isinstance(img3.get_data(), np.memmap if ext == '' else np.ndarray)) + assert_true(isinstance(img3.get_data(), + np.memmap if ext == '' else np.ndarray)) # del to avoid windows errors of form 'The process cannot # access the file because it is being used' del img3 @@ -885,7 +905,7 @@ def test_load_pixdims(self): # Make sure load preserves separate qform, pixdims, sform IC = self.image_class HC = IC.header_class - arr = np.arange(24).reshape((2,3,4)) + arr = np.arange(24).reshape((2, 3, 4)) qaff = np.diag([2, 3, 4, 1]) saff = np.diag([5, 6, 7, 1]) hdr = HC() @@ -898,7 +918,7 @@ def test_load_pixdims(self): # Check qform, sform, pixdims are the same assert_array_equal(img_hdr.get_qform(), qaff) assert_array_equal(img_hdr.get_sform(), saff) - assert_array_equal(img_hdr.get_zooms(), [2,3,4]) + assert_array_equal(img_hdr.get_zooms(), [2, 3, 4]) # Save to stringio re_simg = bytesio_round_trip(simg) assert_array_equal(re_simg.get_data(), arr) @@ -906,14 +926,14 @@ def test_load_pixdims(self): rimg_hdr = re_simg.header assert_array_equal(rimg_hdr.get_qform(), qaff) assert_array_equal(rimg_hdr.get_sform(), saff) - assert_array_equal(rimg_hdr.get_zooms(), [2,3,4]) + assert_array_equal(rimg_hdr.get_zooms(), [2, 3, 4]) def test_affines_init(self): # Test we are doing vaguely spec-related qform things. 
The 'spec' here # is some thoughts by Mark Jenkinson: # http://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/qsform_brief_usage IC = self.image_class - arr = np.arange(24).reshape((2,3,4)) + arr = np.arange(24).reshape((2, 3, 4)) aff = np.diag([2, 3, 4, 1]) # Default is sform set, qform not set img = IC(arr, aff) @@ -937,16 +957,16 @@ def test_affines_init(self): # But if no affine passed, codes and matrices stay the same img = IC(arr, None, hdr) new_hdr = img.header - assert_equal(new_hdr['qform_code'], 1) # scanner + assert_equal(new_hdr['qform_code'], 1) # scanner assert_array_equal(new_hdr.get_qform(), qaff) - assert_equal(new_hdr['sform_code'], 3) # Still talairach + assert_equal(new_hdr['sform_code'], 3) # Still talairach assert_array_equal(new_hdr.get_sform(), saff) # Pixdims as in the original header assert_array_equal(new_hdr.get_zooms(), [3, 4, 5]) def test_read_no_extensions(self): IC = self.image_class - arr = np.arange(24).reshape((2,3,4)) + arr = np.arange(24).reshape((2, 3, 4)) img = IC(arr, np.eye(4)) assert_equal(len(img.header.extensions), 0) img_rt = bytesio_round_trip(img) @@ -967,11 +987,11 @@ def _set_raw_scaling(self, hdr, slope, inter): def test_write_scaling(self): # Check we can set slope, inter on write for slope, inter, e_slope, e_inter in ( - (1, 0, 1, 0), - (2, 0, 2, 0), - (2, 1, 2, 1), - (0, 0, 1, 0), - (np.inf, 0, 1, 0)): + (1, 0, 1, 0), + (2, 0, 2, 0), + (2, 1, 2, 1), + (0, 0, 1, 0), + (np.inf, 0, 1, 0)): with np.errstate(invalid='ignore'): self._check_write_scaling(slope, inter, e_slope, e_inter) @@ -983,7 +1003,7 @@ class TestNifti1Image(TestNifti1Pair): def test_offset_errors(self): # Test that explicit offset too low raises error IC = self.image_class - arr = np.arange(24).reshape((2,3,4)) + arr = np.arange(24).reshape((2, 3, 4)) img = IC(arr, np.eye(4)) assert_equal(img.header.get_data_offset(), 0) # Saving with zero offset is OK @@ -1014,7 +1034,7 @@ def test_ext_eq(): def test_extension_codes(): for k in extension_codes.keys(): - ext = Nifti1Extension(k, 'somevalue') + Nifti1Extension(k, 'somevalue') def test_extension_list(): @@ -1120,10 +1140,9 @@ def test_loadsave_cycle(self): lnim = bytesio_round_trip(nim) hdr = lnim.header lexts_container = hdr.extensions - assert_equal(exts_container, - lexts_container) + assert_equal(exts_container, lexts_container) # build int16 image - data = np.ones((2,3,4,5), dtype='int16') + data = np.ones((2, 3, 4, 5), dtype='int16') img = self.single_class(data, np.eye(4)) hdr = img.header assert_equal(hdr.get_data_dtype(), np.int16) @@ -1150,9 +1169,9 @@ def test_loadsave_cycle(self): assert_equal((lnim.dataobj.slope, lnim.dataobj.inter), (2, 8)) def test_load(self): - # test module level load. We try to load a nii and an .img and a .hdr and - # expect to get a nifti back of single or pair type - arr = np.arange(24).reshape((2,3,4)) + # test module level load. 
We try to load a nii and an .img and a .hdr + # and expect to get a nifti back of single or pair type + arr = np.arange(24).reshape((2, 3, 4)) aff = np.diag([2, 3, 4, 1]) simg = self.single_class(arr, aff) pimg = self.pair_class(arr, aff) @@ -1194,7 +1213,8 @@ def test_float_int_spread(self): arr_back_sc = img_back.get_data() slope, inter = img_back.header.get_slope_inter() # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, + inter) # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) @@ -1217,7 +1237,24 @@ def test_rt_bias(self): slope, inter = img_back.header.get_slope_inter() bias = np.mean(arr_t - arr_back_sc) # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, + inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) assert_true(np.abs(bias) < bias_thresh) + + +@runif_extra_has('slow') +def test_large_nifti1(): + image_shape = (91, 109, 91, 1200) + img = Nifti1Image(np.ones(image_shape, dtype=np.float32), + affine=np.eye(4)) + # Dump and load the large image. + with InTemporaryDirectory(): + img.to_filename('test.nii.gz') + del img + data = load('test.nii.gz').get_data() + # Check that the data are all ones + assert_equal(image_shape, data.shape) + n_ones = np.sum((data == 1.)) + assert_equal(np.prod(image_shape), n_ones) diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 52c29c98cf..fc1a76df0b 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -6,8 +6,8 @@ import numpy as np from ..externals.six import BytesIO -from .. import Nifti1Image -from ..spatialimages import HeaderDataError +from .. 
import Nifti1Image, Nifti1Header +from ..spatialimages import HeaderDataError, supported_np_types from ..arraywriters import ScalingError from ..casting import best_float, ulp, type_info @@ -95,9 +95,9 @@ def test_round_trip(): N = 10000 sd_10s = range(-20, 51, 5) iuint_types = np.sctypes['int'] + np.sctypes['uint'] - # Remove intp types, which cannot be set into nifti header datatype - iuint_types.remove(np.intp) - iuint_types.remove(np.uintp) + # Remove types which cannot be set into nifti header datatype + nifti_supported = supported_np_types(Nifti1Header()) + iuint_types = [t for t in iuint_types if t in nifti_supported] f_types = [np.float32, np.float64] # Expanding standard deviations for i, sd_10 in enumerate(sd_10s): diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py new file mode 100644 index 0000000000..74e47a2331 --- /dev/null +++ b/nibabel/tests/test_tripwire.py @@ -0,0 +1,29 @@ +""" Testing tripwire module +""" + +from ..tripwire import TripWire, is_tripwire, TripWireError + +from nose import SkipTest +from nose.tools import (assert_true, assert_false, assert_raises, + assert_equal, assert_not_equal) + + +def test_is_tripwire(): + assert_false(is_tripwire(object())) + assert_true(is_tripwire(TripWire('some message'))) + + +def test_tripwire(): + # Test tripwire object + silly_module_name = TripWire('We do not have silly_module_name') + assert_raises(TripWireError, + getattr, + silly_module_name, + 'do_silly_thing') + # Check AttributeError can be checked too + try: + silly_module_name.__wrapped__ + except TripWireError as err: + assert_true(isinstance(err, AttributeError)) + else: + raise RuntimeError("No error raised, but expected") diff --git a/nibabel/tests/test_utils.py b/nibabel/tests/test_utils.py index 1810722b55..83122bbe95 100644 --- a/nibabel/tests/test_utils.py +++ b/nibabel/tests/test_utils.py @@ -1227,3 +1227,21 @@ def assert_rt(data, slope = slope, post_clips = post_clips, nan_fill = nan_fill) + + +def test_array_from_file_overflow(): + # Test for int overflow in size calculation in array_from_file + shape = (1500,) * 6 + class NoStringIO: # Null file-like for forcing error + def seek(self, n_bytes): + pass + def read(self, n_bytes): + return b'' + try: + array_from_file(shape, np.int8, NoStringIO()) + except IOError as err: + message = str(err) + assert_equal(message, + 'Expected {0} bytes, got {1} bytes from {2}\n' + ' - could the file be damaged?'.format( + 11390625000000000000, 0, 'object')) diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index 1576757bf7..e4967ee69e 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,8 +1,13 @@ """ Class to raise error for missing modules or other misfortunes """ -class TripWireError(Exception): + +class TripWireError(AttributeError): """ Exception if trying to use TripWire object """ + # Has to be subclass of AttributeError, to work round Python 3.5 inspection + # for doctests. Python 3.5 looks for a ``__wrapped__`` attribute during + # initialization of doctests, and only allows AttributeError as signal this + # is not present. def is_tripwire(obj): @@ -31,14 +36,11 @@ class TripWire(object): Examples -------- - >>> try: - ... import silly_module_name - ... except ImportError: - ... 
silly_module_name = TripWire('We do not have silly_module_name') - >>> silly_module_name.do_silly_thing('with silly string') #doctest: +IGNORE_EXCEPTION_DETAIL + >>> a_module = TripWire('We do not have a_module') + >>> a_module.do_silly_thing('with silly string') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... - TripWireError: We do not have silly_module_name + TripWireError: We do not have a_module """ def __init__(self, msg): self._msg = msg diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index d43c43f4cc..ce2e0cd6ac 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -14,6 +14,8 @@ import gzip import bz2 from os.path import exists, splitext +from operator import mul +from functools import reduce import numpy as np @@ -504,7 +506,8 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): pass if len(shape) == 0: return np.array([]) - n_bytes = int(np.prod(shape) * in_dtype.itemsize) + # Use reduce and mul to work around numpy integer overflow + n_bytes = reduce(mul, shape) * in_dtype.itemsize if n_bytes == 0: return np.array([]) # Read data from file diff --git a/nisext/ast.py b/nisext/ast.py deleted file mode 100644 index 1984f43388..0000000000 --- a/nisext/ast.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -""" - ast - ~~~ - - The `ast` module helps Python applications to process trees of the Python - abstract syntax grammar. The abstract syntax itself might change with - each Python release; this module helps to find out programmatically what - the current grammar looks like and allows modifications of it. - - An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as - a flag to the `compile()` builtin function or by using the `parse()` - function from this module. The result will be a tree of objects whose - classes all inherit from `ast.AST`. - - A modified abstract syntax tree can be compiled into a Python code object - using the built-in `compile()` function. - - Additionally various helper functions are provided that make working with - the trees simpler. The main intention of the helper functions and this - module in general is to provide an easy to use interface for libraries - that work tightly with the python syntax (template engines for example). - - - :copyright: Copyright 2008 by Armin Ronacher. - :license: Python License. - - From: http://dev.pocoo.org/hg/sandbox -""" -from _ast import * - - -BOOLOP_SYMBOLS = { - And: 'and', - Or: 'or' -} - -BINOP_SYMBOLS = { - Add: '+', - Sub: '-', - Mult: '*', - Div: '/', - FloorDiv: '//', - Mod: '%', - LShift: '<<', - RShift: '>>', - BitOr: '|', - BitAnd: '&', - BitXor: '^' -} - -CMPOP_SYMBOLS = { - Eq: '==', - Gt: '>', - GtE: '>=', - In: 'in', - Is: 'is', - IsNot: 'is not', - Lt: '<', - LtE: '<=', - NotEq: '!=', - NotIn: 'not in' -} - -UNARYOP_SYMBOLS = { - Invert: '~', - Not: 'not', - UAdd: '+', - USub: '-' -} - -ALL_SYMBOLS = {} -ALL_SYMBOLS.update(BOOLOP_SYMBOLS) -ALL_SYMBOLS.update(BINOP_SYMBOLS) -ALL_SYMBOLS.update(CMPOP_SYMBOLS) -ALL_SYMBOLS.update(UNARYOP_SYMBOLS) - - -def parse(expr, filename='', mode='exec'): - """Parse an expression into an AST node.""" - return compile(expr, filename, mode, PyCF_ONLY_AST) - - -def literal_eval(node_or_string): - """Safe evaluate a literal. The string or node provided may include any - of the following python structures: strings, numbers, tuples, lists, - dicts, booleans or None. 
- """ - _safe_names = {'None': None, 'True': True, 'False': False} - if isinstance(node_or_string, basestring): - node_or_string = parse(node_or_string, mode='eval') - if isinstance(node_or_string, Expression): - node_or_string = node_or_string.body - def _convert(node): - if isinstance(node, Str): - return node.s - elif isinstance(node, Num): - return node.n - elif isinstance(node, Tuple): - return tuple(map(_convert, node.elts)) - elif isinstance(node, List): - return list(map(_convert, node.elts)) - elif isinstance(node, Dict): - return dict((_convert(k), _convert(v)) for k, v - in zip(node.keys, node.values)) - elif isinstance(node, Name): - if node.id in _safe_names: - return _safe_names[node.id] - raise ValueError('malformed string') - return _convert(node_or_string) - - -def dump(node, annotate_fields=True, include_attributes=False): - """A very verbose representation of the node passed. This is useful for - debugging purposes. Per default the returned string will show the names - and the values for fields. This makes the code impossible to evaluate, - if evaluation is wanted `annotate_fields` must be set to False. - Attributes such as line numbers and column offsets are dumped by default. - If this is wanted, `include_attributes` can be set to `True`. - """ - def _format(node): - if isinstance(node, AST): - fields = [(a, _format(b)) for a, b in iter_fields(node)] - rv = '%s(%s' % (node.__class__.__name__, ', '.join( - ('%s=%s' % field for field in fields) - if annotate_fields else - (b for a, b in fields) - )) - if include_attributes and node._attributes: - rv += fields and ', ' or ' ' - rv += ', '.join('%s=%s' % (a, _format(getattr(node, a))) - for a in node._attributes) - return rv + ')' - elif isinstance(node, list): - return '[%s]' % ', '.join(_format(x) for x in node) - return repr(node) - if not isinstance(node, AST): - raise TypeError('expected AST, got %r' % node.__class__.__name__) - return _format(node) - - -def copy_location(new_node, old_node): - """Copy the source location hint (`lineno` and `col_offset`) from the - old to the new node if possible and return the new one. - """ - for attr in 'lineno', 'col_offset': - if attr in old_node._attributes and attr in new_node._attributes \ - and hasattr(old_node, attr): - setattr(new_node, attr, getattr(old_node, attr)) - return new_node - - -def fix_missing_locations(node): - """Some nodes require a line number and the column offset. Without that - information the compiler will abort the compilation. Because it can be - a dull task to add appropriate line numbers and column offsets when - adding new nodes this function can help. It copies the line number and - column offset of the parent node to the child nodes without this - information. - - Unlike `copy_location` this works recursive and won't touch nodes that - already have a location information. - """ - def _fix(node, lineno, col_offset): - if 'lineno' in node._attributes: - if not hasattr(node, 'lineno'): - node.lineno = lineno - else: - lineno = node.lineno - if 'col_offset' in node._attributes: - if not hasattr(node, 'col_offset'): - node.col_offset = col_offset - else: - col_offset = node.col_offset - for child in iter_child_nodes(node): - _fix(child, lineno, col_offset) - _fix(node, 1, 0) - return node - - -def increment_lineno(node, n=1): - """Increment the line numbers of all nodes by `n` if they have line number - attributes. This is useful to "move code" to a different location in a - file. 
- """ - if 'lineno' in node._attributes: - node.lineno = getattr(node, 'lineno', 0) + n - for child in walk(node): - if 'lineno' in child._attributes: - child.lineno = getattr(child, 'lineno', 0) + n - return node - - -def iter_fields(node): - """Iterate over all fields of a node, only yielding existing fields.""" - for field in node._fields: - try: - yield field, getattr(node, field) - except AttributeError: - pass - - -def get_fields(node): - """Like `iter_fiels` but returns a dict.""" - return dict(iter_fields(node)) - - -def iter_child_nodes(node): - """Iterate over all child nodes or a node.""" - for name, field in iter_fields(node): - if isinstance(field, AST): - yield field - elif isinstance(field, list): - for item in field: - if isinstance(item, AST): - yield item - - -def get_child_nodes(node): - """Like `iter_child_nodes` but returns a list.""" - return list(iter_child_nodes(node)) - - -def get_docstring(node, trim=True): - """Return the docstring for the given node or `None` if no docstring can - be found. If the node provided does not accept docstrings a `TypeError` - will be raised. - """ - if not isinstance(node, (FunctionDef, ClassDef, Module)): - raise TypeError("%r can't have docstrings" % node.__class__.__name__) - if node.body and isinstance(node.body[0], Expr) and \ - isinstance(node.body[0].value, Str): - doc = node.body[0].value.s - if trim: - doc = trim_docstring(doc) - return doc - - -def trim_docstring(docstring): - """Trim a docstring. This should probably go into the inspect module.""" - lines = docstring.expandtabs().splitlines() - - # Find minimum indentation of any non-blank lines after first line. - from sys import maxint - margin = maxint - for line in lines[1:]: - content = len(line.lstrip()) - if content: - indent = len(line) - content - margin = min(margin, indent) - - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < maxint: - for i in range(1, len(lines)): - lines[i] = lines[i][margin:] - - # Remove any trailing or leading blank lines. - while lines and not lines[-1]: - lines.pop() - while lines and not lines[0]: - lines.pop(0) - return '\n'.join(lines) - - -def get_symbol(operator): - """Return the symbol of the given operator node or node type.""" - if isinstance(operator, AST): - operator = type(operator) - try: - return ALL_SYMBOLS[operator] - except KeyError: - raise LookupError('no known symbol for %r' % operator) - - -def walk(node): - """Iterate over all nodes. This is useful if you only want to modify nodes - in place and don't care about the context or the order the nodes are - returned. - """ - from collections import deque - todo = deque([node]) - while todo: - node = todo.popleft() - todo.extend(iter_child_nodes(node)) - yield node - - -class NodeVisitor(object): - """Walks the abstract syntax tree and call visitor functions for every - node found. The visitor functions may return values which will be - forwarded by the `visit` method. - - Per default the visitor functions for the nodes are ``'visit_'`` + - class name of the node. So a `TryFinally` node visit function would - be `visit_TryFinally`. This behavior can be changed by overriding - the `get_visitor` function. If no visitor function exists for a node - (return value `None`) the `generic_visit` visitor is used instead. - - Don't use the `NodeVisitor` if you want to apply changes to nodes during - traversing. For this a special visitor exists (`NodeTransformer`) that - allows modifications. 
- """ - - def get_visitor(self, node): - """Return the visitor function for this node or `None` if no visitor - exists for this node. In that case the generic visit function is - used instead. - """ - method = 'visit_' + node.__class__.__name__ - return getattr(self, method, None) - - def visit(self, node): - """Visit a node.""" - f = self.get_visitor(node) - if f is not None: - return f(node) - return self.generic_visit(node) - - def generic_visit(self, node): - """Called if no explicit visitor function exists for a node.""" - for field, value in iter_fields(node): - if isinstance(value, list): - for item in value: - if isinstance(item, AST): - self.visit(item) - elif isinstance(value, AST): - self.visit(value) - - -class NodeTransformer(NodeVisitor): - """Walks the abstract syntax tree and allows modifications of nodes. - - The `NodeTransformer` will walk the AST and use the return value of the - visitor functions to replace or remove the old node. If the return - value of the visitor function is `None` the node will be removed - from the previous location otherwise it's replaced with the return - value. The return value may be the original node in which case no - replacement takes place. - - Here an example transformer that rewrites all `foo` to `data['foo']`:: - - class RewriteName(NodeTransformer): - - def visit_Name(self, node): - return copy_location(Subscript( - value=Name(id='data', ctx=Load()), - slice=Index(value=Str(s=node.id)), - ctx=node.ctx - ), node) - - Keep in mind that if the node you're operating on has child nodes - you must either transform the child nodes yourself or call the generic - visit function for the node first. - - Nodes that were part of a collection of statements (that applies to - all statement nodes) may also return a list of nodes rather than just - a single node. - - Usually you use the transformer like this:: - - node = YourTransformer().visit(node) - """ - - def generic_visit(self, node): - for field, old_value in iter_fields(node): - old_value = getattr(node, field, None) - if isinstance(old_value, list): - new_values = [] - for value in old_value: - if isinstance(value, AST): - value = self.visit(value) - if value is None: - continue - elif not isinstance(value, AST): - new_values.extend(value) - continue - new_values.append(value) - old_value[:] = new_values - elif isinstance(old_value, AST): - new_node = self.visit(old_value) - if new_node is None: - delattr(node, field) - else: - setattr(node, field, new_node) - return node diff --git a/nisext/codegen.py b/nisext/codegen.py deleted file mode 100644 index 5eef41ea72..0000000000 --- a/nisext/codegen.py +++ /dev/null @@ -1,530 +0,0 @@ -# -*- coding: utf-8 -*- -""" - codegen - ~~~~~~~ - - Extension to ast that allow ast -> python code generation. - - :copyright: Copyright 2008 by Armin Ronacher. - :license: BSD. - - From: http://dev.pocoo.org/hg/sandbox -""" -# Explicit local imports to satisfy python 2.5 -from .ast import (BOOLOP_SYMBOLS, BINOP_SYMBOLS, CMPOP_SYMBOLS, UNARYOP_SYMBOLS, - NodeVisitor, If, Name) - - -def to_source(node, indent_with=' ' * 4, add_line_information=False): - """This function can convert a node tree back into python sourcecode. - This is useful for debugging purposes, especially if you're dealing with - custom asts not generated by python itself. - - It could be that the sourcecode is evaluable when the AST itself is not - compilable / evaluable. 
The reason for this is that the AST contains some - more data than regular sourcecode does, which is dropped during - conversion. - - Each level of indentation is replaced with `indent_with`. Per default this - parameter is equal to four spaces as suggested by PEP 8, but it might be - adjusted to match the application's styleguide. - - If `add_line_information` is set to `True` comments for the line numbers - of the nodes are added to the output. This can be used to spot wrong line - number information of statement nodes. - """ - generator = SourceGenerator(indent_with, add_line_information) - generator.visit(node) - return ''.join(generator.result) - - -class SourceGenerator(NodeVisitor): - """This visitor is able to transform a well formed syntax tree into python - sourcecode. For more details have a look at the docstring of the - `node_to_source` function. - """ - - def __init__(self, indent_with, add_line_information=False): - self.result = [] - self.indent_with = indent_with - self.add_line_information = add_line_information - self.indentation = 0 - self.new_lines = 0 - - def write(self, x): - if self.new_lines: - if self.result: - self.result.append('\n' * self.new_lines) - self.result.append(self.indent_with * self.indentation) - self.new_lines = 0 - self.result.append(x) - - def newline(self, node=None, extra=0): - self.new_lines = max(self.new_lines, 1 + extra) - if node is not None and self.add_line_information: - self.write('# line: %s' % node.lineno) - self.new_lines = 1 - - def body(self, statements): - self.new_line = True - self.indentation += 1 - for stmt in statements: - self.visit(stmt) - self.indentation -= 1 - - def body_or_else(self, node): - self.body(node.body) - if node.orelse: - self.newline() - self.write('else:') - self.body(node.orelse) - - def signature(self, node): - want_comma = [] - def write_comma(): - if want_comma: - self.write(', ') - else: - want_comma.append(True) - - padding = [None] * (len(node.args) - len(node.defaults)) - for arg, default in zip(node.args, padding + node.defaults): - write_comma() - self.visit(arg) - if default is not None: - self.write('=') - self.visit(default) - if node.vararg is not None: - write_comma() - self.write('*' + node.vararg) - if node.kwarg is not None: - write_comma() - self.write('**' + node.kwarg) - - def decorators(self, node): - for decorator in node.decorator_list: - self.newline(decorator) - self.write('@') - self.visit(decorator) - - # Statements - - def visit_Assign(self, node): - self.newline(node) - for idx, target in enumerate(node.targets): - if idx: - self.write(', ') - self.visit(target) - self.write(' = ') - self.visit(node.value) - - def visit_AugAssign(self, node): - self.newline(node) - self.visit(node.target) - self.write(BINOP_SYMBOLS[type(node.op)] + '=') - self.visit(node.value) - - def visit_ImportFrom(self, node): - self.newline(node) - self.write('from %s%s import ' % ('.' 
* node.level, node.module)) - for idx, item in enumerate(node.names): - if idx: - self.write(', ') - self.write(item) - - def visit_Import(self, node): - self.newline(node) - for item in node.names: - self.write('import ') - self.visit(item) - - def visit_Expr(self, node): - self.newline(node) - self.generic_visit(node) - - def visit_FunctionDef(self, node): - self.newline(extra=1) - self.decorators(node) - self.newline(node) - self.write('def %s(' % node.name) - self.signature(node.args) - self.write('):') - self.body(node.body) - - def visit_ClassDef(self, node): - have_args = [] - def paren_or_comma(): - if have_args: - self.write(', ') - else: - have_args.append(True) - self.write('(') - - self.newline(extra=2) - self.decorators(node) - self.newline(node) - self.write('class %s' % node.name) - for base in node.bases: - paren_or_comma() - self.visit(base) - # XXX: the if here is used to keep this module compatible - # with python 2.6. - if hasattr(node, 'keywords'): - for keyword in node.keywords: - paren_or_comma() - self.write(keyword.arg + '=') - self.visit(keyword.value) - if node.starargs is not None: - paren_or_comma() - self.write('*') - self.visit(node.starargs) - if node.kwargs is not None: - paren_or_comma() - self.write('**') - self.visit(node.kwargs) - self.write(have_args and '):' or ':') - self.body(node.body) - - def visit_If(self, node): - self.newline(node) - self.write('if ') - self.visit(node.test) - self.write(':') - self.body(node.body) - while True: - else_ = node.orelse - if len(else_) == 1 and isinstance(else_[0], If): - node = else_[0] - self.newline() - self.write('elif ') - self.visit(node.test) - self.write(':') - self.body(node.body) - else: - self.newline() - self.write('else:') - self.body(else_) - break - - def visit_For(self, node): - self.newline(node) - self.write('for ') - self.visit(node.target) - self.write(' in ') - self.visit(node.iter) - self.write(':') - self.body_or_else(node) - - def visit_While(self, node): - self.newline(node) - self.write('while ') - self.visit(node.test) - self.write(':') - self.body_or_else(node) - - def visit_With(self, node): - self.newline(node) - self.write('with ') - self.visit(node.context_expr) - if node.optional_vars is not None: - self.write(' as ') - self.visit(node.optional_vars) - self.write(':') - self.body(node.body) - - def visit_Pass(self, node): - self.newline(node) - self.write('pass') - - def visit_Print(self, node): - # XXX: python 2.6 only - self.newline(node) - self.write('print ') - want_comma = False - if node.dest is not None: - self.write(' >> ') - self.visit(node.dest) - want_comma = True - for value in node.values: - if want_comma: - self.write(', ') - self.visit(value) - want_comma = True - if not node.nl: - self.write(',') - - def visit_Delete(self, node): - self.newline(node) - self.write('del ') - for idx, target in enumerate(node): - if idx: - self.write(', ') - self.visit(target) - - def visit_TryExcept(self, node): - self.newline(node) - self.write('try:') - self.body(node.body) - for handler in node.handlers: - self.visit(handler) - - def visit_TryFinally(self, node): - self.newline(node) - self.write('try:') - self.body(node.body) - self.newline(node) - self.write('finally:') - self.body(node.finalbody) - - def visit_Global(self, node): - self.newline(node) - self.write('global ' + ', '.join(node.names)) - - def visit_Nonlocal(self, node): - self.newline(node) - self.write('nonlocal ' + ', '.join(node.names)) - - def visit_Return(self, node): - self.newline(node) - self.write('return 
') - self.visit(node.value) - - def visit_Break(self, node): - self.newline(node) - self.write('break') - - def visit_Continue(self, node): - self.newline(node) - self.write('continue') - - def visit_Raise(self, node): - # XXX: Python 2.6 / 3.0 compatibility - self.newline(node) - self.write('raise') - if hasattr(node, 'exc') and node.exc is not None: - self.write(' ') - self.visit(node.exc) - if node.cause is not None: - self.write(' from ') - self.visit(node.cause) - elif hasattr(node, 'type') and node.type is not None: - self.visit(node.type) - if node.inst is not None: - self.write(', ') - self.visit(node.inst) - if node.tback is not None: - self.write(', ') - self.visit(node.tback) - - # Expressions - - def visit_Attribute(self, node): - self.visit(node.value) - self.write('.' + node.attr) - - def visit_Call(self, node): - want_comma = [] - def write_comma(): - if want_comma: - self.write(', ') - else: - want_comma.append(True) - - self.visit(node.func) - self.write('(') - for arg in node.args: - write_comma() - self.visit(arg) - for keyword in node.keywords: - write_comma() - self.write(keyword.arg + '=') - self.visit(keyword.value) - if node.starargs is not None: - write_comma() - self.write('*') - self.visit(node.starargs) - if node.kwargs is not None: - write_comma() - self.write('**') - self.visit(node.kwargs) - self.write(')') - - def visit_Name(self, node): - self.write(node.id) - - def visit_Str(self, node): - self.write(repr(node.s)) - - def visit_Bytes(self, node): - self.write(repr(node.s)) - - def visit_Num(self, node): - self.write(repr(node.n)) - - def visit_Tuple(self, node): - self.write('(') - idx = -1 - for idx, item in enumerate(node.elts): - if idx: - self.write(', ') - self.visit(item) - self.write(idx and ')' or ',)') - - def sequence_visit(left, right): - def visit(self, node): - self.write(left) - for idx, item in enumerate(node.elts): - if idx: - self.write(', ') - self.visit(item) - self.write(right) - return visit - - visit_List = sequence_visit('[', ']') - visit_Set = sequence_visit('{', '}') - del sequence_visit - - def visit_Dict(self, node): - self.write('{') - for idx, (key, value) in enumerate(zip(node.keys, node.values)): - if idx: - self.write(', ') - self.visit(key) - self.write(': ') - self.visit(value) - self.write('}') - - def visit_BinOp(self, node): - self.visit(node.left) - self.write(' %s ' % BINOP_SYMBOLS[type(node.op)]) - self.visit(node.right) - - def visit_BoolOp(self, node): - self.write('(') - for idx, value in enumerate(node.values): - if idx: - self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)]) - self.visit(value) - self.write(')') - - def visit_Compare(self, node): - self.write('(') - self.write(node.left) - for op, right in zip(node.ops, node.comparators): - self.write(' %s %%' % CMPOP_SYMBOLS[type(op)]) - self.visit(right) - self.write(')') - - def visit_UnaryOp(self, node): - self.write('(') - op = UNARYOP_SYMBOLS[type(node.op)] - self.write(op) - if op == 'not': - self.write(' ') - self.visit(node.operand) - self.write(')') - - def visit_Subscript(self, node): - self.visit(node.value) - self.write('[') - self.visit(node.slice) - self.write(']') - - def visit_Slice(self, node): - if node.lower is not None: - self.visit(node.lower) - self.write(':') - if node.upper is not None: - self.visit(node.upper) - if node.step is not None: - self.write(':') - if not (isinstance(node.step, Name) and node.step.id == 'None'): - self.visit(node.step) - - def visit_ExtSlice(self, node): - for idx, item in node.dims: - if idx: - self.write(', ') - 
self.visit(item) - - def visit_Yield(self, node): - self.write('yield ') - self.visit(node.value) - - def visit_Lambda(self, node): - self.write('lambda ') - self.signature(node.args) - self.write(': ') - self.visit(node.body) - - def visit_Ellipsis(self, node): - self.write('Ellipsis') - - def generator_visit(left, right): - def visit(self, node): - self.write(left) - self.visit(node.elt) - for comprehension in node.generators: - self.visit(comprehension) - self.write(right) - return visit - - visit_ListComp = generator_visit('[', ']') - visit_GeneratorExp = generator_visit('(', ')') - visit_SetComp = generator_visit('{', '}') - del generator_visit - - def visit_DictComp(self, node): - self.write('{') - self.visit(node.key) - self.write(': ') - self.visit(node.value) - for comprehension in node.generators: - self.visit(comprehension) - self.write('}') - - def visit_IfExp(self, node): - self.visit(node.body) - self.write(' if ') - self.visit(node.test) - self.write(' else ') - self.visit(node.orelse) - - def visit_Starred(self, node): - self.write('*') - self.visit(node.value) - - def visit_Repr(self, node): - # XXX: python 2.6 only - self.write('`') - self.visit(node.value) - self.write('`') - - # Helper Nodes - - def visit_alias(self, node): - self.write(node.name) - if node.asname is not None: - self.write(' as ' + node.asname) - - def visit_comprehension(self, node): - self.write(' for ') - self.visit(node.target) - self.write(' in ') - self.visit(node.iter) - if node.ifs: - for if_ in node.ifs: - self.write(' if ') - self.visit(if_) - - def visit_excepthandler(self, node): - self.newline(node) - self.write('except') - if node.type is not None: - self.write(' ') - self.visit(node.type) - if node.name is not None: - self.write(' as ') - self.visit(node.name) - self.write(':') - self.body(node.body) diff --git a/nisext/py3builder.py b/nisext/py3builder.py index 2efddaff1f..9435f6c60b 100644 --- a/nisext/py3builder.py +++ b/nisext/py3builder.py @@ -40,200 +40,3 @@ def log_debug(self, msg, *args): r.refactor(files, write=True) # Then doctests r.refactor(files, write=True, doctests_only=True) - # Then custom doctests markup - doctest_markup_files(files) - - -def doctest_markup_files(fnames): - """ Process simple doctest comment markup on sequence of filenames - - Parameters - ---------- - fnames : seq - sequence of filenames - - Returns - ------- - None - """ - for fname in fnames: - with open(fname, 'rt') as fobj: - res = list(fobj) - out, errs = doctest_markup(res) - for err_tuple in errs: - print('Marked line %s unchanged because "%s"' % err_tuple) - with open(fname, 'wt') as fobj: - fobj.write(''.join(out)) - - -MARK_COMMENT = re.compile('(\s*>>>\s+)(.*?)(\s*#23dt\s+)(.*?\s*)$', re.DOTALL) -PLACE_LINE_EXPRS = re.compile('\s*([\w+\- ]*):\s*(.*)$') -INDENT_SPLITTER = re.compile('(\s*)(.*?)(\s*)$', re.DOTALL) - -def doctest_markup(in_lines): - """ Process doctest comment markup on sequence of strings - - The algorithm looks for lines that start with optional whitespace followed - by ``>>>`` and ending with a comment starting with ``#23dt``. The stuff - after the ``#23dt`` marker is the *markup* and gives instructions for - modifying the corresponding line or some other line. - - The *markup* is of form : . Let's say the output - lines are in a variable ``out_lines``. - - * is an expression giving a line number. In this expression, - the two variables defined are ``here`` (giving the current line number), - and ``next == here+1``. Let's call the result of ``place``. 
- If is empty (only whitespace before the colon) then ``place - == here``. The result of will replace ``lines[place]``. - * is a special value (see below) or a python3 expression - returning a processed value, where ``line`` contains the line referred to - by line number ``place``, and ``lines`` is a list of all lines. If - ``place != here``, then ``line == lines[place]``. If ``place == here`` - then ``line`` will be the source line, minus the comment and markup. - - A beginning with "replace(" we take to be short for - "line.replace(". - - Special values; if ==: - - * 'bytes': make all the strings in the selected line be byte strings. This - algormithm uses the ``ast`` module, so the text in which it works must be - valid python 3 syntax. - * 'BytesIO': shorthand for ``replace('StringIO', 'BytesIO')`` - - There is also a special non-doctest comment markup - '#23dt skip rest'. If - we find that comment (with whitespace before or after) as a line in the - file, we just pass the rest of the file unchanged. This is a hack to stop - 23dt processing its own tests. - - Parameters - ---------- - in_lines : sequence of str - - Returns - ------- - out_lines : sequence of str - lines with processing applied - error_tuples : sequence of (str, str) - sequence of 2 element tuples, where the first entry in the tuple is one - line that generated an error during processing, and the second is the - explanatory message for the error. These lines remain unchanged in - `out_lines`. - - Examples - -------- - The next three lines all do the same thing: - - >> a = '1234567890' #23dt here: line.replace("'12", "b'12") - >> a = '1234567890' #23dt here: replace("'12", "b'12") - >> a = '1234567890' #23dt here: bytes - - and that is to result in the part before the comment changing to: - - >> a = b'1234567890' - - The part after the comment (including markup) stays the same. - - You might want to process the line after the comment - such as test output. 
- The next test replaces "'a string'" with "b'a string'" - - >> 'a string'.encode('ascii') #23dt next: bytes - 'a string' - - This might work too, to do the same thing: - - >> 'a string'.encode('ascii') #23dt here+1: bytes - 'a string' - """ - out_lines = list(in_lines)[:] - err_tuples = [] - for pos, this in enumerate(out_lines): - # Check for 'leave the rest' markup - if this.strip() == '#23dt skip rest': - break - # Check for docest line with markup - mark_match = MARK_COMMENT.search(this) - if mark_match is None: - continue - docbits, marked_line, marker, markup = mark_match.groups() - place_line_match = PLACE_LINE_EXPRS.match(markup) - if place_line_match is None: - msg = ('Found markup "%s" in line "%s" but wrong syntax' % - (markup, this)) - err_tuples.append((this, msg)) - continue - place_expr, line_expr = place_line_match.groups() - exec_globals = {'here': pos, 'next': pos+1} - if place_expr.strip() == '': - place = pos - else: - try: - place = eval(place_expr, exec_globals) - except: - msg = ('Error finding place with "%s" in line "%s"' % - (place_expr, this)) - err_tuples.append((this, msg)) - continue - # Prevent processing operating on 23dt comment part of line - if place == pos: - line = marked_line - else: - line = out_lines[place] - # Shorthand - if line_expr == 'bytes': - # Any strings on the given line are byte strings - pre, mid, post = INDENT_SPLITTER.match(line).groups() - try: - res = byter(mid) - except: - err = sys.exc_info()[1] - msg = ('Error "%s" parsing "%s"' % (err, err)) - err_tuples.append((this, msg)) - continue - res = pre + res + post - else: - exec_globals.update({'line': line, 'lines': out_lines}) - # If line_expr starts with 'replace', implies "line.replace" - if line_expr.startswith('replace('): - line_expr = 'line.' + line_expr - elif line_expr == 'BytesIO': - line_expr = "line.replace('StringIO', 'BytesIO')" - try: - res = eval(line_expr, exec_globals) - except: - err = sys.exc_info()[1] - msg = ('Error "%s" working on "%s" at line %d with "%s"' % - (err, line, place, line_expr)) - err_tuples.append((this, msg)) - continue - # Put back comment if removed - if place == pos: - res = docbits + res + marker + markup - if res != line: - out_lines[place] = res - return out_lines, err_tuples - - -def byter(src): - """ Convert strings in `src` to byte string literals - - Parameters - ---------- - src : str - source string. Must be valid python 3 source - - Returns - ------- - p_src : str - string with ``str`` literals replace by ``byte`` literals - """ - import ast - from . 
import codegen - class RewriteStr(ast.NodeTransformer): - def visit_Str(self, node): - return ast.Bytes(node.s.encode('ascii')) - tree = ast.parse(src) - tree = RewriteStr().visit(tree) - return codegen.to_source(tree) - diff --git a/nisext/sexts.py b/nisext/sexts.py index db6cc66c7e..da1c056418 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -222,7 +222,7 @@ def _package_status(pkg_name, version, version_getter, checker): exit /b 1 :goodstart set py_exe=%line1:~2% -call %py_exe% %pyscript% %* +call "%py_exe%" %pyscript% %* """ class install_scripts_bat(install_scripts): diff --git a/nisext/tests/test_doctest_markup.py b/nisext/tests/test_doctest_markup.py deleted file mode 100644 index 55254a2fce..0000000000 --- a/nisext/tests/test_doctest_markup.py +++ /dev/null @@ -1,163 +0,0 @@ -""" Testing doctest markup tests -""" - -import sys -from ..py3builder import doctest_markup, byter - -from numpy.testing import (assert_array_almost_equal, assert_array_equal, dec) - -from nose.tools import assert_true, assert_equal, assert_raises - -is_2 = sys.version_info[0] < 3 -skip42 = dec.skipif(is_2) - -# Tell 23dt processing to pass the rest of this file unchanged. We don't want -# the processor to mess up the example string -#23dt skip rest - -IN_TXT = """ - -Anonymous lines, also blanks - -As all that is empty, use entropy, and endure - -# Comment, unchanged - -#23dt comment not processed without doctest marker ->>> #23dthere: no whitespace; comment not recognized even as error ->>>#23dt nor without preceding whitespace ->>> #23dt not correct syntax creates error ->>> #23dt novar: 'undefined variable creates error' ->>> #23dt here: 'OK' ->>> #23dt here : 'tolerates whitespace' ->>> #23dt here + 0 : 'OK' ->>> #23dt here -0 : 'OK' ->>> #23dt here - here + here + 0: 'OK' ->>> #23dt here *0 : 'only allowed plus or minus' ->>> #23dt : 'empty means here' ->>> #23dt : 'regardless of whitespace' ->>> #23dt 'need colon' ->>> #23dt here : 3bad syntax ->>> #23dt here : 1/0 ->>> #23dt next : line.replace('some','') -something ->>> #23dt next : replace('some','') -something ->>> #23dt next : lines[next].replace('some','') -something ->>> #23dt next + 1: line.replace('some','') -something -something ->>> #23dt next : lines[next+1].replace('some','') -this is the line where replacement happens -something - >>> whitespace #23dt : 'OK' ->>> from io import StringIO as BytesIO #23dt : replace('StringIO as ', '') ->>> from io import StringIO #23dt : BytesIO ->>> from io import StringIO #23dt : BytesIO -""" - -OUT_TXT = """ - -Anonymous lines, also blanks - -As all that is empty, use entropy, and endure - -# Comment, unchanged - -#23dt comment not processed without doctest marker ->>> #23dthere: no whitespace; comment not recognized even as error ->>>#23dt nor without preceding whitespace ->>> #23dt not correct syntax creates error ->>> #23dt novar: 'undefined variable creates error' ->>> OK#23dt here: 'OK' ->>> tolerates whitespace#23dt here : 'tolerates whitespace' ->>> OK#23dt here + 0 : 'OK' ->>> OK#23dt here -0 : 'OK' ->>> OK#23dt here - here + here + 0: 'OK' ->>> #23dt here *0 : 'only allowed plus or minus' ->>> empty means here#23dt : 'empty means here' ->>> regardless of whitespace#23dt : 'regardless of whitespace' ->>> #23dt 'need colon' ->>> #23dt here : 3bad syntax ->>> #23dt here : 1/0 ->>> #23dt next : line.replace('some','') -thing ->>> #23dt next : replace('some','') -thing ->>> #23dt next : lines[next].replace('some','') -thing ->>> #23dt next + 1: line.replace('some','') -something -thing ->>> 
#23dt next : lines[next+1].replace('some','') -thing -something - >>> OK #23dt : 'OK' ->>> from io import BytesIO #23dt : replace('StringIO as ', '') ->>> from io import BytesIO #23dt : BytesIO ->>> from io import BytesIO #23dt : BytesIO -""" - -ERR_TXT = \ -""">>> #23dt not correct syntax creates error ->>> #23dt novar: 'undefined variable creates error' ->>> #23dt here *0 : 'only allowed plus or minus' ->>> #23dt 'need colon' ->>> #23dt here : 3bad syntax ->>> #23dt here : 1/0 -""" - -def test_some_text(): - out_lines, err_tuples = doctest_markup(IN_TXT.splitlines(True)) - assert_equal(out_lines, OUT_TXT.splitlines(True)) - err_lines, err_msgs = zip(*err_tuples) - assert_equal(list(err_lines), ERR_TXT.splitlines(True)) - - -IN_BYTES_TXT = """\ - -Phatos lives - ->>> 'hello' #23dt : bytes ->>> (1, 'hello') #23dt : bytes ->>> 'hello' #23dt next : bytes -'TRACK' ->>> ('hello', 1, 'world') #23dt : bytes ->>> 3bad_syntax #23dt : bytes -""" - -OUT_BYTES_TXT = """\ - -Phatos lives - ->>> b'hello' #23dt : bytes ->>> (1, b'hello') #23dt : bytes ->>> 'hello' #23dt next : bytes -b'TRACK' ->>> (b'hello', 1, b'world') #23dt : bytes ->>> 3bad_syntax #23dt : bytes -""" - -ERR_BYTES_TXT = \ -""">>> 3bad_syntax #23dt : bytes -""" - -@skip42 -def test_bytes_text(): - out_lines, err_tuples = doctest_markup(IN_BYTES_TXT.splitlines(True)) - assert_equal(out_lines, OUT_BYTES_TXT.splitlines(True)) - err_lines, err_msgs = zip(*err_tuples) - assert_equal(list(err_lines), ERR_BYTES_TXT.splitlines(True)) - - -@skip42 -def test_byter(): - # Test bytes formatter - assert_equal('(b"hello \' world", b\'again\')', - byter('("hello \' world", "again")')) - line = "_ = bio.write(' ' * 10)" - assert_equal( - byter(line), - "_ = bio.write(b' ' * 10)") - diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000..d37709b4c0 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +numpy>=1.5.1 diff --git a/tools/travis_tools.sh b/tools/travis_tools.sh new file mode 100644 index 0000000000..0710891430 --- /dev/null +++ b/tools/travis_tools.sh @@ -0,0 +1,26 @@ +# Tools for working with travis-ci +export WHEELHOST="travis-wheels.scikit-image.org" +export WHEELHOUSE="http://${WHEELHOST}/" + +retry () { + # https://gist.github.com/fungusakafungus/1026804 + local retry_max=5 + local count=$retry_max + while [ $count -gt 0 ]; do + "$@" && break + count=$(($count - 1)) + sleep 1 + done + + [ $count -eq 0 ] && { + echo "Retry failed [$retry_max]: $@" >&2 + return 1 + } + return 0 +} + + +wheelhouse_pip_install() { + # Install pip requirements via travis wheelhouse + retry pip install --timeout=60 --no-index --trusted-host $WHEELHOST --find-links $WHEELHOUSE $@ +}
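
The new ``test_large_nifti1`` test above is gated as ``slow`` for good reason: the float32 volume it round-trips is large enough that its byte count no longer fits in 32 bits, which is the regime the overflow fixes in this patch target, and it needs roughly 4 GiB of memory. A back-of-envelope check (an illustrative sketch, not part of the patch)::

    import numpy as np

    shape = (91, 109, 91, 1200)
    n_bytes = int(np.prod(shape, dtype=np.int64)) * np.dtype(np.float32).itemsize
    print(n_bytes)          # 4332619200
    print(n_bytes > 2**32)  # True -- about 4.0 GiB in memory, hence the gate
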
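In the ``test_round_trip`` hunk, the hard-coded removal of ``np.intp`` and ``np.uintp`` gives way to asking the header itself what it supports. The same filtering can be reproduced standalone; this sketch assumes the ``supported_np_types`` helper exported by ``nibabel.spatialimages`` in the version this patch targets, plus the pre-numpy-2.0 ``np.sctypes`` table::

    import numpy as np
    from nibabel import Nifti1Header
    from nibabel.spatialimages import supported_np_types

    iuint_types = np.sctypes['int'] + np.sctypes['uint']
    nifti_supported = supported_np_types(Nifti1Header())
    # intp/uintp drop out automatically, along with any other scalar type
    # the NIfTI datatype field cannot record
    iuint_types = [t for t in iuint_types if t in nifti_supported]
    print(sorted(t.__name__ for t in iuint_types))

This is more robust than the old approach: if the set of header-supported datatypes ever changes, the test tracks it without edits.
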
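The ``tripwire.py`` change is subtler than it looks. Because ``TripWireError`` now inherits from ``AttributeError``, defensive attribute probes -- ``hasattr``, or Python 3.5's doctest machinery looking up ``__wrapped__`` -- read the tripwire as "attribute absent" and move on, while explicit ``except TripWireError`` handlers still fire. A condensed, self-contained sketch of the pattern (not the nibabel source itself)::

    class TripWireError(AttributeError):
        """ Raised on any attribute access of a TripWire placeholder """

    class TripWire(object):
        def __init__(self, msg):
            self._msg = msg

        def __getattr__(self, name):
            # Only called when normal lookup fails, so _msg stays reachable
            raise TripWireError(self._msg)

    missing = TripWire('We do not have silly_module_name')
    # hasattr swallows AttributeError subclasses, so inspection is safe:
    print(hasattr(missing, '__wrapped__'))   # False
    try:
        missing.do_silly_thing('with silly string')
    except TripWireError as err:
        print('tripped:', err)

The ``__wrapped__`` check in ``test_tripwire`` asserts exactly this behaviour: the raised ``TripWireError`` is also an ``AttributeError``.
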
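``array_from_file`` carries the other overflow fix: ``np.prod`` accumulates in a fixed-width integer dtype, so a large enough shape silently wraps, whereas ``reduce(mul, shape)`` stays in Python's arbitrary-precision integers. The magic number in ``test_array_from_file_overflow`` is exactly ``1500**6``. A quick demonstration, assuming a typical 64-bit numpy build::

    from functools import reduce
    from operator import mul

    import numpy as np

    shape = (1500,) * 6
    print(reduce(mul, shape))              # 11390625000000000000, i.e. 1500**6
    print(reduce(mul, shape) > 2**63 - 1)  # True: too wide for signed 64 bits
    # np.prod(shape) folds through a fixed-width integer and wraps (to a
    # negative value on most 64-bit builds) -- the bug worked around above
    print(np.prod(shape))

Multiplying by ``itemsize`` (1 for ``int8``) then yields the exact byte count the test expects to see in the error message.
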
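Finally, ``tools/travis_tools.sh``: ``retry`` attempts a command up to five times with a one-second pause between failures, and ``wheelhouse_pip_install`` funnels pip installs through the project wheelhouse with that retry wrapped around them. For readers more at home in Python than shell, the control flow of ``retry`` is equivalent to this sketch (a hypothetical helper for illustration, not part of the patch)::

    import subprocess
    import sys
    import time

    def retry(cmd, retry_max=5):
        # Mirror of the shell helper: attempt, sleep 1s on failure, give up
        # after retry_max tries with a diagnostic on stderr
        for _ in range(retry_max):
            if subprocess.call(cmd) == 0:
                return 0
            time.sleep(1)
        sys.stderr.write('Retry failed [%d]: %s\n' % (retry_max, ' '.join(cmd)))
        return 1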