diff --git a/.circleci/auto-cache-timestamp b/.circleci/auto-cache-timestamp index 770bcea7d1..50b266a34f 100644 --- a/.circleci/auto-cache-timestamp +++ b/.circleci/auto-cache-timestamp @@ -1 +1 @@ -Sat Apr 13 02:15:46 CEST 2019 +2019-04-19 15:05:58.522213 \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index ed0a1be3ab..95000f104e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -29,24 +29,15 @@ jobs: - run: sudo -E apt-get -yq update - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - run: - name: Today & Week # Fixing the date and week of the year in files to generate & preserve cache key. + name: Today & Week # Saving today's date and current week of the year in files to generate daily & weekly new cache key respectively. command: | - echo 2019-04-12 > today - echo 14 > week_num + date +%F > today + date +%U > week_num - restore_cache: key: v1-packages+datasets-{{ checksum "week_num" }} - restore_cache: key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - - run: - name: If branch cache has cached docs, don't restore docs from master cache - command: | - if - ls doc/_build/html - then - date > .circleci/auto-cache-timestamp - fi - - restore_cache: - key: master-docs-{{ checksum ".circleci/auto-cache-timestamp" }} + - run: name: Download & install conda if absent command: | @@ -76,15 +67,20 @@ jobs: name: Install packages in conda env command: | conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx=1.8 numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq + lxml mkl sphinx numpydoc pillow pandas -yq + conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq - run: name: Running CircleCI test (make html) command: | source activate testenv pip install -e . - set -o pipefail && cd doc && make html-strict SPHINXOPTS="-v" 2>&1 | tee log.txt + set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt no_output_timeout: 7h + - store_test_results: + path: doc/_build/test-results + - store_artifacts: + path: doc/_build/test-results + - save_cache: key: v1-packages+datasets-{{ checksum "week_num" }} paths: @@ -101,8 +97,6 @@ jobs: path: coverage - store_artifacts: path: doc/log.txt - - store_artifacts: - path: .circleci full-build: @@ -127,58 +121,26 @@ jobs: # Installing required packages for `make -C doc check command` to work. - run: sudo -E apt-get -yq update - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: - name: Today & Week # Fixing the date and week of the year in files to generate & preserve cache key. - command: | - echo 2019-04-12 > today - echo 14 > week_num - - restore_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - - run: - name: Download & install conda if absent - command: | - if - ls $HOME/miniconda3/bin | grep conda -q - then - echo "(Mini)Conda already present from the cache." 
- else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - chmod +x ~/miniconda.sh && ~/miniconda.sh -b - fi + - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh + - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b - run: echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Create new conda env - command: | - if - conda env list | grep testenv - then - echo "Conda env testenv already exists courtesy of the cache." - else - conda create -n testenv -yq - fi - run: name: Install packages in conda env command: | - conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx=1.8 numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq + conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ + lxml mkl sphinx numpydoc pillow pandas -yq + conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq - run: name: Running CircleCI test (make html) command: | source activate testenv pip install -e . - set -o pipefail && cd doc && make html-strict SPHINXOPTS="-v" 2>&1 | tee log.txt + set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt no_output_timeout: 7h - - - save_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - paths: - - ../nilearn_data - - ../miniconda3 - - save_cache: - key: master-docs-{{ checksum ".circleci/auto-cache-timestamp" }} - paths: - - doc + - store_test_results: + path: doc/_build/test-results + - store_artifacts: + path: doc/_build/test-results - store_artifacts: path: doc/_build/html @@ -186,8 +148,6 @@ jobs: path: coverage - store_artifacts: path: doc/log.txt - - store_artifacts: - path: .circleci workflows: @@ -207,3 +167,14 @@ workflows: only: - master - test-circleci # test branch to check if merges occur on master as expected. + + nightly: + triggers: + - schedule: + cron: "0 6 * * *" + filters: + branches: + only: + - master + jobs: + - full-build diff --git a/.circleci/config.yml-presprint b/.circleci/config.yml-presprint deleted file mode 100644 index e272fec91b..0000000000 --- a/.circleci/config.yml-presprint +++ /dev/null @@ -1,171 +0,0 @@ -# quick-build rebuilds changes using the cached documentation. -# The cache is emptied everyday, forcing a full build on the day's first push. -# It doesn't operate on master branch. New branches are always built from scratch. -# full-build always rebuilds from scratch, without any cache. Only for changes in master branch. - -version: 2 - -jobs: - quick-build: - docker: - - image: circleci/python:3.6 - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.6" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - MATPLOTLIB_VERSION: "*" - - steps: - - checkout - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - run: sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. 
- - run: sudo -E apt-get -yq update - - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: - name: Today & Week # Saving today's date and current week of the year in files to generate daily & weekly new cache key respectively. - command: | - date +%F > today - date +%U > week_num - - restore_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - - restore_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - - - run: - name: Download & install conda if absent - command: | - if - ls $HOME/miniconda3/bin | grep conda -q - then - echo "(Mini)Conda already present from the cache." - else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - chmod +x ~/miniconda.sh && ~/miniconda.sh -b - fi - - run: - name: Setup conda path in env variables - command: | - echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Create new conda env - command: | - if - conda env list | grep testenv - then - echo "Conda env testenv already exists courtesy of the cache." - else - conda create -n testenv -yq - fi - - run: - name: Install packages in conda env - command: | - conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq - - run: - name: Running CircleCI test (make html) - command: | - source activate testenv - pip install -e . - set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt - no_output_timeout: 7h - - save_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - paths: - - ../nilearn_data - - ../miniconda3 - - save_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - paths: - - doc - - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt - - - full-build: - docker: - - image: circleci/python:3.6 - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.6" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - MATPLOTLIB_VERSION: "*" - - steps: - - checkout - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - run: sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. - - run: sudo -E apt-get -yq update - - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b - - run: echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Install packages in conda env - command: | - conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq - - run: - name: Running CircleCI test (make html) - command: | - source activate testenv - pip install -e . 
- set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt - no_output_timeout: 7h - - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt - - -workflows: - version: 2 - push: - jobs: - - quick-build: - filters: - branches: - ignore: - - master - - test-circleci # test branch to check if merges occur on master as expected. - - - full-build: - filters: - branches: - only: - - master - - test-circleci # test branch to check if merges occur on master as expected. - - nightly: - triggers: - - schedule: - cron: "0 6 * * *" - filters: - branches: - only: - - master - jobs: - - full-build diff --git a/.circleci/manual-cache-timestamp b/.circleci/manual-cache-timestamp index 3d0c4f522e..e3790b2eeb 100644 --- a/.circleci/manual-cache-timestamp +++ b/.circleci/manual-cache-timestamp @@ -1 +1 @@ -2019-04-13 20:12:23.519962 \ No newline at end of file +2019-04-19 15:05:58.522064 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 4beefc864a..0709c70f7b 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ build *# nilearn.egg-info/ +env/ dist/ doc/.nojekyll doc/building_blocks/generated/ @@ -34,4 +35,4 @@ tags .idea/ -doc/themes/nilearn/static/jquery.js \ No newline at end of file +doc/themes/nilearn/static/jquery.js diff --git a/.travis.yml b/.travis.yml index 1b20644e30..9554e60eee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,8 +21,8 @@ matrix: include: # without matplotlib - env: DISTRIB="conda" PYTHON_VERSION="3.5" - NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" - SCIKIT_LEARN_VERSION="*" COVERAGE="true" + NUMPY_VERSION="1.11" SCIPY_VERSION="0.17" PANDAS_VERSION="*" + SCIKIT_LEARN_VERSION="0.18" COVERAGE="true" LXML_VERSION="*" - env: DISTRIB="conda" PYTHON_VERSION="3.5" NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 2f792f5c27..44094ac85e 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,8 +1,7 @@ .. _contributing: -============ -Contributing -============ +How to contribute to nilearn +============================= This project is a community effort, and everyone is welcome to contribute. @@ -19,7 +18,7 @@ you first open an `issue `_ before sending a :ref:`pull request`. Opening an issue -================ +------------------ Nilearn uses issues for tracking bugs, requesting potential features, and holding project discussions. @@ -37,7 +36,7 @@ Labels can be assigned a variety of issues, such as: .. _pull request: Pull Requests -============= +--------------- We welcome pull requests from all community members. We follow the same conventions as scikit-learn. You can find the recommended process to submit code in the @@ -47,7 +46,7 @@ We follow the same conventions as scikit-learn. You can find the recommended pro .. _git_repo: Retrieving the latest code -========================== +--------------------------- We use `Git `_ for version control and `GitHub `_ for hosting our main repository. If you are @@ -64,7 +63,7 @@ or if you have write privileges:: git clone git@github.com:nilearn/nilearn.git Coding guidelines -================= +------------------ Nilearn follows the coding conventions used by scikit-learn. 
`Please read them `_
diff --git a/README.rst b/README.rst
index 7885ed878b..d9916729c4 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@ Dependencies
 
 The required dependencies to use the software are:
 
-* Python >= 2.7,
+* Python >= 3.5,
 * setuptools
 * Numpy >= 1.11
 * SciPy >= 0.17
diff --git a/doc/conf.py b/doc/conf.py
index 950f619c59..3f88540004 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -227,12 +227,10 @@
 # (source start file, target name, title, author, documentclass
 # [howto/manual]).
 latex_documents = [
-    ('index', 'nilearn.tex', u'NeuroImaging with scikit-learn',
-     ur"""Gaël Varoquaux and Alexandre Abraham"""
-     + r"\\\relax ~\\\relax http://nilearn.github.io",
-     'manual'),
-
-]
+    ('index', 'nilearn.tex', 'NeuroImaging with scikit-learn',
+     'Gaël Varoquaux and Alexandre Abraham'
+     + r"\\\relax ~\\\relax http://nilearn.github.io", 'manual'),
+    ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
@@ -302,7 +300,8 @@
 sphinx_gallery_conf = {
     'doc_module': 'nilearn',
     'backreferences_dir': os.path.join('modules', 'generated'),
-    'reference_url': {'nilearn': None}
+    'reference_url': {'nilearn': None},
+    'junit': '../test-results/sphinx-gallery/junit.xml',
 }
 
 # Get rid of spurious warnings due to some interaction between
diff --git a/doc/contributing.rst b/doc/contributing.rst
deleted file mode 100644
index e582053ea0..0000000000
--- a/doc/contributing.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../CONTRIBUTING.rst
diff --git a/doc/development.rst b/doc/development.rst
new file mode 100644
index 0000000000..a0ea29cdd6
--- /dev/null
+++ b/doc/development.rst
@@ -0,0 +1,133 @@
+============================
+Nilearn development process
+============================
+
+.. contents::
+    :depth: 2
+    :local:
+
+How to help?
+=============
+
+* You are new to Python and you don't know how to do X
+
+  - Create a question on `neurostars `_
+
+* If you discovered a bug, but don't know how to fix it
+
+  - Create `an issue `_
+
+* If you discovered a bug and know how to fix it, but don't know how to
+  get your code onto GitHub (i.e., you have some Python experience but have
+  never used git/GitHub, or have never written tests)
+
+  - Learn git and GitHub: http://try.github.io/
+  - Learn what tests are and how to run them locally
+    (https://docs.pytest.org)
+  - Learn how to write doc/examples and build them locally
+    (https://sphinx-gallery.github.io/)
+
+* You want to contribute code
+
+  - See below
+
+
+How do we decide what code goes in?
+=====================================
+
+
+Scope of the project
+---------------------
+
+Nilearn strives to develop open and powerful tools for the statistical
+analysis of brain volumes (as produced by multiple modalities: MRI, PET,
+and others). Its focus is to reach end users of the methods (as opposed
+to methods developers).
+
+Nilearn targets ease of use, but as Python code. In other words, we will
+not add graphical user interfaces, but we want our code to be as easy to
+understand as possible, with easy prototyping and debugging, even for
+beginners in Python.
+
+We are parsimonious in the way we add features to the project, as each
+new feature adds maintenance weight. Criteria include:
+
+* It must be in the scope of the project
+* It must correspond to an established practice (typically as used in
+  scientific publications)
+* It must have a concrete use case that can be demonstrated simply with
+  nilearn: an example, on real data, understandable by end-users.
+
+Part of the decision will also be about weighing the benefits (i.e., new
+features or ease of use for the users) against the costs (i.e., complexity
+of the code, runtime of the examples).
+
+In practice:
+
+* The new feature must be demoed in an example in a way that shows its
+  benefit to new users.
+* Because our infrastructure is limited, running all the examples must
+  download only a limited amount of data (gigabytes) and execute in a
+  reasonable amount of time (a few hours).
+* The new feature must be thoroughly tested (it should not decrease
+  code coverage).
+* The new feature must not introduce a new dependency.
+
+Special cases:
+
+* A downloader for a new atlas: we are currently very lenient about this:
+  if the atlas is published and can be used in an example, we will accept
+  the pull request (but see below for specifics).
+* A downloader for a new dataset: the larger the dataset is, the less
+  likely we are to consider including it. Datasets are meant to demo and
+  teach features, rather than be the basis of research.
+
+How to contribute a feature
+----------------------------
+
+To contribute a feature, first create an issue, in order to discuss
+whether the feature can be included or not, and the specifications of
+this feature. Once the feature is agreed upon, send us a pull request.
+
+There are specific guidelines about how to write code for the project.
+They can be found in the contributors guide, below.
+
+Special case: How to contribute an atlas
+.............................................
+
+We want atlases in nilearn to be internally consistent. Specifically,
+your atlas object should have three attributes (as with the existing
+atlases):
+
+- ``description`` (bytes): A text description of the atlas. This should be
+  brief but thorough, describing the source (paper), relevant information
+  related to its construction (modality, dataset, method), and, if there is
+  more than one map, a description of each map.
+- ``labels`` (list): a list of string labels corresponding to each atlas
+  label, in the same (numerical) order as the atlas labels.
+- ``maps`` (list or string): the path to the nifti image, or a list of paths.
+
+In addition, the atlas will need to be called by a fetcher. For example, see `here `__.
+
+Finally, as with other features, please provide a test for your atlas.
+Examples can be found `here
+`__.
+
+Who makes decisions
+--------------------
+
+We strongly aim to be a community-oriented project where decisions are
+made based on consensus according to the criteria described above.
+Decisions are made public, through discussion on issues and pull requests
+on GitHub.
+
+The decisions are made by the core contributors, i.e., people with write
+access to the repository, as listed `here
+`__.
+
+If there are open questions, final decisions are made by the Temporary
+Benevolent Dictator, currently Gaël Varoquaux.
+
+
+
+.. include:: ../CONTRIBUTING.rst
diff --git a/doc/index.rst b/doc/index.rst
index b1bcd02011..3de66dff3b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -101,6 +101,6 @@
    user_guide.rst
    auto_examples/index.rst
    whats_new.rst
-   contributing.rst
+   development.rst
 
 Nilearn is part of the `NiPy ecosystem `_.
diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html
index 60d25c1665..ae52a6df74 100644
--- a/doc/themes/nilearn/layout.html
+++ b/doc/themes/nilearn/layout.html
@@ -261,7 +261,7 @@

     Machine learning for Neuro-Imaging in Python
     News
-      • April 12th 2019: Nilearn 0.5.2 released
+      • April 17th 2019: Nilearn 0.5.2 released
       • April 12th 2019: Nilearn 0.5.1 released
@@ -290,7 +290,7 @@
     Development
       • Nilearn on GitHub
       • All material Free Software: BSD license (3 clause).
       • Authors
-      • Contributing
+      • Contributing
 [The HTML markup and link targets in these two layout.html hunks were lost in extraction; only the visible text of each line is kept. The second hunk presumably retargets the Contributing link from the removed contributing page to the new development page.]

{% endif %} diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 66add0cda8..fa78828ae8 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,9 +1,14 @@ -0.6.0 -===== +0.6.0a +====== NEW --- +- Optimization to image resampling + :func:`nilearn.image.resample_img` has been optimized to pad rather than + resample images in the special case when there is only a translation + between two spaces. This is a common case in :class:`nilearn.input_data.NiftiMasker` + when using the `mask_strategy="template"` option for brains in MNI space. - New brain development fMRI dataset fetcher :func:`nilearn.datasets.fetch_development_fmri` can be used to download movie-watching data in children and adults. A light-weight dataset implemented diff --git a/nilearn/_utils/extmath.py b/nilearn/_utils/extmath.py index 6cbc8c6daf..615464f1eb 100644 --- a/nilearn/_utils/extmath.py +++ b/nilearn/_utils/extmath.py @@ -6,8 +6,6 @@ import numpy as np -from numpy import partition - def fast_abs_percentile(data, percentile=80): """ A fast version of the percentile of the absolute value. @@ -36,11 +34,8 @@ def fast_abs_percentile(data, percentile=80): data = np.abs(data) data = data.ravel() index = int(data.size * .01 * percentile) - if partition is not None: - # Partial sort: faster than sort - data = partition(data, index) - else: - data.sort() + # Partial sort: faster than sort + data = np.partition(data, index) return data[index] diff --git a/nilearn/datasets/struct.py b/nilearn/datasets/struct.py index 8393e44bd4..8904ef5389 100644 --- a/nilearn/datasets/struct.py +++ b/nilearn/datasets/struct.py @@ -146,7 +146,7 @@ def load_mni152_brain_mask(): See Also -------- - nilearn.datasets.load_mni152_template for details about version of the + nilearn.datasets.load_mni152_template : for details about version of the MNI152 T1 template and related. """ # Load MNI template diff --git a/nilearn/image/image.py b/nilearn/image/image.py index e51a29bc06..0387ee1ea5 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -327,7 +327,7 @@ def _crop_img_to(img, slices, copy=True): return new_img_like(img, cropped_data, new_affine) -def crop_img(img, rtol=1e-8, copy=True): +def crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False): """Crops img as much as possible Will crop img, removing as many zero entries as possible @@ -349,10 +349,22 @@ def crop_img(img, rtol=1e-8, copy=True): copy: boolean Specifies whether cropped data is copied or not. + pad: boolean + Toggles adding 1-voxel of 0s around the border. Recommended. + + return_offset: boolean + Specifies whether to return a tuple of the removed padding. 
+ Returns ------- cropped_img: image Cropped version of the input image + + offset: list (optional) + List of tuples representing the number of voxels removed (before, after) + the cropped volumes, i.e.: + [(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)] + """ img = check_niimg(img) @@ -364,16 +376,53 @@ def crop_img(img, rtol=1e-8, copy=True): if data.ndim == 4: passes_threshold = np.any(passes_threshold, axis=-1) coords = np.array(np.where(passes_threshold)) - start = coords.min(axis=1) - end = coords.max(axis=1) + 1 + + # Sets full range if no data are found along the axis + if coords.shape[1] == 0: + start, end = [0, 0, 0], list(data.shape) + else: + start = coords.min(axis=1) + end = coords.max(axis=1) + 1 # pad with one voxel to avoid resampling problems - start = np.maximum(start - 1, 0) - end = np.minimum(end + 1, data.shape[:3]) + if pad: + start = np.maximum(start - 1, 0) + end = np.minimum(end + 1, data.shape[:3]) + + slices = [slice(s, e) for s, e in zip(start, end)][:3] + cropped_im = _crop_img_to(img, slices, copy=copy) + return cropped_im if not return_offset else (cropped_im, tuple(slices)) - slices = [slice(s, e) for s, e in zip(start, end)] - return _crop_img_to(img, slices, copy=copy) +def _pad_array(array, pad_sizes): + """Pad an ndarray with zeros of quantity specified + as follows pad_sizes = [x1minpad, x1maxpad, x2minpad, + x2maxpad, x3minpad, ...] + """ + + if len(pad_sizes) % 2 != 0: + raise ValueError("Please specify as many max paddings as min" + " paddings. You have specified %d arguments" % + len(pad_sizes)) + + all_paddings = np.zeros([array.ndim, 2], dtype=np.int64) + all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2) + + lower_paddings, upper_paddings = all_paddings.T + new_shape = np.array(array.shape) + upper_paddings + lower_paddings + + padded = np.zeros(new_shape, dtype=array.dtype) + source_slices = [slice(max(-lp, 0), min(s + up, s)) + for lp, up, s in zip(lower_paddings, + upper_paddings, + array.shape)] + target_slices = [slice(max(lp, 0), min(s - up, s)) + for lp, up, s in zip(lower_paddings, + upper_paddings, + new_shape)] + + padded[tuple(target_slices)] = array[source_slices].copy() + return padded def _compute_mean(imgs, target_affine=None, diff --git a/nilearn/image/resampling.py b/nilearn/image/resampling.py index 27dede7712..a699351341 100644 --- a/nilearn/image/resampling.py +++ b/nilearn/image/resampling.py @@ -13,6 +13,7 @@ import scipy from scipy import ndimage, linalg +from .image import crop_img from .. import _utils from .._utils.compat import _basestring @@ -289,7 +290,7 @@ def _resample_one_img(data, A, b, target_shape, def resample_img(img, target_affine=None, target_shape=None, interpolation='continuous', copy=True, order="F", - clip=True, fill_value=0): + clip=True, fill_value=0, force_resample=False): """Resample a Niimg-like object Parameters @@ -331,6 +332,9 @@ def resample_img(img, target_affine=None, target_shape=None, fill_value: float, optional Use a fill value for points outside of input volume (default 0). 
+    force_resample: bool, optional
+        Intended for testing, this prevents the use of a padding optimization.
+
     Returns
     -------
     resampled: nibabel.Nifti1Image
@@ -418,18 +422,19 @@
         input_img_is_string = False
 
     img = _utils.check_niimg(img)
+    shape = img.shape
+    affine = img.affine
 
     # noop cases
     if target_affine is None and target_shape is None:
         if copy and not input_img_is_string:
             img = _utils.copy_img(img)
         return img
+    if target_affine is affine and target_shape is shape:
+        return img
 
     if target_affine is not None:
         target_affine = np.asarray(target_affine)
 
-    shape = img.shape
-    affine = img.affine
-
     if (np.all(np.array(target_shape) == shape[:3]) and
             np.allclose(target_affine, affine)):
         if copy and not input_img_is_string:
@@ -486,14 +491,6 @@
     else:
         transform_affine = np.dot(linalg.inv(affine), target_affine)
     A, b = to_matrix_vector(transform_affine)
-    # If A is diagonal, ndimage.affine_transform is clever enough to use a
-    # better algorithm.
-    if np.all(np.diag(np.diag(A)) == A):
-        if LooseVersion(scipy.__version__) < LooseVersion('0.18'):
-            # Before scipy 0.18, ndimage.affine_transform was applying a
-            # different logic to the offset for diagonal affine
-            b = np.dot(linalg.inv(A), b)
-        A = np.diag(A)
 
     data_shape = list(data.shape)
     # Make sure that we have a list here
@@ -501,6 +498,11 @@
         target_shape = target_shape.tolist()
     target_shape = tuple(target_shape)
 
+    if LooseVersion(scipy.__version__) < LooseVersion('0.20'):
+        # Before scipy 0.20, force native data types due to endian issues
+        # that caused instability.
+        data = data.astype(data.dtype.newbyteorder('N'))
+
     if interpolation == 'continuous' and data.dtype.kind == 'i':
         # cast unsupported data types to closest support dtype
         aux = data.dtype.name.replace('int', 'float')
@@ -527,20 +529,58 @@
     # Code is generic enough to work for both 3D and 4D images
     other_shape = data_shape[3:]
-    resampled_data = np.empty(list(target_shape) + other_shape,
+    resampled_data = np.zeros(list(target_shape) + other_shape,
                               order=order, dtype=resampled_data_dtype)
 
     all_img = (slice(None), ) * 3
 
-    # Iterate over a set of 3D volumes, as the interpolation problem is
-    # separable in the extra dimensions. This reduces the
-    # computational cost
-    for ind in np.ndindex(*other_shape):
-        _resample_one_img(data[all_img + ind], A, b, target_shape,
-                          interpolation_order,
-                          out=resampled_data[all_img + ind],
-                          copy=not input_img_is_string,
-                          fill_value=fill_value)
+    # if (A == I OR some combination of permutation(I) and sign-flipped(I)) AND
+    # all(b == integers):
+    if (np.all(np.eye(3) == A) and all(bt == np.round(bt) for bt in b) and
+            not force_resample):
+        # TODO: also check for sign flips
+        # TODO: also check for permutations of I
+
+        # ... special case: can be solved with padding alone
+        # crop source image and keep N voxels offset before/after volume
+        cropped_img, offsets = crop_img(img, pad=False, return_offset=True)
+
+        # TODO: flip axes that are flipped
+        # TODO: un-shuffle permuted dimensions
+
+        # offset the original un-cropped image indices by the relative
+        # translation, b.
+        indices = [(int(off.start - dim_b), int(off.stop - dim_b))
+                   for off, dim_b in zip(offsets[:3], b[:3])]
+
+        # If the images do not fully overlap, place only the overlapping portion of the image.
+        slices = []
+        for dimsize, index in zip(resampled_data.shape, indices):
+            slices.append(slice(np.max((0, index[0])),
+                                np.min((dimsize, index[1]))))
+        slices = tuple(slices)
+
+        # ensure the source image being placed isn't larger than the dest
+        subset_indices = tuple(slice(0, s.stop-s.start) for s in slices)
+        resampled_data[slices] = cropped_img.get_data()[subset_indices]
+    else:
+        # If A is diagonal, ndimage.affine_transform is clever enough to use a
+        # better algorithm.
+        if np.all(np.diag(np.diag(A)) == A):
+            if LooseVersion(scipy.__version__) < LooseVersion('0.18'):
+                # Before scipy 0.18, ndimage.affine_transform was applying a
+                # different logic to the offset for diagonal affine
+                b = np.dot(linalg.inv(A), b)
+            A = np.diag(A)
+        # Iterate over a set of 3D volumes, as the interpolation problem is
+        # separable in the extra dimensions. This reduces the
+        # computational cost
+        for ind in np.ndindex(*other_shape):
+            _resample_one_img(data[all_img + ind], A, b, target_shape,
+                              interpolation_order,
+                              out=resampled_data[all_img + ind],
+                              copy=not input_img_is_string,
+                              fill_value=fill_value)
 
     if clip:
         # force resampled data to have a range contained in the original data
@@ -556,7 +596,7 @@
 
 def resample_to_img(source_img, target_img,
                     interpolation='continuous', copy=True, order='F',
-                    clip=False, fill_value=0):
+                    clip=False, fill_value=0, force_resample=False):
     """Resample a Niimg-like source image on a target Niimg-like image
     (no registration is performed: the image should already be aligned).
 
@@ -593,6 +633,9 @@
     fill_value: float, optional
         Use a fill value for points outside of input volume (default 0).
 
+    force_resample: bool, optional
+        Intended for testing, this prevents the use of a padding optimization.
+
     Returns
     -------
     resampled: nibabel.Nifti1Image
 
@@ -616,7 +659,8 @@
                        target_affine=target.affine,
                        target_shape=target_shape,
                        interpolation=interpolation, copy=copy, order=order,
-                       clip=clip, fill_value=fill_value)
+                       clip=clip, fill_value=fill_value,
+                       force_resample=force_resample)
 
 
 def reorder_img(img, resample=None):
diff --git a/nilearn/image/tests/test_resampling.py b/nilearn/image/tests/test_resampling.py
index a0b66d551d..9cff7ed923 100644
--- a/nilearn/image/tests/test_resampling.py
+++ b/nilearn/image/tests/test_resampling.py
@@ -18,6 +18,7 @@
 from nilearn.image.resampling import from_matrix_vector, coord_transform
 from nilearn.image.resampling import get_bounds
 from nilearn.image.resampling import BoundingBoxError
+from nilearn.image.image import _pad_array, crop_img
 from nilearn._utils import testing
 
@@ -37,37 +38,6 @@ def rotation(theta, phi):
     return np.dot(a1, a2)
 
 
-def pad(array, *args):
-    """Pad an ndarray with zeros of quantity specified
-    in args as follows args = (x1minpad, x1maxpad, x2minpad,
-    x2maxpad, x3minpad, ...)
-    """
-
-    if len(args) % 2 != 0:
-        raise ValueError("Please specify as many max paddings as min"
-                         " paddings.
You have specified %d arguments" % - len(args)) - - all_paddings = np.zeros([array.ndim, 2], dtype=np.int64) - all_paddings[:len(args) // 2] = np.array(args).reshape(-1, 2) - - lower_paddings, upper_paddings = all_paddings.T - new_shape = np.array(array.shape) + upper_paddings + lower_paddings - - padded = np.zeros(new_shape, dtype=array.dtype) - source_slices = [slice(max(-lp, 0), min(s + up, s)) - for lp, up, s in zip(lower_paddings, - upper_paddings, - array.shape)] - target_slices = [slice(max(lp, 0), min(s - up, s)) - for lp, up, s in zip(lower_paddings, - upper_paddings, - new_shape)] - - padded[target_slices] = array[source_slices].copy() - return padded - - ############################################################################### # Tests def test_identity_resample(): @@ -121,6 +91,11 @@ def test_downsample(): np.testing.assert_almost_equal(downsampled, rot_img.get_data()[:x, :y, :z, ...]) + rot_img_2 = resample_img(Nifti1Image(data, affine), + target_affine=2 * affine, interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(rot_img_2.get_data(), + rot_img.get_data()) # Test with non native endian data # Test to check that if giving non native endian data as input should @@ -471,8 +446,8 @@ def test_resampling_result_axis_permutation(): offset_cropping = np.vstack([-offset[ap][np.newaxis, :], np.zeros([1, 3])] ).T.ravel().astype(int) - what_resampled_data_should_be = pad(full_data.transpose(ap), - *list(offset_cropping)) + what_resampled_data_should_be = _pad_array(full_data.transpose(ap), + list(offset_cropping)) assert_array_almost_equal(resampled_data, what_resampled_data_should_be) @@ -558,6 +533,56 @@ def test_resample_to_img(): np.testing.assert_almost_equal(downsampled, result_img.get_data()[:x, :y, :z, ...]) +def test_crop(): + # Testing that padding of arrays and cropping of images work symmetrically + shape = (4, 6, 2) + data = np.ones(shape) + padded = _pad_array(data, [3, 2, 4, 4, 5, 7]) + padd_nii = Nifti1Image(padded, np.eye(4)) + + cropped = crop_img(padd_nii, pad=False) + np.testing.assert_equal(cropped.get_data(), data) + + +def test_resample_identify_affine_int_translation(): + # Testing resample to img function + rand_gen = np.random.RandomState(0) + + source_shape = (6, 4, 6) + source_affine = np.eye(4) + source_affine[:, 3] = np.append(np.random.randint(0, 4, 3), 1) + source_data = rand_gen.random_sample(source_shape) + source_img = Nifti1Image(source_data, source_affine) + + target_shape = (11, 10, 9) + target_data = np.zeros(target_shape) + target_affine = source_affine + target_affine[:3, 3] -= 3 # add an offset of 3 in x, y, z + target_data[3:9, 3:7, 3:9] = source_data # put the data at the offset location + target_img = Nifti1Image(target_data, target_affine) + + result_img = resample_to_img(source_img, target_img, + interpolation='nearest') + np.testing.assert_almost_equal(target_img.get_data(), + result_img.get_data()) + + result_img_2 = resample_to_img(result_img, source_img, + interpolation='nearest') + np.testing.assert_almost_equal(source_img.get_data(), + result_img_2.get_data()) + + result_img_3 = resample_to_img(result_img, source_img, + interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(result_img_2.get_data(), + result_img_3.get_data()) + + result_img_4 = resample_to_img(source_img, target_img, + interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(target_img.get_data(), + result_img_4.get_data()) + def test_resample_clip(): # Resample and image and get 
larger and smaller # value than in the original. Use clip to get rid of these images diff --git a/nilearn/plotting/surf_plotting.py b/nilearn/plotting/surf_plotting.py index e67eebe3ff..01e1b3bfd1 100644 --- a/nilearn/plotting/surf_plotting.py +++ b/nilearn/plotting/surf_plotting.py @@ -117,7 +117,7 @@ def plot_surf(surf_mesh, surf_map=None, bg_map=None, nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain surfaces. - nilearn.plotting.plot_surf_stat_map for plotting statistical maps on + nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on brain surfaces. """ @@ -403,10 +403,10 @@ def plot_surf_stat_map(surf_mesh, stat_map, bg_map=None, See Also -------- - nilearn.datasets.fetch_surf_fsaverage : For surface data object to be + nilearn.datasets.fetch_surf_fsaverage: For surface data object to be used as background map for this plotting function. - nilearn.plotting.plot_surf : For brain surface visualization. + nilearn.plotting.plot_surf: For brain surface visualization. """ # Call _get_colorbar_and_data_ranges to derive symmetric vmin, vmax diff --git a/nilearn/version.py b/nilearn/version.py index 24b941bea1..326ad9d28d 100644 --- a/nilearn/version.py +++ b/nilearn/version.py @@ -21,7 +21,7 @@ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # -__version__ = '0.5.2' +__version__ = '0.6.0a' _NILEARN_INSTALL_MSG = 'See %s for installation information.' % ( 'http://nilearn.github.io/introduction.html#installation')
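
A few quick sketches follow for reviewers; they are not part of the patch and assume a nilearn checkout with this diff applied. First, the new `crop_img` keywords and the `_pad_array` helper (a private function, so subject to change) are designed to round-trip: zero-padding an array and then cropping the resulting image with `pad=False` recovers the original block, and `return_offset=True` additionally returns the slices that performed the crop. This mirrors `test_crop` above.

import numpy as np
from nibabel import Nifti1Image
from nilearn.image.image import _pad_array, crop_img

data = np.ones((4, 6, 2))

# Zero-pad each axis by the given (before, after) amounts, flattened:
# 3 voxels before / 2 after on x, 4/4 on y, 5/7 on z.
padded = _pad_array(data, [3, 2, 4, 4, 5, 7])
padded_img = Nifti1Image(padded, np.eye(4))

# pad=False skips the extra 1-voxel safety border, so cropping exactly
# undoes the padding; return_offset=True also yields the crop slices.
cropped_img, offset = crop_img(padded_img, pad=False, return_offset=True)
np.testing.assert_equal(cropped_img.get_data(), data)

# The returned slices index the padded volume back to the cropped block.
assert padded[offset].shape == data.shape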
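
Second, the padding optimization in `resample_img` applies when the source and target affines share an identity rotation/zoom part and differ only by an integer translation; `force_resample=True` keeps the old interpolation path, which the new tests use to check equivalence. A minimal sketch, with illustrative shapes and a 3-voxel offset taken from `test_resample_identify_affine_int_translation`:

import numpy as np
from nibabel import Nifti1Image
from nilearn.image import resample_to_img

rng = np.random.RandomState(0)

# Source and target grids differ only by an integer translation.
source_affine = np.eye(4)
source_img = Nifti1Image(rng.random_sample((6, 4, 6)), source_affine)

target_affine = source_affine.copy()
target_affine[:3, 3] -= 3  # 3-voxel integer offset along x, y and z
target_img = Nifti1Image(np.zeros((11, 10, 9)), target_affine)

# Fast path: the source is cropped and placed into the target grid by
# padding alone, with no interpolation.
fast = resample_to_img(source_img, target_img, interpolation='nearest')

# force_resample=True bypasses the shortcut and interpolates as before.
slow = resample_to_img(source_img, target_img, interpolation='nearest',
                       force_resample=True)

np.testing.assert_almost_equal(fast.get_data(), slow.get_data())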
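
Finally, the `extmath` cleanup drops the fallback for NumPy builds without `partition`, which is safe given the README now requires NumPy >= 1.11. The partial-sort trick `fast_abs_percentile` relies on, shown in isolation (the array size and percentile below are illustrative):

import numpy as np

rng = np.random.RandomState(42)
data = np.abs(rng.standard_normal(10000)).ravel()

# np.partition places the index-th smallest value at position `index`
# without paying for a full sort (O(n) on average).
index = int(data.size * .01 * 80)  # 80th percentile
value = np.partition(data, index)[index]

# Same order statistic as a full sort would give.
assert value == np.sort(data)[index]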