diff --git a/.circleci/auto-cache-timestamp b/.circleci/auto-cache-timestamp index 770bcea7d1..50b266a34f 100644 --- a/.circleci/auto-cache-timestamp +++ b/.circleci/auto-cache-timestamp @@ -1 +1 @@ -Sat Apr 13 02:15:46 CEST 2019 +2019-04-19 15:05:58.522213 \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index ed0a1be3ab..9ec84ad53f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,6 +15,7 @@ jobs: NUMPY_VERSION: "*" SCIPY_VERSION: "*" SCIKIT_LEARN_VERSION: "*" + JOBLIB_VERSION: "*" MATPLOTLIB_VERSION: "*" steps: @@ -29,24 +30,15 @@ jobs: - run: sudo -E apt-get -yq update - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - run: - name: Today & Week # Fixing the date and week of the year in files to generate & preserve cache key. + name: Today & Week # Saving today's date and current week of the year in files to generate daily & weekly new cache key respectively. command: | - echo 2019-04-12 > today - echo 14 > week_num + date +%F > today + date +%U > week_num - restore_cache: key: v1-packages+datasets-{{ checksum "week_num" }} - restore_cache: key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - - run: - name: If branch cache has cached docs, don't restore docs from master cache - command: | - if - ls doc/_build/html - then - date > .circleci/auto-cache-timestamp - fi - - restore_cache: - key: master-docs-{{ checksum ".circleci/auto-cache-timestamp" }} + - run: name: Download & install conda if absent command: | @@ -76,15 +68,20 @@ jobs: name: Install packages in conda env command: | conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx=1.8 numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq + lxml mkl sphinx numpydoc pillow pandas -yq + conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq - run: name: Running CircleCI test (make html) command: | source activate testenv pip install -e . - set -o pipefail && cd doc && make html-strict SPHINXOPTS="-v" 2>&1 | tee log.txt + set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt no_output_timeout: 7h + - store_test_results: + path: doc/_build/test-results + - store_artifacts: + path: doc/_build/test-results + - save_cache: key: v1-packages+datasets-{{ checksum "week_num" }} paths: @@ -101,8 +98,6 @@ jobs: path: coverage - store_artifacts: path: doc/log.txt - - store_artifacts: - path: .circleci full-build: @@ -127,58 +122,26 @@ jobs: # Installing required packages for `make -C doc check command` to work. - run: sudo -E apt-get -yq update - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: - name: Today & Week # Fixing the date and week of the year in files to generate & preserve cache key. - command: | - echo 2019-04-12 > today - echo 14 > week_num - - restore_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - - run: - name: Download & install conda if absent - command: | - if - ls $HOME/miniconda3/bin | grep conda -q - then - echo "(Mini)Conda already present from the cache." 
- else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - chmod +x ~/miniconda.sh && ~/miniconda.sh -b - fi + - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh + - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b - run: echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Create new conda env - command: | - if - conda env list | grep testenv - then - echo "Conda env testenv already exists courtesy of the cache." - else - conda create -n testenv -yq - fi - run: name: Install packages in conda env command: | - conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx=1.8 numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq + conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ + lxml mkl sphinx numpydoc pillow pandas -yq + conda install -n testenv nibabel sphinx-gallery junit-xml -c conda-forge -yq - run: name: Running CircleCI test (make html) command: | source activate testenv pip install -e . - set -o pipefail && cd doc && make html-strict SPHINXOPTS="-v" 2>&1 | tee log.txt + set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt no_output_timeout: 7h - - - save_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - paths: - - ../nilearn_data - - ../miniconda3 - - save_cache: - key: master-docs-{{ checksum ".circleci/auto-cache-timestamp" }} - paths: - - doc + - store_test_results: + path: doc/_build/test-results + - store_artifacts: + path: doc/_build/test-results - store_artifacts: path: doc/_build/html @@ -186,8 +149,6 @@ jobs: path: coverage - store_artifacts: path: doc/log.txt - - store_artifacts: - path: .circleci workflows: @@ -207,3 +168,14 @@ workflows: only: - master - test-circleci # test branch to check if merges occur on master as expected. + + nightly: + triggers: + - schedule: + cron: "0 6 * * *" + filters: + branches: + only: + - master + jobs: + - full-build diff --git a/.circleci/config.yml-presprint b/.circleci/config.yml-presprint deleted file mode 100644 index e272fec91b..0000000000 --- a/.circleci/config.yml-presprint +++ /dev/null @@ -1,171 +0,0 @@ -# quick-build rebuilds changes using the cached documentation. -# The cache is emptied everyday, forcing a full build on the day's first push. -# It doesn't operate on master branch. New branches are always built from scratch. -# full-build always rebuilds from scratch, without any cache. Only for changes in master branch. - -version: 2 - -jobs: - quick-build: - docker: - - image: circleci/python:3.6 - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.6" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - MATPLOTLIB_VERSION: "*" - - steps: - - checkout - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - run: sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. 
- - run: sudo -E apt-get -yq update - - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: - name: Today & Week # Saving today's date and current week of the year in files to generate daily & weekly new cache key respectively. - command: | - date +%F > today - date +%U > week_num - - restore_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - - restore_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - - - run: - name: Download & install conda if absent - command: | - if - ls $HOME/miniconda3/bin | grep conda -q - then - echo "(Mini)Conda already present from the cache." - else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - chmod +x ~/miniconda.sh && ~/miniconda.sh -b - fi - - run: - name: Setup conda path in env variables - command: | - echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Create new conda env - command: | - if - conda env list | grep testenv - then - echo "Conda env testenv already exists courtesy of the cache." - else - conda create -n testenv -yq - fi - - run: - name: Install packages in conda env - command: | - conda install -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq - - run: - name: Running CircleCI test (make html) - command: | - source activate testenv - pip install -e . - set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt - no_output_timeout: 7h - - save_cache: - key: v1-packages+datasets-{{ checksum "week_num" }} - paths: - - ../nilearn_data - - ../miniconda3 - - save_cache: - key: v1-docs-{{ .Branch }}-{{ checksum "today" }}-{{ checksum ".circleci/manual-cache-timestamp" }} - paths: - - doc - - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt - - - full-build: - docker: - - image: circleci/python:3.6 - environment: - DISTRIB: "conda" - PYTHON_VERSION: "3.6" - NUMPY_VERSION: "*" - SCIPY_VERSION: "*" - SCIKIT_LEARN_VERSION: "*" - MATPLOTLIB_VERSION: "*" - - steps: - - checkout - # Get rid of existing virtualenvs on circle ci as they conflict with conda. - # Trick found here: - # https://discuss.circleci.com/t/disable-autodetection-of-project-or-application-of-python-venv/235/10 - - run: cd && rm -rf ~/.pyenv && rm -rf ~/virtualenvs - # We need to remove conflicting texlive packages. - - run: sudo -E apt-get -yq remove texlive-binaries --purge - # Installing required packages for `make -C doc check command` to work. - - run: sudo -E apt-get -yq update - - run: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install dvipng texlive-latex-base texlive-latex-extra - - run: wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh - - run: chmod +x ~/miniconda.sh && ~/miniconda.sh -b - - run: echo 'export PATH="$HOME/miniconda3/bin:$PATH"' >> $BASH_ENV - - run: - name: Install packages in conda env - command: | - conda create -n testenv python=3.6 numpy scipy scikit-learn matplotlib pandas \ - lxml mkl sphinx numpydoc pillow pandas -yq - conda install -n testenv nibabel sphinx-gallery -c conda-forge -yq - - run: - name: Running CircleCI test (make html) - command: | - source activate testenv - pip install -e . 
- set -o pipefail && cd doc && make html-strict 2>&1 | tee log.txt - no_output_timeout: 7h - - - store_artifacts: - path: doc/_build/html - - store_artifacts: - path: coverage - - store_artifacts: - path: doc/log.txt - - -workflows: - version: 2 - push: - jobs: - - quick-build: - filters: - branches: - ignore: - - master - - test-circleci # test branch to check if merges occur on master as expected. - - - full-build: - filters: - branches: - only: - - master - - test-circleci # test branch to check if merges occur on master as expected. - - nightly: - triggers: - - schedule: - cron: "0 6 * * *" - filters: - branches: - only: - - master - jobs: - - full-build diff --git a/.circleci/manual-cache-timestamp b/.circleci/manual-cache-timestamp index 3d0c4f522e..e3790b2eeb 100644 --- a/.circleci/manual-cache-timestamp +++ b/.circleci/manual-cache-timestamp @@ -1 +1 @@ -2019-04-13 20:12:23.519962 \ No newline at end of file +2019-04-19 15:05:58.522064 \ No newline at end of file diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..5e2d545938 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,5 @@ +[run] +branch = True +parallel = True +omit = + */nilearn/externals/* diff --git a/.gitignore b/.gitignore index 4beefc864a..0709c70f7b 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ build *# nilearn.egg-info/ +env/ dist/ doc/.nojekyll doc/building_blocks/generated/ @@ -34,4 +35,4 @@ tags .idea/ -doc/themes/nilearn/static/jquery.js \ No newline at end of file +doc/themes/nilearn/static/jquery.js diff --git a/.travis.yml b/.travis.yml index 1b20644e30..e63407049c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,49 +2,58 @@ sudo: required dist: xenial language: python -python: "3.5" - -virtualenv: - system_site_packages: true env: global: - - TEST_RUN_FOLDER="/tmp" # folder where the tests are run from + - TEST_RUN_FOLDER="/tmp" matrix: # Do not wait for the allowed_failures entry to finish before # setting the status fast_finish: true allow_failures: - # allow_failures seems to be keyed on the python version. - - python: 3.5 + # allow_failures keyed to python 3.5 & skipping tests. + - python: "3.5" + env: DISTRIB="travisci" PYTHON_VERSION="3.5" FLAKE8_VERSION="*" SKIP_TESTS="true" include: - # without matplotlib - - env: DISTRIB="conda" PYTHON_VERSION="3.5" - NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" - SCIKIT_LEARN_VERSION="*" COVERAGE="true" + - name: "Python 3.5 minimum package versions without Matplotlib" + python: "3.5" + env: DISTRIB="travisci" PYTHON_VERSION="3.5" + NUMPY_VERSION="1.11" SCIPY_VERSION="0.19" PANDAS_VERSION="*" + SCIKIT_LEARN_VERSION="0.19" COVERAGE="true" JOBLIB_VERSION="0.11" LXML_VERSION="*" - - env: DISTRIB="conda" PYTHON_VERSION="3.5" + - name: "Python 3.5 latest package versions" + python: "3.5" + env: DISTRIB="travisci" PYTHON_VERSION="3.5" NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" + JOBLIB_VERSION="0.11" LXML_VERSION="*" - - env: DISTRIB="conda" PYTHON_VERSION="3.6" + - name: "Python 3.6 latest package versions" + python: "3.6" + env: DISTRIB="travisci" PYTHON_VERSION="3.6" NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*" SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true" - LXML_VERSION="*" - - env: DISTRIB="conda" PYTHON_VERSION="3.7" + JOBLIB_VERSION="0.12" LXML_VERSION="*" + # joblib.Memory switches from keyword cachedir to location in version 0.12 + # Making sure we get the deprecation warning. 
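Editor's aside (not part of the patch): the comment above refers to
joblib.Memory renaming its ``cachedir`` keyword to ``location`` in joblib
0.12. A minimal sketch of code that works on either side of the pins in
this build matrix; the cache path is a placeholder:

    import joblib

    # joblib.Memory takes `cachedir` up to 0.11 and `location` from 0.12
    # on; in 0.12 the old keyword still works but emits the
    # DeprecationWarning this build is meant to surface.
    major, minor = (int(part) for part in joblib.__version__.split(".")[:2])
    if (major, minor) >= (0, 12):
        memory = joblib.Memory(location="/tmp/joblib_cache", verbose=0)
    else:
        memory = joblib.Memory(cachedir="/tmp/joblib_cache", verbose=0)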
+
+    - name: "Python 3.7 latest package versions"
+      python: "3.7"
+      env: DISTRIB="travisci" PYTHON_VERSION="3.7"
           NUMPY_VERSION="*" SCIPY_VERSION="*" PANDAS_VERSION="*"
           SCIKIT_LEARN_VERSION="*" MATPLOTLIB_VERSION="*" COVERAGE="true"
-          LXML_VERSION="*"
+          JOBLIB_VERSION="*" LXML_VERSION="*"

  # FLAKE8 linting on diff wrt common ancestor with upstream/master
  # Note: the python value is only there to trigger allow_failures
-    - python: 3.5
-      env: DISTRIB="conda" PYTHON_VERSION="3.5" FLAKE8_VERSION="*" SKIP_TESTS="true"
+    - name: Python 3.5 Flake8 no tests
+      python: "3.5"
+      env: DISTRIB="travisci" PYTHON_VERSION="3.5" FLAKE8_VERSION="*" SKIP_TESTS="true"

install: source continuous_integration/install.sh

-before_script: make clean
+before_script: make clean

script: source continuous_integration/test_script.sh
diff --git a/AUTHORS.rst b/AUTHORS.rst
index c67751492c..ed04bc5d1a 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -3,38 +3,36 @@ People
 ------
 
-This work is made available by a community of people, amongst which
+This work is made available by a community of people, which
+originated from the `INRIA Parietal Project Team `_
-and the `scikit-learn `_ folks, in
-particular:
-
-* Alexandre Abraham
-* `Alexandre Gramfort `_
-* Vincent Michel
-* Bertrand Thirion
-* `Fabian Pedregosa `_
-* `Gael Varoquaux `_
-* Philippe Gervais
-* Michael Eickenberg
-* Danilo Bzdok
-* Loïc Estève
-* Kamalakar Reddy Daddy
-* Elvis Dohmatob
-* Alexandre Abadie
-* Andres Hoyos Idrobo
-* Salma Bougacha
-* Mehdi Rahim
-* Sylvain Lanuzel
-* `Kshitij Chawla `_
-
-Many of also contributed outside of Parietal, notably:
-
-* `Chris Filo Gorgolewski `_
-* `Ben Cipollini `_
-* Julia Huntenburg
-* Martin Perez-Guevara
-
-Thanks to M. Hanke and Y. Halchenko for data and packaging.
+and the `scikit-learn `_ community, but grew much further.
+
+An up-to-date list of contributors can be seen on `GitHub
+`_.
+
+Additional credit goes to M. Hanke and Y. Halchenko for data and packaging.
+
+.. _core_devs:
+
+Core developers
+.................
+
+The nilearn core developers are:
+
+* Alexandre Gramfort https://github.com/agramfort
+* Ben Cipollini https://github.com/bcipolli
+* Bertrand Thirion https://github.com/bthirion
+* Chris Gorgolewski https://github.com/chrisgorgo
+* Danilo Bzdok https://github.com/banilo
+* Elizabeth DuPre https://github.com/emdupre
+* Gael Varoquaux https://github.com/GaelVaroquaux
+* Jerome Dockes https://github.com/jeromedockes
+* Julia Huntenburg https://github.com/juhuntenburg
+* KamalakerDadi https://github.com/KamalakerDadi
+* Kshitij Chawla https://github.com/kchawla-pi
+* Mehdi Rahim https://github.com/mrahim
+* Salma Bougacha https://github.com/salma1601
 
 Funding
 ........
@@ -45,7 +43,8 @@
 Mehdi Rahim, Philippe Gervais where payed by the `NiConnect
 project, funded by the French `Investissement d'Avenir
 `_.
 
-NiLearn is also supported by `DigiCosme `_ |digicomse logo|
+NiLearn is also supported by `DigiCosme `_
+|digicosme logo|
 and `DataIA `_ |dataia_logo|.
 
 .. _citing:
@@ -74,6 +73,10 @@ See the scikit-learn documentation on `how to cite
 `_.
 
-.. |digicomse logo| image:: logos/digi-saclay-logo-small.png
+.. |digicosme logo| image:: logos/digi-saclay-logo-small.png
+   :height: 25
+   :alt: DigiCosme Logo
+
+.. |dataia_logo| image:: logos/dataia.png
    :height: 25
-   :alt: DigiComse Logo \ No newline at end of file
+   :alt: DataIA Logo
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 2f792f5c27..44094ac85e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,8 +1,7 @@
 ..
_contributing: -============ -Contributing -============ +How to contribute to nilearn +============================= This project is a community effort, and everyone is welcome to contribute. @@ -19,7 +18,7 @@ you first open an `issue `_ before sending a :ref:`pull request`. Opening an issue -================ +------------------ Nilearn uses issues for tracking bugs, requesting potential features, and holding project discussions. @@ -37,7 +36,7 @@ Labels can be assigned a variety of issues, such as: .. _pull request: Pull Requests -============= +--------------- We welcome pull requests from all community members. We follow the same conventions as scikit-learn. You can find the recommended process to submit code in the @@ -47,7 +46,7 @@ We follow the same conventions as scikit-learn. You can find the recommended pro .. _git_repo: Retrieving the latest code -========================== +--------------------------- We use `Git `_ for version control and `GitHub `_ for hosting our main repository. If you are @@ -64,7 +63,7 @@ or if you have write privileges:: git clone git@github.com:nilearn/nilearn.git Coding guidelines -================= +------------------ Nilearn follows the coding conventions used by scikit-learn. `Please read them `_ diff --git a/Makefile b/Makefile index 15d6f8d4dd..ec14caa46b 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,6 @@ PYTHON ?= python CYTHON ?= cython -NOSETESTS ?= nosetests -NOSETESTS_OPTIONS := $(shell pip list | grep nose-timer > /dev/null && \ - echo '--with-timer --timer-top-n 50') CTAGS ?= ctags all: clean test doc-noplot @@ -32,15 +29,15 @@ inplace: $(PYTHON) setup.py build_ext -i test-code: - $(NOSETESTS) -s nilearn $(NOSETESTS_OPTIONS) + python -m pytest --pyargs nilearn --cov=nilearn + test-doc: - $(NOSETESTS) -s --with-doctest --doctest-tests --doctest-extension=rst \ - --doctest-extension=inc --doctest-fixtures=_fixture `find doc/ -name '*.rst'` + pytest --doctest-glob='*.rst' `find doc/ -name '*.rst'` + test-coverage: rm -rf coverage .coverage - $(NOSETESTS) -s --with-coverage --cover-html --cover-html-dir=coverage \ - --cover-package=nilearn nilearn + pytest --pyargs nilearn --showlocals --cov=nilearn --cov-report=html:coverage test: test-code test-doc @@ -66,4 +63,3 @@ doc: .PHONY : pdf pdf: make -C doc pdf - diff --git a/README.rst b/README.rst index 7885ed878b..e2f42830a9 100644 --- a/README.rst +++ b/README.rst @@ -38,11 +38,12 @@ Dependencies The required dependencies to use the software are: -* Python >= 2.7, +* Python >= 3.5, * setuptools * Numpy >= 1.11 -* SciPy >= 0.17 -* Scikit-learn >= 0.18 +* SciPy >= 0.19 +* Scikit-learn >= 0.19 +* Joblib >= 0.11 * Nibabel >= 2.0.2 If you are using nilearn plotting functionalities or running the diff --git a/appveyor.yml b/appveyor.yml index 605c79581b..ed77a89874 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -24,10 +24,10 @@ install: # See similar fix which made for travis and circleci # https://github.com/nilearn/nilearn/pull/1525 # Should be removed after a new matplotlib release 2.1.1 - - "conda install pip numpy scipy scikit-learn nose wheel matplotlib -y -q" + - "conda install pip numpy scipy scikit-learn joblib nose pytest wheel matplotlib -y -q" # Install other nilearn dependencies - - "pip install nibabel coverage nose-timer" + - "pip install nibabel coverage nose-timer pytest-cov" - "python setup.py bdist_wheel" - ps: "ls dist" @@ -41,7 +41,7 @@ test_script: # Change to a non-source folder to make sure we run the tests on the # installed library. 
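  # (Editor's note, not part of the patch: `--pyargs nilearn` below makes
  # pytest import nilearn from site-packages and run the tests bundled with
  # the installed package, which is why this step first leaves the source
  # checkout.)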
- "cd C:\\" - - "python -c \"import nose; nose.main()\" -v -s nilearn --with-timer --timer-top-n 50" + - "pytest --pyargs nilearn -v" artifacts: # Archive the generated packages in the ci.appveyor.com build report. diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 4601af26a9..5ae4de4c1a 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -30,7 +30,7 @@ jobs: - script: | pip install . - nosetests ./nilearn -v + pytest ./nilearn -v displayName: 'test' - task: PublishTestResults@2 diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000..9fd6de2da3 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "*externals/.*" # ignore folders and all its contents diff --git a/continuous_integration/install.sh b/continuous_integration/install.sh index 512cbdf2f3..a387010a74 100755 --- a/continuous_integration/install.sh +++ b/continuous_integration/install.sh @@ -22,21 +22,20 @@ create_new_venv() { deactivate virtualenv --system-site-packages testvenv source testvenv/bin/activate - pip install nose + pip install nose pytest } -print_conda_requirements() { - # Echo a conda requirement string for example +echo_requirements_string() { + # Echo a requirement string for example # "pip nose python='2.7.3 scikit-learn=*". It has a hardcoded # list of possible packages to install and looks at _VERSION # environment variables to know whether to install a given package and # if yes which version to install. For example: # - for numpy, NUMPY_VERSION is used # - for scikit-learn, SCIKIT_LEARN_VERSION is used - TO_INSTALL_ALWAYS="pip nose" + TO_INSTALL_ALWAYS="pip nose pytest" REQUIREMENTS="$TO_INSTALL_ALWAYS" - TO_INSTALL_MAYBE="python numpy scipy matplotlib scikit-learn pandas \ -flake8 lxml" + TO_INSTALL_MAYBE="numpy scipy matplotlib scikit-learn pandas flake8 lxml joblib" for PACKAGE in $TO_INSTALL_MAYBE; do # Capitalize package name and add _VERSION PACKAGE_VERSION_VARNAME="${PACKAGE^^}_VERSION" @@ -45,45 +44,25 @@ flake8 lxml" # dereference $PACKAGE_VERSION_VARNAME to figure out the # version to install PACKAGE_VERSION="${!PACKAGE_VERSION_VARNAME}" - if [ -n "$PACKAGE_VERSION" ]; then - REQUIREMENTS="$REQUIREMENTS $PACKAGE=$PACKAGE_VERSION" + if [[ -n "$PACKAGE_VERSION" ]]; then + if [[ "$PACKAGE_VERSION" == "*" ]]; then + REQUIREMENTS="$REQUIREMENTS $PACKAGE" + else + REQUIREMENTS="$REQUIREMENTS $PACKAGE==$PACKAGE_VERSION" + fi fi done echo $REQUIREMENTS } -create_new_conda_env() { - # Skip Travis related code on circle ci. 
- if [ -z $CIRCLECI ]; then - # Deactivate the travis-provided virtual environment and setup a - # conda-based environment instead - deactivate - fi - - # Use the miniconda installer for faster download / install of conda - # itself - wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - -O ~/miniconda.sh - chmod +x ~/miniconda.sh && ~/miniconda.sh -b - export PATH=$HOME/miniconda3/bin:$PATH - echo $PATH - conda update --quiet --yes conda - - # Configure the conda environment and put it in the path using the - # provided versions - REQUIREMENTS=$(print_conda_requirements) - echo "conda requirements string: $REQUIREMENTS" - conda create -n testenv --quiet --yes $REQUIREMENTS - source activate testenv +create_new_travisci_env() { + REQUIREMENTS=$(echo_requirements_string) + pip install ${REQUIREMENTS} + pip install pytest pytest-cov if [[ "$INSTALL_MKL" == "true" ]]; then # Make sure that MKL is used - conda install --quiet --yes mkl - elif [[ -z $CIRCLECI ]]; then - # Travis doesn't use MKL but circle ci does for speeding up examples - # generation in the html documentation. - # Make sure that MKL is not used - conda remove --yes --features mkl || echo "MKL not installed" + pip install mkl fi } @@ -91,16 +70,16 @@ if [[ "$DISTRIB" == "neurodebian" ]]; then create_new_venv pip install nose-timer bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh) - sudo apt-get install -qq python-scipy python-nose python-nibabel python-sklearn + sudo apt-get install -qq python-scipy python-nose python-nibabel python-sklearn python-joblib -elif [[ "$DISTRIB" == "conda" ]]; then - create_new_conda_env +elif [[ "$DISTRIB" == "travisci" ]]; then + create_new_travisci_env pip install nose-timer # Note: nibabel is in setup.py install_requires so nibabel will # always be installed eventually. Defining NIBABEL_VERSION is only # useful if you happen to want a specific nibabel version rather # than the latest available one. - if [ -n "$NIBABEL_VERSION" ]; then + if [[ -n "$NIBABEL_VERSION" ]]; then pip install nibabel=="$NIBABEL_VERSION" fi diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py index 1822dd172e..3c2f3319a3 100644 --- a/continuous_integration/show-python-packages-versions.py +++ b/continuous_integration/show-python-packages-versions.py @@ -1,6 +1,6 @@ import sys -DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel'] +DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'joblib', 'matplotlib', 'nibabel'] def print_package_version(package_name, indent=' '): diff --git a/continuous_integration/test_script.sh b/continuous_integration/test_script.sh index 1dfa2578d1..4fbb32ba2e 100755 --- a/continuous_integration/test_script.sh +++ b/continuous_integration/test_script.sh @@ -11,6 +11,7 @@ if [[ "$SKIP_TESTS" != "true" ]]; then # Copy setup.cfg to TEST_RUN_FOLDER where we are going to run the tests from # Mainly for nose config settings cp setup.cfg "$TEST_RUN_FOLDER" + cp .coveragerc "$TEST_RUN_FOLDER" # We want to back out of the current working directory to make # sure we are using nilearn installed in site-packages rather # than the one from the current working directory diff --git a/doc/conf.py b/doc/conf.py index 950f619c59..392ccb353d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -227,12 +227,10 @@ # (source start file, target name, title, author, documentclass # [howto/manual]). 
latex_documents = [ - ('index', 'nilearn.tex', u'NeuroImaging with scikit-learn', - ur"""Gaël Varoquaux and Alexandre Abraham""" - + r"\\\relax ~\\\relax http://nilearn.github.io", - 'manual'), - -] + ('index', 'nilearn.tex', 'NeuroImaging with scikit-learn', + 'Gaël Varoquaux and Alexandre Abraham' + + r"\\\relax ~\\\relax http://nilearn.github.io", 'manual'), + ] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -283,13 +281,18 @@ _python_doc_base = 'http://docs.python.org/3.6' +# Scraper, copied from https://github.com/mne-tools/mne-python/ +from nilearn.reporting import _ReportScraper +report_scraper = _ReportScraper() +scrapers = ('matplotlib', report_scraper) + # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': (_python_doc_base, None), 'numpy': ('http://docs.scipy.org/doc/numpy', None), 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None), 'matplotlib': ('http://matplotlib.org/', None), - 'sklearn': ('http://scikit-learn.org/0.18', None), + 'sklearn': ('http://scikit-learn.org/stable/', None), 'nibabel': ('http://nipy.org/nibabel', None), 'pandas': ('http://pandas.pydata.org', None), } @@ -302,7 +305,9 @@ sphinx_gallery_conf = { 'doc_module': 'nilearn', 'backreferences_dir': os.path.join('modules', 'generated'), - 'reference_url': {'nilearn': None} + 'reference_url': {'nilearn': None}, + 'junit': '../test-results/sphinx-gallery/junit.xml', + 'image_scrapers': scrapers, } # Get rid of spurious warnings due to some interaction between @@ -311,7 +316,6 @@ # details numpydoc_show_class_members = False - def touch_example_backreferences(app, what, name, obj, options, lines): # generate empty examples files, so that we don't get # inclusion errors if there are no examples for a class / module @@ -328,3 +332,4 @@ def touch_example_backreferences(app, what, name, obj, options, lines): def setup(app): app.add_javascript('copybutton.js') app.connect('autodoc-process-docstring', touch_example_backreferences) + report_scraper.app = app diff --git a/doc/connectivity/connectome_extraction.rst b/doc/connectivity/connectome_extraction.rst index 6707f081d4..fb62356326 100644 --- a/doc/connectivity/connectome_extraction.rst +++ b/doc/connectivity/connectome_extraction.rst @@ -49,12 +49,12 @@ conditioned on all the others. To recover well the interaction structure, a **sparse inverse covariance -estimator** is necessary. The GraphLasso, implemented in scikit-learn's -estimator :class:`sklearn.covariance.GraphLassoCV` is a good, simple +estimator** is necessary. The GraphicalLasso, implemented in scikit-learn's +estimator :class:`sklearn.covariance.GraphicalLassoCV` is a good, simple solution. To use it, you need to create an estimator object:: - >>> from sklearn.covariance import GraphLassoCV - >>> estimator = GraphLassoCV() + >>> from sklearn.covariance import GraphicalLassoCV + >>> estimator = GraphicalLassoCV() And then you can fit it on the activation time series, for instance extracted in :ref:`the previous section `:: @@ -95,7 +95,7 @@ of the estimator:: The parameter controlling the sparsity is set by `cross-validation `_ scheme. If you want to specify it manually, use the estimator - :class:`sklearn.covariance.GraphLasso`. + :class:`sklearn.covariance.GraphicalLasso`. .. topic:: **Full example** @@ -126,7 +126,7 @@ differing connection values across subjects. For this, nilearn provides the :class:`nilearn.connectome.GroupSparseCovarianceCV` -estimator. 
Its usage is similar to the GraphicalLassoCV object, but it takes
 a list of time series::
 
    >>> estimator.fit([time_series_1, time_series_2, ...])  # doctest: +SKIP
@@ -186,7 +186,7 @@ with different precision matrices, but sharing a common sparsity pattern:
 10 brain regions, for 20 subjects.
 
 A single-subject estimation can be performed using the
-:class:`sklearn.covariance.GraphLassoCV` estimator from scikit-learn.
+:class:`sklearn.covariance.GraphicalLassoCV` estimator from scikit-learn.
 
 It is also possible to fit a graph lasso on data from every subject all
 together.
diff --git a/doc/connectivity/resting_state_networks.rst b/doc/connectivity/resting_state_networks.rst
index e089ca135f..64b12d4979 100644
--- a/doc/connectivity/resting_state_networks.rst
+++ b/doc/connectivity/resting_state_networks.rst
@@ -64,13 +64,19 @@ We can visualize each component outlined over the brain:
 
 We can also plot the map for different components separately:
 
-.. |left_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_002.png
+.. |ic1| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_002.png
    :width: 23%
 
-.. |right_img| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_003.png
+.. |ic2| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_003.png
    :width: 23%
 
-.. centered:: |left_img| |right_img|
+.. |ic3| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_004.png
+   :width: 23%
+
+.. |ic4| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_005.png
+   :width: 23%
+
+.. centered:: |ic1| |ic2| |ic3| |ic4|
 
 .. seealso::
 
@@ -84,6 +90,14 @@ We can also plot the map for different components separately:
    a fair representation, you should display all components and
    investigate which one resemble those displayed above.
 
+Interpreting such components
+-----------------------------
+
+ICA, and related algorithms, extract patterns that coactivate in the
+signal. As a result, they find functional networks, but also patterns of
+non-neural activity, i.e., confounding signals. Both are visible in the
+plots of the components.
+
 An alternative to ICA: Dictionary learning
 ===========================================
 Recent work has shown that dictionary learning based techniques outperform
@@ -102,30 +116,37 @@ good extracted maps.
 
 Sparsity of output map is controlled by a parameter alpha: using a
 larger alpha yields sparser maps.
 
-We can fit both estimators to compare them. 4D plotting offers an efficient way
-to compare both resulting outputs.
+We can fit both estimators to compare them. 4D plotting (using
+:func:`nilearn.plotting.plot_prob_atlas`) offers an efficient way to
+compare both resulting outputs.
 
-.. |left_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_022.png
+.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_022.png
    :target: ../auto_examples/03_connectivity/plot_compare_decomposition.html
-   :width: 50%
-.. |right_img_decomp| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_023.png
-   :target: ../auto_examples/03_connectivity/plot_compare_decomposition.html
-   :width: 50%
+   :align: center
 
-..
|left_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_024.png
-   :target: ../auto_examples/03_connectivity/plot_compare_decomposition.html
-   :width: 50%
-.. |right_img_decomp_single| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_025.png
+.. figure:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_001.png
    :target: ../auto_examples/03_connectivity/plot_compare_decomposition.html
-   :width: 50%
-
+   :align: center
 
-.. centered:: |left_img_decomp| |right_img_decomp|
-.. centered:: |left_img_decomp_single| |right_img_decomp_single|
 
 Maps obtained with dictionary learning are often easier to exploit as they are
-less noisy than ICA maps, with blobs usually better defined. Typically,
+more contrasted than ICA maps, with blobs usually better defined. Typically,
 *smoothing can be lower than when doing ICA*.
+
+.. |dl1| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_023.png
+   :width: 23%
+
+.. |dl2| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_024.png
+   :width: 23%
+
+.. |dl3| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_025.png
+   :width: 23%
+
+.. |dl4| image:: ../auto_examples/03_connectivity/images/sphx_glr_plot_compare_decomposition_026.png
+   :width: 23%
+
+.. centered:: |dl1| |dl2| |dl3| |dl4|
+
 While dictionary learning computation time is comparable to CanICA, obtained
 atlases have been shown to outperform ICA in a variety of classification tasks.
diff --git a/doc/contributing.rst b/doc/contributing.rst
deleted file mode 100644
index e582053ea0..0000000000
--- a/doc/contributing.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../CONTRIBUTING.rst
diff --git a/doc/developers/group_sparse_covariance.rst b/doc/developers/group_sparse_covariance.rst
index 56e15e1c45..b95eadf867 100644
--- a/doc/developers/group_sparse_covariance.rst
+++ b/doc/developers/group_sparse_covariance.rst
@@ -369,7 +369,7 @@ used), then a tighter grid near the found maximum is computed, and so on.
 This allows for a very precise determination of the maximum location while
 reducing a lot the required evaluation number. The code is very close to what
 is done in
-:class:`sklearn.covariance.GraphLassoCV`.
+:class:`sklearn.covariance.GraphicalLassoCV`.
 
 
 Warm restart
diff --git a/doc/development.rst b/doc/development.rst
new file mode 100644
index 0000000000..4edac4347f
--- /dev/null
+++ b/doc/development.rst
@@ -0,0 +1,130 @@
+============================
+Nilearn development process
+============================
+
+.. contents::
+    :depth: 2
+    :local:
+
+How to help?
+=============
+
+* You are new to Python and you don't know how to do xy
+
+   - Create a question on `neurostars `_
+
+* If you discovered a bug, but don't know how to fix it
+
+   - Create `an issue `_
+
+* If you discovered a bug and know how to fix it, but don't know how to
+  get your code onto github (i.e., you have some Python experience but
+  have never used git/github, or have never written tests)
+
+   - Learn git and github: http://try.github.io/
+   - Learn what tests are and how to run them locally
+     (https://docs.pytest.org)
+   - Learn how to write doc/examples and build them locally
+     https://sphinx-gallery.github.io/
+
+* You want to contribute code
+
+   - See below
+
+
+How do we decide what code goes in?
+===================================
+
+
+Scope of the project
+---------------------
+
+Nilearn strives to develop open and powerful statistical analysis of
+brain volumes (as produced by multiple modalities: MRI, PET, and others).
+Its focus is to reach end users of the methods (as opposed to methods
+developers).
+
+Nilearn targets ease of use, but as Python code. In other words, we will
+not add graphical user interfaces, but we want our code to be as easy to
+understand as possible, with easy prototyping and debugging, even for
+beginners in Python.
+
+We are parsimonious in the way we add features to the project, as each
+one adds maintenance weight. Criteria include:
+
+* It must be in the scope of the project
+* It must correspond to an established practice (typically as used in
+  scientific publications)
+* It must have a concrete use case that can be demo-ed simply with nilearn:
+  an example, on real data, understandable by end-users.
+
+Part of the decision will also be about weighing the benefits (i.e., new
+features or ease of use for the users) with the cost (i.e., complexity of
+the code, runtime of the examples).
+
+In practice:
+
+* The new feature must be demoed in an example in a way that shows its
+  benefit to new users.
+* Because our infrastructure is limited, running all the examples must
+  lead to downloading a limited amount of data (gigabytes) and execute
+  in a reasonable amount of time (a few hours)
+* The new feature must be thoroughly tested (it should not decrease
+  code coverage)
+* The new feature may not introduce a new dependency
+
+Special cases:
+
+* A downloader for a new atlas: we are currently being very lenient for this:
+  if the atlas is published and can be used in an example, we will accept
+  the pull request (but see below for specifics).
+* A downloader for a new dataset: the larger the dataset is, the less
+  likely we are to consider including it. Datasets are meant to demo and
+  teach features, rather than be the basis of research.
+
+How to contribute a feature
+----------------------------
+
+To contribute a feature, first create an issue, in order to discuss
+whether the feature can be included or not, and the specifications of
+this feature. Once agreed on the feature, send us a pull request.
+
+There are specific guidelines about how to write code for the project.
+They can be found in the contributors guide, below.
+
+Special case: How to contribute an atlas
+.............................................
+
+We want atlases in nilearn to be internally consistent. Specifically,
+your atlas object should have three attributes, as with the existing
+atlases (a sketch follows this section):
+
+- ``description`` (bytes): A text description of the atlas. This should be
+  brief but thorough, describing the source (paper), relevant information
+  related to its construction (modality, dataset, method), and if there are
+  more than one map, a description of each map.
+- ``labels`` (list): a list of string labels corresponding to each atlas
+  label, in the same (numerical) order as the atlas labels
+- ``maps`` (list or string): the path to the nifti image, or a list of paths
+
+In addition, the atlas will need to be called by a fetcher. For example, see `here `__.
+
+Finally, as with other features, please provide a test for your atlas.
+Examples can be found `here
+`__
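Editor's aside (not part of the patch): a minimal sketch of a conforming
atlas object, following the three attributes listed above. The Bunch
container matches what existing nilearn fetchers return; every value here
is a hypothetical placeholder, not a real atlas:

    from sklearn.utils import Bunch

    # The three required attributes; values are illustrative only.
    atlas = Bunch(
        description=b"Source paper, modality, dataset and construction method.",
        labels=["Background", "Region A", "Region B"],
        maps="/path/to/atlas_maps.nii.gz",
    )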
+
+Who makes decisions
+--------------------
+
+We strongly aim to be a community-oriented project where decisions are
+made based on consensus according to the criteria described above.
+Decisions are made public, through discussion on issues and pull requests
+on GitHub.
+
+The decisions are made by the core contributors, i.e., people with write
+access to the repository, as listed :ref:`here `.
+
+If there are open questions, final decisions are made by the Temporary
+Benevolent Dictator, currently Gaël Varoquaux.
+
+.. include:: ../CONTRIBUTING.rst
diff --git a/doc/images/niftimasker_report.png b/doc/images/niftimasker_report.png
new file mode 100644
index 0000000000..664469b9a3
Binary files /dev/null and b/doc/images/niftimasker_report.png differ
diff --git a/doc/images/niftimasker_report_params.png b/doc/images/niftimasker_report_params.png
new file mode 100644
index 0000000000..7528695bcf
Binary files /dev/null and b/doc/images/niftimasker_report_params.png differ
diff --git a/doc/index.rst b/doc/index.rst
index b1bcd02011..3de66dff3b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -101,6 +101,6 @@
    user_guide.rst
    auto_examples/index.rst
    whats_new.rst
-   contributing.rst
+   development.rst
 
 Nilearn is part of the `NiPy ecosystem `_.
diff --git a/doc/install_doc_component.html b/doc/install_doc_component.html
index c6584d3961..d1e97934b9 100644
--- a/doc/install_doc_component.html
+++ b/doc/install_doc_component.html
@@ -53,7 +53,8 @@
                     as an alternative.

Nilearn requires a Python installation and the following - dependencies: ipython, scipy, scikit-learn, matplotlib and nibabel.

+ dependencies: ipython, scipy, scikit-learn, joblib, matplotlib + and nibabel.

Second: open a Command Prompt

(Press "Win-R", type "cmd" and press "Enter". This will open @@ -83,7 +84,8 @@ it will save you time and trouble.

Nilearn requires a Python installation and the following - dependencies: ipython, scipy, scikit-learn, matplotlib and nibabel.

+ dependencies: ipython, scipy, scikit-learn, joblib, + matplotlib and nibabel.

Second: open a Terminal

(Navigate to /Applications/Utilities and double-click on @@ -114,7 +116,8 @@

Install or ask your system administrator to install the following packages using the distribution package manager: ipython , scipy, scikit-learn (sometimes called sklearn, - or python-sklearn), matplotlib (sometimes + or python-sklearn), joblib, + matplotlib (sometimes called python-matplotlib) and nibabel (sometimes called python-nibabel).

diff --git a/doc/logos/dataia.png b/doc/logos/dataia.png new file mode 100644 index 0000000000..fae26199d3 Binary files /dev/null and b/doc/logos/dataia.png differ diff --git a/doc/manipulating_images/manipulating_images.rst b/doc/manipulating_images/manipulating_images.rst index 57993de8ad..8c388ff426 100644 --- a/doc/manipulating_images/manipulating_images.rst +++ b/doc/manipulating_images/manipulating_images.rst @@ -8,7 +8,7 @@ This chapter discusses how nilearn can be used to do simple operations on brain images. -.. contents:: **Chapters contents** +.. contents:: **Chapter contents** :local: :depth: 1 @@ -198,7 +198,7 @@ brain. It is thus convenient to apply a brain mask in order to convert the :width: 100% Note that in an analysis pipeline, this operation is best done using the -:ref:`masker objects `. For completness, we give code to +:ref:`masker objects `. For completeness, we give the code to do it manually below: .. literalinclude:: ../../examples/01_plotting/plot_visualization.py @@ -219,8 +219,8 @@ statistical test. This requires a chain of image operations on the input data. Here is a possible recipe for computing an ROI mask: - * **Smoothing**: Before a statistical test, it is often use to smooth a bit - the image using :func:`nilearn.image.smooth_img`, typically fwhm=6 for + * **Smoothing**: Before a statistical test, it is often useful to smooth the image a bit + using :func:`nilearn.image.smooth_img`, typically fwhm=6 for fMRI. * **Selecting voxels**: Given the smoothed data, we can select voxels diff --git a/doc/manipulating_images/masker_objects.rst b/doc/manipulating_images/masker_objects.rst index 5e2a2c2330..dc2f06dafd 100644 --- a/doc/manipulating_images/masker_objects.rst +++ b/doc/manipulating_images/masker_objects.rst @@ -136,12 +136,25 @@ mask computation parameters. The mask can be retrieved and visualized from the `mask_img_` attribute of the masker: +.. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py + :start-after: # A NiftiMasker with the default strategy + :end-before: # Plot the generated mask using the .generate_report method + +.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_002.png + :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html + :align: center + :scale: 40 + +Alternatively, the mask can be visualized using the `generate_report` +method of the masker. The generated report can be viewed in a Jupyter notebook, +opened in a new browser tab using `report.open_in_browser()`, +or saved as a portable HTML file `report.save_as_html(output_filepath)`. + .. literalinclude:: ../../examples/04_manipulating_images/plot_mask_computation.py :start-after: # We need to specify an 'epi' mask_strategy, as this is raw EPI data :end-before: # Generate mask with strong opening - -.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_004.png +.. figure:: /images/niftimasker_report.png :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -163,7 +176,7 @@ Controling these arguments set the fine aspects of the mask. See the functions documentation, or :doc:`the NiftiMasker example <../auto_examples/04_manipulating_images/plot_mask_computation>`. -.. figure:: ../auto_examples/04_manipulating_images/images/sphx_glr_plot_mask_computation_005.png +.. 
figure:: /images/niftimasker_report_params.png :target: ../auto_examples/04_manipulating_images/plot_mask_computation.html :scale: 50% @@ -177,12 +190,13 @@ preparation:: >>> from nilearn import input_data >>> masker = input_data.NiftiMasker() - >>> masker # doctest: +ELLIPSIS + >>> masker # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE NiftiMasker(detrend=False, dtype=None, high_pass=None, low_pass=None, mask_args=None, mask_img=None, mask_strategy='background', - memory=Memory(...), memory_level=1, sample_mask=None, - sessions=None, smoothing_fwhm=None, standardize=False, t_r=None, - target_affine=None, target_shape=None, verbose=0) + memory=Memory(...), memory_level=1, reports=True, + sample_mask=None, sessions=None, smoothing_fwhm=None, + standardize=False, t_r=None, target_affine=None, target_shape=None, + verbose=0) .. note:: @@ -234,7 +248,7 @@ Temporal Filtering and confound removal properties, before conversion to voxel signals. - **Standardization**. Parameter ``standardize``: Signals can be - standardized (scaled to unit variance). + standardized (scaled to unit variance). - **Frequency filtering**. Low-pass and high-pass filters can be used to remove artifacts. Parameters: ``high_pass`` and ``low_pass``, specified @@ -242,7 +256,7 @@ properties, before conversion to voxel signals. the ``t_r`` parameter: ``loss_pass=.5, t_r=2.1``). - **Confound removal**. Two ways of removing confounds are provided: simple - detrending or using prespecified confounds, such as behavioral or movement + detrending or using prespecified confounds, such as behavioral or movement information. * Linear trends can be removed by activating the `detrend` parameter. @@ -251,7 +265,7 @@ properties, before conversion to voxel signals. signal of interest (e.g., the neural correlates of cognitive tasks). It is not activated by default in :class:`NiftiMasker` but is recommended in almost all scenarios. - + * More complex confounds, measured during the acquision, can be removed by passing them to :meth:`NiftiMasker.transform`. If the dataset provides a confounds file, just pass its path to the masker. diff --git a/doc/modules/reference.rst b/doc/modules/reference.rst index 8ea986a79f..6f43d06135 100644 --- a/doc/modules/reference.rst +++ b/doc/modules/reference.rst @@ -71,6 +71,7 @@ uses. fetch_atlas_harvard_oxford fetch_atlas_msdl fetch_coords_power_2011 + fetch_coords_seitzman_2018 fetch_atlas_smith_2009 fetch_atlas_yeo_2011 fetch_atlas_aal @@ -269,6 +270,7 @@ uses. RegionExtractor Parcellations + ReNA :mod:`nilearn.mass_univariate`: Mass-univariate analysis diff --git a/doc/themes/nilearn/layout.html b/doc/themes/nilearn/layout.html index 60d25c1665..ae52a6df74 100644 --- a/doc/themes/nilearn/layout.html +++ b/doc/themes/nilearn/layout.html @@ -261,7 +261,7 @@

Machine learning for Neuro-Imaging in Python

News

    -
  • April 12th 2019: Nilearn 0.5.2 released +

  • April 17th 2019: Nilearn 0.5.2 released

  • April 12th 2019: Nilearn 0.5.1 released

  • @@ -290,7 +290,7 @@

    Development

    href="https://github.com/nilearn/nilearn">Nilearn on GitHub

  • All material Free Software: BSD license (3 clause).

  • Authors

  • -
  • Contributing

  • +
  • Contributing

{% endif %} diff --git a/doc/themes/nilearn/static/nature.css_t b/doc/themes/nilearn/static/nature.css_t index e6e3b39a2b..7397860e39 100644 --- a/doc/themes/nilearn/static/nature.css_t +++ b/doc/themes/nilearn/static/nature.css_t @@ -420,6 +420,12 @@ div.body p.sphx-glr-script-out { margin: -.9ex 0ex; } +div.body div.sphx-glr-download-link-note { + max-width: 8em; + float: right; + margin-right: -1em; +} + div.bodywrapper { margin: 0 246px 0 auto; /*border-right: 1px solid #EEE;*/ @@ -1683,3 +1689,22 @@ ul#tab li div.contents p{ p.sphx-glr-horizontal { margin-top: 2em; } + + +/* Sphinx-gallery Report embedding */ +div.sg-report { + padding: 0pt; + transform: scale(.95); +} + +div.sg-report iframe { + display: block; + border-style: none; + transform: scale(.85); + height: 470px; + margin-left: -12%; /* Negative because of .8 scaling */ + margin-top: -4%; + padding: 0pt; + margin-bottom: 0pt; + width: 126%; /* More than 100% because of .8 scaling */ +} diff --git a/doc/themes/nilearn/static/sphinxdoc.css b/doc/themes/nilearn/static/sphinxdoc.css deleted file mode 100644 index b680a95710..0000000000 --- a/doc/themes/nilearn/static/sphinxdoc.css +++ /dev/null @@ -1,339 +0,0 @@ -/* - * sphinxdoc.css_t - * ~~~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- sphinxdoc theme. Originally created by - * Armin Ronacher for Werkzeug. - * - * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; - font-size: 14px; - letter-spacing: -0.01em; - line-height: 150%; - text-align: center; - background-color: #BFD1D4; - color: black; - padding: 0; - border: 1px solid #aaa; - - margin: 0px 80px 0px 80px; - min-width: 740px; -} - -div.document { - background-color: white; - text-align: left; - background-image: url(contents.png); - background-repeat: repeat-x; -} - -div.bodywrapper { - margin: 0 240px 0 0; - border-right: 1px solid #ccc; -} - -div.body { - margin: 0; - padding: 0.5em 20px 20px 20px; -} - -div.related { - font-size: 1em; -} - -div.related ul { - background-image: url(navigation.png); - height: 2em; - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; -} - -div.related ul li { - margin: 0; - padding: 0; - height: 2em; - float: left; -} - -div.related ul li.right { - float: right; - margin-right: 5px; -} - -div.related ul li a { - margin: 0; - padding: 0 5px 0 5px; - line-height: 1.75em; - color: #EE9816; -} - -div.related ul li a:hover { - color: #3CA8E7; -} - -div.sphinxsidebarwrapper { - padding: 0; -} - -div.sphinxsidebar { - margin: 0; - padding: 0.5em 15px 15px 0; - width: 210px; - float: right; - font-size: 1em; - text-align: left; -} - -div.sphinxsidebar h3, div.sphinxsidebar h4 { - margin: 1em 0 0.5em 0; - font-size: 1em; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border: 1px solid #86989B; - background-color: #AFC1C4; -} - -div.sphinxsidebar h3 a { - color: white; -} - -div.sphinxsidebar ul { - padding-left: 1.5em; - margin-top: 7px; - padding: 0; - line-height: 130%; -} - -div.sphinxsidebar ul ul { - margin-left: 20px; -} - -div.footer { - background-color: #E3EFF1; - color: #86989B; - padding: 3px 8px 3px 0; - clear: both; - font-size: 0.8em; - text-align: right; -} - -div.footer a { - color: #86989B; - text-decoration: underline; -} - -/* -- body styles 
----------------------------------------------------------- */ - -p { - margin: 0.8em 0 0.5em 0; -} - -a { - color: #CA7900; - text-decoration: none; -} - -a:hover { - color: #2491CF; -} - -div.body a { - text-decoration: underline; -} - -h1 { - margin: 0; - padding: 0.7em 0 0.3em 0; - font-size: 1.5em; - color: #11557C; -} - -h2 { - margin: 1.3em 0 0.2em 0; - font-size: 1.35em; - padding: 0; -} - -h3 { - margin: 1em 0 -0.3em 0; - font-size: 1.2em; -} - -div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { - color: black!important; -} - -h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { - display: none; - margin: 0 0 0 0.3em; - padding: 0 0.2em 0 0.2em; - color: #aaa!important; -} - -h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, -h5:hover a.anchor, h6:hover a.anchor { - display: inline; -} - -h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, -h5 a.anchor:hover, h6 a.anchor:hover { - color: #777; - background-color: #eee; -} - -a.headerlink { - color: #c60f0f!important; - font-size: 1em; - margin-left: 6px; - padding: 0 4px 0 4px; - text-decoration: none!important; -} - -a.headerlink:hover { - background-color: #ccc; - color: white!important; -} - -cite, code, tt { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.01em; -} - -tt { - background-color: #f2f2f2; - border-bottom: 1px solid #ddd; - color: #333; -} - -tt.descname, tt.descclassname, tt.xref { - border: 0; -} - -hr { - border: 1px solid #abc; - margin: 2em; -} - -a tt { - border: 0; - color: #CA7900; -} - -a tt:hover { - color: #2491CF; -} - -pre { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.015em; - line-height: 120%; - padding: 0.5em; - border: 1px solid #ccc; - background-color: #f8f8f8; -} - -pre a { - color: inherit; - text-decoration: underline; -} - -td.linenos pre { - padding: 0.5em 0; -} - -div.quotebar { - background-color: #f8f8f8; - max-width: 250px; - float: right; - padding: 2px 7px; - border: 1px solid #ccc; -} - -div.topic { - background-color: #f8f8f8; -} - -table { - border-collapse: collapse; - margin: 0 -0.5em 0 -0.5em; -} - -table td, table th { - padding: 0.2em 0.5em 0.2em 0.5em; -} - -div.admonition, div.warning { - font-size: 0.9em; - margin: 1em 0 1em 0; - border: 1px solid #86989B; - background-color: #f7f7f7; - padding: 0; -} - -div.admonition p, div.warning p { - margin: 0.5em 1em 0.5em 1em; - padding: 0; -} - -div.admonition pre, div.warning pre { - margin: 0.4em 1em 0.4em 1em; -} - -div.admonition p.admonition-title, -div.warning p.admonition-title { - margin: 0; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border-bottom: 1px solid #86989B; - font-weight: bold; - background-color: #AFC1C4; -} - -div.warning { - border: 1px solid #940000; -} - -div.warning p.admonition-title { - background-color: #CF0000; - border-bottom-color: #940000; -} - -div.admonition ul, div.admonition ol, -div.warning ul, div.warning ol { - margin: 0.1em 0.5em 0.5em 3em; - padding: 0; -} - -div.versioninfo { - margin: 1em 0 0 0; - border: 1px solid #ccc; - background-color: #DDEAF0; - padding: 8px; - line-height: 1.3em; - font-size: 0.9em; -} - -.viewcode-back { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid 
#ac9; - border-bottom: 1px solid #ac9; -} \ No newline at end of file diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 66add0cda8..a96d2dd40f 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -1,13 +1,44 @@ -0.6.0 -===== +0.6.0a +====== NEW --- +.. warning:: + + | **Python2 and 3.4 are no longer supported. We recommend upgrading to Python 3.6 minimum.** + | + | **Minimum supported versions of packages have been bumped up.** + | - Matplotlib -- v2.0 + | - Scikit-learn -- v0.19 + | - Scipy -- v0.19 + +- A new method for :class:`nilearn.input_data.NiftiMasker` instances + for generating reports viewable in a web browser, Jupyter Notebook, or VSCode. + +- joblib is now a dependency + +- Parcellation method ReNA: Fast agglomerative clustering based on recursive + nearest neighbor grouping. + Yields very fast & accurate models, without creation of giant + clusters. + :class:`nilearn.regions.ReNA` + +- Optimization to image resampling + :func:`nilearn.image.resample_img` has been optimized to pad rather than + resample images in the special case when there is only a translation + between two spaces. This is a common case in :class:`nilearn.input_data.NiftiMasker` + when using the `mask_strategy="template"` option for brains in MNI space. - New brain development fMRI dataset fetcher :func:`nilearn.datasets.fetch_development_fmri` can be used to download - movie-watching data in children and adults. A light-weight dataset implemented - for teaching and usage in the examples. + movie-watching data in children and adults. A light-weight dataset + implemented for teaching and usage in the examples. +- New example in `examples/05_advanced/plot_age_group_prediction_cross_val.py` + to compare methods for classifying subjects into age groups based on + functional connectivity. Similar example in + `examples/03_connectivity/plot_group_level_connectivity.py` simplified. + +- the Localizer dataset now follows the BIDS organization. Changes ------- @@ -15,6 +46,22 @@ Changes - All the connectivity examples are changed from ADHD to brain development fmri dataset. +- :func:`nilearn.plotting.view_img_on_surf`, :func:`nilearn.plotting.view_surf` + and :func:`nilearn.plotting.view_connectome` now allow disabling the colorbar, + and setting its height and the fontsize of its ticklabels. + +- :func:`nilearn.plotting.view_img_on_surf`, :func:`nilearn.plotting.view_surf` + and :func:`nilearn.plotting.view_connectome` can now display a title. + +- Rework of the standardize-options of :func:`nilearn.signal.clean` and the various Maskers + in `nilearn.input_data`. You can now set `standardize` to `zscore` or `psc`. `psc` stands + for `Percent Signal Change`, which can be a meaningful metric for BOLD. + +- :func:`nilearn.plotting.plot_img` now has explicit keyword arguments `bg_img`, + `vmin` and `vmax` to control the background image and the bounds of the + colormap. These arguments were already accepted in `kwargs` but not documented + before. + Fixes ----- @@ -28,6 +75,19 @@ Fixes - When :func:`nilearn.plotting.plot_surf_stat_map` is used with a thresholded map but without a background map, the surface mesh is displayed in half-transparent grey to maintain a 3D perception. +- :func:`nilearn.plotting.view_surf` now accepts surface data provided as a file + path. +- :func:`nilearn.plotting.plot_glass_brain` now correctly displays the left 'l' orientation even when + the given images are completely masked (empty images). 
+- :func:`nilearn.plotting.plot_matrix` called with labels=None, False, or an empty list now correctly disables labels. +- :func:`nilearn.plotting.plot_surf_roi` now takes `vmin` and `vmax` parameters. +- :func:`nilearn.datasets.fetch_surf_nki_enhanced` now downloads the correct + left and right functional surface data for each subject. +- :func:`nilearn.datasets.fetch_atlas_schaefer_2018` now downloads from release + version 0.14.3 (instead of 0.8.1) by default, which includes corrected region label + names along with 700 and 900 region parcellations. +- Colormap creation functions have been updated to avoid matplotlib deprecation warnings + about colormap reversal. 0.5.2 ===== @@ -75,6 +135,7 @@ NEW - NiftiLabelsMasker now consumes less memory when extracting the signal from a 3D/4D image. This is especially noteworthy when extracting signals from large 4D images. - New function :func:`nilearn.datasets.fetch_atlas_schaefer_2018` +- New function :func:`nilearn.datasets.fetch_coords_seitzman_2018` Changes ------- @@ -98,6 +159,13 @@ Changes - coords is now marker_coords - colors is now marker_color +- :func:`nilearn.plotting.view_img_on_surf` now accepts a `symmetric_cmap` + argument to control whether the colormap is centered around 0 and a `vmin` + argument. + +- Users can now control the size and fontsize of colorbars in interactive + surface and connectome plots, or disable the colorbar. + Fixes ----- diff --git a/examples/01_plotting/plot_3d_map_to_surface_projection.py b/examples/01_plotting/plot_3d_map_to_surface_projection.py index c8d08a0e6e..0b01f4197e 100644 --- a/examples/01_plotting/plot_3d_map_to_surface_projection.py +++ b/examples/01_plotting/plot_3d_map_to_surface_projection.py @@ -85,15 +85,16 @@ view = plotting.view_surf(fsaverage.infl_right, texture, threshold='90%', bg_map=fsaverage.sulc_right) -# uncomment this to open the plot in a web browser: -# view.open_in_browser() -############################################################################## # In a Jupyter notebook, if ``view`` is the output of a cell, it will # be displayed below the cell - view +############################################################################## + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + ############################################################################## # We don't need to do the projection ourselves, we can use view_img_on_surf: diff --git a/examples/01_plotting/plot_demo_plotting.py b/examples/01_plotting/plot_demo_plotting.py index 629864a913..9123648522 100644 --- a/examples/01_plotting/plot_demo_plotting.py +++ b/examples/01_plotting/plot_demo_plotting.py @@ -33,6 +33,7 @@ motor_images = datasets.fetch_neurovault_motor_task() stat_img = motor_images.images[0] + ############################################################################### # Plotting statistical maps with function `plot_stat_map` # -------------------------------------------------------- @@ -54,15 +55,15 @@ # for more details.
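As an aside: the interactive views moved around in these hunks are nilearn HTML documents, so besides open_in_browser() they can also be written to a standalone HTML page. A minimal sketch of that workflow (our addition, not part of the PR), assuming `stat_img` as defined in this example; the output path is illustrative:

from nilearn import plotting

# view_img returns an HTML document; it can be saved as a self-contained page
view = plotting.view_img(stat_img, threshold=3)
view.save_as_html('stat_map_view.html')  # illustrative file name
# view.open_in_browser()                 # or open it directly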
view = plotting.view_img(stat_img, threshold=3) +# In a Jupyter notebook, if ``view`` is the output of a cell, it will +# be displayed below the cell +view + +############################################################################## # uncomment this to open the plot in a web browser: # view.open_in_browser() -############################################################################## -# In a Jupyter notebook, if ``view`` is the output of a cell, it will -# be displayed below the cell - -view ############################################################################### # Plotting statistical maps in a glass brain with function `plot_glass_brain` diff --git a/examples/01_plotting/plot_dim_plotting.py b/examples/01_plotting/plot_dim_plotting.py index afecc379c3..11253a33ad 100644 --- a/examples/01_plotting/plot_dim_plotting.py +++ b/examples/01_plotting/plot_dim_plotting.py @@ -21,9 +21,9 @@ localizer_dataset = datasets.fetch_localizer_button_task() # Contrast map of motor task -localizer_tmap_filename = localizer_dataset.tmap +localizer_tmap_filename = localizer_dataset.tmaps[0] # Subject specific anatomical image -localizer_anat_filename = localizer_dataset.anat +localizer_anat_filename = localizer_dataset.anats[0] ########################################################################### # Plotting with enhancement of background image with dim=-.5 # -------------------------------------------------------------------------- diff --git a/examples/01_plotting/plot_surf_atlas.py b/examples/01_plotting/plot_surf_atlas.py index 90fb6996dd..85b35a3dab 100644 --- a/examples/01_plotting/plot_surf_atlas.py +++ b/examples/01_plotting/plot_surf_atlas.py @@ -124,14 +124,14 @@ view = plotting.view_surf(fsaverage.infl_left, parcellation, cmap='gist_ncar', symmetric_cmap=False) -# uncomment this to open the plot in a web browser: -# view.open_in_browser() - -############################################################################## # In a Jupyter notebook, if ``view`` is the output of a cell, it will # be displayed below the cell view +############################################################################## + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() ############################################################################## # you can also use :func:`nilearn.plotting.view_connectome` to open an diff --git a/examples/01_plotting/plot_surface_projection_strategies.py b/examples/01_plotting/plot_surface_projection_strategies.py index 06d87f7596..15b614a5cf 100644 --- a/examples/01_plotting/plot_surface_projection_strategies.py +++ b/examples/01_plotting/plot_surface_projection_strategies.py @@ -61,7 +61,6 @@ for sample_points in [line_sample_points, ball_sample_points]: fig = plt.figure() ax = plt.subplot(projection='3d') - ax.set_aspect(1) ax.plot_trisurf(x, y, z, triangles=triangulation.triangles) diff --git a/examples/02_decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py index ba63648a27..ac72643b92 100644 --- a/examples/02_decoding/plot_haxby_stimuli.py +++ b/examples/02_decoding/plot_haxby_stimuli.py @@ -7,7 +7,6 @@ Cortex" (Science 2001) """ -from scipy.misc import imread import matplotlib.pyplot as plt from nilearn import datasets @@ -16,22 +15,19 @@ haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True) stimulus_information = haxby_dataset.stimuli -for stim_type in sorted(stimulus_information.keys()): - if stim_type == b'controls': - # skip control images, there are too many - continue - - file_names 
= stimulus_information[stim_type] - - plt.figure() - for i in range(48): - plt.subplot(6, 8, i + 1) - try: - plt.imshow(imread(file_names[i]), cmap=plt.cm.gray) - except: - # just go to the next one if the file is not present - pass - plt.axis("off") - plt.suptitle(stim_type) +for stim_type in stimulus_information: + # skip control images, there are too many + if stim_type != 'controls': + + file_names = stimulus_information[stim_type] + + fig, axes = plt.subplots(6, 8) + fig.suptitle(stim_type) + + for img_path, ax in zip(file_names, axes.ravel()): + ax.imshow(plt.imread(img_path), cmap=plt.cm.gray) + + for ax in axes.ravel(): + ax.axis("off") show() diff --git a/examples/02_decoding/plot_miyawaki_encoding.py b/examples/02_decoding/plot_miyawaki_encoding.py index 8520076823..e0e3b3697b 100644 --- a/examples/02_decoding/plot_miyawaki_encoding.py +++ b/examples/02_decoding/plot_miyawaki_encoding.py @@ -152,7 +152,7 @@ # bring the scores into the shape of the background brain score_map_img = masker.inverse_transform(cut_score) -thresholded_score_map_img = threshold_img(score_map_img, threshold=1e-6) +thresholded_score_map_img = threshold_img(score_map_img, threshold=1e-6, copy=False) ############################################################################## # Plotting the statistical map on a background brain, we mark four voxels diff --git a/examples/03_connectivity/plot_compare_decomposition.py b/examples/03_connectivity/plot_compare_decomposition.py index ff93baa186..29d0949a95 100644 --- a/examples/03_connectivity/plot_compare_decomposition.py +++ b/examples/03_connectivity/plot_compare_decomposition.py @@ -16,13 +16,11 @@ CanICA is an ICA method for group-level analysis of fMRI data. Compared to other strategies, it brings a well-controlled group model, as well as a thresholding algorithm controlling for specificity and sensitivity with -an explicit model of the signal. The reference papers are: +an explicit model of the signal. The reference paper is: * G. Varoquaux et al. "A group model for stable multi-subject ICA on fMRI datasets", NeuroImage Vol 51 (2010), p. 288-299 - -Pre-prints for both papers are available on hal -(http://hal.archives-ouvertes.fr) + `preprint `_ """ ############################################################################### @@ -39,7 +37,7 @@ #################################################################### -# Here we apply CanICA on the data +# Apply CanICA on the data # --------------------------------- # We use as "template" as a strategy to compute the mask, as this leads # to slightly faster and more reproducible results. 
However, the images @@ -64,7 +62,6 @@ #################################################################### # To visualize we plot the outline of all components on one figure -# ----------------------------------------------------------------- from nilearn.plotting import plot_prob_atlas # Plot all ICA components together @@ -73,7 +70,6 @@ #################################################################### # Finally, we plot the map for each ICA component separately -# ----------------------------------------------------------- from nilearn.image import iter_img from nilearn.plotting import plot_stat_map, show @@ -98,7 +94,6 @@ ############################################################################### # Create a dictionary learning estimator -# --------------------------------------------------------------- from nilearn.decomposition import DictLearning dict_learning = DictLearning(n_components=20, @@ -120,28 +115,21 @@ ############################################################################### # Visualize the results -# ---------------------- -from nilearn.plotting import find_xyz_cut_coords -from nilearn.image import index_img - -names = {dict_learning: 'DictionaryLearning', canica: 'CanICA'} -estimators = [canica, dict_learning] -components_imgs = [canica_components_img, dictlearning_components_img] - -# Selecting specific maps to display: maps were manually chosen to be similar -indices = {dict_learning: 8, canica: 14} -# We select relevant cut coordinates for displaying -cut_component = index_img(components_imgs[0], indices[dict_learning]) -cut_coords = find_xyz_cut_coords(cut_component) -for estimator, components in zip(estimators, components_imgs): - # 4D plotting - plot_prob_atlas(components, view_type="filled_contours", - title="%s" % names[estimator], - cut_coords=cut_coords, colorbar=False) - # 3D plotting - plot_stat_map(index_img(components, indices[estimator]), - title="%s" % names[estimator], - cut_coords=cut_coords, colorbar=False) +# +# First plot all DictLearning components together +plot_prob_atlas(dictlearning_components_img, + title='All DictLearning components') + + + +############################################################################### +# One plot of each component + +for i, cur_img in enumerate(iter_img(dictlearning_components_img)): + plot_stat_map(cur_img, display_mode="z", title="Comp %d" % i, + cut_coords=1, colorbar=False) + + show() ################################################################################ # .. note:: diff --git a/examples/03_connectivity/plot_data_driven_parcellations.py b/examples/03_connectivity/plot_data_driven_parcellations.py index 336095b9ce..552230d765 100644 --- a/examples/03_connectivity/plot_data_driven_parcellations.py +++ b/examples/03_connectivity/plot_data_driven_parcellations.py @@ -2,8 +2,8 @@ Clustering methods to learn a brain parcellation from fMRI ========================================================== -We use spatially-constrained Ward-clustering and KMeans to create a set -of parcels. +We use spatially-constrained Ward-clustering, KMeans, and Recursive Nearest +Agglomeration (ReNA) to create a set of parcels. In a high dimensional regime, these methods can be interesting to create a 'compressed' representation of the data, replacing the data @@ -92,7 +92,7 @@ ########################################################################### # Visualize: Brain parcellations (Ward) -# ------------------------------------- +# .....................................
# # First, we display the parcellations of the brain image stored in attribute # `labels_img_` @@ -110,9 +110,10 @@ # Grab cut coordinates from this plot to use as a common for all plots cut_coords = first_plot.cut_coords + ########################################################################### # Compressed representation of Ward clustering -# -------------------------------------------- +# ............................................ # # Second, we illustrate the effect that the clustering has on the signal. # We show the original data, and the approximation provided by the @@ -165,16 +166,17 @@ # This object uses method='kmeans' for KMeans clustering with 10mm smoothing # and standardization ON +start = time.time() kmeans = Parcellations(method='kmeans', n_parcels=50, standardize=True, smoothing_fwhm=10., memory='nilearn_cache', memory_level=1, verbose=1) # Call fit on functional dataset: single subject (less samples) kmeans.fit(dataset.func) print("KMeans 50 clusters: %.2fs" % (time.time() - start)) ########################################################################### # Visualize: Brain parcellations (KMeans) -# --------------------------------------- +# ....................................... # # Grab parcellations of brain image stored in attribute `labels_img_` kmeans_labels_img = kmeans.labels_img_ @@ -188,6 +190,81 @@ kmeans_labels_img.to_filename('kmeans_parcellation.nii.gz') ################################################################## -# Finally show them +# Brain parcellations with ReNA Clustering +# ---------------------------------------- +# +# One interesting algorithmic property of ReNA (see References) is that +# it is very fast for a large number of parcels (notably faster than Ward). +# As before, the parcellation is done with a Parcellations object. +# The spatial constraints are implemented inside the Parcellations object. +# +# References +# .......... +# +# More about the ReNA clustering algorithm in the original paper +# +# * A. Hoyos-Idrobo, G. Varoquaux, J. Kahn and B. Thirion, "Recursive +# Nearest Agglomeration (ReNA): Fast Clustering for Approximation of +# Structured Signals," in IEEE Transactions on Pattern Analysis and +# Machine Intelligence, vol. 41, no. 3, pp. 669-681, 1 March 2019. +# https://hal.archives-ouvertes.fr/hal-01366651/ +start = time.time() +rena = Parcellations(method='rena', n_parcels=5000, standardize=False, + smoothing_fwhm=2., scaling=True) + +rena.fit_transform(dataset.func) +print("ReNA 5000 clusters: %.2fs" % (time.time() - start)) + +################################################################## +# Visualize: Brain parcellations (ReNA) +# ..................................... +# +# First, we display the parcellations of the brain image stored in attribute +# `labels_img_` +rena_labels_img = rena.labels_img_ + +# Now, rena_labels_img is a Nifti1Image object; it can be saved to a file +# with the following code: +rena_labels_img.to_filename('rena_parcellation.nii.gz') + +plotting.plot_roi(rena_labels_img, title="ReNA parcellation", + display_mode='xz', cut_coords=cut_coords) + +################################################################## +# Compressed representation of ReNA clustering +# ............................................ +# +# We illustrate the effect that the clustering has on the signal. +# We show the original data, and the approximation provided by +# the clustering by averaging the signal on each parcel. +# +# We can then compare the results with the compressed representation +# obtained with Ward.
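To make that comparison quantitative as well as visual, one option (our addition; `compressed_img_rena` is built just below, and the Ward counterpart comes from the previous section) is a relative reconstruction error:

import numpy as np
from nilearn.image import index_img

def compression_error(original_img, compressed_img):
    # Relative L2 error between an image and its compressed version.
    original = np.asarray(original_img.dataobj)
    compressed = np.asarray(compressed_img.dataobj)
    return np.linalg.norm(original - compressed) / np.linalg.norm(original)

# Hypothetical usage once both compressed images exist:
# err_rena = compression_error(index_img(dataset.func[0], 0),
#                              index_img(compressed_img_rena, 0))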
+ +# Display the original data plotting.plot_epi(mean_func_img, cut_coords=cut_coords, + title='Original (%i voxels)' % original_voxels, + vmax=vmax, vmin=vmin, display_mode='xz') + +# Reduced data can be created by taking the parcel-level average: +# Note that, as with many scikit-learn objects, the ReNA object exposes +# a transform method that modifies input features. Here it reduces their +# dimension. +# However, the data are in one single large 4D image, so we need to use +# index_img to split it easily: +fmri_reduced_rena = rena.transform(dataset.func) + +# Display the corresponding data compression using the parcellation +compressed_img_rena = rena.inverse_transform(fmri_reduced_rena) + +plotting.plot_epi(index_img(compressed_img_rena, 0), cut_coords=cut_coords, + title='ReNA compressed representation (5000 parcels)', + vmin=vmin, vmax=vmax, display_mode='xz') -plotting.show() +#################################################################### +# Even though the compressed signal is relatively close +# to the original signal, Ward clustering +# gives a slightly more accurate compressed representation. +# However, as noted in the previous section, ReNA reduces the +# computation time, which can still make it more relevant than Ward in +# some cases. diff --git a/examples/03_connectivity/plot_group_level_connectivity.py b/examples/03_connectivity/plot_group_level_connectivity.py index cf15f87899..daf21f8f70 100644 --- a/examples/03_connectivity/plot_group_level_connectivity.py +++ b/examples/03_connectivity/plot_group_level_connectivity.py @@ -1,35 +1,17 @@ """ -Functional connectivity matrices for group analysis of connectomes -================================================================== +Classification of age groups using functional connectivity +========================================================== This example compares different kinds of functional connectivity between -regions of interest : correlation, partial correlation, as well as a kind -called **tangent**. +regions of interest: correlation, partial correlation, and tangent space +embedding. The resulting connectivity coefficients can be used to -discriminate children from adults. In general, the **tangent kind** +discriminate children from adults. In general, the tangent space embedding **outperforms** the standard correlations: see `Dadi et al 2019 `_ for a careful study. """ -# Matrix plotting from Nilearn: nilearn.plotting.plot_matrix -import numpy as np -import matplotlib.pylab as plt - - -def plot_matrices(matrices, matrix_kind): - n_matrices = len(matrices) - fig = plt.figure(figsize=(n_matrices * 4, 4)) - for n_subject, matrix in enumerate(matrices): - plt.subplot(1, n_matrices, n_subject + 1) - matrix = matrix.copy() # avoid side effects - # Set diagonal to zero, for better visualization - np.fill_diagonal(matrix, 0) - vmax = np.max(np.abs(matrix)) - title = '{0}, subject {1}'.format(matrix_kind, n_subject) - plotting.plot_matrix(matrix, vmin=-vmax, vmax=vmax, cmap='RdBu_r', - title=title, figure=fig, colorbar=False) - ############################################################################### # Load brain development fMRI dataset and MSDL atlas # ------------------------------------------------------------------- # We study only 30 subjects from the dataset, to save computation time.
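For reference, the tangent space embedding mentioned in the introduction of this example whitens each subject's covariance by the group geometric mean and applies the matrix logarithm. A schematic sketch of that computation (our summary; ConnectivityMeasure(kind='tangent') handles these details, including the geometric mean, internally):

from scipy.linalg import logm, fractional_matrix_power

def tangent_embedding(cov, geometric_mean):
    # T = logm(G^{-1/2} C G^{-1/2}): a symmetric matrix of deviations
    # from the group mean; roughly what kind='tangent' computes per subject.
    whitener = fractional_matrix_power(geometric_mean, -0.5)
    return logm(whitener @ cov @ whitener)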
from nilearn import datasets -rest_data = datasets.fetch_development_fmri(n_subjects=30) +development_dataset = datasets.fetch_development_fmri(n_subjects=30) ############################################################################### # We use probabilistic regions of interest (ROIs) from the MSDL atlas. @@ -57,7 +39,7 @@ def plot_matrices(matrices, matrix_kind): masker = input_data.NiftiMapsMasker( msdl_data.maps, resampling_target="data", t_r=2, detrend=True, - low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1) + low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1).fit() ############################################################################### # Then we compute region signals and extract useful phenotypic informations. @@ -65,13 +47,13 @@ def plot_matrices(matrices, matrix_kind): pooled_subjects = [] groups = [] # child or adult for func_file, confound_file, phenotypic in zip( - rest_data.func, rest_data.confounds, rest_data.phenotypic): - time_series = masker.fit_transform(func_file, confounds=confound_file) + development_dataset.func, + development_dataset.confounds, + development_dataset.phenotypic): + time_series = masker.transform(func_file, confounds=confound_file) pooled_subjects.append(time_series) - is_child = phenotypic['Child_Adult'] == 'child' - if is_child: + if phenotypic['Child_Adult'] == 'child': children.append(time_series) - groups.append(phenotypic['Child_Adult']) print('Data has {0} children.'.format(len(children))) @@ -101,10 +83,14 @@ def plot_matrices(matrices, matrix_kind): print('Mean correlation has shape {0}.'.format(mean_correlation_matrix.shape)) ############################################################################### -# We display the connectome matrices of the first 4 children +# We display the connectome matrices of the first 3 children from nilearn import plotting +from matplotlib import pyplot as plt -plot_matrices(correlation_matrices[:4], 'correlation') +_, axes = plt.subplots(1, 3, figsize=(15, 5)) +for i, (matrix, ax) in enumerate(zip(correlation_matrices, axes)): + plotting.plot_matrix(matrix, tri='lower', colorbar=False, axes=ax, + title='correlation, child {}'.format(i)) ############################################################################### # The blocks structure that reflect functional networks are visible. @@ -119,19 +105,17 @@ def plot_matrices(matrices, matrix_kind): # We can also study **direct connections**, revealed by partial correlation # coefficients. We just change the `ConnectivityMeasure` kind partial_correlation_measure = ConnectivityMeasure(kind='partial correlation') - -############################################################################### -# and repeat the previous operation. partial_correlation_matrices = partial_correlation_measure.fit_transform( children) ############################################################################### -# Most of direct connections are weaker than full connections, -plot_matrices(partial_correlation_matrices[:4], 'partial') +# Most direct connections are weaker than full connections.
+_, axes = plt.subplots(1, 3, figsize=(15, 5)) +for i, (matrix, ax) in enumerate(zip(partial_correlation_matrices, axes)): + plotting.plot_matrix(matrix, tri='lower', colorbar=False, axes=ax, + title='partial correlation, child {}'.format(i)) ############################################################################### -# Compared to a connectome computed on correlations, the connectome graph -# with partial correlations is more sparse: plotting.plot_connectome( partial_correlation_measure.mean_, msdl_coords, title='mean partial correlation over all children') @@ -140,8 +124,8 @@ def plot_matrices(matrices, matrix_kind): # Extract subjects variabilities around a group connectivity # ---------------------------------------------------------- # We can use **both** correlations and partial correlations to capture -# reproducible connectivity patterns at the group-level and build a **robust** -# **group connectivity matrix**. This is done by the **tangent** kind. +# reproducible connectivity patterns at the group-level. +# This is done by the tangent space embedding. tangent_measure = ConnectivityMeasure(kind='tangent') ############################################################################### @@ -154,84 +138,80 @@ def plot_matrices(matrices, matrix_kind): # `tangent_matrices` model individual connectivities as # **perturbations** of the group connectivity matrix `tangent_measure.mean_`. # Keep in mind that these subjects-to-group variability matrices do not -# straight reflect individual brain connections. For instance negative +# directly reflect individual brain connections. For instance negative # coefficients can not be interpreted as anticorrelated regions. -plot_matrices(tangent_matrices[:4], 'tangent variability') +_, axes = plt.subplots(1, 3, figsize=(15, 5)) +for i, (matrix, ax) in enumerate(zip(tangent_matrices, axes)): + plotting.plot_matrix(matrix, tri='lower', colorbar=False, axes=ax, + title='tangent offset, child {}'.format(i)) + ############################################################################### -# The average tangent matrix cannot be interpreted, as the average -# variation is expected to be zero +# The average tangent matrix cannot be interpreted, as individual matrices +# represent deviations from the mean, which is set to 0. ############################################################################### # What kind of connectivity is most powerful for classification? # -------------------------------------------------------------- -# *ConnectivityMeasure* can output the estimated subjects coefficients -# as a 1D arrays through the parameter *vectorize*. -connectivity_biomarkers = {} -kinds = ['correlation', 'partial correlation', 'tangent'] -for kind in kinds: - conn_measure = ConnectivityMeasure(kind=kind, vectorize=True) - connectivity_biomarkers[kind] = conn_measure.fit_transform(pooled_subjects) - -# For each kind, all individual coefficients are stacked in a unique 2D matrix. -print('{0} correlation biomarkers for each subject.'.format( - connectivity_biomarkers['correlation'].shape[1])) - -############################################################################### -# Note that we use the **pooled groups**. This is crucial for **tangent** kind, -# to get the displacements from a **unique** `mean_` of all subjects. - -############################################################################### -# We stratify the dataset into homogeneous classes according to phenotypic -# and scan site. 
We then split the subjects into 3 folds with the same -proportion of each class as in the whole cohort -from sklearn.model_selection import StratifiedKFold +# We will use connectivity matrices as features to distinguish children from +# adults. We use cross-validation and measure classification accuracy to +# compare the different kinds of connectivity matrices. +# We use random splits of the subjects into training/testing sets. +# StratifiedShuffleSplit allows preserving the proportion of children in the +# test set. +from sklearn.svm import LinearSVC +from sklearn.model_selection import StratifiedShuffleSplit +from sklearn.metrics import accuracy_score +import numpy as np +kinds = ['correlation', 'partial correlation', 'tangent'] _, classes = np.unique(groups, return_inverse=True) -cv = StratifiedKFold(n_splits=3) -############################################################################### -# and use the connectivity coefficients to classify children vs adults. +cv = StratifiedShuffleSplit(n_splits=15, random_state=0, test_size=5) +pooled_subjects = np.asarray(pooled_subjects) -# Note that in cv.split(X, y), -# providing y is sufficient to generate the splits and -# hence np.zeros(n_samples) may be used as a placeholder for X -# instead of actual training data. -from sklearn.svm import LinearSVC -from sklearn.model_selection import cross_val_score - -mean_scores = [] +scores = {} for kind in kinds: - svc = LinearSVC(random_state=0) - cv_scores = cross_val_score(svc, - connectivity_biomarkers[kind], - y=classes, - cv=cv, - groups=groups, - scoring='accuracy', - ) - mean_scores.append(cv_scores.mean()) - -############################################################################### -# Finally, we can display the classification scores. - -############################################################################### -# Finally, we can display the classification scores. -from nilearn.plotting import show + scores[kind] = [] + for train, test in cv.split(pooled_subjects, classes): + # *ConnectivityMeasure* can output the estimated subject coefficients + # as 1D arrays through the parameter *vectorize*.
+ connectivity = ConnectivityMeasure(kind=kind, vectorize=True) + # build vectorized connectomes for subjects in the train set + connectomes = connectivity.fit_transform(pooled_subjects[train]) + # fit the classifier + classifier = LinearSVC().fit(connectomes, classes[train]) + # make predictions for the left-out test subjects + predictions = classifier.predict( + connectivity.transform(pooled_subjects[test])) + # store the accuracy for this cross-validation fold + scores[kind].append(accuracy_score(classes[test], predictions)) + + ###################################################################### +# display the results + +mean_scores = [np.mean(scores[kind]) for kind in kinds] +scores_std = [np.std(scores[kind]) for kind in kinds] plt.figure(figsize=(6, 4)) positions = np.arange(len(kinds)) * .1 + .1 -plt.barh(positions, mean_scores, align='center', height=.05) -yticks = [kind.replace(' ', '\n') for kind in kinds] +plt.barh(positions, mean_scores, align='center', height=.05, xerr=scores_std) +yticks = [k.replace(' ', '\n') for k in kinds] plt.yticks(positions, yticks) -plt.xlabel('Classification accuracy') -plt.grid(True) +plt.gca().grid(True) +plt.gca().set_axisbelow(True) +plt.gca().axvline(.8, color='red', linestyle='--') +plt.xlabel('Classification accuracy\n(red line = chance level)') plt.tight_layout() + ############################################################################### -# While the comparison is not fully conclusive on this small dataset, +# This is a small example to showcase nilearn features. In practice such +# comparisons need to be performed on much larger cohorts and several +# datasets. # `Dadi et al 2019 # `_ # Showed that across many cohorts and clinical questions, the tangent # kind should be preferred. -show() +plotting.show() diff --git a/examples/03_connectivity/plot_inverse_covariance_connectome.py b/examples/03_connectivity/plot_inverse_covariance_connectome.py index 5ee1cef823..88c430659b 100644 --- a/examples/03_connectivity/plot_inverse_covariance_connectome.py +++ b/examples/03_connectivity/plot_inverse_covariance_connectome.py @@ -51,9 +51,13 @@ ############################################################################## # Compute the sparse inverse covariance # -------------------------------------- -from sklearn.covariance import GraphLassoCV -estimator = GraphLassoCV() +try: + from sklearn.covariance import GraphicalLassoCV +except ImportError: + # for Scikit-Learn < v0.20.0 + from sklearn.covariance import GraphLassoCV as GraphicalLassoCV +estimator = GraphicalLassoCV() estimator.fit(time_series) ############################################################################## @@ -103,11 +107,12 @@ view = plotting.view_connectome(-estimator.precision_, coords) -# uncomment this to open the plot in a web browser: -# view.open_in_browser() - -############################################################################## # In a Jupyter notebook, if ``view`` is the output of a cell, it will # be displayed below the cell - view + +############################################################################## + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + diff --git a/examples/03_connectivity/plot_multi_subject_connectome.py b/examples/03_connectivity/plot_multi_subject_connectome.py index c7f45f0ce4..78dc2ce2b6 100644 --- a/examples/03_connectivity/plot_multi_subject_connectome.py +++ b/examples/03_connectivity/plot_multi_subject_connectome.py @@ -52,7 +52,7 @@ def plot_matrices(cov, prec, title, labels): from
nilearn import input_data # A "memory" to avoid recomputation -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory mem = Memory('nilearn_cache') masker = input_data.NiftiMapsMasker( @@ -84,8 +84,13 @@ def plot_matrices(cov, prec, title, labels): gsc = GroupSparseCovarianceCV(verbose=2) gsc.fit(subject_time_series) -from sklearn import covariance -gl = covariance.GraphLassoCV(verbose=2) +try: + from sklearn.covariance import GraphicalLassoCV +except ImportError: + # for Scikit-Learn < v0.20.0 + from sklearn.covariance import GraphLassoCV as GraphicalLassoCV + +gl = GraphicalLassoCV(verbose=2) gl.fit(np.concatenate(subject_time_series)) @@ -102,10 +107,10 @@ def plot_matrices(cov, prec, title, labels): display_mode="lzr") plotting.plot_connectome(-gl.precision_, atlas_region_coords, edge_threshold='90%', - title="Sparse inverse covariance (GraphLasso)", + title="Sparse inverse covariance (GraphicalLasso)", display_mode="lzr", edge_vmax=.5, edge_vmin=-.5) -plot_matrices(gl.covariance_, gl.precision_, "GraphLasso", labels) +plot_matrices(gl.covariance_, gl.precision_, "GraphicalLasso", labels) title = "GroupSparseCovariance" plotting.plot_connectome(-gsc.precisions_[..., 0], diff --git a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py index 62293fb4f0..21fb2153b6 100644 --- a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py +++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py @@ -1,6 +1,6 @@ """ -Extracting signals of a probabilistic atlas of movie watching functional regions -================================================================================ +Extracting signals of a probabilistic atlas of functional regions +================================================================= This example extracts the signal on regions defined via a probabilistic atlas, to construct a functional connectome.
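The GraphicalLassoCV try/except fallback now recurs across several of these connectome examples; if it keeps spreading, it could be factored into a single helper module. A sketch (ours; this module is hypothetical and not part of nilearn or this PR):

# compat_glasso.py -- hypothetical shared helper
try:
    from sklearn.covariance import GraphicalLassoCV
except ImportError:
    # scikit-learn < 0.20 used the GraphLassoCV spelling
    from sklearn.covariance import GraphLassoCV as GraphicalLassoCV

__all__ = ['GraphicalLassoCV']

Each example could then use a single import: from compat_glasso import GraphicalLassoCV.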
@@ -88,11 +88,12 @@ view = plotting.view_connectome(correlation_matrix, coords, threshold='80%') -# uncomment this to open the plot in a web browser: -# view.open_in_browser() - -############################################################################## # In a Jupyter notebook, if ``view`` is the output of a cell, it will # be displayed below the cell - view + +############################################################################## + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + diff --git a/examples/03_connectivity/plot_simulated_connectome.py b/examples/03_connectivity/plot_simulated_connectome.py index 385676c08d..3df491300c 100644 --- a/examples/03_connectivity/plot_simulated_connectome.py +++ b/examples/03_connectivity/plot_simulated_connectome.py @@ -49,8 +49,13 @@ # Fit one graph lasso per subject -from sklearn.covariance import GraphLassoCV -gl = GraphLassoCV(verbose=1) +try: + from sklearn.covariance import GraphicalLassoCV +except ImportError: + # for Scikit-Learn < v0.20.0 + from sklearn.covariance import GraphLassoCV as GraphicalLassoCV + +gl = GraphicalLassoCV(verbose=1) for n, subject in enumerate(subjects[:n_displayed]): gl.fit(subject) diff --git a/examples/03_connectivity/plot_sphere_based_connectome.py b/examples/03_connectivity/plot_sphere_based_connectome.py index aa69f2d4f4..1559bb760d 100644 --- a/examples/03_connectivity/plot_sphere_based_connectome.py +++ b/examples/03_connectivity/plot_sphere_based_connectome.py @@ -1,10 +1,11 @@ """ Extract signals on spheres and plot a connectome -================================================ +============================================================== This example shows how to extract signals from spherical regions. We show how to build spheres around user-defined coordinates, as well as -centered on coordinates from Power-264 atlas [1] and Dosenbach-160 [2]. +centered on coordinates from the Power-264 atlas [1], the Dosenbach-160 +atlas [2], and the Seitzman-300 atlas [3]. **References** @@ -14,6 +15,11 @@ [2] Dosenbach N.U., Nardos B., et al. "Prediction of individual brain maturity using fMRI.", 2010, Science 329, 1358-1361. +[3] `Seitzman, B. A., et al. "A set of functionally-defined brain regions with +improved representation of the subcortex and cerebellum.", 2018, bioRxiv, +450452 +`_ + We estimate connectomes using two different methods: **sparse inverse covariance** and **partial_correlation**, to recover the functional brain **networks structure**. @@ -64,10 +70,10 @@ masker = input_data.NiftiSpheresMasker( dmn_coords, radius=8, detrend=True, standardize=True, - low_pass=0.1, high_pass=0.01, t_r=2.5, + low_pass=0.1, high_pass=0.01, t_r=2, memory='nilearn_cache', memory_level=1, verbose=2) -# Additionally, we pass confound information so ensure our extracted +# Additionally, we pass confound information to ensure our extracted # signal is cleaned from confounds.
func_filename = dataset.func[16] @@ -138,25 +144,24 @@ view = plotting.view_connectome(partial_correlation_matrix, dmn_coords) -# uncomment this to open the plot in a web browser: -# view.open_in_browser() - - -############################################################################## # In a Jupyter notebook, if ``view`` is the output of a cell, it will # be displayed below the cell - view +############################################################################## + +# uncomment this to open the plot in a web browser: +# view.open_in_browser() + ########################################################################## # Extract signals on spheres from an atlas # ---------------------------------------- -# +# Next, instead of supplying our own coordinates, we will use coordinates # generated at the center of mass of regions from two different atlases. # This time, we'll use a different correlation measure. -# +# First we fetch the coordinates of the Power atlas power = datasets.fetch_coords_power_2011() @@ -188,7 +193,7 @@ # and define spheres masker, with small enough radius to avoid regions overlap. spheres_masker = input_data.NiftiSpheresMasker( - seeds=coords, smoothing_fwhm=4, radius=5., + seeds=coords, smoothing_fwhm=6, radius=5., detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2) timeseries = spheres_masker.fit_transform(func_filename, @@ -207,13 +212,17 @@ ############################################################################### # in which situation the graphical lasso **sparse inverse covariance** # estimator captures well the covariance **structure**. -from sklearn.covariance import GraphLassoCV +try: + from sklearn.covariance import GraphicalLassoCV +except ImportError: + # for Scikit-Learn < v0.20.0 + from sklearn.covariance import GraphLassoCV as GraphicalLassoCV -covariance_estimator = GraphLassoCV(cv=3, verbose=1) +covariance_estimator = GraphicalLassoCV(cv=3, verbose=1) ############################################################################### -# We just fit our regions signals into the `GraphLassoCV` object +# We just fit our region signals into the `GraphicalLassoCV` object covariance_estimator.fit(timeseries) @@ -289,13 +298,13 @@ )).T spheres_masker = input_data.NiftiSpheresMasker( - seeds=coords, smoothing_fwhm=4, radius=4.5, + seeds=coords, smoothing_fwhm=6, radius=4.5, detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2) timeseries = spheres_masker.fit_transform(func_filename, confounds=confounds_filename) -covariance_estimator = GraphLassoCV() +covariance_estimator = GraphicalLassoCV() covariance_estimator.fit(timeseries) matrix = covariance_estimator.covariance_ @@ -323,12 +332,58 @@ plotting.show() + +############################################################################### +# Connectome extracted from Seitzman's atlas +# ----------------------------------------------------- +# We repeat the same steps for Seitzman's atlas. +seitzman = datasets.fetch_coords_seitzman_2018() + +coords = np.vstack(( + seitzman.rois['x'], + seitzman.rois['y'], + seitzman.rois['z'], +)).T + +############################################################################### +# Before calculating the connectivity matrix, let's look at the distribution +# of the regions of the default mode network.
+dmn_rois = seitzman.networks == "DefaultMode" +dmn_coords = coords[dmn_rois] +zero_matrix = np.zeros((len(dmn_coords), len(dmn_coords))) +plotting.plot_connectome(zero_matrix, dmn_coords, + title='Seitzman default mode network', + node_color='darkred', node_size=20) + +############################################################################### +# Now let's calculate connectivity for the Seitzman atlas. +spheres_masker = input_data.NiftiSpheresMasker( + seeds=coords, smoothing_fwhm=6, radius=4, + detrend=True, standardize=True, low_pass=0.1, high_pass=0.01, t_r=2, + allow_overlap=True) + +timeseries = spheres_masker.fit_transform(func_filename, + confounds=confounds_filename) + +covariance_estimator = GraphicalLassoCV() +covariance_estimator.fit(timeseries) +matrix = covariance_estimator.covariance_ + +plotting.plot_matrix(matrix, vmin=-1., vmax=1., colorbar=True, + title='Seitzman correlation matrix') + +plotting.plot_connectome(matrix, coords, title='Seitzman correlation graph', + edge_threshold="99.7%", node_size=20, colorbar=True) + + +############################################################################### +# We can easily identify the networks from the matrix blocks. +print('Seitzman network names are {0}'.format(np.unique(seitzman.networks))) +plotting.show() + ############################################################################### # .. seealso:: # -# :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` +# * :ref:`sphx_glr_auto_examples_03_connectivity_plot_atlas_comparison.py` # -# .. seealso:: -# -# :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py` - +# * :ref:`sphx_glr_auto_examples_03_connectivity_plot_multi_subject_connectome.py` diff --git a/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py index 72773ff068..d63f7518c6 100644 --- a/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py +++ b/examples/04_manipulating_images/plot_extract_rois_statistical_maps.py @@ -28,12 +28,12 @@ # Two types of strategies can be used from this threshold function # Type 1: strategy used will be based on scoreatpercentile -threshold_percentile_img = threshold_img(tmap_filename, threshold='97%') +threshold_percentile_img = threshold_img(tmap_filename, threshold='97%', copy=False) # Type 2: threshold strategy used will be based on image intensity # Here, threshold value should be within the limits i.e. less than max value.
-threshold_value_img = threshold_img(tmap_filename, threshold=3.0) +threshold_value_img = threshold_img(tmap_filename, threshold=3.0, copy=False) ################################################################################ # Visualization diff --git a/examples/04_manipulating_images/plot_mask_computation.py b/examples/04_manipulating_images/plot_mask_computation.py index d6b76db8e2..470a2506c4 100644 --- a/examples/04_manipulating_images/plot_mask_computation.py +++ b/examples/04_manipulating_images/plot_mask_computation.py @@ -17,7 +17,6 @@ """ - from nilearn.input_data import NiftiMasker import nilearn.image as image from nilearn.plotting import plot_roi, plot_epi, show @@ -48,10 +47,15 @@ masker = NiftiMasker() masker.fit(miyawaki_filename) -# Plot the generated mask +# Plot the generated mask using the mask_img_ attribute plot_roi(masker.mask_img_, miyawaki_mean_img, title="Mask from already masked data") +############################################################################### +# Plot the generated mask using the .generate_report method +report = masker.generate_report() +report + ############################################################################### # Computing a mask from raw EPI data @@ -77,7 +81,8 @@ # We need to specify an 'epi' mask_strategy, as this is raw EPI data masker = NiftiMasker(mask_strategy='epi') masker.fit(epi_img) -plot_roi(masker.mask_img_, mean_img, title='EPI automatic mask') +report = masker.generate_report() +report ############################################################################### # Generate mask with strong opening @@ -90,7 +95,8 @@ # skull parts in the image. masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10)) masker.fit(epi_img) -plot_roi(masker.mask_img_, mean_img, title='EPI Mask with strong opening') +report = masker.generate_report() +report ############################################################################### # Generate mask with a high lower cutoff @@ -107,8 +113,8 @@ mask_args=dict(upper_cutoff=.9, lower_cutoff=.8, opening=False)) masker.fit(epi_img) -plot_roi(masker.mask_img_, mean_img, - title='EPI Mask: high lower_cutoff') +report = masker.generate_report() +report ############################################################################### # Computing the mask from the MNI template @@ -119,9 +125,27 @@ masker = NiftiMasker(mask_strategy='template') masker.fit(epi_img) -plot_roi(masker.mask_img_, mean_img, - title='Mask from template') +report = masker.generate_report() +report + +############################################################################### +# Compute and resample a mask +############################################################################### +# +# NiftiMasker also allows passing parameters directly to `image.resample_img`. +# We can specify a `target_affine`, a `target_shape`, or both. +# For more information on these arguments, +# see :doc:`plot_affine_transformation`. +# +# The NiftiMasker report allows us to see the mask before and after resampling. +# Simply hover over the report to see the mask from the original image. 
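Note that the report returned by generate_report() behaves like the other nilearn HTML documents: besides rendering inline in a notebook, it can be saved to disk or opened directly, methods the plot_nifti_simple.py changes later in this diff also mention. A short sketch (the file name is illustrative):

report = masker.generate_report()
report.save_as_html('masker_report.html')  # write a standalone page
# report.open_in_browser()                 # or open it interactively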
+ +import numpy as np +masker = NiftiMasker(mask_strategy='epi', target_affine=np.eye(3) * 8) +masker.fit(epi_img) +report = masker.generate_report() +report ############################################################################### # After mask computation: extracting time series @@ -136,7 +160,6 @@ detrended_data = detrended.fit_transform(epi_img) # The timeseries are numpy arrays, so we can manipulate them with numpy -import numpy as np print("Trended: mean %.2f, std %.2f" % (np.mean(trended_data), np.std(trended_data))) diff --git a/examples/04_manipulating_images/plot_nifti_simple.py b/examples/04_manipulating_images/plot_nifti_simple.py index 6f3045f4c9..b24122490c 100644 --- a/examples/04_manipulating_images/plot_nifti_simple.py +++ b/examples/04_manipulating_images/plot_nifti_simple.py @@ -10,7 +10,7 @@ # Retrieve the brain development functional dataset from nilearn import datasets -dataset = datasets.fetch_haxby() +dataset = datasets.fetch_development_fmri(n_subjects=1) func_filename = dataset.func[0] # print basic information on the dataset @@ -30,7 +30,7 @@ mask_img = nifti_masker.mask_img_ ########################################################################### -# Visualize the mask +# Visualize the mask using the plot_roi method from nilearn.plotting import plot_roi from nilearn.image.image import mean_img @@ -39,6 +39,13 @@ plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask") +########################################################################### +# Visualize the mask using the 'generate_report' method +# This report can be displayed in a Jupyter Notebook, +# opened in-browser using the .open_in_browser() method, +# or saved to a file using the .save_as_html(output_filepath) method. +report = nifti_masker.generate_report() +report ########################################################################### # Preprocess data with the NiftiMasker @@ -60,6 +67,10 @@ # Visualize results from nilearn.plotting import plot_stat_map, show from nilearn.image import index_img +from nilearn.image.image import mean_img + +# calculate mean image for the background +mean_func_img = mean_img(func_filename) plot_stat_map(index_img(components, 0), mean_func_img, display_mode='y', cut_coords=4, title="Component 0") diff --git a/examples/05_advanced/plot_age_group_prediction_cross_val.py b/examples/05_advanced/plot_age_group_prediction_cross_val.py new file mode 100644 index 0000000000..8a4f193efe --- /dev/null +++ b/examples/05_advanced/plot_age_group_prediction_cross_val.py @@ -0,0 +1,105 @@ +""" +Functional connectivity predicts age group +========================================== + +This example compares different kinds of functional connectivity between +regions of interest : correlation, partial correlation, and tangent space +embedding. + +The resulting connectivity coefficients can be used to +discriminate children from adults. In general, the tangent space embedding +**outperforms** the standard correlations: see `Dadi et al 2019 +`_ +for a careful study. +""" + +############################################################################### +# Load brain development fMRI dataset and MSDL atlas +# ------------------------------------------------------------------- +# We study only 60 subjects from the dataset, to save computation time. 
+from nilearn import datasets + +development_dataset = datasets.fetch_development_fmri(n_subjects=60) + +############################################################################### +# We use probabilistic regions of interest (ROIs) from the MSDL atlas. +from nilearn.input_data import NiftiMapsMasker + +msdl_data = datasets.fetch_atlas_msdl() +msdl_coords = msdl_data.region_coords + +masker = NiftiMapsMasker( + msdl_data.maps, resampling_target="data", t_r=2, detrend=True, + low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1).fit() +masked_data = [masker.transform(func, confounds) for + (func, confounds) in zip( + development_dataset.func, development_dataset.confounds)] + +############################################################################### +# What kind of connectivity is most powerful for classification? +# -------------------------------------------------------------- +# We will use connectivity matrices as features to distinguish children from +# adults. We use cross-validation and measure classification accuracy to +# compare the different kinds of connectivity matrices. + +# prepare the classification pipeline +from sklearn.pipeline import Pipeline +from nilearn.connectome import ConnectivityMeasure +from sklearn.svm import LinearSVC +from sklearn.dummy import DummyClassifier +from sklearn.model_selection import GridSearchCV + +kinds = ['correlation', 'partial correlation', 'tangent'] + +pipe = Pipeline( + [('connectivity', ConnectivityMeasure(vectorize=True)), + ('classifier', GridSearchCV(LinearSVC(), {'C': [.1, 1., 10.]}, cv=5))]) + +param_grid = [ + {'classifier': [DummyClassifier('most_frequent')]}, + {'connectivity__kind': kinds} +] + +###################################################################### +# We use random splits of the subjects into training/testing sets. +# StratifiedShuffleSplit allows preserving the proportion of children in the +# test set. +from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit +from sklearn.preprocessing import LabelEncoder + +groups = [pheno['Child_Adult'] for pheno in development_dataset.phenotypic] +classes = LabelEncoder().fit_transform(groups) + +cv = StratifiedShuffleSplit(n_splits=30, random_state=0, test_size=10) +gs = GridSearchCV(pipe, param_grid, scoring='accuracy', cv=cv, verbose=1, + refit=False, n_jobs=8) +gs.fit(masked_data, classes) +mean_scores = gs.cv_results_['mean_test_score'] +scores_std = gs.cv_results_['std_test_score'] + +###################################################################### +# display the results +from matplotlib import pyplot as plt + +plt.figure(figsize=(6, 4)) +positions = [.1, .2, .3, .4] +plt.barh(positions, mean_scores, align='center', height=.05, xerr=scores_std) +yticks = ['dummy'] + list(gs.cv_results_['param_connectivity__kind'].data[1:]) +yticks = [t.replace(' ', '\n') for t in yticks] +plt.yticks(positions, yticks) +plt.xlabel('Classification accuracy') +plt.gca().grid(True) +plt.gca().set_axisbelow(True) +plt.tight_layout() + + +############################################################################### +# This is a small example to showcase nilearn features. In practice such +# comparisons need to be performed on much larger cohorts and several +# datasets. +# `Dadi et al 2019 +# `_ +# showed that across many cohorts and clinical questions, the tangent +# kind should be preferred.
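The param_grid above lists a whole estimator (the dummy baseline) as one candidate and hyperparameters of the real model as another, so a single grid search compares both under identical cross-validation. A self-contained toy sketch of this scikit-learn pattern on synthetic data (ours, independent of the example):

from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=60, random_state=0)
pipe = Pipeline([('scale', StandardScaler()), ('clf', LinearSVC())])
param_grid = [
    {'clf': [DummyClassifier(strategy='most_frequent')]},  # chance baseline
    {'clf__C': [.1, 1., 10.]},                             # tune the SVM
]
search = GridSearchCV(pipe, param_grid, cv=5).fit(X, y)
print(search.cv_results_['mean_test_score'])  # baseline vs. tuned scores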
+ +plt.show() diff --git a/examples/05_advanced/plot_localizer_mass_univariate_methods.py b/examples/05_advanced/plot_localizer_mass_univariate_methods.py index 9c8f4d5e2a..12531abf6c 100644 --- a/examples/05_advanced/plot_localizer_mass_univariate_methods.py +++ b/examples/05_advanced/plot_localizer_mass_univariate_methods.py @@ -35,7 +35,7 @@ tested_var = localizer_dataset.ext_vars['pseudo'] # Quality check / Remove subjects with bad tested variate -mask_quality_check = np.where(tested_var != b'None')[0] +mask_quality_check = np.where(tested_var != b'n/a')[0] n_samples = mask_quality_check.size contrast_map_filenames = [localizer_dataset.cmaps[i] for i in mask_quality_check] diff --git a/nilearn/__init__.py b/nilearn/__init__.py index 7bfee2f735..5d39246d7b 100644 --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -35,11 +35,17 @@ import gzip import sys import warnings +import os from distutils.version import LooseVersion from .version import _check_module_dependencies, __version__ +# Workaround issue discovered in intel-openmp 2019.5: +# https://github.com/ContinuumIO/anaconda-issues/issues/11294 +# +# see also https://github.com/scikit-learn/scikit-learn/pull/15020 +os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") def _py2_deprecation_warning(): py2_warning = ('Python2 support is deprecated and will be removed in ' diff --git a/nilearn/_utils/cache_mixin.py b/nilearn/_utils/cache_mixin.py index 30bde14440..3f82def65f 100644 --- a/nilearn/_utils/cache_mixin.py +++ b/nilearn/_utils/cache_mixin.py @@ -13,16 +13,10 @@ import nibabel import sklearn -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory MEMORY_CLASSES = (Memory, ) -try: - from joblib import Memory as JoblibMemory - MEMORY_CLASSES = (Memory, JoblibMemory) -except ImportError: - pass - import nilearn from .compat import _basestring diff --git a/nilearn/_utils/compat.py b/nilearn/_utils/compat.py index f76de77cae..27b648fdae 100644 --- a/nilearn/_utils/compat.py +++ b/nilearn/_utils/compat.py @@ -7,6 +7,7 @@ from distutils.version import LooseVersion import nibabel +import sklearn if sys.version_info[0] == 3: @@ -66,3 +67,15 @@ def md5_hash(string): m = hashlib.md5() m.update(string) return m.hexdigest() + + +if LooseVersion(sklearn.__version__) < LooseVersion('0.21'): + from sklearn.externals import joblib +else: + import joblib + +Memory = joblib.Memory +Parallel = joblib.Parallel +hash = joblib.hash +delayed = joblib.delayed +cpu_count = joblib.cpu_count diff --git a/nilearn/_utils/extmath.py b/nilearn/_utils/extmath.py index 6cbc8c6daf..615464f1eb 100644 --- a/nilearn/_utils/extmath.py +++ b/nilearn/_utils/extmath.py @@ -6,8 +6,6 @@ import numpy as np -from numpy import partition - def fast_abs_percentile(data, percentile=80): """ A fast version of the percentile of the absolute value.
@@ -36,11 +34,8 @@ def fast_abs_percentile(data, percentile=80): data = np.abs(data) data = data.ravel() index = int(data.size * .01 * percentile) - if partition is not None: - # Partial sort: faster than sort - data = partition(data, index) - else: - data.sort() + # Partial sort: faster than sort + data = np.partition(data, index) return data[index] diff --git a/nilearn/_utils/niimg_conversions.py b/nilearn/_utils/niimg_conversions.py index efdfad485c..3ecf93a159 100644 --- a/nilearn/_utils/niimg_conversions.py +++ b/nilearn/_utils/niimg_conversions.py @@ -10,7 +10,7 @@ import nilearn as ni import numpy as np import itertools -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from .cache_mixin import cache from .niimg import _safe_get_data, load_niimg diff --git a/nilearn/conftest.py b/nilearn/conftest.py new file mode 100644 index 0000000000..7d2d5abfe6 --- /dev/null +++ b/nilearn/conftest.py @@ -0,0 +1,30 @@ +from distutils.version import LooseVersion + +import numpy as np +import pytest + +from _pytest.doctest import DoctestItem + +try: + import matplotlib +except ImportError: + collect_ignore = ['plotting'] +else: + matplotlib # Prevents flake8 erring due to unused entities. + + +def pytest_collection_modifyitems(items): + + # numpy changed the str/repr formatting of numpy arrays in 1.14. We want to + # run doctests only for numpy >= 1.14. Adapted from scikit-learn. + if LooseVersion(np.__version__) < LooseVersion('1.14'): + reason = 'doctests are only run for numpy >= 1.14' + skip_doctests = True + else: + skip_doctests = False + + if skip_doctests: + skip_marker = pytest.mark.skip(reason=reason) + for item in items: + if isinstance(item, DoctestItem): + item.add_marker(skip_marker) diff --git a/nilearn/connectome/__init__.py b/nilearn/connectome/__init__.py index 51262f8196..701675768b 100644 --- a/nilearn/connectome/__init__.py +++ b/nilearn/connectome/__init__.py @@ -9,7 +9,8 @@ cov_to_corr, prec_to_partial) from .group_sparse_cov import (GroupSparseCovariance, - GroupSparseCovarianceCV, group_sparse_covariance) + GroupSparseCovarianceCV, + group_sparse_covariance) __all__ = ['sym_matrix_to_vec', 'vec_to_sym_matrix', 'sym_to_vec', 'ConnectivityMeasure', 'cov_to_corr', 'prec_to_partial', diff --git a/nilearn/connectome/connectivity_matrices.py b/nilearn/connectome/connectivity_matrices.py index fce8fd064a..e2988d0632 100644 --- a/nilearn/connectome/connectivity_matrices.py +++ b/nilearn/connectome/connectivity_matrices.py @@ -486,7 +486,7 @@ def _fit_transform(self, X, do_transform=False, do_fit=False): # Compute all the matrices, stored in "connectivities" if self.kind == 'correlation': covariances_std = [self.cov_estimator_.fit( - signal._standardize(x, detrend=False, normalize=True) + signal._standardize(x, detrend=False, standardize=True) ).covariance_ for x in X] connectivities = [cov_to_corr(cov) for cov in covariances_std] else: diff --git a/nilearn/connectome/group_sparse_cov.py b/nilearn/connectome/group_sparse_cov.py index f43076b992..06317097aa 100644 --- a/nilearn/connectome/group_sparse_cov.py +++ b/nilearn/connectome/group_sparse_cov.py @@ -15,7 +15,7 @@ from sklearn.base import BaseEstimator from sklearn.covariance import empirical_covariance -from sklearn.externals.joblib import Memory, delayed, Parallel +from nilearn._utils.compat import Memory, delayed, Parallel from sklearn.model_selection import check_cv from sklearn.utils.extmath import fast_logdet @@ -891,7 +891,7 @@ class GroupSparseCovarianceCV(BaseEstimator, CacheMixin):
See also -------- GroupSparseCovariance, - sklearn.covariance.GraphLassoCV + sklearn.covariance.GraphicalLassoCV Notes ----- diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py index 3c383da582..280689e9b4 100644 --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -17,6 +17,7 @@ from .atlas import (fetch_atlas_craddock_2012, fetch_atlas_destrieux_2009, fetch_atlas_harvard_oxford, fetch_atlas_msdl, fetch_coords_power_2011, + fetch_coords_seitzman_2018, fetch_atlas_smith_2009, fetch_atlas_yeo_2011, fetch_atlas_aal, fetch_atlas_basc_multiscale_2015, @@ -44,6 +45,7 @@ 'fetch_atlas_harvard_oxford', 'fetch_atlas_msdl', 'fetch_atlas_schaefer_2018', 'fetch_coords_power_2011', + 'fetch_coords_seitzman_2018', 'fetch_atlas_smith_2009', 'fetch_atlas_allen_2011', 'fetch_atlas_yeo_2011', 'fetch_mixed_gambles', 'fetch_atlas_aal', diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py index 36689b8105..3897f95110 100644 --- a/nilearn/datasets/atlas.py +++ b/nilearn/datasets/atlas.py @@ -10,6 +10,7 @@ import nibabel as nb import numpy as np +from numpy.lib import recfunctions from sklearn.datasets.base import Bunch from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr @@ -808,6 +809,79 @@ def fetch_coords_dosenbach_2010(ordered_regions=True): return Bunch(**params) +def fetch_coords_seitzman_2018(ordered_regions=True): + """Load the Seitzman et al. 300 ROIs. These ROIs cover cortical, + subcortical and cerebellar regions and are assigned to one of 13 + networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention, + FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience, + SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and + have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus, + basalGanglia, amygdala, cortexMid). + + .. versionadded:: 0.5.1 + + Parameters + ---------- + ordered_regions : bool, optional + ROIs from the same networks are grouped together and ordered with respect + to their locations (anterior to posterior). + + Returns + ------- + data: sklearn.datasets.base.Bunch + dictionary-like object, contains: + - "rois": Coordinates of 300 ROIs in MNI space + - "radius": Radius of each ROI in mm + - "networks": Network names + - "regions": Region names + + References + ---------- + Seitzman, B. A., Gratton, C., Marek, S., Raut, R. V., Dosenbach, N. U., + Schlaggar, B. L., et al. (2018). A set of functionally-defined brain + regions with improved representation of the subcortex and cerebellum. + bioRxiv, 450452.
http://doi.org/10.1101/450452 + """ + dataset_name = 'seitzman_2018' + fdescr = _get_dataset_descr(dataset_name) + package_directory = os.path.dirname(os.path.abspath(__file__)) + roi_file = os.path.join(package_directory, "data", + "seitzman_2018_ROIs_300inVol_MNI_allInfo.txt") + anatomical_file = os.path.join(package_directory, "data", + "seitzman_2018_ROIs_anatomicalLabels.txt") + + rois = np.recfromcsv(roi_file, delimiter=" ") + rois = recfunctions.rename_fields(rois, {"netname": "network", + "radiusmm": "radius"}) + rois.network = rois.network.astype(str) + + # get integer regional labels and convert to text labels with mapping + # from header line + with open(anatomical_file, 'r') as fi: + header = fi.readline() + region_mapping = {} + for r in header.strip().split(","): + i, region = r.split("=") + region_mapping[int(i)] = region + + anatomical = np.genfromtxt(anatomical_file, skip_header=1) + anatomical_names = np.array([region_mapping[a] for a in anatomical]) + + rois = recfunctions.merge_arrays((rois, anatomical_names), + asrecarray=True, flatten=True) + rois.dtype.names = rois.dtype.names[:-1] + ("region",) + + if ordered_regions: + rois = np.sort(rois, order=['network', 'y']) + + params = dict(rois=rois[['x', 'y', 'z']], + radius=rois['radius'], + networks=rois['network'].astype(str), + regions=rois['region'], description=fdescr) + + return Bunch(**params) + + def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1): """Download and return file names for the Allen and MIALAB ICA atlas (dated 2011). @@ -1201,7 +1275,7 @@ def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, ---------- n_rois: int number of regions of interest {100, 200, 300, 400 (default), 500, 600, - 800, 1000} + 700, 800, 900, 1000} yeo_networks: int ROI annotation according to yeo networks {7 (default), 17} @@ -1233,7 +1307,7 @@ def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, References ---------- For more information on this dataset, see - https://github.com/ThomasYeoLab/CBIG/tree/v0.8.1-Schaefer2018_LocalGlobal/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal + https://github.com/ThomasYeoLab/CBIG/tree/v0.14.3-Update_Yeo2011_Schaefer2018_labelname/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations Schaefer A, Kong R, Gordon EM, Laumann TO, Zuo XN, Holmes AJ, Eickhoff SB, Yeo BTT. Local-Global parcellation of the human @@ -1246,8 +1320,15 @@ def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, intrinsic functional connectivity. J Neurophysiol 106(3):1125-65, 2011. Licence: MIT. + + Notes + ----- + Release v0.14.3 of the Schaefer 2018 parcellation is used by + default. Versions prior to v0.14.3 are known to contain erroneous region + label names. 
For more details, see + https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md """ - valid_n_rois = [100, 200, 300, 400, 500, 600, 800, 1000] + valid_n_rois = list(range(100, 1100, 100)) valid_yeo_networks = [7, 17] valid_resolution_mm = [1, 2] if n_rois not in valid_n_rois: @@ -1264,9 +1345,9 @@ def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1, if base_url is None: base_url = ('https://raw.githubusercontent.com/ThomasYeoLab/CBIG/' - 'v0.8.1-Schaefer2018_LocalGlobal/stable_projects/' - 'brain_parcellation/Schaefer2018_LocalGlobal/' - 'Parcellations/MNI/' + 'v0.14.3-Update_Yeo2011_Schaefer2018_labelname/' + 'stable_projects/brain_parcellation/' + 'Schaefer2018_LocalGlobal/Parcellations/MNI/' ) files = [] diff --git a/nilearn/datasets/data/seitzman_2018_ROIs_300inVol_MNI_allInfo.txt b/nilearn/datasets/data/seitzman_2018_ROIs_300inVol_MNI_allInfo.txt new file mode 100644 index 0000000000..6e73eb8025 --- /dev/null +++ b/nilearn/datasets/data/seitzman_2018_ROIs_300inVol_MNI_allInfo.txt @@ -0,0 +1,301 @@ +x y z radius(mm) netWorkbenchLabel netName +-56.16 -44.76 -24.23 5 0 unassigned +-24.66 -97.84 -12.33 5 0 unassigned +8.13 41.12 -24.31 5 0 unassigned +26.68 -97.3 -13.49 5 0 unassigned +48.52 -2.85 -38.49 5 0 unassigned +51.79 -34.17 -27.23 5 0 unassigned +55.18 -30.8 -16.93 5 0 unassigned +-57.97 -25.69 -14.73 5 0 unassigned +64.6 -24.41 -18.57 5 0 unassigned +-50.06 -7.09 -39.24 5 0 unassigned +33.55 38.46 -12.03 5 0 unassigned +-46.68 -50.91 -20.91 5 0 unassigned +-7.12 -52.22 60.71 5 10 SomatomotorDorsal +0.05 -14.53 46.74 5 10 SomatomotorDorsal +-53.52 -22.54 43.1 5 10 SomatomotorDorsal +-45.1 -31.85 46.63 5 10 SomatomotorDorsal +-39.63 -19.04 54.21 5 10 SomatomotorDorsal +-38.28 -27.17 69.45 5 10 SomatomotorDorsal +-38.24 -14.57 68.72 5 10 SomatomotorDorsal +-29.1 -43 60.66 5 10 SomatomotorDorsal +-22.5 -30.1 72.44 5 10 SomatomotorDorsal +-20.66 -31.33 60.85 5 10 SomatomotorDorsal +-16.25 -45.8 73.22 5 10 SomatomotorDorsal +-13.74 -17.95 39.84 5 10 SomatomotorDorsal +-12.96 -17.34 74.66 5 10 SomatomotorDorsal +-6.9 -20.59 65.21 5 10 SomatomotorDorsal +-6.79 -33.09 72.27 5 10 SomatomotorDorsal +2.4 -27.94 60.15 5 10 SomatomotorDorsal +3.45 -17.44 58.45 5 10 SomatomotorDorsal +9.5 -1.84 44.73 5 10 SomatomotorDorsal +9.94 -45.52 72.63 5 10 SomatomotorDorsal +10.09 -17.1 74.14 5 10 SomatomotorDorsal +13.19 -32.82 74.98 5 10 SomatomotorDorsal +20.21 -28.8 59.8 5 10 SomatomotorDorsal +22.45 -42.29 68.99 5 10 SomatomotorDorsal +28.88 -16.95 70.55 5 10 SomatomotorDorsal +28.54 -39.24 59.17 5 10 SomatomotorDorsal +37.74 -17.3 45.01 5 10 SomatomotorDorsal +42.14 -20.24 54.59 5 10 SomatomotorDorsal +44.34 -7.55 56.98 5 10 SomatomotorDorsal +47.21 -29.75 48.7 5 10 SomatomotorDorsal +50.24 -20.37 41.74 5 10 SomatomotorDorsal +19.33 -7.71 63.88 5 10 SomatomotorDorsal +54.22 -27.83 33.64 5 10 SomatomotorDorsal +-52.84 -10.23 24.41 5 11 SomatomotorLateral +-49.47 -11.06 34.95 5 11 SomatomotorLateral +36.04 -9.44 13.95 5 11 SomatomotorLateral +51.14 -5.8 32.42 5 11 SomatomotorLateral +65.64 -7.88 24.83 5 11 SomatomotorLateral +-51.26 8.26 -2.06 5 9 CinguloOpercular +-44.76 0.1 8.83 5 9 CinguloOpercular +-34.37 3.29 4.19 5 9 CinguloOpercular +-16.14 -4.82 70.83 5 9 CinguloOpercular +-10.48 -2.1 42.02 5 9 CinguloOpercular +-5.33 17.8 34.41 5 9 CinguloOpercular +-2.88 2.38 53.21 5 9 CinguloOpercular +6.52 7.69 50.58 5 9 CinguloOpercular +13.21 -1.36 69.98 5 9 CinguloOpercular +35.83 
10.32 1.18 5 9 CinguloOpercular +36.73 0.78 -3.57 5 9 CinguloOpercular +49.4 8.32 -1.12 5 9 CinguloOpercular +-30.12 -27.02 12.2 5 9 CinguloOpercular +42.05 -0.39 47.1 5 9 CinguloOpercular +35.91 21.91 2.62 5 9 CinguloOpercular +-39.12 50.79 17.38 5 9 CinguloOpercular +31.24 32.79 26.39 5 9 CinguloOpercular +31.07 55.71 14.49 5 9 CinguloOpercular +36.89 32.35 -2.24 5 9 CinguloOpercular +-52.92 -21.83 22.97 5 12 Auditory +43.45 -22.93 19.85 5 12 Auditory +59.4 -17.34 28.69 5 12 Auditory +-60.48 -25.22 13.82 5 12 Auditory +-55.22 -9.42 11.73 5 12 Auditory +-49.77 -34.36 25.74 5 12 Auditory +-49.14 -26.3 5.18 5 12 Auditory +-38.43 -33.34 16.98 5 12 Auditory +31.75 -26.33 12.91 5 12 Auditory +55.96 -5.03 13.25 5 12 Auditory +57.88 -15.62 7.49 5 12 Auditory +65.43 -33.2 19.97 5 12 Auditory +-68.47 -22.66 -15.74 5 1 DefaultMode +-68.3 -41.41 -5.14 5 1 DefaultMode +-57.75 -29.7 -3.94 5 1 DefaultMode +-55.72 -12.96 -10.24 5 1 DefaultMode +-52.89 2.55 -27.06 5 1 DefaultMode +-49.3 -42.15 0.83 5 1 DefaultMode +-45.79 -60.69 20.85 5 1 DefaultMode +-46.17 31.26 -13.03 5 1 DefaultMode +-44.45 -64.64 34.78 5 1 DefaultMode +-43.58 11.99 -34.15 5 1 DefaultMode +-40.5 -75.27 25.8 5 1 DefaultMode +-39.05 -74.95 43.72 5 1 DefaultMode +-35.36 19.86 50.8 5 1 DefaultMode +-19.78 45.07 39.48 5 1 DefaultMode +-20.16 63.65 19.39 5 1 DefaultMode +-17.65 63.19 -9.17 5 1 DefaultMode +-16.4 28.52 53.05 5 1 DefaultMode +-12.6 -39.64 0.93 5 1 DefaultMode +-11.29 -56.2 15.6 5 1 DefaultMode +-10.09 39.09 52.29 5 1 DefaultMode +-10.33 54.63 38.71 5 1 DefaultMode +-11.06 44.62 7.61 5 1 DefaultMode +-7.55 48.08 23.18 5 1 DefaultMode +-6.84 -54.9 27.05 5 1 DefaultMode +-7.04 50.82 -1.29 5 1 DefaultMode +-2.94 -48.79 12.87 5 1 DefaultMode +-2.2 -36.68 43.85 5 1 DefaultMode +-3.06 44.41 -9.46 5 1 DefaultMode +-2.06 37.85 36.34 5 1 DefaultMode +-2.5 41.7 16.05 5 1 DefaultMode +5.55 66.69 -3.55 5 1 DefaultMode +5.91 -58.82 35.45 5 1 DefaultMode +5.94 54.42 16.18 5 1 DefaultMode +6.11 63.98 21.96 5 1 DefaultMode +7.94 -48.37 30.57 5 1 DefaultMode +7.51 42.49 -5.35 5 1 DefaultMode +8.36 47.59 -15.18 5 1 DefaultMode +8.8 54.23 3.45 5 1 DefaultMode +10.77 -53.83 17.09 5 1 DefaultMode +12.73 54.87 38.19 5 1 DefaultMode +13.08 29.99 58.65 5 1 DefaultMode +12.25 35.63 20.3 5 1 DefaultMode +15.12 -63.09 25.98 5 1 DefaultMode +22.11 39.21 38.9 5 1 DefaultMode +23.33 33.07 47.68 5 1 DefaultMode +43.43 -72.21 28 5 1 DefaultMode +45.64 16.2 -30.02 5 1 DefaultMode +46.68 -50.08 28.76 5 1 DefaultMode +49.26 35.47 -12.2 5 1 DefaultMode +52.04 -59.37 35.52 5 1 DefaultMode +52.16 -2.43 -16.4 5 1 DefaultMode +51.9 6.81 -29.61 5 1 DefaultMode +64.64 -11.8 -19.3 5 1 DefaultMode +64.8 -30.55 -8.7 5 1 DefaultMode +-33.93 -38.06 -15.6 5 1 DefaultMode +4.2 -48.06 50.71 5 15 ParietoMedial +-6.58 -71.47 41.74 5 15 ParietoMedial +-2.47 -34.8 31.07 5 15 ParietoMedial +1.75 -24.25 30.36 5 15 ParietoMedial +11.27 -66.01 42.09 5 15 ParietoMedial +-12.08 -94.56 -12.8 5 2 Visual +7.98 -91.08 -7.1 5 2 Visual +17.27 -91.09 -13.64 5 2 Visual +-46.54 -75.95 -9.95 5 2 Visual +-42.1 -73.62 0.38 5 2 Visual +-40.21 -88.44 -6.19 5 2 Visual +-33 -79.02 -13.24 5 2 Visual +-28.07 -79.45 19.43 5 2 Visual +-26.39 -90.23 3.12 5 2 Visual +-23.94 -90.98 18.96 5 2 Visual +-17.87 -68.03 4.81 5 2 Visual +-16.21 -76.97 33.82 5 2 Visual +-15.85 -52.34 -1.43 5 2 Visual +-15.02 -72.42 -7.68 5 2 Visual +-14.22 -90.66 31.4 5 2 Visual +-8.43 -80.5 7.44 5 2 Visual +-2.88 -81.25 21.1 5 2 Visual +5.59 -71.65 23.52 5 2 Visual +6.21 -81.41 6.11 5 2 Visual +8.45 -71.84 10.79 5 2 Visual +15.27 
-87.09 36.89 5 2 Visual +15.18 -76.68 31 5 2 Visual +17.53 -46.86 -9.88 5 2 Visual +19.81 -65.56 1.72 5 2 Visual +19.64 -85.62 -2.39 5 2 Visual +24.41 -87.21 24.01 5 2 Visual +25.66 -79.47 -15.56 5 2 Visual +26.93 -59.37 -9.36 5 2 Visual +28.68 -76.62 25.42 5 2 Visual +36.76 -84.11 12.99 5 2 Visual +36.51 -81.16 1.2 5 2 Visual +39.98 -72.49 14.36 5 2 Visual +41.6 -65.5 -8.27 5 2 Visual +42.52 -78.17 -11.78 5 2 Visual +-52.6 -48.83 42.5 5 3 FrontoParietal +-46.5 10.85 23.04 5 3 FrontoParietal +-43.93 1.8 45.7 5 3 FrontoParietal +-42.23 38.21 21.35 5 3 FrontoParietal +-42.09 -54.98 44.74 5 3 FrontoParietal +-42.1 24.68 29.53 5 3 FrontoParietal +-41.06 5.81 32.72 5 3 FrontoParietal +-41.68 45.16 -2.31 5 3 FrontoParietal +-34.16 54.83 4.36 5 3 FrontoParietal +-28.4 -57.93 47.78 5 3 FrontoParietal +-22.53 10.76 63.73 5 3 FrontoParietal +-2.98 26.41 44.42 5 3 FrontoParietal +24.07 44.61 -15.35 5 3 FrontoParietal +31.83 14.37 55.98 5 3 FrontoParietal +33.38 -53.12 44.02 5 3 FrontoParietal +33.6 54.22 -12.95 5 3 FrontoParietal +37.45 -64.7 40.38 5 3 FrontoParietal +38.37 43.18 15.06 5 3 FrontoParietal +39.87 18.39 39.72 5 3 FrontoParietal +43.93 -52.95 46.95 5 3 FrontoParietal +43.25 49.25 -2.31 5 3 FrontoParietal +47.01 9.93 32.66 5 3 FrontoParietal +47.98 24.56 26.5 5 3 FrontoParietal +49.18 -42.41 45.16 5 3 FrontoParietal +58.31 -52.79 -13.61 5 3 FrontoParietal +47.6 22.16 9.74 5 3 FrontoParietal +55.27 -44.59 36.7 5 3 FrontoParietal +-35.44 20.03 0.07 5 8 Salience +-27.5 52.04 21.28 5 8 Salience +-10.76 25.99 24.54 5 8 Salience +-0.94 14.86 43.99 5 8 Salience +-0.2 30.35 27.22 5 8 Salience +5.23 23.22 37.03 5 8 Salience +10.26 22.06 27.48 5 8 Salience +26.07 49.56 26.58 5 8 Salience +33.56 16.45 -7.58 5 8 Salience +51.28 -28.52 -4.3 5 7 VentralAttention +-56.47 -50.48 9.92 5 7 VentralAttention +-55.3 -39.89 13.51 5 7 VentralAttention +51.52 -32.52 7.55 5 7 VentralAttention +55.75 -46.07 11.42 5 7 VentralAttention +-49.07 25.13 -0.98 5 7 VentralAttention +-9.88 10.95 66.61 5 7 VentralAttention +52.68 32.58 0.57 5 7 VentralAttention +53.9 -42.76 21.83 5 7 VentralAttention +-26.6 -70.72 36.86 5 5 DorsalAttention +-37.26 -28.8 -25.58 5 5 DorsalAttention +45.68 -46.67 -16.85 5 5 DorsalAttention +10.51 -38.54 50.02 5 5 DorsalAttention +-52.44 -63.14 5.29 5 5 DorsalAttention +-42.26 -60.12 -8.85 5 5 DorsalAttention +-32.56 -46.42 47.2 5 5 DorsalAttention +-32.23 -1.08 54.06 5 5 DorsalAttention +-16.5 -58.57 64.46 5 5 DorsalAttention +9.61 -61.5 60.88 5 5 DorsalAttention +21.9 -64.74 48.12 5 5 DorsalAttention +25.34 -58.18 60.34 5 5 DorsalAttention +28.56 -4.62 53.99 5 5 DorsalAttention +46.09 -58.93 3.93 5 5 DorsalAttention +-31.13 -9.99 -36.32 5 17 MedialTemporalLobe +32.85 -12.41 -34.41 5 17 MedialTemporalLobe +-25.57 -11.78 -21.54 4 17 MedialTemporalLobe +24.73 -11.25 -22.68 4 17 MedialTemporalLobe +-25.24 -38.78 -2.01 4 1 DefaultMode +25.08 -37.18 -2.16 4 1 DefaultMode +-30.63 18.71 -18.98 5 4 Reward +-21.14 40.87 -20.48 5 4 Reward +23.96 31.94 -17.78 5 4 Reward +27.06 16.22 -16.93 5 4 Reward +-20.3 -2.27 -22.21 4 4 Reward +19.51 -1.85 -23.11 4 4 Reward +12.66 17.32 -5.06 4 4 Reward +-12.49 17.05 -4.49 4 4 Reward +11.87 17.51 6.57 4 1 DefaultMode +-13.28 17.24 7.14 4 1 DefaultMode +14.15 -1.19 18.18 4 3 FrontoParietal +-15.19 -1.5 18.84 4 3 FrontoParietal +28.56 -7.53 7.66 4 10 SomatomotorDorsal +-28.02 -10.23 9.07 4 10 SomatomotorDorsal +28.28 -6.7 -5.06 4 10 SomatomotorDorsal +-28.31 -10.45 -3.6 4 10 SomatomotorDorsal +25.26 1.75 -1.26 4 10 SomatomotorDorsal +-28.26 -0.97 -3.09 4 10 
SomatomotorDorsal +25.46 5.05 7 4 10 SomatomotorDorsal +-24.83 7.67 7.95 4 10 SomatomotorDorsal +18.88 -4.68 -3.91 4 10 SomatomotorDorsal +-18.84 -5.08 -3.06 4 10 SomatomotorDorsal +3.42 -7.79 8.23 4 1 DefaultMode +-2.88 -9.96 8.5 4 1 DefaultMode +18.89 -28.79 0.66 4 2 Visual +-18.84 -29.18 1.51 4 2 Visual +12.9 -13.94 11.55 4 9 CinguloOpercular +-15.39 -14.24 12.19 4 9 CinguloOpercular +9.56 -7.84 1.76 4 9 CinguloOpercular +-9.35 -10.17 0.2 4 9 CinguloOpercular +13.67 -19.39 0.23 4 10 SomatomotorDorsal +-13.57 -19.67 0.84 4 10 SomatomotorDorsal +15.95 -22.37 8.8 4 10 SomatomotorDorsal +-18.63 -22.73 9.59 4 10 SomatomotorDorsal +32 -49 -51 4 0 unassigned +-13 -52 -50 4 0 unassigned +14 -48 -52 4 0 unassigned +-32 -78 -38 4 1 DefaultMode +32 -81 -38 4 1 DefaultMode +-24 -76 -28 4 1 DefaultMode +24 -76 -28.01 4 1 DefaultMode +-5.72 -50.8 -40.84 4 1 DefaultMode +8 -50 -40 4 1 DefaultMode +0 -74 -25 4 2 Visual +-10 -78 -28 4 3 FrontoParietal +10 -78 -28 4 3 FrontoParietal +-34 -72.01 -48 4 3 FrontoParietal +34 -72 -48 4 3 FrontoParietal +-30.5 -66 -30 4 3 FrontoParietal +31.68 -62.83 -30.4 4 3 FrontoParietal +40 -44 -38 4 3 FrontoParietal +43.5 -60 -30 4 9 CinguloOpercular +-43.5 -60 -30 4 9 CinguloOpercular +-34 -42 -44 4 9 CinguloOpercular +-33 -51 -50 4 10 SomatomotorDorsal +-6 -74 -42 4 10 SomatomotorDorsal +7.5 -72 -39 4 10 SomatomotorDorsal +-10 -62 -18 4 10 SomatomotorDorsal +10 -62 -18 4 10 SomatomotorDorsal +-12 -44 -18 4 10 SomatomotorDorsal +12 -44 -18 4 10 SomatomotorDorsal diff --git a/nilearn/datasets/data/seitzman_2018_ROIs_anatomicalLabels.txt b/nilearn/datasets/data/seitzman_2018_ROIs_anatomicalLabels.txt new file mode 100644 index 0000000000..9b41c0b51a --- /dev/null +++ b/nilearn/datasets/data/seitzman_2018_ROIs_anatomicalLabels.txt @@ -0,0 +1,301 @@ +0=cortexMid,1=cortexL,2=cortexR,3=hippocampus,4=amygdala,5=basalGanglia,6=thalamus,7=cerebellum +1 +1 +2 +2 +2 +2 +2 +1 +2 +1 +2 +1 +1 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +1 +1 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +1 +2 +2 +1 +2 +2 +2 +1 +2 +2 +1 +1 +1 +1 +1 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +1 +2 +1 +1 +2 +2 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +1 +1 +2 +2 +1 +1 +2 +2 +1 +1 +2 +2 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +1 +2 +3 +3 +3 +3 +1 +1 +2 +2 +4 +4 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +6 +6 +6 +6 +6 +6 +6 +6 +6 +6 +6 +6 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 +7 diff --git a/nilearn/datasets/description/fsaverage5_sphere.rst b/nilearn/datasets/description/fsaverage5_sphere.rst new file mode 100644 index 0000000000..8b2aba6ea8 --- /dev/null +++ b/nilearn/datasets/description/fsaverage5_sphere.rst @@ -0,0 +1,16 @@ +fsaverage5 + + +Notes +----- +Fsaverage5 standard spheres as distributed with Freesurfer (Fischl et al, 1999) + +Content +------- + :'sphere_left': Gifti file, left hemisphere spherical surface mesh + :'sphere_right': Gifti file, right hemisphere spherical surface mesh + +References +---------- +Fischl et al, (1999). High-resolution intersubject averaging and a +coordinate system for the cortical surface. Hum Brain Mapp 8, 272-284. 
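For context, the spheres described above are exposed through a new 'fsaverage5_sphere' option of nilearn.datasets.fetch_surf_fsaverage, added by the nilearn/datasets/struct.py hunks later in this patch. A minimal usage sketch, assuming the patch is applied; the printed shape is illustrative:

    from nilearn.datasets import fetch_surf_fsaverage
    from nilearn import surface

    # First call downloads the two Gifti sphere meshes from OSF, then
    # caches them; the returned Bunch maps names to local file paths.
    spheres = fetch_surf_fsaverage(mesh='fsaverage5_sphere')
    coords, faces = surface.load_surf_mesh(spheres['sphere_left'])
    print(coords.shape)  # (10242, 3): one 3D point per fsaverage5 node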
diff --git a/nilearn/datasets/description/seitzman_2018.rst b/nilearn/datasets/description/seitzman_2018.rst new file mode 100644 index 0000000000..26c9e62686 --- /dev/null +++ b/nilearn/datasets/description/seitzman_2018.rst @@ -0,0 +1,35 @@ +Seitzman 2018 atlas + + +Notes +----- +300 ROI coordinates in cortical, subcortical and cerebellar regions. + +"Here, we apply a winner-take-all partitioning method to resting-state fMRI data and careful consideration of +anatomy to generate novel functionally-constrained ROIs in the thalamus, basal ganglia, amygdala, hippocampus, and +cerebellum. We validate these ROIs in three datasets via several anatomical and functional criteria, including known +anatomical divisions and functions, as well as agreement with existing literature. +Further, we demonstrate that combining these ROIs with established cortical ROIs recapitulates and extends +previously described functional network organization." (Seitzman et al., 2018) + + + +Content +------- + :"rois": Coordinates of 300 ROIs in MNI space + :"radius": Radius of each ROI in mm + :"networks": Network names + :"regions": Region names + +References +---------- +For more information see: +https://greenelab.wustl.edu/data_software + +ROI coordinates downloaded from: +https://wustl.box.com/s/twpyb1pflj6vrlxgh3rohyqanxbdpelw + + +Seitzman, B. A., Gratton, C., Marek, S., Raut, R. V., Dosenbach, N. U., Schlaggar, B. L., et al. (2018). +A set of functionally-defined brain regions with improved representation of the subcortex and cerebellum. +bioRxiv, 450452. http://doi.org/10.1101/450452 \ No newline at end of file diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py index 64281abfdd..38844a93b1 100644 --- a/nilearn/datasets/func.py +++ b/nilearn/datasets/func.py @@ -13,7 +13,7 @@ from sklearn.utils import deprecated from .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr, - _read_md5_sum_file, _tree, _filter_columns) + _read_md5_sum_file, _tree, _filter_columns, _fetch_file) from .._utils import check_niimg from .._utils.compat import BytesIO, _basestring, _urllib from .._utils.numpy_conversions import csv_to_array @@ -824,7 +824,7 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, if n_subjects is None: n_subjects = 94 # 94 subjects available if (isinstance(n_subjects, numbers.Number) and - ((n_subjects > 94) or (n_subjects < 1))): + ((n_subjects > 94) or (n_subjects < 1))): warnings.warn("Wrong value for \'n_subjects\' (%d). The maximum " "value will be used instead (\'n_subjects=94\')") n_subjects = 94 # 94 subjects available @@ -886,6 +886,7 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, "button press vs calculation and sentence listening/reading": "auditory&visual motor vs cognitive processing"} allowed_contrasts = list(contrast_name_wrapper.values()) + # convert contrast names contrasts_wrapped = [] # get a unique ID for each contrast.
It is used to give a unique name to @@ -893,21 +894,27 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, contrasts_indices = [] for contrast in contrasts: if contrast in allowed_contrasts: - contrasts_wrapped.append(contrast) + contrasts_wrapped.append(contrast.title().replace(" ", "")) contrasts_indices.append(allowed_contrasts.index(contrast)) elif contrast in contrast_name_wrapper: name = contrast_name_wrapper[contrast] - contrasts_wrapped.append(name) + contrasts_wrapped.append(name.title().replace(" ", "")) contrasts_indices.append(allowed_contrasts.index(name)) else: raise ValueError("Contrast \'%s\' is not available" % contrast) - # It is better to perform several small requests than a big one because: - # - Brainomics server has no cache (can lead to timeout while the archive - # is generated on the remote server) - # - Local (cached) version of the files can be checked for each contrast - opts = {'uncompress': True} + # Get the dataset OSF index + dataset_name = "brainomics_localizer" + index_url = "https://osf.io/hwbm2/download" + data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, + verbose=verbose) + index_file = _fetch_file(index_url, data_dir, verbose=verbose) + with open(index_file, "rt") as of: + index = json.load(of) + # Build data URLs that will be fetched + files = {} + root_url = "https://osf.io/download/{0}" if isinstance(n_subjects, numbers.Number): subject_mask = np.arange(1, n_subjects + 1) subject_id_max = "S%02d" % n_subjects @@ -916,26 +923,18 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, subject_id_max = "S%02d" % np.max(n_subjects) n_subjects = len(n_subjects) subject_ids = ["S%02d" % s for s in subject_mask] - data_types = ["c map"] + data_types = ["cmaps"] if get_tmaps: - data_types.append("t map") - rql_types = str.join(", ", ["\"%s\"" % x for x in data_types]) - root_url = "http://brainomics.cea.fr/localizer/" - - base_query = ("Any X,XT,XL,XI,XF,XD WHERE X is Scan, X type XT, " "X concerns S, " "X label XL, X identifier XI, " "X format XF, X description XD, " 'S identifier <= "%s", ' % (subject_id_max, ) + 'X type IN(%(types)s), X label "%(label)s"') - - urls = ["%sbrainomics_data_%d.zip?rql=%s&vid=data-zip" - % (root_url, i, - _urllib.parse.quote(base_query % {"types": rql_types, - "label": c}, - safe=',()')) - for c, i in zip(contrasts_wrapped, contrasts_indices)] + data_types.append("tmaps") filenames = [] + + def _is_valid_path(path, index, verbose): + if path not in index: + if verbose > 0: + print("Skipping path '{0}'...".format(path)) + return False + return True + for subject_id in subject_ids: for data_type in data_types: for contrast_id, contrast in enumerate(contrasts_wrapped): @@ -943,80 +942,87 @@ def fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False, str.join('_', [data_type, contrast]), ' ', '_') file_path = os.path.join( "brainomics_data", subject_id, "%s.nii.gz" % name_aux) - file_tarball_url = urls[contrast_id] - filenames.append((file_path, file_tarball_url, opts)) + path = "/".join([ + "/localizer", "derivatives", "spm_1st_level", + "sub-%s" % subject_id, + "sub-%s_task-localizer_acq-%s_%s.nii.gz" % ( + subject_id, contrast, data_type)]) + if _is_valid_path(path, index, verbose=verbose): + file_url = root_url.format(index[path][1:]) + opts = {"move": file_path} + filenames.append((file_path, file_url, opts)) + files.setdefault(data_type, []).append(file_path) + + # Fetch masks if asked by user if get_masks: -
urls.append("%sbrainomics_data_masks.zip?rql=%s&vid=data-zip" - % (root_url, - _urllib.parse.quote(base_query % {"types": '"boolean mask"', - "label": "mask"}, - safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "boolean_mask_mask.nii.gz") - file_tarball_url = urls[-1] - filenames.append((file_path, file_tarball_url, opts)) + path = "/".join([ + "/localizer", "derivatives", "spm_1st_level", + "sub-%s" % subject_id, "sub-%s_mask.nii.gz" % subject_id]) + if _is_valid_path(path, index, verbose=verbose): + file_url = root_url.format(index[path][1:]) + opts = {"move": file_path} + filenames.append((file_path, file_url, opts)) + files.setdefault("masks", []).append(file_path) + # Fetch anats if asked by user if get_anats: - urls.append("%sbrainomics_data_anats.zip?rql=%s&vid=data-zip" - % (root_url, - _urllib.parse.quote(base_query % {"types": '"normalized T1"', - "label": "anatomy"}, - safe=',()'))) for subject_id in subject_ids: file_path = os.path.join( "brainomics_data", subject_id, "normalized_T1_anat_defaced.nii.gz") - file_tarball_url = urls[-1] - filenames.append((file_path, file_tarball_url, opts)) - # Fetch subject characteristics (separated in two files) - if url is None: - url_csv = ("%sdataset/cubicwebexport.csv?rql=%s&vid=csvexport" - % (root_url, _urllib.parse.quote("Any X WHERE X is Subject"))) - url_csv2 = ("%sdataset/cubicwebexport2.csv?rql=%s&vid=csvexport" - % (root_url, - _urllib.parse.quote("Any X,XI,XD WHERE X is QuestionnaireRun, " - "X identifier XI, X datetime " - "XD", safe=',') - )) - else: - url_csv = "%s/cubicwebexport.csv" % url - url_csv2 = "%s/cubicwebexport2.csv" % url - filenames += [("cubicwebexport.csv", url_csv, {}), - ("cubicwebexport2.csv", url_csv2, {})] + path = "/".join([ + "/localizer", "derivatives", "spm_preprocessing", + "sub-%s" % subject_id, "sub-%s_T1w.nii.gz" % subject_id]) + if _is_valid_path(path, index, verbose=verbose): + file_url = root_url.format(index[path][1:]) + opts = {"move": file_path} + filenames.append((file_path, file_url, opts)) + files.setdefault("anats", []).append(file_path) + + # Fetch subject characteristics + participants_file = os.path.join("brainomics_data", "participants.tsv") + path = "/localizer/participants.tsv" + if _is_valid_path(path, index, verbose=verbose): + file_url = root_url.format(index[path][1:]) + opts = {"move": participants_file} + filenames.append((participants_file, file_url, opts)) + + # Fetch behavioural + behavioural_file = os.path.join( + "brainomics_data", "phenotype", "behavioural.tsv") + path = "/localizer/phenotype/behavioural.tsv" + if _is_valid_path(path, index, verbose=verbose): + file_url = root_url.format(index[path][1:]) + opts = {"move": behavioural_file} + filenames.append((behavioural_file, file_url, opts)) # Actual data fetching - dataset_name = 'brainomics_localizer' - data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, - verbose=verbose) fdescr = _get_dataset_descr(dataset_name) - files = _fetch_files(data_dir, filenames, verbose=verbose) - anats = None - masks = None - tmaps = None - # combine data from both covariates files into one single recarray + _fetch_files(data_dir, filenames, verbose=verbose) + for key, value in files.items(): + files[key] = [os.path.join(data_dir, val) for val in value] + + # Load covariates file from numpy.lib.recfunctions import join_by - ext_vars_file2 = files[-1] - csv_data2 = np.recfromcsv(ext_vars_file2, delimiter=';') - files = files[:-1] - ext_vars_file = files[-1] - csv_data = 
np.recfromcsv(ext_vars_file, delimiter=';') - files = files[:-1] - # join_by sorts the output along the key - csv_data = join_by('subject_id', csv_data, csv_data2, - usemask=False, asrecarray=True)[subject_mask - 1] - if get_anats: - anats = files[-n_subjects:] - files = files[:-n_subjects] - if get_masks: - masks = files[-n_subjects:] - files = files[:-n_subjects] - if get_tmaps: - tmaps = files[1::2] - files = files[::2] - return Bunch(cmaps=files, tmaps=tmaps, masks=masks, anats=anats, - ext_vars=csv_data, description=fdescr) + participants_file = os.path.join(data_dir, participants_file) + csv_data = np.recfromcsv(participants_file, delimiter='\t') + behavioural_file = os.path.join(data_dir, behavioural_file) + csv_data2 = np.recfromcsv(behavioural_file, delimiter='\t') + csv_data = join_by( + "participant_id", csv_data, csv_data2, usemask=False, asrecarray=True) + subject_names = csv_data["participant_id"].tolist() + subjects_indices = [] + for name in subject_ids: + name = name.encode("utf8") + if name not in subject_names: + continue + subjects_indices.append(subject_names.index(name)) + csv_data = csv_data[subjects_indices] + + return Bunch(ext_vars=csv_data, description=fdescr, **files) def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None, @@ -1064,20 +1070,20 @@ def fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None, get_tmaps=False, get_masks=False, get_anats=False, data_dir=data_dir, url=url, resume=True, verbose=verbose) - data.pop('tmaps') - data.pop('masks') - data.pop('anats') return data -def fetch_localizer_button_task(data_dir=None, url=None, verbose=1): +def fetch_localizer_button_task(n_subjects=None, data_dir=None, url=None, + verbose=1): """Fetch left vs right button press contrast maps from the localizer. - This function ships only 2nd subject (S02) specific tmap and - its normalized T1 image. - Parameters ---------- + n_subjects: int, optional + The number of subjects to load. If None is given, + only the 2nd subject's (S02) t-map and its normalized + T1 image are fetched. + data_dir: string, optional Path of the data directory. Used to force data storage in a specified location. @@ -1093,6 +1099,7 @@ def fetch_localizer_button_task(data_dir=None, url=None, verbose=1): ------- data: Bunch Dictionary-like object, the interest attributes are : + 'cmaps': string list, giving paths to nifti contrast maps 'tmap': string, giving paths to nifti contrast maps 'anat': string, giving paths to normalized anatomical image @@ -1109,30 +1116,19 @@ def fetch_localizer_button_task(data_dir=None, url=None, verbose=1): nilearn.datasets.fetch_localizer_contrasts """ - # The URL can be retrieved from the nilearn account on OSF (Open - # Science Framework).
Uploaded files specific to S02 from - # fetch_localizer_contrasts ['left vs right button press'] - if url is None: - url = 'https://osf.io/dx9jn/download' - - tmap = "t_map_left_auditory_&_visual_click_vs_right_auditory&visual_click.nii.gz" - anat = "normalized_T1_anat_defaced.nii.gz" - - opts = {'uncompress': True} - - options = ('tmap', 'anat') - filenames = [(os.path.join('localizer_button_task', name), url, opts) - for name in (tmap, anat)] - - dataset_name = 'brainomics' - data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, - verbose=verbose) - files = _fetch_files(data_dir, filenames, verbose=verbose) - - fdescr = _get_dataset_descr('brainomics_localizer') - - params = dict([('description', fdescr)] + list(zip(options, files))) - return Bunch(**params) + if n_subjects is None: + n_subjects = [2] + data = fetch_localizer_contrasts(["left vs right button press"], + n_subjects=n_subjects, + get_tmaps=True, get_masks=False, + get_anats=True, data_dir=data_dir, + url=url, resume=True, verbose=verbose) + # TODO: remove -> only here for compatibility + if len(data["tmaps"]) == 1: + setattr(data, "tmap", data["tmaps"][0]) + if len(data["anats"]) == 1: + setattr(data, "anat", data["anats"][0]) + return data def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac', @@ -1360,7 +1356,7 @@ def _load_mixed_gambles(zmap_imgs): def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, - return_raw_data=False, verbose=0): + return_raw_data=False, verbose=1): """Fetch Jimura "mixed gambles" dataset. Parameters @@ -1380,7 +1376,7 @@ def fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True, resume: bool, optional (default True) If true, try resuming download if possible. - verbose: int, optional (default 0) + verbose: int, optional (default 1) Defines the level of verbosity of the output. return_raw_data: bool, optional (default True) @@ -1808,7 +1804,7 @@ def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None, 'A00056097', 'A00056098', 'A00056164', 'A00056372', 'A00056452', 'A00056489', 'A00056949'] - nitrc_ids = range(8260, 8470) + nitrc_ids = range(8260, 8464) max_subjects = len(ids) if n_subjects is None: n_subjects = max_subjects @@ -1816,7 +1812,6 @@ def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None, warnings.warn('Warning: there are only %d subjects' % max_subjects) n_subjects = max_subjects ids = ids[:n_subjects] - nitrc_ids = nitrc_ids[:n_subjects] # Dataset description fdescr = _get_dataset_descr(dataset_name) @@ -1850,13 +1845,13 @@ def fetch_surf_nki_enhanced(n_subjects=10, data_dir=None, func = os.path.join('%s', '%s_%s_preprocessed_fwhm6.gii') rh = _fetch_files(data_dir, [(func % (ids[i], ids[i], 'right'), - archive % (nitrc_ids[i], ids[i], 'rh'), + archive % (nitrc_ids[2*i+1], ids[i], 'rh'), {'move': func % (ids[i], ids[i], 'right')} )], resume=resume, verbose=verbose) lh = _fetch_files(data_dir, [(func % (ids[i], ids[i], 'left'), - archive % (nitrc_ids[i], ids[i], 'lh'), + archive % (nitrc_ids[2*i], ids[i], 'lh'), {'move': func % (ids[i], ids[i], 'left')} )], resume=resume, verbose=verbose) @@ -1995,7 +1990,7 @@ def _fetch_development_fmri_functional(participants, data_dir, url, verbose): def fetch_development_fmri(n_subjects=None, reduce_confounds=True, - data_dir=None, resume=True, verbose=0): + data_dir=None, resume=True, verbose=1): """Fetch movie watching based brain development dataset (fMRI) The data is downsampled to 4mm resolution for convenience. 
The origin of @@ -2024,7 +2019,7 @@ def fetch_development_fmri(n_subjects=None, reduce_confounds=True, resume: bool, optional (default True) Whether to resume download of a partly-downloaded file. - verbose: int, optional (default 0) + verbose: int, optional (default 1) Defines the level of verbosity of the output. Returns diff --git a/nilearn/datasets/neurovault.py b/nilearn/datasets/neurovault.py index 0f51b319ac..7843ddcd3e 100644 --- a/nilearn/datasets/neurovault.py +++ b/nilearn/datasets/neurovault.py @@ -2629,8 +2629,6 @@ def fetch_neurovault_motor_task(data_dir=None, verbose=1): Notes ------ - This function is only a caller for the fetch_localizer_contrasts in order - to simplify examples reading and understanding. The 'left vs right button press' contrast is used: https://neurovault.org/images/10426/ @@ -2673,8 +2671,6 @@ def fetch_neurovault_auditory_computation_task(data_dir=None, verbose=1): Notes ------ - This function is only a caller for the fetch_localizer_contrasts in order - to simplify examples reading and understanding. The 'auditory_calculation_vs_baseline' contrast is used: https://neurovault.org/images/32980/ diff --git a/nilearn/datasets/struct.py b/nilearn/datasets/struct.py index 8393e44bd4..b452d589bc 100644 --- a/nilearn/datasets/struct.py +++ b/nilearn/datasets/struct.py @@ -146,7 +146,7 @@ def load_mni152_brain_mask(): See Also -------- - nilearn.datasets.load_mni152_template for details about version of the + nilearn.datasets.load_mni152_template : for details about version of the MNI152 T1 template and related. """ # Load MNI template @@ -440,6 +440,7 @@ def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None): mesh: str, optional (default='fsaverage5') Which mesh to fetch. 'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes) + 'fsaverage5_sphere': the low-resolution fsaverage5 spheres (10242 nodes) 'fsaverage': the high-resolution fsaverage mesh (163842 nodes) (high-resolution fsaverage will result in more computation time and memory usage) @@ -467,6 +468,7 @@ def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None): """ meshes = {'fsaverage5': _fetch_surf_fsaverage5, + 'fsaverage5_sphere': _fetch_surf_fsaverage5_sphere, 'fsaverage': _fetch_surf_fsaverage} if mesh not in meshes: raise ValueError( @@ -476,6 +478,11 @@ def fetch_surf_fsaverage(mesh='fsaverage5', data_dir=None): def _fetch_surf_fsaverage(data_dir=None): + """Helper function to ship fsaverage (highest resolution) surfaces + and sulcal information with Nilearn. + + The data is downloaded from NITRC. + """ dataset_dir = _get_dataset_dir('fsaverage', data_dir=data_dir) url = 'https://www.nitrc.org/frs/download.php/10846/fsaverage.tar.gz' if not os.path.isdir(os.path.join(dataset_dir, 'fsaverage')): @@ -571,3 +578,27 @@ def _fetch_surf_fsaverage5(data_dir=None, url=None, resume=True, verbose=1): sulc_left=sulcs[0], sulc_right=sulcs[1], description=fdescr) + +def _fetch_surf_fsaverage5_sphere(data_dir=None): + """Helper function to ship fsaverage5 spherical meshes. + + These meshes can be used for visualization purposes, but also to run + cortical surface-based searchlight decoding. + + The data is downloaded from OSF.
+ """ + + fsaverage_dir = _get_dataset_dir('fsaverage', data_dir=data_dir) + dataset_dir = _get_dataset_dir('fsaverage5_sphere', data_dir=fsaverage_dir) + url = 'https://osf.io/b79fy/download' + opts = {'uncompress': True} + names = ['sphere_right', 'sphere_left'] + filenames = [('{}.gii'.format(name), url, opts) + for name in names] + _fetch_files(dataset_dir, filenames) + result = { + name: os.path.join(dataset_dir, '{}.gii'.format(name)) + for name in names} + + result['description'] = str(_get_dataset_descr('fsaverage5_sphere')) + return Bunch(**result) diff --git a/nilearn/datasets/tests/data/localizer_behavioural.tsv b/nilearn/datasets/tests/data/localizer_behavioural.tsv new file mode 100644 index 0000000000..d7c18f0a3c --- /dev/null +++ b/nilearn/datasets/tests/data/localizer_behavioural.tsv @@ -0,0 +1,95 @@ +participant_id nuage2 pente_dots_lin couleur nom_des_gens pb_phono lin_vs_log aspect pb_express facilite_addition espece_plante visu_vb_pour_calcul tv r2_dots_log ratio_pw_w pb_allocentrique pb_rotation details score_cal_complexe activites_pratiques_nb bruxe espece_animales mot pb_lecture moyenne_pw_w trajet score_3d nuage10 nuage11 nuage12 pb_ecriture gaucherie score_addition probleme_ecole facilite_soustraction pente_dots_log arc perspective score_multiplication r2_dots_lin pair1 pair3 pair2 pair5 pair4 itineraire_plan score_soustraction erreur_w imagerie type_pb_langage decomposition niveau_etude francais1erl geometrie coef_bisec pente_sujet_estim_lin pb_prononc seine parle_tard pb_a_l_ecole autoestimation_calcul lettre_miroir rapport_pente_lin_groupe comment nuage3 nuage1 nuage6 nuage7 nuage4 nuage5 strategie nuage8 nuage9 score_tableau_langage pb_binaire erreur_langage droitier erreur_ps_w pb_consonne facilite_multiplication dyslexie visage coef_estim_qt pb_nonlangage bilingue dessin_fini bus billet pente_sujet_estim_log autoevaluation_cx pb_reconnaissance difference_pw_w score_pb_g_d car synaesthete error difficult_apprend_lire pente_bisec orthophoniste lieux orientation normalized_pw autoevaluation_dessin bonne_reconnaissance lycee avion id difficulte_calc_localizer dessin_note_sur_20 rapport_pente_log_groupe score_imageur visuel_verbal algebre pain edimburgh arbre g_contrarie lecture_ok pb_en_3d pb_2eme_langue oiseau pb_egocentrique pseudo pente_estim_qt autoevaluation_rotation pb_orthogr +S01 35.0 0.416922 n/a n/a False -0.0952051 4.0 False 4.0 n/a 1.0 17.0 0.916693 1.875 False False 2.0 0.966667 n/a 500.0 n/a 8.0 False 11.5 9.0 0.916667 20.0 10.0 35.0 False 0.0 10.0 0.0 4.0 0.749906 40.0 2.0 10.0 0.821488 3.0 23.0 5.0 68.0 43.0 n/a 10.0 1.0 3.0 0 False 4.0 True 4.0 0.985224 0.642131 False 60.0 False False 3.0 False 0.620551 n/a 10.0 25.0 20.0 40.0 50.0 65.0 u 15.0 30.0 4.0 False 0.0 True 3.0 False 4.0 False 1.0 0.969218 False False 1.0 17.0 13.0 0.862849 4.0 0 7.0 0.0 53.0 0 n/a False 0.953448 False 2.0 4.0 0.304348 2.0 lieux 4.0 70.0 165 n/a 18.0 0.85468 9.51389 4.0 4.0 60.0 10.0 30.0 False True False False 2.4 False 15.0 0.988097 4.0 False +S02 30.0 0.529093 n/a n/a False 0.0151144 3.5 False 3.0 n/a 3.0 20.0 0.954898 1.77778 False False 1.0 0.933333 3.4 700.0 n/a 9.0 False 12.5 5.0 0.916667 30.0 10.0 40.0 False 0.0 10.0 0.0 4.0 0.76985 20.0 2.0 10.0 0.970012 3.0 29.0 4.0 65.0 43.0 n/a 8.0 0.0 3.0 0 True 3.0 True 3.0 0.999742 0.776734 False 50.0 False False 3.0 False 0.787507 n/a 15.0 25.0 20.0 50.0 55.0 70.0 n/a 20.0 60.0 2.5 False 0.0 True 2.0 False 5.0 False 1.0 0.90535 False False 1.0 8.0 10.0 0.873827 3.0 0 7.0 0.0 52.0 n/a n/a False 1.00269 False 1.0 3.0 0.28 
1.0 0 3.0 20.0 174 n/a 15.0 0.87741 7.80556 8.0 4.0 90.0 10.0 80.0 False True False False 1.5 False 16.0 1.15221 3.0 False +S03 35.0 0.45857 n/a n/a False -0.0038151 2.0 False 2.0 n/a 3.0 10.0 0.894501 1.55556 False False 1.0 0.866667 3.6 40.0 0.0 9.0 False 11.5 10.0 0.5 30.0 10.0 40.0 False 0.25 10.0 0.0 3.0 0.715695 100.0 2.0 10.0 0.890686 3.0 33.0 5.0 63.0 42.0 n/a 9.0 0.0 2.0 0 False 4.0 True 3.0 0.995588 0.682245 False 30.0 False False 2.0 False 0.68254 n/a 15.0 30.0 15.0 30.0 50.0 70.0 d 20.0 50.0 4.0 False 0.0 True 0.0 False 4.0 False 2.0 0.658032 False False 1.0 12.0 10.0 0.816513 2.33333 animaux 5.0 0.0 60.0 0 n/a False 0.966338 False 2.0 4.0 0.217391 2.0 visages, lieux 4.0 100.0 204 n/a 12.0 0.815689 7.375 6.0 4.0 80.0 11.0 80.0 False False False False 2.5 False 14.0 0.740778 2.0 False +S04 20.0 0.240791 1.0 1.0 False 0.0366737 3.5 False 5.0 1.0 1.0 45.0 0.937822 1.58333 True True 2.0 0.766667 3.6 450.0 1.0 12.0 False 15.5 10.0 0.833333 15.0 10.0 25.0 False 0.333333 10.0 0.0 2.0 0.584904 70.0 2.0 9.0 0.974495 3.0 32.0 4.0 55.0 44.0 1.0 9.0 0.0 1.0 0 False 3.0 True 4.0 0.997322 0.356258 False 100.0 False False 3.0 False 0.358396 n/a 10.0 15.0 15.0 25.0 30.0 35.0 d 10.0 30.0 4.0 False 0.0 True 0.0 False 5.0 False 2.0 0.350432 False False 1.0 25.0 12.0 0.668016 4.0 LIEUX 7.0 2.0 50.0 0 n/a False 0.984509 False 0.0 2.0 0.225806 1.0 VISAGE 5.0 250.0 322 n/a 17.0 0.666625 5.52778 6.0 5.0 1.0 14.0 100.0 True True True False 3.5 False 19.0 0.791295 2.0 False +S05 40.0 0.3345 1.0 1.0 False -0.0530016 1.5 False 3.0 1.0 1.0 10.0 0.914142 1.77778 False True 0.0 0.433333 1.8 400.0 1.0 9.0 False 12.5 8.0 0.583333 20.0 10.0 30.0 False 0.0 9.0 0.0 2.0 0.635478 20.0 0.0 6.0 0.86114 3.0 23.0 3.0 60.0 40.0 1.0 4.0 0.0 3.0 0 True 2.0 True 0.0 0.991584 0.503873 False 50.0 False False 2.0 False 0.497873 n/a 15.0 30.0 20.0 30.0 45.0 50.0 n/a 15.0 40.0 2.0 False 0.0 True 4.0 False 3.0 False 1.0 0.917151 False False 1.0 10.0 10.0 0.724752 0.0 n/a 7.0 0.0 30.0 0 n/a False 1.01049 False 1.0 2.5 0.28 1.0 n/a 0.0 30.0 298 n/a 5.0 0.724264 6.40278 6.0 2.0 40.0 11.0 15.0 False True True True 1.0 False 16.0 1.02732 3.0 False +S06 30.0 0.52638 n/a n/a False -0.00565731 3.5 False 4.0 n/a 1.0 25.0 0.933403 2.0 False False 2.0 0.633333 3.6 400.0 n/a 9.0 False 13.5 8.0 0.75 20.0 10.0 50.0 False 0.0 10.0 2.0 3.0 0.834941 75.0 2.0 10.0 0.927745 3.0 34.0 4.0 64.0 43.0 n/a 8.0 0.0 3.0 0 False 2.0 True 5.0 0.998702 0.781213 False 20.0 True False 3.0 False 0.783469 n/a 10.0 20.0 20.0 50.0 50.0 70.0 u 20.0 50.0 2.0 True 0.0 True 5.0 False 5.0 False 2.0 0.881779 False False 1.0 15.0 10.0 0.951082 0.0 n/a 9.0 0.0 45.0 0 n/a True 1.01283 False 2.0 3.0 0.333333 1.0 LIEUX VISAGES 3.0 200.0 311 n/a 17.0 0.951596 8.0 4.0 4.0 75.0 10.0 75.0 False True False True 2.0 False 18.0 1.0508 n/a False +S07 60.0 0.887654 1.0 1.0 False -0.013893 2.0 False 3.0 1.0 1.0 10.0 0.923668 2.0 False True 1.0 0.7 3.8 300.0 1.0 11.0 True 16.5 7.0 0.666667 50.0 15.0 80.0 False 0.25 9.0 2.0 2.0 0.967964 10.0 0.0 10.0 0.909775 3.0 30.0 3.0 62.0 40.0 1.0 6.0 0.0 3.0 aprentissage lecture , difference d/t , p/b a 6ans duree 1ans False 4.0 True 5.0 0.994361 1.30039 False 50.0 False False 2.0 False 1.32119 n/a 10.0 40.0 30.0 100.0 80.0 100.0 d 30.0 100.0 2.0 True 0.0 True 6.0 False 3.0 False 1.0 0.884219 False False 1.0 4.0 10.0 1.09569 2.0 n/a 11.0 0.0 40.0 0 n/a True 1.03761 True 2.0 1.0 0.333333 1.0 LIEUX 4.0 30.0 333 n/a 8.0 1.1032 5.84722 6.0 5.0 40.0 10.0 30.0 False True True False 2.0 False 22.0 0.958523 1.0 False +S08 30.0 0.378301 n/a n/a False 
0.0574613 3.0 False 4.0 n/a 2.0 5.0 0.914426 1.35714 False False 2.0 0.833333 7.0 350.0 n/a 14.0 False 16.5 64.0 0.416667 20.0 15.0 35.0 False 0.0 10.0 0.0 3.0 0.657123 80.0 2.0 10.0 0.971887 3.0 32.0 4.0 63.0 42.0 n/a 8.0 2.0 1.0 0 False 4.0 False 5.0 0.999458 0.551338 False 100.0 False False 3.0 True 0.563067 n/a 10.0 20.0 18.0 35.0 40.0 50.0 u 15.0 50.0 4.0 False 0.0 True 2.0 False 5.0 False 1.0 0.721875 False True 1.0 20.0 10.0 0.74894 4.0 LIEUX 5.0 0.0 30.0 0 n/a False 1.00225 False 0.0 1.5 0.151515 2.0 PHYSIONOMIE 4.0 100.0 290 n/a 16.0 0.748933 4.84722 8.0 4.0 90.0 10.0 80.0 False True False False 1.9 False 19.0 0.959285 3.0 False +S09 100.0 1.29591 n/a n/a False -0.0193175 4.0 False 3.0 n/a 1.0 12.0 0.943651 1.47826 False True 2.0 0.533333 2.9 800.0 n/a 11.5 False 14.25 10.0 0.5 80.0 15.0 100.0 False 0.2 10.0 0.0 2.0 1.03413 200.0 2.0 10.0 0.924333 3.0 35.0 3.0 55.0 42.0 n/a 6.0 0.0 3.0 0 False 3.0 True 2.0 0.988621 1.94941 False 50.0 False False 1.0 False 1.92885 n/a 20.0 50.0 35.0 110.0 130.0 160.0 u 25.0 120.0 3.0 False 0.0 True 2.0 False 4.0 False 1.0 0.85892 False False 1.0 15.0 8.0 1.18304 0.0 n/a 5.5 0.0 50.0 0 n/a False 1.03538 False 2.0 3.0 0.192982 1.0 LIEUX 2.0 300.0 336 n/a 18.0 1.17861 7.5 4.0 2.0 45.0 14.0 30.0 False True True False 1.5 False 17.0 1.22631 1.0 False +S10 43.0 0.534045 n/a n/a False 0.0017111 4.5 False 3.0 n/a 1.0 40.0 0.973282 1.66667 False True 2.0 0.6 4.2 500.0 0.0 9.0 True 12.0 10.0 0.583333 27.0 12.0 39.0 False 0.25 8.0 4.0 2.0 0.780877 50.0 2.0 10.0 0.974993 3.0 30.0 3.0 66.0 43.0 n/a 7.0 1.0 3.0 APPRENTISSAGE LECTURE/DYSLEXIE/BEGAIEMENT ZEZAIEMENT False 4.0 True 5.0 0.994773 0.792601 False 70.0 True False 2.0 False 0.794878 n/a 12.0 25.0 25.0 48.0 58.0 70.0 d 19.0 60.0 4.0 True 2.0 True 4.0 True 3.0 True 2.0 0.944997 False False 1.0 12.0 11.0 0.890339 2.33333 animaux 6.0 0.0 50.0 0 n/a True 1.05757 True 1.0 3.0 0.25 3.0 lieux 3.0 80.0 163 n/a 19.0 0.889978 8.06944 6.0 3.0 70.0 10.0 100.0 False True False False 3.0 False 15.0 0.999617 2.0 False +S11 50.0 0.501391 n/a n/a False -0.155042 3.5 False 4.0 n/a 1.0 25.0 0.877283 1.25 True True 2.0 0.466667 1.6 400.0 n/a 8.0 False 9.0 8.0 0.583333 30.0 10.0 40.0 False 0.0 10.0 0.0 2.0 0.786632 200.0 2.0 7.0 0.722241 3.0 30.0 5.0 70.0 41.0 n/a 6.0 0.0 3.0 MATHS False 2.0 True 1.0 0.994483 0.767568 False 80.0 False True 1.0 True 0.746275 n/a 16.0 38.0 20.0 75.0 55.0 70.0 d 16.0 40.0 2.0 False 3.0 True 1.0 False 4.0 False 2.0 0.918997 False True 1.0 15.0 11.0 0.901555 2.66667 0 2.0 3.0 46.0 Lettres, chiffres, sons, temps, en espace et en couleur n/a False 0.973921 False 2.0 n/a 0.111111 3.0 visages, lieux 1.0 90.0 176 n/a 17.0 0.896537 4.81944 6.0 1.0 80.0 16.0 80.0 False True n/a False 3.0 True 10.0 1.03446 2.0 False +S12 40.0 0.45318 n/a n/a False -0.0294667 0.5 False 4.0 n/a 2.0 5.0 0.923912 1.75 False True 1.0 0.633333 3.2 500.0 n/a 12.0 False 16.5 8.0 0.5 35.0 12.0 37.0 False 0.25 8.0 2.0 3.0 0.722995 20.0 1.0 7.0 0.894445 3.0 32.0 4.0 60.0 45.0 n/a 8.0 1.0 3.0 FRANCAIS ORTHOGRAPHE LECTURE False 3.0 True 3.0 0.999026 0.676939 False 100.0 False False 3.0 False 0.674518 n/a 12.0 30.0 25.0 35.0 50.0 70.0 u 15.0 50.0 2.0 True 0.0 True 4.0 False 3.0 False 1.0 0.883083 False False 1.0 8.0 15.0 0.822753 3.33333 0 9.0 0.0 56.0 n/a n/a True 1.00245 False 1.0 1.5 0.272727 2.0 0 4.0 40.0 182 n/a 7.0 0.824009 5.91667 6.0 4.0 50.0 11.0 80.0 True True True True 2.0 False 21.0 1.08707 2.0 True +S13 30.0 0.553522 1.0 1.0 False -0.0442015 3.5 False 3.0 1.0 1.0 12.0 0.962172 1.54545 False True 2.0 1.0 3.8 350.0 1.0 11.0 
False 14.0 10.0 0.666667 25.0 10.0 50.0 False 0.2 10.0 0.0 4.0 0.840788 80.0 2.0 10.0 0.91797 3.0 33.0 4.0 70.0 44.0 1.0 10.0 0.0 3.0 0 False 3.0 True 4.0 0.999519 0.797884 False 75.0 False False 3.0 False 0.823867 n/a 12.0 30.0 20.0 59.0 55.0 60.0 d 20.0 70.0 2.0 False 0.0 True 2.0 False 5.0 False 1.0 0.96621 False False 1.0 10.0 15.0 0.94617 5.0 n/a 6.0 0.0 60.0 0 n/a False 1.02888 False 2.0 1.5 0.214286 1.0 LIEUX 5.0 100.0 318 n/a 17.0 0.958259 6.84722 4.0 5.0 85.0 12.0 80.0 False True False False 2.5 False 17.0 1.04223 2.0 False +S14 40.0 0.536215 n/a n/a False -0.0167022 1.5 False 4.0 n/a 1.0 7.0 0.982381 1.90909 False False 1.0 0.8 3.4 350.0 n/a 11.0 False 16.0 9.0 0.75 25.0 10.0 50.0 False 0.11 10.0 0.0 3.0 0.842689 50.0 1.0 10.0 0.965678 3.0 36.0 4.0 62.0 45.0 n/a 9.0 0.0 3.0 0 False 3.0 True 3.0 0.997089 0.788024 False 60.0 False False 3.0 False 0.798107 n/a 12.0 30.0 20.0 40.0 50.0 70.0 d 15.0 60.0 4.0 False 0.0 True 2.0 False 4.0 False 2.0 0.910639 False False 1.0 20.0 10.0 0.955723 3.33333 0 10.0 0.0 50.0 n/a n/a False 1.01764 False 2.0 2.5 0.3125 1.0 LIEUX VISAGES 4.0 100.0 219 n/a 9.0 0.960426 7.14583 5.0 3.0 60.0 10.0 30.0 False True True n/a 4.0 False 21.0 0.927176 2.0 False +S15 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 366 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a +S16 55.0 0.432409 n/a n/a False -0.114653 2.0 False 3.0 n/a 1.0 40.0 0.930288 2.0 False False 1.0 0.633333 3.4 350.0 n/a 7.0 False 10.5 8.0 0.666667 30.0 10.0 55.0 False 0.0 9.0 0.0 2.0 0.735609 50.0 1.0 8.0 0.815635 3.0 30.0 4.0 67.0 43.0 n/a 6.0 0.0 3.0 0 False 4.0 True 3.0 0.99974 0.651471 False 100.0 False False 2.0 False 0.643601 n/a 15.0 35.0 25.0 45.0 50.0 60.0 d 20.0 50.0 4.0 False 0.0 True 3.0 False 3.0 False 2.0 0.896679 False False 1.0 15.0 15.0 0.835647 2.33333 0 7.0 2.0 50.0 n/a n/a False 1.01069 False 1.0 3.5 0.333333 2.0 physionomie 3.0 40.0 194 n/a 10.0 0.838385 7.74306 7.0 3.0 45.0 10.0 80.0 False True n/a False 2.0 True 14.0 0.951493 2.0 False +S17 50.0 1.90848 n/a n/a False 0.00262361 5.0 False 2.0 n/a 1.0 20.0 0.934938 2.0 True True 2.0 0.266667 1.8 200.0 n/a 11.0 False 16.5 8.0 0.416667 20.0 10.0 100.0 False 0.0 8.0 0.0 n/a 1.48161 30.0 2.0 7.0 0.937562 3.0 30.0 5.0 60.0 40.0 n/a 0.0 1.0 3.0 0 False 3.0 True 4.0 0.995747 2.7377 False 200.0 False False 2.0 False 2.8406 n/a 10.0 50.0 20.0 100.0 130.0 200.0 u 10.0 200.0 3.0 False 0.0 True 2.0 False 3.0 False 2.0 0.744411 False False 1.0 50.0 8.0 1.67618 2.0 0 11.0 2.0 57.0 0 n/a False 0.944038 False 1.0 3.0 0.333333 3.0 Visages 3.0 150.0 205 n/a 20.0 1.68861 8.18056 4.0 3.0 50.0 11.0 100.0 False True False False 2.0 False 22.0 0.940993 3.0 False +S18 40.0 2.15291 1.0 1.0 False -0.169848 3.5 False 3.0 1.0 1.0 10.0 0.890863 1.41667 False True 2.0 0.466667 3.0 300.0 1.0 12.0 False 14.5 9.0 0.583333 35.0 7.0 40.0 False 0.5 10.0 1.0 2.0 1.41522 20.0 2.0 10.0 0.721015 3.0 30.0 4.0 63.0 43.0 1.0 4.0 0.0 1.0 0 False 2.0 True 3.0 1.0 2.99655 False 10.0 False False 2.0 False 3.20441 n/a 10.0 35.0 25.0 70.0 150.0 200.0 u 20.0 300.0 3.0 True 0.0 True 2.0 False 5.0 False 2.0 0.764588 False False 1.0 4.0 15.0 1.59256 0.0 n/a 5.0 0.0 50.0 0 n/a True 1.0 
False 1.0 3.0 0.172414 2.0 VISAGES 1.0 10.0 332 n/a 17.0 1.61294 5.98611 6.0 2.0 80.0 12.0 50.0 False True True True 1.5 False 17.0 0.944911 2.0 False +S19 40.0 0.396693 n/a n/a False -0.018355 1.0 True 3.0 n/a 2.0 10.0 0.958646 1.64286 True False 1.0 0.5 3.4 600.0 n/a 14.0 False 18.5 8.5 0.666667 30.0 14.0 45.0 False 0.0 6.0 4.0 2.0 0.623431 50.0 0.0 8.0 0.940291 3.0 25.0 4.0 59.0 42.0 n/a 5.0 0.0 3.0 DYSLEXIE DYSORTHOGRAPHIE PB EXPRESSION False 2.0 True 4.0 0.997793 0.589586 False 60.0 True True 2.0 False 0.590441 n/a 15.0 35.0 20.0 45.0 50.0 58.0 d 25.0 50.0 1.0 True 1.0 True 3.0 False 4.0 True 1.0 0.969703 False False 1.0 12.0 11.0 0.708503 2.0 0 9.0 2.0 40.0 0 n/a False 0.970941 True 2.0 4.0 0.243243 1.0 lieux 2.0 110.0 183 n/a 6.0 0.710534 8.53472 5.0 2.0 50.0 10.0 60.0 False True False True 1.5 False 23.0 1.14744 3.0 True +S20 65.0 0.580366 n/a n/a False -0.0676551 2.5 False 4.0 n/a 1.0 15.0 0.938926 1.875 False True 1.0 0.833333 2.0 300.0 n/a 8.0 False 11.5 8.0 0.666 30.0 15.0 45.0 False 0.0 9.0 0.0 3.0 0.757288 100.0 1.0 9.0 0.871271 3.0 30.0 5.0 63.0 45.0 n/a 8.0 0.0 3.0 0 False 4.0 True 2.0 0.996139 0.879881 False 50.0 False False 2.0 False 0.863822 n/a 15.0 40.0 25.0 50.0 70.0 80.0 d 20.0 60.0 3.0 False 0.0 True 2.0 False 5.0 False 2.0 0.675853 False True 1.0 10.0 20.0 0.867901 3.5 0 7.0 0.0 50.0 n/a n/a False 0.96733 False 1.0 3.0 0.304348 2.0 Visages 3.0 25.0 166 n/a 11.0 0.863093 7.51333 4.0 3.0 70.0 13.0 500.0 False True False False 7.0 False 15.0 0.861326 2.0 False +S21 75.0 0.827894 n/a n/a False -0.0550406 1.0 False 3.0 n/a 3.0 10.0 0.933896 1.5 False True 1.0 0.6 2.8 500.0 n/a 10.0 False 12.5 11.0 0.25 25.0 10.0 60.0 False 0.2 6.0 0.0 3.0 0.97081 30.0 0.0 9.0 0.878856 3.0 30.0 5.0 60.0 40.0 n/a 4.0 0.0 1.0 n/a False 4.0 True 2.0 0.995747 1.23959 False 20.0 False False 3.0 False 1.23225 n/a 15.0 40.0 25.0 45.0 90.0 100.0 d 20.0 85.0 3.0 False 0.0 True 2.0 False 4.0 False 2.0 0.885648 False False 0.0 10.0 13.0 1.10749 3.0 lieux 5.0 0.0 100.0 n/a n/a False 0.944038 False 0.0 0.0 0.2 1.0 visage 3.0 40.0 104 n/a 4.0 1.10645 1.875 12.0 3.0 60.0 11.0 80.0 False True True False 3.0 False 15.0 0.975003 2.0 False +S22 50.0 0.310351 n/a n/a False -0.249045 4.0 False 5.0 n/a 1.0 20.0 0.783854 1.5 False False 2.0 0.866667 4.4 300.0 n/a 12.0 False 15.0 7.0 0.666667 20.0 10.0 35.0 False 0.0 9.0 0.0 4.0 0.695147 50.0 2.0 10.0 0.534809 3.0 28.0 4.0 62.0 45.0 n/a 10.0 0.0 3.0 0 False 4.0 True 5.0 0.999154 0.471502 False 40.0 False False 3.0 False 0.461929 n/a 10.0 50.0 20.0 30.0 40.0 50.0 d 15.0 35.0 4.0 False 0.0 True 2.0 False 5.0 False 2.0 0.977155 False True 1.0 15.0 10.0 0.786574 4.33333 0 6.0 0.0 40.0 0 n/a False 0.997375 False 0.0 4.0 0.2 3.0 Visages 4.0 80.0 188 n/a 18.0 0.79227 9.20139 5.0 5.0 70.0 10.0 50.0 False True False False 2.5 False 18.0 0.955999 3.0 False +S23 50.0 0.508968 1.0 1.0 False -0.0350254 2.5 False 3.0 1.0 1.0 10.0 0.871767 1.54545 False True 1.0 0.433333 2.8 800.0 1.0 11.0 False 14.0 5.0 0.5 20.0 10.0 30.0 False 0.0 10.0 0.0 2.0 0.800143 15.0 0.0 10.0 0.836741 3.0 23.0 4.0 64.0 44.0 1.0 3.0 0.0 3.0 0 False 4.0 True 2.0 0.992409 0.7681 False 30.0 False True 2.0 False 0.757553 n/a 15.0 40.0 15.0 40.0 60.0 70.0 u 15.0 50.0 2.0 False 0.0 True 3.0 False 4.0 False 1.0 0.855351 False False 1.0 10.0 10.0 0.916341 0.0 n/a 6.0 0.0 40.0 0 n/a False 0.984198 False 1.0 2.0 0.214286 2.0 n/a 2.0 40.0 323 n/a 9.0 0.911936 6.14583 7.0 2.0 50.0 10.0 200.0 False True True False 2.0 False 17.0 1.14246 2.0 False +S24 60.0 0.671362 1.0 1.0 False -0.0573619 3.0 False 3.0 1.0 2.0 
25.0 0.944763 0.947368 True True 2.0 0.5 3.2 300.0 1.0 19.0 False 18.5 8.0 0.333333 35.0 10.0 75.0 False 0.0 9.0 0.0 2.0 0.951386 600.0 1.0 9.0 0.887401 3.0 34.0 3.0 57.0 45.0 1.0 6.0 0.0 1.0 0 False 2.0 True 2.0 0.990636 1.00301 False 45.0 False True 1.0 False 0.999262 n/a 10.0 35.0 25.0 60.0 70.0 80.0 u 25.0 70.0 2.0 False 0.0 True 2.0 False 3.0 False 0.0 0.673082 False False 1.0 20.0 20.0 1.08045 0.0 VISAGES -1.0 2.0 30.0 0 n/a False 1.04792 False 2.0 1.5 -0.027027 1.0 LIEUX 3.0 500.0 335 n/a 14.0 1.08431 4.13194 7.0 4.0 90.0 10.0 300.0 False True True True 3.0 False 18.0 1.06108 2.0 False +S25 50.0 0.688949 1.0 1.0 False -0.0489149 1.0 False 4.0 1.0 1.0 20.0 0.948051 2.0 False True 0.0 0.566667 3.0 500.0 0.0 10.0 False 15.0 8.5 0.666667 40.0 10.0 60.0 False 0.0 10.0 0.0 3.0 0.875224 800.0 1.0 10.0 0.899136 3.0 40.0 5.0 76.0 35.0 1.0 7.0 0.0 3.0 n/a False 2.0 True 2.0 0.979872 1.04007 False 70.0 False False 2.0 False 1.02544 n/a 15.0 30.0 25.0 50.0 70.0 100.0 u 20.0 60.0 4.0 False 0.0 True 1.0 False 4.0 False 1.0 0.575015 False False 1.0 4.0 15.0 1.00105 3.33333 animaux 10.0 2.0 50.0 0 n/a False 0.991503 False 1.0 2.5 0.333333 2.0 n/a 3.0 10.0 9 n/a 6.0 0.997507 7.05556 4.0 3.0 50.0 16.0 6.0 False True False False 1.2 True 20.0 1.09846 3.0 False +S26 35.0 0.455105 n/a n/a False -0.00552304 1.0 False 2.0 n/a 2.0 5.0 0.96991 2.07692 True True 1.0 0.433333 1.8 240.0 n/a 13.0 True 20.0 12.0 0.25 20.0 10.0 35.0 False 0.2 9.0 3.0 1.0 0.801922 5.0 0.0 10.0 0.964387 3.0 32.0 5.0 64.0 43.0 n/a 4.0 1.0 3.0 dyslexie, pblm de: lecture, inversion de syllabes,dorganisation, emploi du tps False 4.0 True 4.0 0.996225 0.68226 False 10.0 True False 1.0 False 0.677383 n/a 10.0 20.0 17.0 40.0 50.0 60.0 d 15.0 45.0 2.0 True 0.0 True 3.0 False 4.0 True 2.0 0.610165 False False 1.0 6.0 7.0 0.919298 0.0 n/a 14.0 3.0 50.0 0 n/a False 0.969552 True 2.0 2.0 0.35 3.0 VISAGES LIEUX 3.0 15.0 347 n/a 6.0 0.913964 6.52083 5.0 3.0 60.0 10.0 4.0 False True True True 1.5 True 27.0 0.830617 3.0 False +S27 30.0 0.400822 1.0 1.0 False -0.0253511 2.0 False 4.0 1.0 2.0 4.0 0.941145 1.38462 False False 0.0 0.966667 4.2 300.0 1.0 13.0 False 15.5 8.0 0.75 20.0 10.0 40.0 False 0.0 10.0 0.0 3.0 0.676112 60.0 1.0 10.0 0.915794 3.0 27.0 4.0 65.0 42.0 1.0 10.0 0.0 3.0 0 False 4.0 True 4.0 0.998677 0.60153 False 80.0 False False 3.0 False 0.596588 n/a 15.0 30.0 20.0 40.0 50.0 60.0 d 20.0 40.0 4.0 False 0.0 True 1.0 False 4.0 False 2.0 0.870445 False True 1.0 15.0 16.0 0.768781 3.0 n/a 5.0 0.0 80.0 0 n/a False 0.993971 False 1.0 1.5 0.16129 1.0 VISAGES 4.0 100.0 331 n/a 8.0 0.770575 6.22917 7.0 4.0 50.0 11.0 100.0 False True False False 2.0 False 18.0 1.06635 2.0 False +S28 53.0 1.08503 n/a n/a False -0.0193036 4.0 False 3.0 n/a 3.0 15.0 0.918141 1.77778 True False 2.0 0.666667 3.2 600.0 n/a 9.0 False 12.5 8.0 0.583333 20.0 10.0 55.0 False 0.0 10.0 0.0 3.0 1.09877 25.0 2.0 9.0 0.898837 3.0 25.0 4.0 65.0 42.0 n/a 7.0 0.0 3.0 0 False 2.0 True 5.0 0.99648 1.60522 False 30.0 False False 2.0 False 1.61497 n/a 15.0 25.0 25.0 105.0 76.0 135.0 u 15.0 90.0 3.0 False 0.0 True 2.0 False 4.0 False 2.0 0.814187 False False 1.0 9.0 16.0 1.25582 2.0 n/a 7.0 2.0 50.0 0 n/a False 0.987764 False 1.0 1.5 0.28 3.0 VIGAGE 5.0 50.0 294 n/a 18.0 1.25228 6.56944 8.0 5.0 70.0 13.0 50.0 False True False True 12.0 False 16.0 0.81182 2.0 False +S29 34.0 0.403552 n/a n/a False -0.032888 3.5 False 3.0 n/a 2.0 5.0 0.915869 1.8 False True 2.0 0.433333 3.0 800.0 2.0 10.0 False 14.0 10.0 0.666667 15.0 10.0 30.0 False 0.0 8.0 0.0 3.0 0.737564 500.0 2.0 10.0 0.882981 3.0 
33.0 4.0 57.0 42.0 1.0 5.0 0.0 3.0 0 False 2.0 True 4.0 0.997662 0.613726 False 40.0 False False 2.0 False 0.600651 n/a 10.0 20.0 20.0 35.0 47.0 60.0 d 15.0 35.0 4.0 False 0.0 True 1.0 False 4.0 False 2.0 0.404527 False False 1.0 10.0 15.0 0.84771 0.0 n/a 8.0 0.0 40.0 0 n/a False 0.987342 False 1.0 1.5 0.285714 3.0 VISAGES ANIMAUX 4.0 500.0 330 n/a 17.0 0.840613 7.01389 4.0 4.0 1.0 11.0 300.0 False True False False 3.0 False 18.0 1.08016 3.0 False +S30 60.0 0.46732 n/a n/a False -0.281506 1.0 False 4.0 n/a 1.0 50.0 0.818848 1.88889 False False 0.0 0.566667 3.6 400.0 n/a 9.0 False 13.0 10.0 0.75 30.0 10.0 40.0 False 0.5 9.0 0.0 3.0 0.757308 70.0 1.0 8.0 0.537342 3.0 25.0 5.0 62.0 42.0 n/a 7.0 0.0 3.0 0 False 4.0 True 4.0 0.9921 0.72291 False 250.0 False False 2.0 False 0.695564 n/a 15.0 40.0 30.0 90.0 60.0 60.0 u 20.0 40.0 3.0 False 0.0 True 3.0 False 3.0 False 2.0 0.78383 False False 1.0 15.0 15.0 0.864595 3.33333 0 8.0 0.0 50.0 nombre / espace n/a False 0.941167 False 1.0 4.0 0.307692 1.0 Visages 3.0 60.0 177 n/a 6.0 0.863115 8.60417 5.0 2.0 50.0 10.0 20.0 False True False False 1.0 False 17.0 1.05261 3.0 False +S31 40.0 0.580558 n/a n/a False 0.0131732 2.5 False 4.0 n/a 2.0 5.0 0.967509 1.46154 False True 1.0 0.533333 4.4 800.0 n/a 13.0 False 16.0 11.0 0.666667 20.0 11.0 40.0 False 0.0 10.0 0.0 1.0 0.868769 200.0 1.0 10.0 0.980682 3.0 30.0 4.0 63.0 45.0 n/a 4.0 0.0 2.0 0 False 4.0 True 3.0 0.999827 0.86083 False 600.0 False False 2.0 False 0.864108 n/a 12.0 25.0 18.0 50.0 60.0 70.0 d 14.0 60.0 4.0 False 0.0 True 5.0 False 4.0 False 0.0 0.789305 False False 1.0 7.0 12.0 0.993075 4.0 VISAGE 6.0 0.0 50.0 0 n/a False 1.00572 False 2.0 3.0 0.1875 2.0 LIEUX 3.0 150.0 295 n/a 11.0 0.99015 6.74306 5.0 4.0 80.0 10.0 300.0 False True True False 2.5 False 19.0 1.33373 2.0 False +S32 60.0 1.50179 n/a n/a False -0.0628016 0.0 False 4.0 n/a 1.0 10.0 0.937975 1.57143 False True 0.0 0.833333 3.6 500.0 0.0 14.0 False 18.0 7.0 n/a 40.0 15.0 120.0 False 0.0 10.0 0.0 3.0 1.12305 20.0 0.0 8.0 0.875174 3.0 27.0 4.0 64.0 43.0 n/a 9.0 0.0 1.0 0 False 4.0 True 5.0 0.998799 2.15269 False 60.0 False False 3.0 False 2.23528 n/a 15.0 40.0 30.0 150.0 100.0 150.0 d 30.0 170.0 4.0 False 1.0 True 0.0 False 4.0 False 0.0 0.866485 False False 1.0 10.0 15.0 1.26973 3.33333 Visages animaux 8.0 0.0 40.0 0 n/a False 0.994238 False 2.0 2.5 0.222222 1.0 lieux 5.0 150.0 185 n/a 2.0 1.27996 4.5625 5.0 5.0 80.0 14.0 15.0 True True False False 2.0 False 22.0 1.06178 n/a False +S33 80.0 1.00954 n/a n/a False -0.037087 2.0 False 3.0 n/a 3.0 6.0 0.970922 1.875 True True 1.0 0.733333 2.8 400.0 n/a 8.0 True 11.5 11.0 0.583333 35.0 10.0 90.0 False 0.2 7.0 2.0 4.0 1.07425 30.0 2.0 9.0 0.933835 3.0 32.0 4.0 65.0 42.0 n/a 10.0 0.0 3.0 apprentissage lecture False 3.0 True 4.0 0.999484 1.50266 False 80.0 False False 2.0 False 1.5026 n/a 15.0 50.0 25.0 90.0 100.0 110.0 u 25.0 100.0 3.0 True 0.0 True 1.0 False 5.0 False 1.0 0.882616 False False 1.0 20.0 15.0 1.22089 3.33333 0 7.0 2.0 100.0 0 n/a True 1.00767 False 1.0 2.0 0.304348 2.0 0 3.0 150.0 216 n/a 12.0 1.22434 6.61111 6.0 4.0 80.0 10.0 70.0 False True False False 2.5 False 15.0 1.04328 1.0 False +S34 39.0 0.546959 n/a n/a False -0.0169346 3.5 False 2.0 n/a 1.0 20.0 0.978523 1.77778 False False 2.0 0.766667 3.0 350.0 n/a 9.0 False 12.5 8.0 1.0 29.0 13.0 55.0 False 0.33 10.0 0.0 2.0 0.790664 50.0 2.0 10.0 0.961589 3.0 33.0 4.0 72.0 44.0 n/a 9.0 0.0 3.0 timbre de voix False 3.0 True n/a 0.999336 0.808399 False 90.0 False False 2.0 False 0.8141 n/a 13.0 36.0 21.0 50.0 60.0 70.0 d 18.0 60.0 
4.0 False 0.0 True 0.0 False 3.0 False 1.0 0.95104 True False 1.0 17.0 13.0 0.897307 1.66667 0 7.0 0.0 40.0 n/a n/a False 1.03377 True 2.0 3.0 0.28 2.0 lieux 2.0 50.0 191 n/a 17.0 0.901132 7.875 4.0 2.0 75.0 16.0 100.0 False True True False 3.0 False 16.0 0.952927 3.0 False +S35 45.0 0.739662 n/a n/a False -0.0104976 3.0 False 4.0 n/a 1.0 4.0 0.956899 2.1 True False 1.0 0.533333 2.4 350.0 n/a 10.0 False 15.5 9.0 0.833333 30.0 12.0 50.0 False 0.6 7.0 0.0 1.0 0.966636 30.0 0.0 8.0 0.946401 3.0 32.0 4.0 70.0 43.0 n/a 3.0 0.0 1.0 positionement main gauche False 4.0 True 4.0 0.999391 1.07854 False 8.0 False False 2.0 False 1.10092 n/a 10.0 30.0 30.0 75.0 70.0 80.0 d 15.0 85.0 3.0 False 1.0 True 2.0 False 4.0 False 1.0 0.814216 True False 1.0 5.0 6.0 1.09427 2.66667 0 11.0 2.0 60.0 0 n/a False 1.02351 True 1.0 4.0 0.354839 1.0 0 4.0 60.0 192 n/a 10.0 1.10169 6.90278 6.0 1.0 50.0 13.0 20.0 True True False False 2.5 False 21.0 0.995345 2.0 False +S36 24.0 0.373996 n/a n/a False 0.0331708 0.5 False 4.0 n/a 1.0 4.0 0.885147 2.22222 False True 0.0 0.8 2.8 450.0 n/a 9.0 False 14.5 7.0 0.583333 20.0 11.0 39.0 False 0.5 9.0 0.0 3.0 0.668698 167.0 0.0 9.0 0.918318 3.0 28.0 4.0 55.0 43.0 n/a 7.0 0.0 3.0 0 False 4.0 True 2.0 0.998932 0.556324 False 30.0 False False 3.0 False 0.556659 n/a 13.0 15.0 16.0 35.0 45.0 50.0 u 15.0 41.0 4.0 False 0.0 True 2.0 False 4.0 False 1.0 0.856854 False True 1.0 7.0 11.0 0.768194 3.66667 0 11.0 0.0 45.0 0 n/a False 0.970847 False 1.0 1.0 0.37931 2.0 tout 2.0 30.0 200 n/a 3.0 0.762126 5.50694 5.0 2.0 70.0 12.0 40.0 False True True False 2.5 False 20.0 1.08659 3.0 False +S37 60.0 1.20749 n/a n/a False 0.00848409 4.0 False 0.0 n/a 3.0 20.0 0.911086 1.33333 False False 2.0 0.733333 3.8 300.0 n/a 9.0 False 10.5 9.0 0.833333 45.0 15.0 85.0 False 0.0 6.0 0.0 0.0 0.988676 60.0 2.0 10.0 0.91957 3.0 30.0 4.0 64.0 44.0 n/a 8.0 0.0 3.0 0 False 4.0 True 3.0 0.999972 1.76399 False 45.0 False False 3.0 False 1.79724 n/a 16.0 26.0 30.0 60.0 100.0 150.0 d 25.0 130.0 4.0 False 0.0 True 5.0 False 0.0 False 1.0 0.957402 False True 1.0 15.0 12.0 1.12971 0.0 n/a 3.0 0.0 50.0 0 n/a False 1.00563 False 2.0 4.0 0.142857 1.0 LIEUX,VISAGE 5.0 35.0 291 n/a 18.0 1.12681 8.63194 7.0 2.0 90.0 13.0 80.0 False True True False 2.5 False 12.0 0.938097 2.0 False +S38 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 368 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a +S39 50.0 0.621052 n/a n/a False -0.0259647 0.5 False 5.0 n/a 1.0 25.0 0.923161 1.58333 False False 0.0 0.733333 2.8 800.0 n/a 12.0 False 15.5 10.0 0.666667 25.0 10.0 30.0 False 0.0 9.0 0.0 4.0 0.840421 110.0 0.0 10.0 0.897196 3.0 30.0 5.0 61.0 41.0 n/a 10.0 0.0 3.0 LANGUES False 4.0 True 5.0 0.995976 0.934155 False 50.0 False False 2.0 False 0.924379 n/a 15.0 30.0 20.0 60.0 70.0 80.0 d 20.0 60.0 2.0 False 0.0 True 2.0 False 4.0 False 2.0 0.947885 True False 1.0 13.0 10.0 0.962968 4.0 0 7.0 0.0 58.0 n/a n/a False 0.950016 False 2.0 4.0 0.225806 1.0 visages, lieux 4.0 100.0 139 n/a 3.0 0.957841 8.68056 4.0 5.0 80.0 10.0 30.0 False True False True 2.0 False 19.0 1.13179 3.0 False +S40 60.0 0.669262 n/a n/a False -0.120663 3.0 False 4.0 n/a 1.0 6.0 0.869703 
1.26667 False True 2.0 0.85 2.8 400.0 n/a 15.0 False 17.0 6.0 0.75 30.0 12.0 35.0 False 0.29 10.0 0.0 3.0 0.798569 80.0 1.0 10.0 0.74904 3.0 22.0 3.0 61.0 42.0 0.0 7.0 0.0 2.0 0 False 4.0 True 2.0 0.988815 1.03409 False 100.0 False False 1.0 True 0.996137 n/a 15.0 30.0 28.0 50.0 70.0 110.0 d 18.0 45.0 4.0 False 0.0 True 0.0 False 3.0 False 2.0 0.917762 False False 1.0 7.0 18.0 0.923717 3.5 lieux 4.0 0.0 60.0 Chiffres lettres, noms, mots / couleurs, prenoms / gout, toucher, nombres / temps n/a False 1.01591 False 1.0 2.0 0.117647 2.0 Visages 3.0 50.0 197 n/a 14.0 0.910141 5.83333 6.0 2.0 70.0 10.0 90.0 False True False False 2.0 False 19.0 1.1343 2.0 False +S41 50.0 0.634911 n/a n/a False -0.123459 1.5 False 4.0 n/a 3.0 6.0 0.911603 1.33333 False True 0.0 0.8 5.0 400.0 n/a 12.0 False 14.0 10.0 0.583333 25.0 10.0 40.0 False 0.0 10.0 0.0 3.0 0.892371 60.0 1.0 10.0 0.788143 3.0 27.0 3.0 63.0 41.0 n/a 10.0 0.0 3.0 0 False 2.0 True 2.0 0.994467 0.985329 False 75.0 False False 2.0 False 0.945008 n/a 12.0 30.0 20.0 50.0 80.0 90.0 d 15.0 40.0 4.0 False 0.0 True 1.0 False 5.0 False 1.0 0.910247 False False 1.0 20.0 10.0 1.03135 2.0 n/a 4.0 0.0 50.0 0 n/a False 1.035 False 1.0 1.0 0.142857 1.0 n/a 4.0 80.0 348 n/a 7.0 1.01705 5.29861 7.0 4.0 40.0 11.0 50.0 False True True False 3.0 False 16.0 0.979685 2.0 False +S42 70.0 0.971599 n/a n/a False -0.0145183 3.0 False 3.0 n/a 3.0 20.0 0.93927 1.83333 False False 1.0 0.9 3.4 500.0 n/a 12.0 False 17.0 10.0 0.666667 30.0 15.0 77.0 False 0.0 10.0 0.0 2.0 0.955744 50.0 1.0 10.0 0.924752 3.0 32.0 4.0 65.0 44.0 2.0 10.0 0.0 3.0 0 True 4.0 True 4.0 0.99983 1.46297 False 250.0 False False 3.0 False 1.44614 n/a 15.0 40.0 20.0 70.0 90.0 130.0 d 25.0 80.0 4.0 False 0.0 True 0.0 False 4.0 False 2.0 0.893647 False False 1.0 15.0 15.0 1.09782 2.0 n/a 10.0 0.0 70.0 0 n/a False 1.01353 False 2.0 2.0 0.294118 3.0 VISAGE, LIEUX,PHYSIONOMIE 5.0 130.0 324 n/a 12.0 1.08928 6.53472 9.0 4.0 65.0 11.0 50.0 False True False False 4.0 False 22.0 1.00161 2.0 False +S43 50.0 0.444483 n/a n/a False -0.0592845 2.5 False 3.0 n/a 2.0 35.0 0.889422 1.91667 False False 2.0 0.933333 3.2 600.0 n/a 12.0 False 17.5 7.0 0.916667 25.0 15.0 40.0 False 0.0 10.0 0.0 2.0 0.608799 40.0 1.0 10.0 0.830138 3.0 22.0 4.0 68.0 43.0 n/a 9.0 1.0 3.0 0 False 2.0 True 3.0 0.988576 0.669578 False 100.0 False False 2.0 False 0.661573 n/a 20.0 50.0 25.0 50.0 60.0 70.0 u 25.0 50.0 4.0 False 0.0 True 1.0 False 4.0 False 0.0 0.8933 False False 1.0 12.0 18.0 0.692751 3.0 VISAGES 11.0 0.0 50.0 0 n/a False 0.988251 False 2.0 3.0 0.314286 1.0 LIEUX 5.0 40.0 349 n/a 13.0 0.693858 7.93056 6.0 4.0 60.0 12.0 25.0 False True False True 2.5 False 23.0 0.986973 3.0 False +S44 60.0 1.05413 n/a n/a False -0.0291271 5.0 False 4.0 n/a 2.0 10.0 0.913654 1.07692 False True 2.0 0.666667 2.4 600.0 n/a 13.0 False 13.5 10.0 0.833333 50.0 15.0 65.0 False 0.0 10.0 0.0 3.0 0.863541 20.0 2.0 7.0 0.884527 3.0 25.0 5.0 55.0 42.0 n/a 10.0 0.0 3.0 0 False 4.0 True 2.0 0.99285 1.59484 False 100.0 False True 2.0 True 1.56897 n/a 25.0 40.0 25.0 78.0 115.0 150.0 u 30.0 85.0 3.0 False 1.0 True 2.0 False 3.0 False 2.0 0.6925 False False 1.0 6.0 15.0 0.99406 0.0 n/a 1.0 0.0 50.0 0 n/a False 0.920358 False 2.0 1.5 0.037037 3.0 VISAGE ET LIEUX 2.0 15.0 293 n/a 20.0 0.984191 6.73611 6.0 1.0 80.0 13.0 6.0 False True False False 3.0 False 14.0 0.943294 3.0 False +S45 30.0 0.287899 n/a n/a False -0.0142111 1.5 False 4.0 n/a 3.0 30.0 0.89745 2.4 False True 1.0 0.733333 3.4 600.0 n/a 10.0 False 17.0 6.0 0.583333 20.0 6.0 20.0 False 0.25 9.0 0.0 3.0 0.706925 
80.0 1.0 9.0 0.883239 3.0 27.0 5.0 70.0 45.0 n/a 7.0 0.0 3.0 0 False 4.0 True 3.0 0.992768 0.419834 False 30.0 False False 2.0 False 0.428512 n/a 12.0 25.0 15.0 30.0 30.0 40.0 u 12.0 40.0 2.0 False 0.0 True 2.0 False 5.0 False 1.0 0.912762 False False 1.0 8.0 12.0 0.796829 3.33333 0 14.0 0.0 40.0 0 n/a False 0.977134 False 2.0 2.5 0.411765 1.0 lieux 3.0 60.0 170 n/a 9.0 0.805694 7.23611 6.0 3.0 50.0 10.0 100.0 False True False False 3.0 False 24.0 1.04265 4.0 False +S46 50.0 0.719223 n/a n/a False -0.0376729 3.0 False 2.0 n/a 1.0 150.0 0.917437 1.69231 True True 2.0 0.733333 2.6 400.0 n/a 13.0 False 17.5 6.0 0.5 25.0 15.0 80.0 False 0.0 10.0 0.0 2.0 0.927479 40.0 1.0 10.0 0.879764 3.0 30.0 5.0 52.0 40.0 0.0 8.0 0.0 3.0 0 False 4.0 True 4.0 0.994182 1.05988 False 200.0 False False 2.0 False 1.0705 n/a 10.0 30.0 20.0 70.0 65.0 80.0 d 20.0 75.0 1.0 False 0.0 True 4.0 False 3.0 False 2.0 0.743006 False False 1.0 20.0 13.0 1.05708 1.0 FLEURS 9.0 2.0 50.0 0 n/a False 0.919182 False 2.0 2.5 0.257143 1.0 VISAGES LIEUX 4.0 150.0 337 n/a 14.0 1.05706 7.20833 6.0 4.0 80.0 15.0 100.0 False True True True 3.0 False 22.0 0.976954 2.0 False +S47 50.0 0.747239 n/a n/a False -0.0657451 4.0 False 4.0 n/a 2.0 n/a 0.91669 1.81818 False True 2.0 0.533333 3.6 n/a n/a 11.0 False 15.5 n/a 0.916667 25.0 12.0 45.0 False 0.0 6.0 0.0 2.0 0.839307 n/a 2.0 10.0 0.850945 3.0 30.0 4.0 60.0 45.0 n/a 9.0 0.0 3.0 0 False 2.0 True 2.0 0.999447 1.11427 False n/a False False 3.0 False 1.1122 n/a 18.0 40.0 30.0 45.0 100.0 90.0 d 20.0 80.0 1.0 False 2.0 True 1.0 False 4.0 False 1.0 n/a False False 1.0 n/a n/a 0.95479 2.66667 0 9.0 0.0 n/a n/a n/a False 0.997244 False 1.0 2.5 0.290323 2.0 0 4.0 n/a 218 n/a 18.0 0.956572 7.24306 9.0 4.0 n/a 10.0 n/a False True False False n/a False 20.0 n/a 3.0 False +S48 30.0 0.507131 n/a n/a False -0.00956098 0.5 False 2.0 n/a 1.0 20.0 0.939729 2.77778 False True 0.0 0.733333 4.0 500.0 n/a 9.0 False 17.0 7.0 0.833333 20.0 10.0 40.0 False 0.0 10.0 0.0 3.0 0.824241 40.0 1.0 10.0 0.930168 3.0 30.0 5.0 65.0 43.0 n/a 9.0 0.0 2.0 0 False 4.0 True 4.0 0.996104 0.752623 False 20.0 False False 2.0 True 0.754819 n/a 10.0 20.0 15.0 30.0 50.0 70.0 d 15.0 50.0 4.0 False 0.0 True 0.0 False 4.0 False 1.0 0.917877 False False 1.0 15.0 7.0 0.943009 2.33333 lieux 16.0 0.0 52.0 n/a n/a False 0.96704 False 0.0 0.0 0.470588 1.0 Visages 4.0 70.0 168 n/a 5.0 0.9394 4.09028 7.0 5.0 50.0 13.0 30.0 False True True False 3.0 False 25.0 0.966885 2.0 False +S49 30.0 0.329163 n/a n/a False -0.0534856 2.0 False 3.0 n/a 1.0 7.0 0.781903 2.09091 False False 1.0 0.8 3.0 400.0 n/a 11.0 True 17.0 10.0 0.833333 20.0 10.0 25.0 False 0.0 9.0 3.0 3.0 0.616801 30.0 2.0 9.0 0.728418 3.0 27.0 4.0 62.0 42.0 n/a 7.0 0.0 3.0 APPRENTISSAGE LECTURE / DYSLEXIE False 4.0 True 3.0 0.999214 0.50128 False 50.0 False False 2.0 False 0.489929 n/a 10.0 20.0 30.0 25.0 50.0 55.0 u 20.0 35.0 2.0 True 0.0 True 1.0 True 3.0 True 2.0 0.942988 False False 1.0 8.0 10.0 0.704073 3.0 0 12.0 0.0 40.0 0 n/a True 0.985763 True 1.0 3.5 0.352941 2.0 Visages 3.0 100.0 171 n/a 12.0 0.702978 8.17361 5.0 4.0 45.0 10.0 60.0 False True False False 1.2 False 23.0 1.1189 2.0 False +S50 60.0 0.801137 n/a n/a False -0.0291117 0.5 False 4.0 n/a 1.0 50.0 0.98375 1.66667 False True 0.5 0.666667 2.6 n/a n/a 9.0 False 12.0 10.0 0.666667 40.0 15.0 80.0 False 0.25 10.0 0.0 3.0 0.826063 60.0 0.0 10.0 0.954638 3.0 25.0 5.0 63.0 43.0 n/a 7.0 1.0 3.0 n/a False 4.0 True 3.0 0.991647 1.16622 False 30.0 False False 2.0 False 1.19242 n/a 20.0 50.0 30.0 70.0 80.0 95.0 u 27.0 100.0 4.0 False 
0.0 True 1.0 False 4.0 False 0.0 n/a False False 1.0 8.0 12.5 0.933209 3.33333 visage 6.0 0.0 70.0 n/a n/a False 0.946907 False 1.0 1.5 0.25 1.0 n/a 3.0 150.0 117 n/a 4.0 0.941477 6.13889 4.0 3.0 75.0 10.0 250.0 False False True True 1.5 False 15.0 n/a 2.0 False +S51 120.0 0.902599 n/a n/a False -0.231889 4.0 False 3.0 n/a 2.0 10.0 0.752758 1.71429 False True 1.0 0.833333 2.6 700.0 n/a 7.0 False 9.5 7.0 0.833333 50.0 17.0 54.0 False 0.17 10.0 0.0 2.0 0.812932 30.0 2.0 10.0 0.520869 3.0 30.0 4.0 60.0 40.0 n/a 7.0 0.0 2.0 0 False 4.0 True 3.0 0.999712 1.25798 False 200.0 False False 2.0 False 1.34344 n/a 20.0 80.0 42.0 120.0 50.0 90.0 d 30.0 160.0 4.0 False 0.0 True 2.0 False 4.0 False 1.0 0.89375 False False 1.0 10.0 18.0 0.90344 2.66667 0 5.0 0.0 40.0 n/a n/a False 0.982427 False 1.0 1.5 0.263158 2.0 0 2.0 100.0 173 n/a 16.0 0.926512 5.69444 8.0 3.0 60.0 11.0 115.0 False True True False 2.0 False 12.0 1.1715 3.0 False +S52 55.0 0.87824 n/a n/a False -0.00832569 2.0 False 5.0 n/a 1.0 20.0 0.971116 1.77778 True True 1.0 0.766667 2.6 400.0 n/a 9.0 False 12.5 7.0 0.5 35.0 10.0 75.0 False 0.0 8.0 0.0 2.0 1.0228 75.0 1.0 7.0 0.96279 3.0 25.0 4.0 61.0 42.0 n/a 9.0 0.0 1.0 0 False 4.0 True 1.0 0.997485 1.29562 False 150.0 False False 1.0 True 1.30718 n/a 12.0 30.0 25.0 80.0 80.0 100.0 u 25.0 90.0 3.0 False 0.0 True 2.0 False 4.0 False 2.0 0.855701 False False 1.0 20.0 12.0 1.16281 3.66667 0 7.0 2.0 60.0 0 n/a False 0.976732 False 1.0 2.0 0.28 2.0 Visages 3.0 100.0 206 n/a 10.0 1.1657 4.66667 4.0 5.0 60.0 10.0 15.0 False True False False 2.0 False 16.0 1.02892 2.0 False +S53 57.0 0.721953 1.0 1.0 False -0.038159 2.0 False 3.0 1.0 2.0 21.0 0.949123 2.0 False True 1.0 0.6 2.0 400.0 1.0 10.0 False 15.0 7.0 0.666667 18.0 8.0 47.0 False 0.5 10.0 0.0 2.0 1.04459 500.0 2.0 9.0 0.910964 3.0 25.0 4.0 53.0 34.0 1.0 8.0 0.0 3.0 "confusion entre ""p""et""b"",""a"" et""d"",""v""et""f"" a9ans pendant3-4mois " True 2.0 False 3.0 0.999318 1.09054 False 40.0 False False 2.0 False 1.07456 n/a 11.0 36.0 22.0 52.0 78.0 89.0 u 13.0 61.0 2.0 False 1.0 True 6.0 True 3.0 False 1.0 0.727899 False False 1.0 9.0 17.0 1.19193 0.0 n/a 10.0 0.0 40.0 0 n/a False 0.92573 True 1.0 1.5 0.333333 1.0 n/a 4.0 60.0 299 n/a 12.0 1.19054 6.53472 5.0 4.0 70.0 12.0 12.0 False True False False 1.8 False 20.0 1.04877 3.0 False +S54 38.0 0.383918 n/a 0.0 False -0.103711 1.0 False 3.0 n/a 3.0 5.0 0.86639 1.8 True True 1.0 0.666667 2.8 300.0 n/a 10.0 False 14.0 12.0 0.75 20.0 10.0 30.0 False 0.33 8.0 1.0 3.0 0.731965 10.0 1.0 9.0 0.76268 3.0 30.0 4.0 64.0 42.0 n/a 9.0 0.0 2.0 DYSORTHOGRAPHIE DYSLEXIE False 3.0 True 2.0 0.999901 0.594111 False 20.0 False False 2.0 False 0.571427 n/a 10.0 25.0 15.0 25.0 50.0 60.0 u 12.0 30.0 4.0 True 0.0 True 1.0 True 4.0 True 2.0 0.799681 False False 0.0 4.0 8.0 0.846727 3.0 noms 8.0 2.0 45.0 n/a n/a False 0.999775 True 1.0 1.5 0.285714 1.0 Visages 2.0 12.0 179 n/a 6.0 0.834232 4.625 12.0 2.0 40.0 10.0 50.0 False True True False 1.5 False 18.0 0.95225 3.0 False +S55 38.0 0.351037 n/a n/a False -0.0959172 2.5 False 2.0 n/a 2.0 15.0 0.874188 1.4 True True 2.0 0.53 2.6 500.0 n/a 10.0 False 12.0 8.0 0.5 19.0 9.0 25.0 False 0.0 10.0 0.0 2.0 0.670693 75.0 1.0 7.0 0.778271 3.0 28.0 4.0 75.0 45.0 n/a 5.0 1.0 3.0 0 False 3.0 True 1.0 0.997022 0.542072 False 35.0 False False 1.0 True 0.522487 n/a 13.0 27.0 20.0 39.0 45.0 57.0 d 15.0 29.0 4.0 False 0.0 True 2.0 False 3.0 False 1.0 0.969419 False False 1.0 10.0 13.0 0.771524 2.0 n/a 4.0 3.0 45.0 0 n/a False 1.03044 False 2.0 3.0 0.166667 3.0 LIEUX 2.0 50.0 297 n/a 13.0 
0.764399 7.08333 6.0 1.0 55.0 10.0 75.0 False True True False 2.5 True 14.0 1.02732 2.0 False +S56 50.0 0.728235 1.0 1.0 False -0.0200327 2.5 False 3.0 1.0 2.0 13.0 0.958526 1.55556 False False 1.0 0.7 3.0 450.0 1.0 9.0 False 11.5 9.0 0.666667 30.0 10.0 40.0 False 0.0 8.0 0.0 2.0 0.891965 35.0 2.0 9.0 0.938493 3.0 32.0 5.0 62.0 44.0 1.0 6.0 0.0 3.0 zozottement False 3.0 True 4.0 0.996171 1.08598 False 45.0 False False 2.0 False 1.08391 n/a 15.0 30.0 20.0 50.0 70.0 100.0 d 20.0 70.0 0.0 False 1.0 True 1.0 False 4.0 False 2.0 0.976141 True False 1.0 13.0 14.0 1.02026 2.66667 n/a 5.0 0.0 45.0 0 n/a False 0.966929 True 1.0 1.0 0.217391 1.0 visage 3.0 75.0 39 n/a 13.0 1.01659 6.15972 5.0 3.0 80.0 13.0 35.0 False True True True 3.0 False 14.0 0.967209 2.0 False +S57 45.0 0.543197 n/a n/a False -0.036005 0.5 False 4.0 n/a 1.0 20.0 0.969654 2.0 False False 0.0 0.766667 2.4 700.0 n/a 9.0 False 13.5 6.5 0.583333 30.0 10.0 60.0 False 0.13 10.0 0.0 1.0 0.817328 30.0 1.0 10.0 0.933649 3.0 25.0 4.0 60.0 45.0 n/a 10.0 0.0 3.0 0 False 3.0 True 4.0 0.996329 0.804771 False 50.0 False False 2.0 False 0.8085 n/a 15.0 30.0 25.0 50.0 55.0 70.0 d 17.0 60.0 1.0 False 0.0 True 0.0 False 4.0 False 2.0 0.930744 False False 0.0 10.0 15.0 0.927787 2.33333 0 9.0 0.0 50.0 0 n/a False 0.98254 False 2.0 3.0 0.333333 2.0 visages, lieux 4.0 25.0 181 n/a 3.0 0.931521 7.61111 4.0 2.0 80.0 12.0 80.0 False True True False 1.2 False 18.0 1.14636 3.0 False +S58 49.0 0.482002 n/a n/a False -0.0484288 1.5 False 5.0 n/a 1.0 10.0 0.95826 2.5 False False 1.0 0.933333 4.2 450.0 n/a 8.0 False 14.0 6.0 0.833333 30.0 10.0 50.0 False 0.2 9.0 0.0 4.0 0.770706 30.0 1.0 9.0 0.909831 3.0 35.0 4.0 62.0 45.0 n/a 9.0 0.0 3.0 0 False 4.0 True 4.0 0.997881 0.72737 False 50.0 False False 3.0 False 0.717416 n/a 15.0 30.0 20.0 50.0 55.0 65.0 u 20.0 50.0 4.0 False 0.0 True 4.0 False 5.0 False 1.0 0.950643 False True 1.0 14.0 12.0 0.880939 4.33333 0 12.0 0.0 50.0 0 n/a False 1.01537 False 1.0 3.0 0.428571 2.0 0 4.0 30.0 201 n/a 9.0 0.878385 8.06944 4.0 4.0 100.0 10.0 40.0 False True True False 2.5 False 20.0 1.01326 3.0 False +S59 80.0 1.21402 n/a n/a False -0.0345568 0.0 False 3.0 n/a 1.0 15.0 0.975677 1.46154 False False 1.0 0.6 4.2 400.0 n/a 13.0 False 16.0 8.0 0.583333 40.0 10.0 100.0 False 0.0 8.0 0.0 4.0 1.14589 100.0 0.0 8.0 0.94112 3.0 35.0 5.0 60.0 40.0 n/a 4.0 0.0 3.0 0 False 4.0 True 3.0 0.992796 1.81669 False 50.0 False False 3.0 False 1.80695 n/a 15.0 40.0 30.0 100.0 100.0 150.0 d 20.0 100.0 3.0 False 0.0 True 1.0 False 3.0 False 1.0 0.748922 False False 0.0 20.0 100.0 1.30699 3.5 0 6.0 0.0 50.0 0 n/a False 0.956471 False 1.0 3.0 0.1875 3.0 0 3.5 60.0 203 n/a 2.0 1.30599 7.65278 n/a 4.0 100.0 15.0 30.0 False True True False 5.0 False 19.0 0.806267 2.0 False +S60 45.0 0.705189 1.0 1.0 False -0.0101898 0.0 False 3.0 1.0 1.0 7.0 0.974984 1.55556 False False 0.0 0.933333 3.6 380.0 1.0 9.0 False 11.5 8.5 0.416667 30.0 12.0 55.0 False 0.333333 10.0 0.0 3.0 0.828774 10.0 0.0 9.0 0.964794 3.0 30.0 5.0 64.0 45.0 1.0 10.0 0.0 2.0 n/a False 4.0 True 2.0 0.996178 1.04281 False 20.0 False n/a 3.0 False 1.04961 n/a 15.0 35.0 30.0 50.0 75.0 95.0 d 25.0 75.0 3.0 False 0.0 True 1.0 False 5.0 False 2.0 0.680142 False False 0.0 12.0 12.0 0.939798 3.0 lieux 5.0 0.0 50.0 0 n/a False 0.970066 False 0.0 2.5 0.217391 1.0 visage 3.0 40.0 132 n/a 0.0 0.944566 5.28472 7.0 5.0 50.0 12.0 10.0 False True True False 7.0 False 14.0 0.738193 2.0 False +S61 30.0 0.420859 n/a n/a False -0.0346947 1.0 False 3.0 n/a 3.0 17.5 0.946556 2.45455 False False 2.0 0.5 3.4 400.0 n/a 
11.0 False 19.0 8.0 0.833333 20.0 10.0 30.0 False 0.666667 9.0 0.0 2.0 0.753665 100.0 1.0 8.0 0.911862 3.0 29.0 3.0 60.0 45.0 n/a 1.0 1.0 3.0 0 False 2.0 True 3.0 0.994075 0.603338 False 40.0 False False 1.0 False 0.626411 n/a 10.0 20.0 15.0 30.0 40.0 50.0 u 15.0 60.0 1.0 False 0.0 True 3.0 False 3.0 False 0.0 0.853415 False False 1.0 10.0 15.0 0.854203 2.5 physionomie 16.0 0.0 52.0 n/a n/a False 1.044 False 2.0 4.0 0.421053 2.0 lieux 3.0 25.0 199 n/a 10.0 0.858964 8.25694 9.0 3.0 50.0 11.0 200.0 False True False False 2.0 False 27.0 1.02602 3.0 False +S62 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 367 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a +S63 40.0 0.710036 1.0 1.0 False -0.00756445 1.0 False 4.0 1.0 3.0 5.0 0.952403 1.85714 False True 0.0 0.833333 2.4 1000.0 1.0 7.0 False 10.0 8.0 0.583333 25.0 10.0 60.0 False 0.0 10.0 0.0 2.0 0.912774 800.0 1.0 10.0 0.944839 3.0 25.0 3.5 60.0 42.0 1.0 9.0 0.0 2.0 0 False 2.0 True 1.0 0.997006 1.03295 False 400.0 False True 2.0 False 1.05682 n/a 15.0 25.0 20.0 70.0 60.0 80.0 u 20.0 80.0 4.0 False 0.0 True 3.0 False 4.0 False 2.0 0.235942 False False 0.0 4.0 8.0 1.03665 3.0 LIEUX 6.0 0.0 50.0 0 n/a False 0.996833 False 0.0 1.0 0.3 1.0 VISAGES 2.0 200.0 325 n/a 4.0 1.0403 3.75694 11.0 1.0 60.0 14.0 500.0 False True True False 500.0 False 13.0 0.782561 2.0 False +S64 40.0 0.526118 n/a n/a False -0.000933116 4.0 False 4.0 n/a 1.0 50.0 0.971842 1.7 False False 2.0 0.4 2.4 600.0 0.0 10.0 False 13.5 7.0 0.75 20.0 10.0 40.0 False 0.75 8.0 1.0 3.0 0.88201 30.0 2.0 7.0 0.970909 3.0 36.0 5.0 60.0 40.0 n/a 4.0 0.0 3.0 0 False 2.0 True 1.0 0.991759 0.772525 False 10.0 False False 1.0 False 0.783079 n/a 10.0 25.0 15.0 50.0 50.0 60.0 u 15.0 60.0 1.0 True 0.0 True 3.0 False 4.0 False 2.0 0.726744 False False 1.0 3.0 20.0 1.00469 0.0 ESPECES ANIMALES 7.0 0.0 45.0 0 n/a True 0.958743 False 1.0 2.5 0.259259 2.0 VISAGE 1.0 40.0 296 n/a 18.0 1.00524 7.35417 5.0 1.0 50.0 12.0 20.0 False True False False 2.0 False 17.0 0.975383 3.0 False +S65 30.0 0.422172 1.0 1.0 False -0.0217241 3.5 False 3.0 1.0 1.0 10.0 0.956833 1.58333 False False 2.0 0.866667 3.8 500.0 1.0 12.0 False 15.5 7.0 0.916667 20.0 10.0 40.0 False 0.0 10.0 0.0 4.0 0.757414 40.0 2.0 10.0 0.935109 3.0 30.0 5.0 64.0 43.0 1.0 8.0 0.0 3.0 0 False 3.0 True 4.0 0.996173 0.615527 False 60.0 False False 3.0 False 0.628364 n/a 10.0 30.0 20.0 30.0 40.0 60.0 d 15.0 50.0 4.0 False 0.0 True 1.0 False 4.0 False 1.0 0.939091 False False 1.0 30.0 12.0 0.853278 2.0 n/a 7.0 0.0 55.0 0 n/a False 0.964347 False 1.0 2.5 0.225806 2.0 n/a 4.0 70.0 327 n/a 17.0 0.863237 8.05556 4.0 4.0 80.0 10.0 40.0 False True False False 3.0 False 19.0 0.994149 3.0 False +S66 50.0 0.602292 n/a n/a False -0.0338606 1.5 False 4.0 n/a 1.0 20.0 0.934056 2.11111 False False 0.0 0.6 3.2 400.0 n/a 9.0 False 14.0 9.0 0.5 15.0 7.0 35.0 False 0.0 10.0 0.0 3.0 1.0295 25.0 1.0 9.0 0.900195 3.0 25.0 3.0 65.0 42.0 n/a 8.0 0.0 3.0 0 False 4.0 True 3.0 0.992627 0.904788 False 35.0 False False 3.0 False 0.896458 n/a 10.0 30.0 15.0 50.0 50.0 80.0 u 10.0 50.0 4.0 False 0.0 True 1.0 False 5.0 False 2.0 0.894138 False False 1.0 20.0 15.0 1.17632 0.0 n/a 10.0 0.0 
50.0 0 n/a False 1.03726 False 2.0 3.0 0.357143 1.0 VISAGES LIEUX 3.0 90.0 344 n/a 7.0 1.17333 7.27083 5.0 3.0 80.0 12.0 20.0 False True False True 2.0 False 19.0 0.942724 2.0 False +S67 50.0 0.760311 n/a n/a False -0.090743 0.5 False 3.0 n/a 2.0 40.0 0.957141 1.75 False False 0.0 0.7 2.8 300.0 n/a 8.0 False 11.0 7.0 0.166667 40.0 12.0 60.0 False 0.25 8.0 0.0 2.0 0.895573 50.0 0.0 10.0 0.866398 3.0 32.0 5.0 65.0 44.0 2.0 5.0 0.0 3.0 0 False 3.0 True 2.0 0.996432 1.11832 False 150.0 False False 2.0 True 1.13165 n/a 15.0 45.0 25.0 100.0 70.0 90.0 u 25.0 80.0 3.0 False 0.0 True 2.0 False 5.0 False 1.0 0.719087 False False 1.0 3.0 12.0 1.01291 2.33333 0 6.0 0.0 58.0 Chiffres, voyelles en couleur n/a False 0.975137 False 2.0 3.0 0.272727 1.0 Lieux, itineraires 2.0 40.0 215 n/a 3.0 1.0207 6.49306 5.0 2.0 80.0 10.0 8.0 False True True False 2.0 False 14.0 1.00441 1.0 False +S68 42.0 0.518261 n/a n/a False -0.0204349 2.0 False 5.0 n/a 1.0 5.0 0.936235 1.88889 False False 1.0 1.0 4.2 700.0 n/a 9.0 False 13.0 7.0 0.833333 27.0 12.0 47.0 False 0.666667 10.0 1.0 3.0 0.753259 15.0 1.0 10.0 0.9158 3.0 33.0 4.0 63.0 43.0 n/a 10.0 0.0 3.0 n/a False 4.0 True 5.0 0.999161 0.76944 False 50.0 True False 3.0 False 0.771384 n/a 12.0 27.0 23.0 32.0 58.0 72.0 u 21.0 59.0 1.0 True 0.0 True 0.0 False 5.0 False 1.0 0.850019 False False 1.0 8.0 15.0 0.858164 4.0 n/a 8.0 0.0 40.0 0 n/a False 1.00769 False 2.0 4.0 0.307692 1.0 lieux 5.0 50.0 94 n/a 10.0 0.858501 8.90278 6.0 5.0 60.0 16.0 130.0 False True False False 3.0 False 17.0 1.08324 3.0 False +S69 40.0 0.320483 n/a n/a False -0.0823311 4.0 False 4.0 n/a 1.0 32.5 0.906829 2.14286 False False 2.0 0.7 4.2 450.0 n/a 7.0 False 11.0 9.0 0.5 20.0 13.0 40.0 False 0.0 7.0 0.0 4.0 0.601481 150.0 2.0 9.0 0.824498 3.0 25.0 5.0 60.0 48.0 n/a 8.0 0.0 3.0 n/a False 4.0 True 4.0 0.989427 0.488542 False 2500.0 False False 2.0 True 0.47701 n/a 12.0 30.0 25.0 40.0 45.0 50.0 u 20.0 35.0 4.0 False 0.0 True 3.0 False 5.0 False 2.0 0.554342 False False 1.0 8.0 15.0 0.68626 4.0 n/a 8.0 0.0 35.0 couleur, mot, espace n/a False 0.95227 False 2.0 4.0 0.363636 2.0 visage, lieux 4.0 200.0 105 n/a 18.0 0.685518 9.0 4.0 5.0 70.0 13.0 10.0 False True False False 2.5 False 15.0 1.11756 3.0 False +S70 350.0 4.37116 n/a n/a False -0.082708 3.0 False 4.0 n/a 1.0 15.0 0.923019 1.5 False False 2.0 0.766667 3.8 500.0 n/a 10.0 False 12.5 9.0 0.833333 50.0 15.0 250.0 False 0.0 8.0 0.0 3.0 1.71511 20.0 2.0 10.0 0.840311 3.0 25.0 4.0 68.0 43.0 n/a 8.0 0.0 3.0 0 False 4.0 True 4.0 0.995391 6.66643 False 200.0 False False 3.0 False 6.50608 n/a 15.0 100.0 30.0 200.0 400.0 500.0 d 20.0 300.0 2.0 False 0.0 True 1.0 False 5.0 False 2.0 0.832625 False False 1.0 5.0 6.0 1.97093 3.33333 0 5.0 0.0 40.0 Odeurs, images : souvenirs n/a False 0.998561 False 0.0 3.0 0.2 1.0 Visages 5.0 70.0 187 n/a 16.0 1.95474 8.42361 5.0 5.0 40.0 14.0 50.0 False True True False 3.0 False 15.0 1.0604 4.0 False +S71 20.0 0.212617 n/a n/a False -0.0629865 1.5 False n/a n/a 2.0 15.0 0.830302 1.55556 False False 0.0 0.5 3.2 400.0 n/a 9.0 False 11.5 8.0 0.5 15.0 10.0 20.0 False 0.0 6.0 0.0 n/a 0.516706 40.0 1.0 7.0 0.767316 3.0 25.0 4.0 60.0 42.0 n/a 3.0 0.0 3.0 0 False 3.0 True 2.0 0.997657 0.325419 False 50.0 False False 2.0 False 0.316461 n/a 10.0 20.0 15.0 20.0 30.0 40.0 u 10.0 20.0 1.0 False 0.0 True 2.0 False n/a False 2.0 0.968262 False False 1.0 15.0 10.0 0.594702 n/a 0 5.0 0.0 50.0 n/a n/a False 0.97386 False 1.0 3.0 0.217391 1.0 Visages 2.0 60.0 159 n/a 7.0 0.588898 7.16667 6.0 3.0 100.0 10.0 100.0 False True True False 2.0 False 
14.0 1.05611 3.0 False +S72 60.0 0.715723 1.0 1.0 False -0.0551373 2.0 False 2.0 2.0 1.0 40.0 0.958627 1.33333 True True 1.0 0.433333 3.6 450.0 1.0 15.0 False 17.5 7.0 0.416667 30.0 10.0 70.0 False 0.0 8.0 2.0 0.0 1.0041 30.0 1.0 7.0 0.90349 3.0 21.0 3.0 67.0 45.0 1.0 5.0 0.0 3.0 pbl de deglutition a9ans(suce son pouce) False 3.0 True 3.0 0.98314 1.07017 False 50.0 False False 1.0 False 1.06529 n/a 10.0 30.0 20.0 70.0 70.0 80.0 d 20.0 70.0 2.0 True 0.0 True 2.0 False 3.0 False 0.0 0.830847 False False 1.0 20.0 14.0 1.14585 0.0 PHYSIONOMIE 5.0 3.0 60.0 0 n/a True 1.03714 True 1.0 2.5 0.142857 1.0 ESPECES VEGETALES 3.0 110.0 329 n/a 10.0 1.14439 6.59722 8.0 2.0 50.0 10.0 20.0 False True True False 5.0 True 20.0 0.841678 2.0 True +S73 40.0 0.927448 1.0 1.0 False -0.00182578 n/a False n/a 1.0 3.0 15.0 0.979823 1.90476 False False n/a 0.766667 3.2 300.0 0.0 8.4 False 12.2 8.0 0.916667 25.0 10.0 50.0 False 0.0 9.0 0.0 n/a 1.07666 60.0 n/a 10.0 0.977997 3.0 30.0 3.0 63.0 41.0 1.0 10.0 0.0 3.0 n/a n/a 2.0 True 4.0 0.994564 1.36044 False 70.0 False False n/a False 1.38042 n/a 10.0 30.0 20.0 70.0 80.0 110.0 n/a 18.0 90.0 4.0 False 0.0 True 3.0 False n/a False 2.0 0.979557 False False n/a 15.0 13.0 1.22344 n/a espece animal 7.6 0.0 60.0 n/a n/a False 1.0435 False 1.0 1.0 0.311475 1.0 visage 3.0 60.0 365 n/a n/a 1.22709 n/a 7.0 3.0 50.0 10.0 55.0 False True n/a False 3.0 False 16.0 0.929294 3.0 False +S74 30.0 0.382676 n/a n/a False -0.0665277 2.5 True 4.0 n/a 1.0 40.0 0.920906 1.66667 False True 1.0 0.8 3.6 250.0 n/a 9.0 False 12.0 12.0 0.833333 30.0 12.0 50.0 False 0.2 9.0 1.0 3.0 0.637316 20.0 1.0 10.0 0.854378 3.0 32.0 4.0 62.0 42.0 n/a 7.0 0.0 3.0 EXPRESSION ORALE False 4.0 True 5.0 0.999392 0.562843 False 30.0 False False 3.0 False 0.569578 n/a 15.0 30.0 20.0 50.0 50.0 50.0 U? 
25.0 50.0 1.0 True 2.0 True 3.0 True 4.0 False 2.0 0.859567 False False 1.0 10.0 10.0 0.720039 3.5 lieux 6.0 2.0 60.0 0 n/a False 0.999466 True 0.0 0.0 0.25 1.0 Visages 5.0 50.0 207 n/a 11.0 0.726359 5.81944 4.0 5.0 50.0 10.0 30.0 False True True False 2.5 True 15.0 0.819761 3.0 False +S75 90.0 1.17674 1.0 1.0 False -0.0322937 1.5 False 4.0 1.0 1.0 30.0 0.962962 1.5 False False 1.0 0.966667 5.0 300.0 1.0 10.0 False 12.5 7.0 0.666667 40.0 10.0 80.0 False 0.2 10.0 0.0 5.0 1.06134 15.0 1.0 10.0 0.930669 3.0 32.0 4.0 64.0 43.0 1.0 10.0 0.0 3.0 0 False 3.0 True 3.0 0.999677 1.77613 False 40.0 False False 3.0 False 1.75148 n/a 20.0 50.0 30.0 80.0 120.0 150.0 u 25.0 100.0 3.0 False 0.0 True 1.0 False 5.0 False 2.0 0.843897 False False 0.0 7.0 10.0 1.2123 4.0 n/a 5.0 0.0 60.0 0 n/a False 1.00794 False 2.0 3.5 0.2 2.0 LIEUX VISAGES 3.0 60.0 319 n/a 7.0 1.20962 7.72222 6.0 5.0 70.0 13.0 120.0 False True True True 3.0 False 15.0 0.945441 2.0 False +S76 50.0 0.611865 n/a n/a False -0.0818007 3.5 False 4.0 n/a 1.0 20.0 0.921049 1.66667 False False 2.0 0.833333 2.6 500.0 n/a 9.0 False 12.0 7.0 0.25 30.0 10.0 35.0 False 0.0 10.0 0.0 3.0 0.873615 150.0 2.0 10.0 0.839249 3.0 33.0 4.0 66.0 45.0 n/a 6.0 0.0 3.0 0 False 4.0 True 3.0 0.999685 0.93416 False 200.0 False False 2.0 True 0.910705 n/a 10.0 30.0 20.0 40.0 70.0 90.0 u 20.0 50.0 3.0 False 0.0 True 3.0 False 3.0 False 2.0 0.825401 False True 1.0 50.0 15.0 1.0031 3.33333 0 6.0 0.0 30.0 Couleur / mots n/a False 1.02149 False 2.0 3.5 0.25 2.0 visages, lieux 3.0 200.0 172 n/a 17.0 0.995672 7.95833 6.0 3.0 65.0 10.0 100.0 False True False False 2.0 False 15.0 1.09488 3.0 False +S77 45.0 0.689299 n/a n/a False -0.0180914 1.0 False 5.0 n/a 2.0 10.0 0.968552 2.11111 True True 0.0 0.966667 3.0 500.0 n/a 9.0 False 14.0 11.0 0.416667 20.0 10.0 40.0 False 0.0 9.0 0.0 4.0 0.981267 15.0 1.0 10.0 0.950461 3.0 30.0 4.0 64.0 43.0 n/a 10.0 n/a 3.0 n/a False 4.0 True 3.0 0.999983 1.01458 False 500.0 False False 3.0 False 1.02596 n/a 10.0 30.0 20.0 70.0 60.0 80.0 u 15.0 70.0 2.0 False 1.0 True 3.0 False 5.0 False 2.0 0.671399 False False 1.0 7.0 10.0 1.11533 4.33333 lieux 10.0 3.0 60.0 0 n/a False 1.00274 False 1.0 0.0 0.357143 2.0 visage 4.0 700.0 115 n/a 6.0 1.11837 4.65972 5.0 4.0 80.0 15.0 20.0 False True True False 2.0 True 19.0 1.19359 2.0 False +S78 40.0 0.548867 n/a n/a False -0.0193456 3.0 False 4.0 n/a 2.0 25.0 0.950317 1.7 False False 2.0 0.7 3.8 600.0 n/a 10.0 False 13.5 15.0 0.916667 40.0 10.0 50.0 False 0.0 10.0 0.0 3.0 0.813199 60.0 1.0 10.0 0.930971 3.0 30.0 4.0 62.0 41.0 1.0 7.0 2.0 3.0 0 False 2.0 True 3.0 0.999859 0.815318 False 45.0 False False 2.0 False 0.816939 n/a 15.0 30.0 20.0 60.0 60.0 70.0 u 20.0 60.0 2.0 False 0.0 True 2.0 False 4.0 False 2.0 0.905331 False False 1.0 10.0 15.0 0.925195 2.0 n/a 7.0 0.0 50.0 0 n/a False 0.991229 False 1.0 2.5 0.259259 1.0 VISAGES 1.0 30.0 328 n/a 14.0 0.926816 6.74306 9.0 2.0 60.0 16.0 112.0 False True False False 3.0 False 17.0 0.960007 n/a False +S79 42.0 0.449873 n/a 0.0 False 0.00970278 4.0 False 3.0 n/a 1.0 12.0 0.905277 1.4 False False 2.0 0.566667 1.8 350.0 n/a 10.0 False 12.0 7.0 0.666667 28.0 16.0 30.0 False 0.25 9.0 0.0 1.0 0.603731 30.0 2.0 9.0 0.914979 3.0 27.0 4.0 68.0 45.0 n/a 6.0 0.0 3.0 0 False 4.0 True 1.0 0.997786 0.671734 False 100.0 False True 1.0 False 0.669595 n/a 18.0 28.0 21.0 50.0 59.0 63.0 d 22.0 55.0 3.0 False 0.0 True 3.0 True 5.0 False 1.0 0.917245 False False 1.0 20.0 12.0 0.693804 1.66667 Mettre nom sur un visage 4.0 0.0 70.0 Chiffres ds espace n/a False 1.01049 False 1.0 3.5 
0.166667 3.0 tout 1.0 80.0 198 n/a 18.0 0.688082 8.22222 4.0 1.0 50.0 12.0 100.0 False False False False 2.0 False 14.0 1.03246 3.0 False +S80 48.0 0.634019 n/a n/a False -0.00810036 2.5 False 4.0 n/a 2.0 30.0 0.948885 1.5 False True 1.0 0.733333 2.6 530.0 n/a 10.0 False 12.5 13.0 0.5 31.0 8.0 41.0 False 0.0 7.0 0.0 2.0 0.91701 250.0 0.0 10.0 0.940784 3.0 25.0 4.0 62.0 43.0 n/a 10.0 0.0 3.0 0 False 3.0 True 3.0 0.996996 0.946805 False 20.0 False False 2.0 False 0.94368 n/a 12.0 35.0 34.0 56.0 68.0 86.0 u 19.0 63.0 2.0 False 1.0 True 1.0 False 5.0 False 2.0 0.798489 False False 1.0 6.0 18.0 1.03757 3.33333 0 5.0 0.0 62.0 0 n/a False 0.982516 False 2.0 2.0 0.2 1.0 visages, lieux 3.0 45.0 180 n/a 9.0 1.04513 6.375 8.0 3.0 75.0 10.0 50.0 False True True False 3.0 False 15.0 0.987773 3.0 False +S81 n/a n/a n/a n/a False n/a n/a False n/a n/a n/a 15.0 n/a n/a n/a n/a n/a n/a n/a 500.0 n/a n/a False n/a 8.0 n/a n/a n/a n/a False n/a n/a n/a n/a n/a 40.0 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a False 40.0 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 1.0 0.979913 False n/a n/a 10.0 15.0 n/a n/a n/a n/a n/a 55.0 n/a n/a n/a n/a n/a 1.0 n/a n/a n/a n/a n/a 50.0 217 n/a n/a n/a n/a n/a n/a 90.0 n/a 60.0 n/a n/a n/a n/a 3.0 n/a n/a 1.00486 n/a False +S82 55.0 0.611077 n/a n/a False -0.0475203 3.5 False 3.0 n/a 1.0 15.0 0.963192 2.0 True True 1.0 0.85 2.4 1000.0 n/a 10.0 False 15.0 8.0 0.583333 25.0 11.0 45.0 False 0.33 10.0 0.0 3.0 0.877608 15.0 2.0 10.0 0.915672 3.0 30.0 3.0 66.0 43.0 n/a 7.0 0.0 1.0 0 False 3.0 True 3.0 0.994773 0.92664 False 25.0 False False 2.0 False 0.909533 n/a 12.0 30.0 20.0 50.0 68.0 80.0 u 17.0 55.0 2.0 False 0.0 True 2.0 False 4.0 False 2.0 0.745845 False False 1.0 8.0 12.0 1.0073 3.0 lieux 10.0 2.0 50.0 0 n/a False 1.05757 False 0.0 2.0 0.333333 2.0 Visages 3.0 20.0 195 n/a 15.0 1.00022 5.27778 4.0 4.0 50.0 13.0 6.0 False True False False 2.0 False 20.0 1.00696 2.0 False +S83 55.0 0.752174 n/a n/a False -0.0102645 3.0 False 3.0 n/a 1.0 7.0 0.983114 1.25 False True 1.0 1.0 3.4 600.0 n/a 12.0 False 13.5 8.0 0.75 30.0 10.0 60.0 False 0.0 10.0 0.0 3.0 0.946814 15.0 2.0 10.0 0.972849 3.0 35.0 3.0 65.0 43.0 n/a 10.0 0.0 3.0 0 False 3.0 True 4.0 0.992436 1.10866 False 100.0 False False 3.0 False 1.11954 n/a 13.0 40.0 25.0 75.0 70.0 90.0 d 25.0 80.0 4.0 False 0.0 True 1.0 False 4.0 False 2.0 0.777745 False False 1.0 8.0 12.0 1.07195 2.66667 lieux 3.0 0.0 50.0 0 n/a False 1.06735 False 0.0 1.0 0.111111 3.0 Visages 4.0 30.0 184 n/a 14.0 1.0791 6.4375 5.0 4.0 60.0 11.0 50.0 False True False False 8.0 False 15.0 0.904109 3.0 False +S84 70.0 0.918418 n/a n/a False 0.053325 1.0 False 4.0 n/a 1.0 7.0 0.888236 1.54545 False False 1.0 0.8 3.4 450.0 n/a 11.0 False 14.0 11.0 0.75 40.0 12.0 75.0 False 0.25 10.0 0.0 3.0 1.15816 80.0 1.0 10.0 0.941561 3.0 33.0 5.0 63.0 42.0 n/a 9.0 0.0 3.0 0 False 4.0 True 3.0 0.995588 1.37754 False 70.0 False False 3.0 False 1.36698 n/a 5.0 40.0 35.0 70.0 95.0 110.0 d 20.0 85.0 4.0 False 0.0 True 0.0 False 4.0 False 2.0 0.818761 False False 1.0 5.0 14.0 1.3146 3.33333 0 6.0 0.0 45.0 0 n/a False 0.966338 False 2.0 2.5 0.214286 2.0 visages, lieux 3.0 25.0 193 n/a 8.0 1.31998 7.10417 5.0 3.0 60.0 10.0 9.0 False True True False 1.5 False 17.0 1.05465 2.0 False +S85 25.0 0.278677 n/a n/a False -0.022051 3.0 False 4.0 n/a 2.0 20.0 0.94183 2.0 False False 1.0 0.6 4.0 500.0 0.0 9.0 False 13.5 9.0 0.833333 25.0 10.0 30.0 False 0.33 8.0 0.0 2.0 0.562786 60.0 2.0 9.0 0.919779 3.0 30.0 5.0 
60.0 40.0 n/a 9.0 0.0 3.0 0 False 3.0 True 3.0 0.995747 0.400515 False 80.0 False False 2.0 False 0.414786 n/a 15.0 25.0 20.0 30.0 35.0 40.0 u 15.0 45.0 4.0 False 0.0 True 3.0 False 4.0 False 2.0 0.962147 False False 1.0 10.0 12.0 0.631332 3.0 animaux 9.0 0.0 65.0 0 n/a False 0.944038 False 1.0 n/a 0.333333 2.0 Visages 3.0 50.0 169 n/a 14.0 0.641416 5.56944 6.0 4.0 60.0 13.0 30.0 False True n/a False 2.0 False 18.0 1.04717 4.0 False +S86 40.0 0.780401 1.0 1.0 False -0.0458562 1.5 False 4.0 1.0 1.0 50.0 0.878883 1.75 False True 0.0 0.5 3.4 750.0 1.0 10.0 False 13.75 9.0 0.416667 40.0 12.0 90.0 True 0.0 9.0 3.0 3.0 0.993049 90.0 2.0 10.0 0.833027 3.0 32.0 4.0 63.0 45.0 1.0 6.0 0.0 3.0 dyslexie, confusion che/jeu, f/v False 4.0 True 4.0 0.999649 1.14484 True 30.0 False False 2.0 False 1.16156 n/a 10.0 20.0 28.0 70.0 65.0 90.0 u 15.0 80.0 4.0 True 0.0 True 2.0 True 4.0 True 1.0 0.89167 False False 1.0 12.0 11.0 1.13006 0.0 n/a 7.5 0.0 60.0 0 n/a False 1.01092 True 1.0 1.5 0.272727 1.0 n/a 2.0 80.0 346 n/a 9.0 1.13179 6.26389 6.0 4.0 70.0 13.0 30.0 False True n/a True 2.5 False 17.5 1.04276 3.0 False +S87 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a 369 n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a n/a +S88 45.0 0.525943 n/a 0.0 False -0.0442923 0.0 False 4.0 n/a 1.0 35.0 0.958444 1.875 False False 0.0 0.6 3.6 500.0 n/a 8.0 True 11.5 10.0 0.75 25.0 10.0 40.0 False 0.5 8.0 2.0 2.0 0.825556 60.0 1.0 10.0 0.914152 3.0 32.0 5.0 60.0 42.0 n/a 8.0 0.0 3.0 APPRENTISSAGE LECTURE, math False 3.0 True 3.0 0.995658 0.77895 False 100.0 True True 1.0 False 0.782818 n/a 15.0 35.0 20.0 60.0 60.0 60.0 d 15.0 60.0 3.0 True 1.0 True 0.0 False 3.0 False 2.0 0.860257 False True 0.0 20.0 15.0 0.93793 2.33333 noms 7.0 0.0 60.0 nombre / espace, gouts et sons n/a False 0.955381 True 2.0 3.0 0.304348 1.0 visages, lieux 1.0 150.0 178 n/a 2.0 0.940899 7.375 4.0 1.0 80.0 10.0 400.0 False True True False 3.0 False 15.0 1.05283 2.0 False +S89 70.0 1.09973 n/a n/a False -0.0425949 1.0 False 5.0 n/a 3.0 40.0 0.927972 2.55556 False True 0.0 0.666667 3.0 200.0 n/a 9.0 False 16.0 20.0 0.583333 40.0 11.0 60.0 False 0.0 10.0 0.0 3.0 1.01285 150.0 0.0 10.0 0.885377 3.0 24.0 3.0 62.0 38.0 n/a 7.0 0.0 2.0 0 True 4.0 True 3.0 0.992471 1.64304 False 40.0 False False 3.0 False 1.63685 n/a 15.0 30.0 25.0 50.0 100.0 150.0 d 25.0 100.0 2.0 False 0.0 True 1.0 False 5.0 False 2.0 0.00909366 False True 0.0 15.0 10.0 1.16136 4.0 LIEUX 14.0 0.0 5.0 0 n/a False 1.01316 False 0.0 0.0 0.4375 1.0 VISAGE 5.0 1000.0 289 n/a 2.0 1.15436 3.38194 9.0 3.0 50.0 18.0 300.0 False True True False 1000.0 False 23.0 0.126279 2.0 False +S90 80.0 1.19538 n/a n/a False -0.00869327 3.0 False 2.0 n/a 3.0 15.0 0.894658 1.71429 False False 2.0 0.7 2.8 400.0 n/a 7.0 False 9.5 7.0 0.666667 60.0 10.0 40.0 False 0.33 10.0 1.0 1.0 1.11558 20.0 2.0 10.0 0.885965 3.0 22.0 5.0 62.0 45.0 n/a 7.0 0.0 3.0 en histoire False 4.0 True 2.0 0.983444 1.75143 False 30.0 True False 1.0 False 1.77922 n/a 15.0 60.0 30.0 80.0 100.0 150.0 d 15.0 130.0 2.0 True 1.0 True 0.0 False 4.0 False 1.0 0.905508 False False 1.0 8.0 10.0 1.26396 1.66667 lieux 5.0 0.0 60.0 n/a n/a False 0.939536 False 0.0 1.5 
0.263158 1.0 0 3.0 20.0 213 n/a 16.0 1.27144 5.84722 10.0 2.0 80.0 10.0 30.0 False True True False 2.5 False 12.0 0.953995 2.0 False +S91 50.0 0.611427 1.0 1.0 False -0.00681096 3.5 False 3.0 1.0 2.0 20.0 0.966239 1.6 False False 2.0 0.9 4.2 500.0 1.0 10.0 False 13.0 7.0 0.833333 20.0 10.0 50.0 False 0.0 10.0 0.0 3.0 0.920033 40.0 2.0 10.0 0.959428 3.0 23.0 5.0 61.0 43.0 1.0 10.0 0.0 3.0 deglutition a 15ans pendant 4mois False 4.0 True 4.0 0.987777 0.899697 False 120.0 False False 2.0 False 0.910054 n/a 10.0 30.0 20.0 50.0 60.0 70.0 u 20.0 70.0 2.0 False 0.0 True 1.0 False 4.0 False 1.0 n/a False False 1.0 15.0 10.0 1.04483 2.0 n/a 6.0 0.0 60.0 0 n/a False 0.934578 True 2.0 3.0 0.230769 2.0 LIEUX 4.0 60.0 334 n/a 17.0 1.04858 8.25694 7.0 3.0 n/a 10.0 140.0 False True False True 2.0 False 16.0 n/a 4.0 False +S92 38.0 0.475912 n/a n/a False -0.0850252 1.5 False 5.0 n/a 1.0 6.0 0.785702 1.44444 False True 1.0 0.8 3.2 500.0 n/a 9.0 False 11.0 9.0 0.75 25.0 12.0 29.0 False 0.0 10.0 0.0 3.0 0.696258 120.0 1.0 9.0 0.700677 3.0 32.0 5.0 65.0 43.0 n/a 8.0 2.0 3.0 0 False 4.0 True 4.0 0.99624 0.744678 False 200.0 False False 3.0 False 0.708352 n/a 15.0 25.0 24.0 44.0 66.0 77.0 u 12.0 29.0 4.0 False 0.0 True n/a False 5.0 False 1.0 0.815552 False True 1.0 15.0 12.0 0.810939 3.66667 0 4.0 0.0 52.0 Suites ds espaCe et couleurs n/a False 0.972245 False 1.0 2.0 0.181818 2.0 0 5.0 20.0 196 n/a 9.0 0.793536 6.95833 6.0 5.0 80.0 10.0 50.0 False True False False 3.0 False 13.0 1.04952 3.0 False +S93 50.0 0.717351 n/a 0.0 False -0.0384674 3.5 False 3.0 n/a 1.0 20.0 0.959921 2.08333 False False 1.0 1.0 4.8 280.0 n/a 12.0 False 18.5 8.0 0.916667 40.0 12.0 70.0 False 0.0 10.0 0.0 3.0 0.856346 30.0 1.0 10.0 0.921454 3.0 35.0 4.0 60.0 43.0 n/a 10.0 0.0 3.0 0 False 3.0 True 4.0 0.997048 1.0501 False 30.0 False False 3.0 False 1.06771 n/a 15.0 40.0 30.0 50.0 60.0 100.0 d 20.0 80.0 4.0 False 0.0 True 1.0 False 4.0 False 1.0 0.907422 False False 1.0 10.0 12.0 0.967146 3.0 NOMS 13.0 0.0 50.0 NUM DE TELEPHONE n/a False 1.00396 False 2.0 3.0 0.351351 1.0 LIEUX CHIFFRES 4.0 120.0 343 n/a 13.0 0.975992 8.03472 5.0 5.0 50.0 12.0 100.0 False True n/a False 2.0 False 25.0 0.980029 2.0 False +S94 53.0 0.913518 n/a n/a False -0.0127455 4.0 False 3.0 n/a 1.0 17.0 0.992355 2.1 False False 1.0 0.9 4.7 250.0 n/a 10.0 False 15.5 6.5 1.0 30.0 10.0 60.0 False 0.0 9.0 0.0 3.0 1.03356 50.0 2.0 10.0 0.97961 3.0 36.0 5.0 64.0 43.0 n/a 9.0 0.0 3.0 0 False 1.0 True 5.0 0.994238 1.35131 False 65.0 False False 3.0 False 1.35969 n/a 13.0 38.0 25.0 60.0 90.0 110.0 d 18.0 90.0 4.0 False 0.0 True 5.0 False 4.0 False 1.0 0.937513 False False 1.0 15.0 15.0 1.17454 3.33333 0 11.0 0.0 50.0 nombre / espace n/a False 0.979051 False 1.0 4.0 0.354839 3.0 0 4.0 55.0 189 n/a 16.0 1.17797 9.66667 4.0 4.0 60.0 10.0 120.0 False True n/a False 2.75 False 21.0 0.933082 4.0 False diff --git a/nilearn/datasets/tests/data/localizer_index.json b/nilearn/datasets/tests/data/localizer_index.json new file mode 100755 index 0000000000..2d0bb9fb2f --- /dev/null +++ b/nilearn/datasets/tests/data/localizer_index.json @@ -0,0 +1,114 @@ +[ + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_mask.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_spm.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculationVsSentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculationVsSentences_contrasts.json", + 
"/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculationVsSentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculation_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculation_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualCalculation_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualMotorVsCognitiveProcessing_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualMotorVsCognitiveProcessing_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualMotorVsCognitiveProcessing_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualSentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualSentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Auditory&VisualSentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculationVsAuditorySentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculationVsAuditorySentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculationVsAuditorySentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculation_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculation_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryCalculation_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryClickVsAuditorySentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryClickVsAuditorySentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryClickVsAuditorySentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessingVsVisualProcessing_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessingVsVisualProcessing_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessingVsVisualProcessing_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessing_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessing_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditoryProcessing_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditorySentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditorySentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-AuditorySentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Checkerboard_cmaps.nii.gz", + 
"/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Checkerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Checkerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-CognitiveProcessingVsMotor_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-CognitiveProcessingVsMotor_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-CognitiveProcessingVsMotor_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-EffectsOfInterest_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-EffectsOfInterest_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-EffectsOfInterest_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalCheckerboard_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalVsVerticalCheckerboard_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalVsVerticalCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-HorizontalVsVerticalCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClickVsRightAuditory&VisualClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClickVsRightAuditory&VisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClickVsRightAuditory&VisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditory&VisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditoryClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditoryClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftAuditoryClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftVisualClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftVisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-LeftVisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Motor_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Motor_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-Motor_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClickVsLeftAuditory&VisualClick_cmaps.nii.gz", + 
"/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClickVsLeftAuditory&VisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClickVsLeftAuditory&VisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditory&VisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditoryClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditoryClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightAuditoryClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightVisualClick_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightVisualClick_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-RightVisualClick_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalCheckerboard_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalVsHorizontalCheckerboard_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalVsHorizontalCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VerticalVsHorizontalCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculationVsSentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculationVsSentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculationVsSentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculation_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculation_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualCalculation_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualClickVsVisualSentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualClickVsVisualSentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualClickVsVisualSentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsAuditoryProcessing_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsAuditoryProcessing_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsAuditoryProcessing_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsCheckerboard_cmaps.nii.gz", + 
"/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessingVsCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessing_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessing_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualProcessing_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentencesVsCheckerboard_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentencesVsCheckerboard_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentencesVsCheckerboard_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentences_cmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentences_contrasts.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_acq-VisualSentences_tmaps.nii.gz", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_design.json", + "/localizer/derivatives/spm_1st_level/sub-{0}/sub-{0}_task-localizer_design.npy", + "/localizer/derivatives/spm_preprocessing/sub-{0}/sub-{0}_T1w.nii.gz", + "/localizer/derivatives/spm_preprocessing/sub-{0}/sub-{0}_task-localizer_bold.nii.gz", + "/localizer/sub-{0}/anat/sub-{0}_T1w.nii.gz", + "/localizer/sub-{0}/func/sub-{0}_task-localizer_bold.json", + "/localizer/sub-{0}/func/sub-{0}_task-localizer_bold.nii.gz" +] + diff --git a/nilearn/datasets/tests/data/localizer_participants.tsv b/nilearn/datasets/tests/data/localizer_participants.tsv new file mode 100644 index 0000000000..a029d440d6 --- /dev/null +++ b/nilearn/datasets/tests/data/localizer_participants.tsv @@ -0,0 +1,95 @@ +participant_id age sex site family language +S01 24 M SHFJ F01 French +S02 20 M SHFJ F02 French +S03 22 F SHFJ F03 French +S04 20 M Neurospin F04 French +S05 23 F Neurospin F05 French +S06 19 M Neurospin F06 French +S07 26 F Neurospin F07 French +S08 27 M Neurospin F89 French +S09 21 F Neurospin F09 French +S10 24 F SHFJ F10 French +S11 47 F SHFJ F11 French +S12 23 F SHFJ F12 French +S13 19 M Neurospin F13 French +S14 21 F SHFJ F14 French +S15 22 F SHFJ F15 French +S16 24 F SHFJ F16 French +S17 22 M SHFJ F17 French +S18 20 F Neurospin F18 French +S19 18 M SHFJ F52 French +S20 25 F SHFJ F20 French +S21 21 M SHFJ F21 French +S22 40 M SHFJ F22 French +S23 24 F Neurospin F23 French +S24 18 F Neurospin F24 French +S25 24 F SHFJ F25 French +S26 24 F Neurospin F26 French +S27 30 M Neurospin F27 French +S28 18 F Neurospin F28 French +S29 21 F Neurospin F29 French +S30 34 F SHFJ F30 French +S31 22 F Neurospin F31 French +S32 26 F SHFJ F32 French +S33 21 F SHFJ F33 French +S34 21 M SHFJ F34 French +S35 24 F SHFJ F35 French +S36 25 F SHFJ F36 French +S37 22 M Neurospin F37 French +S38 n/a M SHFJ F38 French +S39 22 M SHFJ F39 French +S40 24 F SHFJ F40 French +S41 18 M Neurospin F41 French +S42 26 M Neurospin F42 French +S43 49 M Neurospin F43 French +S44 34 F SHFJ F44 French +S45 21 M SHFJ F45 French +S46 24 F Neurospin F46 French +S47 19 M SHFJ F47 French +S48 21 F SHFJ F48 French +S49 22 F SHFJ F49 French +S50 32 F SHFJ F50 French +S51 30 M SHFJ F51 French +S52 22 F SHFJ F52 French +S53 18 F Neurospin F53 French 
+S54 20 F SHFJ F54 French +S55 21 F Neurospin F55 French +S56 23 M Neurospin F56 French +S57 20 F SHFJ F57 French +S58 34 M SHFJ F58 French +S59 30 M SHFJ F59 French +S60 21 M Neurospin F60 French +S61 23 M SHFJ F61 French +S62 n/a F SHFJ F62 French +S63 19 F Neurospin F63 French +S64 21 F Neurospin F64 French +S65 19 M Neurospin F65 French +S66 26 M Neurospin F66 French +S67 44 F SHFJ F67 French +S68 20 M SHFJ F68 French +S69 21 F SHFJ F69 French +S70 25 M SHFJ F59 French +S71 21 M SHFJ F71 French +S72 37 F Neurospin F72 French +S73 19 M Neurospin F73 French +S74 21 M SHFJ F74 French +S75 20 M Neurospin F75 French +S76 28 F SHFJ F76 French +S77 23 F SHFJ F77 French +S78 29 M Neurospin F78 French +S79 43 F SHFJ F79 French +S80 19 F SHFJ F80 French +S81 n/a M SHFJ F81 French +S82 20 F SHFJ F82 French +S83 20 F Neurospin F83 French +S84 23 F SHFJ F84 French +S85 21 M SHFJ F85 French +S86 31 M Neurospin F86 French +S87 n/a M SHFJ F87 French +S88 19 M SHFJ F88 French +S89 29 M Neurospin F89 French +S90 21 M SHFJ F90 French +S91 23 M Neurospin F91 French +S92 26 F SHFJ F92 French +S93 36 M Neurospin F93 French +S94 41 M SHFJ F94 French diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py index f6cffc48d1..a2ce5b8cf1 100644 --- a/nilearn/datasets/tests/test_atlas.py +++ b/nilearn/datasets/tests/test_atlas.py @@ -308,6 +308,24 @@ def test_fetch_coords_power_2011(): assert_not_equal(bunch.description, '') +def test_fetch_coords_seitzman_2018(): + bunch = atlas.fetch_coords_seitzman_2018() + assert_equal(len(bunch.rois), 300) + assert_equal(len(bunch.radius), 300) + assert_equal(len(bunch.networks), 300) + assert_equal(len(bunch.regions), 300) + assert_equal(len(np.unique(bunch.networks)), 14) + assert_equal(len(np.unique(bunch.regions)), 8) + np.testing.assert_array_equal(bunch.networks, np.sort(bunch.networks)) + assert_not_equal(bunch.description, '') + + assert bunch.regions[0] == "cortexL" + + bunch = atlas.fetch_coords_seitzman_2018(ordered_regions=False) + assert_true(np.any(bunch.networks != np.sort(bunch.networks))) + + + @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_destrieux_2009(): @@ -547,7 +565,7 @@ def test_fetch_atlas_pauli_2017(): @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) def test_fetch_atlas_schaefer_2018(): - valid_n_rois = [100, 200, 300, 400, 500, 600, 800, 1000] + valid_n_rois = list(range(100, 1100, 100)) valid_yeo_networks = [7, 17] valid_resolution_mm = [1, 2] diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py index 27193e9ac0..9c52e3dbca 100644 --- a/nilearn/datasets/tests/test_func.py +++ b/nilearn/datasets/tests/test_func.py @@ -5,6 +5,7 @@ # License: simplified BSD import os +import uuid import numpy as np import json import nibabel @@ -12,7 +13,8 @@ from sklearn.utils import check_random_state from nose import with_setup -from nose.tools import assert_true, assert_equal, assert_not_equal +from nose.tools import ( + assert_true, assert_false, assert_equal, assert_not_equal) from . 
import test_utils as tst from nilearn.datasets import utils, func @@ -146,193 +148,145 @@ def test_miyawaki2008(): assert_not_equal(dataset.description, '') +with open(os.path.join(tst.datadir, 'localizer_index.json')) as of: + localizer_template = json.load(of) +LOCALIZER_INDEX = {} +for idx in range(1, 95): + idx = str(idx).zfill(2) + sid = 'S{0}'.format(idx) + LOCALIZER_INDEX.update(dict( + (key.format(sid), uuid.uuid4().hex) + for key in localizer_template)) +LOCALIZER_INDEX['/localizer/phenotype/behavioural.tsv'] = uuid.uuid4().hex +LOCALIZER_PARTICIPANTS = np.recfromcsv( + os.path.join(tst.datadir, 'localizer_participants.tsv'), delimiter='\t') +LOCALIZER_BEHAVIOURAL = np.recfromcsv( + os.path.join(tst.datadir, 'localizer_behavioural.tsv'), delimiter='\t') + + +def mock_localizer_index(*args, **kwargs): + return LOCALIZER_INDEX + + +def mock_np_recfromcsv(*args, **kwargs): + if args[0].endswith('participants.tsv'): + return LOCALIZER_PARTICIPANTS + elif args[0].endswith('behavioural.tsv'): + return LOCALIZER_BEHAVIOURAL + else: + raise ValueError('Unexpected args!') + + +def setup_localizer(): + global original_json_load + global mock_json_load + mock_json_load = mock_localizer_index + original_json_load = json.load + json.load = mock_json_load + + global original_np_recfromcsv + original_np_recfromcsv = np.recfromcsv + np.recfromcsv = mock_np_recfromcsv + + +def teardown_localizer(): + global original_json_load + json.load = original_json_load + + global original_np_recfromcsv + np.recfromcsv = original_np_recfromcsv + + @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +@with_setup(setup_localizer, teardown_localizer) def test_fetch_localizer_contrasts(): - local_url = "file://" + tst.datadir - ids = np.asarray([('S%2d' % i).encode() for i in range(94)]) - ids = ids.view(dtype=[('subject_id', 'S3')]) - tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) - tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) - - # Disabled: cannot be tested without actually fetching covariates CSV file - # All subjects - dataset = func.fetch_localizer_contrasts(["checkerboard"], - data_dir=tst.tmpdir, - url=local_url, - verbose=0) - assert_true(dataset.anats is None) - assert_true(dataset.tmaps is None) - assert_true(dataset.masks is None) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.cmaps), 94) - - # 20 subjects - dataset = func.fetch_localizer_contrasts(["checkerboard"], - n_subjects=20, - data_dir=tst.tmpdir, - url=local_url, - verbose=0) - assert_true(dataset.anats is None) - assert_true(dataset.tmaps is None) - assert_true(dataset.masks is None) + # 2 subjects + dataset = func.fetch_localizer_contrasts( + ['checkerboard'], + n_subjects=2, + data_dir=tst.tmpdir, + verbose=1) + assert_false(hasattr(dataset, 'anats')) + assert_false(hasattr(dataset, 'tmaps')) + assert_false(hasattr(dataset, 'masks')) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_equal(len(dataset.cmaps), 20) - assert_equal(dataset.ext_vars.size, 20) + assert_equal(len(dataset.cmaps), 2) + assert_equal(dataset.ext_vars.size, 2) # Multiple contrasts dataset = func.fetch_localizer_contrasts( - ["checkerboard", "horizontal checkerboard"], - n_subjects=20, data_dir=tst.tmpdir, - verbose=0) -
assert_true(dataset.anats is None) - assert_true(dataset.tmaps is None) - assert_true(dataset.masks is None) + ['checkerboard', 'horizontal checkerboard'], + n_subjects=2, + data_dir=tst.tmpdir, + verbose=1) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(len(dataset.cmaps), 20 * 2) # two contrasts are fetched - assert_equal(dataset.ext_vars.size, 20) - - # get_anats=True - dataset = func.fetch_localizer_contrasts(["checkerboard"], - data_dir=tst.tmpdir, - url=local_url, - get_anats=True, - verbose=0) - assert_true(dataset.masks is None) - assert_true(dataset.tmaps is None) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.anats[0], _basestring)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.anats), 94) - assert_equal(len(dataset.cmaps), 94) - - # get_masks=True - dataset = func.fetch_localizer_contrasts(["checkerboard"], - data_dir=tst.tmpdir, - url=local_url, - get_masks=True, - verbose=0) - assert_true(dataset.anats is None) - assert_true(dataset.tmaps is None) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_true(isinstance(dataset.masks[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.cmaps), 94) - assert_equal(len(dataset.masks), 94) - - # get_tmaps=True - dataset = func.fetch_localizer_contrasts(["checkerboard"], - data_dir=tst.tmpdir, - url=local_url, - get_tmaps=True, - verbose=0) - assert_true(dataset.anats is None) - assert_true(dataset.masks is None) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_true(isinstance(dataset.tmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.cmaps), 94) - assert_equal(len(dataset.tmaps), 94) + assert_equal(len(dataset.cmaps), 2 * 2) # two contrasts are fetched + assert_equal(dataset.ext_vars.size, 2) # all get_*=True - dataset = func.fetch_localizer_contrasts(["checkerboard"], - data_dir=tst.tmpdir, - url=local_url, - get_anats=True, - get_masks=True, - get_tmaps=True, - verbose=0) - + dataset = func.fetch_localizer_contrasts( + ['checkerboard'], + n_subjects=1, + data_dir=tst.tmpdir, + get_anats=True, + get_masks=True, + get_tmaps=True, + verbose=1) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.anats[0], _basestring)) assert_true(isinstance(dataset.cmaps[0], _basestring)) assert_true(isinstance(dataset.masks[0], _basestring)) assert_true(isinstance(dataset.tmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 94) - assert_equal(len(dataset.anats), 94) - assert_equal(len(dataset.cmaps), 94) - assert_equal(len(dataset.masks), 94) - assert_equal(len(dataset.tmaps), 94) + assert_equal(dataset.ext_vars.size, 1) + assert_equal(len(dataset.anats), 1) + assert_equal(len(dataset.cmaps), 1) + assert_equal(len(dataset.masks), 1) + assert_equal(len(dataset.tmaps), 1) assert_not_equal(dataset.description, '') # grab a given list of subjects - dataset2 = func.fetch_localizer_contrasts(["checkerboard"], - n_subjects=[2, 3, 5], - data_dir=tst.tmpdir, - url=local_url, - get_anats=True, - get_masks=True, - get_tmaps=True, - verbose=0) - - # Check that we are getting only 3 subjects + dataset2 = func.fetch_localizer_contrasts( + ['checkerboard'], + n_subjects=[2, 3, 5], + data_dir=tst.tmpdir, + 
verbose=1) assert_equal(dataset2.ext_vars.size, 3) - assert_equal(len(dataset2.anats), 3) assert_equal(len(dataset2.cmaps), 3) - assert_equal(len(dataset2.masks), 3) - assert_equal(len(dataset2.tmaps), 3) - np.testing.assert_array_equal(dataset2.ext_vars, - dataset.ext_vars[[1, 2, 4]]) - np.testing.assert_array_equal(dataset2.anats, - np.array(dataset.anats)[[1, 2, 4]]) - np.testing.assert_array_equal(dataset2.cmaps, - np.array(dataset.cmaps)[[1, 2, 4]]) - np.testing.assert_array_equal(dataset2.masks, - np.array(dataset.masks)[[1, 2, 4]]) - np.testing.assert_array_equal(dataset2.tmaps, - np.array(dataset.tmaps)[[1, 2, 4]]) + assert_equal([row[0] for row in dataset2.ext_vars], + [b'S02', b'S03', b'S05']) @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +@with_setup(setup_localizer, teardown_localizer) def test_fetch_localizer_calculation_task(): - local_url = "file://" + tst.datadir - ids = np.asarray(['S%2d' % i for i in range(94)]) - ids = ids.view(dtype=[('subject_id', 'S3')]) - tst.mock_fetch_files.add_csv('cubicwebexport.csv', ids) - tst.mock_fetch_files.add_csv('cubicwebexport2.csv', ids) - - # Disabled: cannot be tested without actually fetching covariates CSV file - # All subjects - dataset = func.fetch_localizer_calculation_task(data_dir=tst.tmpdir, - url=local_url, - verbose=0) - assert_true(isinstance(dataset.ext_vars, np.recarray)) - assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 1) - assert_equal(len(dataset.cmaps), 1) - - # 20 subjects - dataset = func.fetch_localizer_calculation_task(n_subjects=20, - data_dir=tst.tmpdir, - url=local_url, - verbose=0) + # 2 subjects + dataset = func.fetch_localizer_calculation_task( + n_subjects=2, + data_dir=tst.tmpdir, + verbose=1) assert_true(isinstance(dataset.ext_vars, np.recarray)) assert_true(isinstance(dataset.cmaps[0], _basestring)) - assert_equal(dataset.ext_vars.size, 20) - assert_equal(len(dataset.cmaps), 20) + assert_equal(dataset.ext_vars.size, 2) + assert_equal(len(dataset.cmaps), 2) assert_not_equal(dataset.description, '') @with_setup(setup_mock, teardown_mock) @with_setup(tst.setup_tmpdata, tst.teardown_tmpdata) +@with_setup(setup_localizer, teardown_localizer) def test_fetch_localizer_button_task(): - local_url = "file://" + tst.datadir - - # Disabled: cannot be tested without actually fetching covariates CSV file - # Only one subject - dataset = func.fetch_localizer_button_task(data_dir=tst.tmpdir, - url=local_url, - verbose=0) - assert_true(isinstance(dataset.tmap, _basestring)) - assert_true(isinstance(dataset.anat, _basestring)) + # 2 subjects + dataset = func.fetch_localizer_button_task( + data_dir=tst.tmpdir, + verbose=1) + assert_true(isinstance(dataset.tmaps[0], _basestring)) + assert_true(isinstance(dataset.anats[0], _basestring)) assert_not_equal(dataset.description, '') diff --git a/nilearn/datasets/tests/test_struct.py b/nilearn/datasets/tests/test_struct.py index f94a99833e..fbcce318f4 100644 --- a/nilearn/datasets/tests/test_struct.py +++ b/nilearn/datasets/tests/test_struct.py @@ -175,3 +175,14 @@ def test_fetch_surf_fsaverage(): assert keys.issubset(set(dataset.keys())) assert_not_equal(dataset.description, '') + +def test_fetch_surf_fsaverage5_sphere(): + for mesh in ['fsaverage5_sphere']: + + dataset = struct.fetch_surf_fsaverage( + mesh, data_dir=tst.tmpdir) + + keys = {'sphere_left', 'sphere_right'} + + assert keys.issubset(set(dataset.keys())) + assert_not_equal(dataset.description, '') diff --git 
a/nilearn/datasets/utils.py b/nilearn/datasets/utils.py index 446cae359e..d9aade43fa 100644 --- a/nilearn/datasets/utils.py +++ b/nilearn/datasets/utils.py @@ -336,9 +336,13 @@ def _uncompress_file(file_, delete_archive=True, verbose=1): processed = True elif ext == '.gz' or header.startswith(b'\x1f\x8b'): import gzip - gz = gzip.open(file_) if ext == '.tgz': filename = filename + '.tar' + elif ext == '': + # For gzip files, we rely on the assumption that there is an extension + shutil.move(file_, file_ + '.gz') + file_ = file_ + '.gz' + gz = gzip.open(file_) out = open(filename, 'wb') shutil.copyfileobj(gz, out, 8192) gz.close() diff --git a/nilearn/decoding/searchlight.py b/nilearn/decoding/searchlight.py index 5829b1ff5b..c853f9e0de 100644 --- a/nilearn/decoding/searchlight.py +++ b/nilearn/decoding/searchlight.py @@ -16,7 +16,7 @@ import numpy as np -from sklearn.externals.joblib import Parallel, delayed, cpu_count +from nilearn._utils.compat import Parallel, delayed, cpu_count from sklearn import svm from sklearn.base import BaseEstimator from sklearn.exceptions import ConvergenceWarning diff --git a/nilearn/decoding/space_net.py b/nilearn/decoding/space_net.py index 9867b3ca21..368513a8ef 100644 --- a/nilearn/decoding/space_net.py +++ b/nilearn/decoding/space_net.py @@ -24,7 +24,7 @@ from sklearn.linear_model.base import LinearModel from sklearn.feature_selection import (SelectPercentile, f_regression, f_classif) -from sklearn.externals.joblib import Memory, Parallel, delayed +from nilearn._utils.compat import Memory, Parallel, delayed from sklearn.preprocessing import LabelBinarizer from sklearn.metrics import accuracy_score from ..input_data.masker_validation import check_embedded_nifti_masker diff --git a/nilearn/decoding/tests/test_graph_net.py b/nilearn/decoding/tests/test_graph_net.py index 9a7ddb1a65..45cf046ffc 100644 --- a/nilearn/decoding/tests/test_graph_net.py +++ b/nilearn/decoding/tests/test_graph_net.py @@ -2,7 +2,7 @@ import numpy as np import scipy as sp from numpy.testing import assert_almost_equal -from sklearn.utils import extmath +from scipy import linalg from sklearn.utils import check_random_state from nilearn.decoding.objective_functions import _gradient, _div from nilearn.decoding.space_net_solvers import ( @@ -167,12 +167,12 @@ def test__squared_loss_derivative_lipschitz_constant(): for _ in range(20): x_1 = rng.rand(*w.shape) * rng.randint(1000) x_2 = rng.rand(*w.shape) * rng.randint(1000) - gradient_difference = extmath.norm( + gradient_difference = linalg.norm( _squared_loss_and_spatial_grad_derivative(X, y, x_1, mask, grad_weight) - _squared_loss_and_spatial_grad_derivative(X, y, x_2, mask, grad_weight)) - point_difference = extmath.norm(x_1 - x_2) + point_difference = linalg.norm(x_1 - x_2) assert_true( gradient_difference <= lipschitz_constant * point_difference) @@ -186,12 +186,12 @@ def test_logistic_derivative_lipschitz_constant(): for _ in range(20): x_1 = rng.rand((w.shape[0] + 1)) * rng.randint(1000) x_2 = rng.rand((w.shape[0] + 1)) * rng.randint(1000) - gradient_difference = extmath.norm( + gradient_difference = linalg.norm( _logistic_data_loss_and_spatial_grad_derivative( X, y, x_1, mask, grad_weight) - _logistic_data_loss_and_spatial_grad_derivative( X, y, x_2, mask, grad_weight)) - point_difference = extmath.norm(x_1 - x_2) + point_difference = linalg.norm(x_1 - x_2) assert_true( gradient_difference <= lipschitz_constant * point_difference) @@ -224,12 +224,12 @@ def test_tikhonov_regularization_vs_graph_net(): screening_percentile=100.,
standardize=False) graph_net.fit(X_, y.copy()) coef_ = graph_net.coef_[0] - graph_net_perf = 0.5 / y.size * extmath.norm( + graph_net_perf = 0.5 / y.size * linalg.norm( np.dot(X, coef_) - y) ** 2\ - + 0.5 * extmath.norm(np.dot(G, coef_)) ** 2 - optimal_model_perf = 0.5 / y.size * extmath.norm( + + 0.5 * linalg.norm(np.dot(G, coef_)) ** 2 + optimal_model_perf = 0.5 / y.size * linalg.norm( np.dot(X, optimal_model) - y) ** 2\ - + 0.5 * extmath.norm(np.dot(G, optimal_model)) ** 2 + + 0.5 * linalg.norm(np.dot(G, optimal_model)) ** 2 assert_almost_equal(graph_net_perf, optimal_model_perf, decimal=1) diff --git a/nilearn/decoding/tests/test_searchlight.py b/nilearn/decoding/tests/test_searchlight.py index c731ad92e0..fc7b42f9df 100644 --- a/nilearn/decoding/tests/test_searchlight.py +++ b/nilearn/decoding/tests/test_searchlight.py @@ -100,10 +100,10 @@ def test_searchlight(): rand = np.random.RandomState(0) data = rand.rand(5, 5, 5) data_img = nibabel.Nifti1Image(data, affine=np.eye(4)) - imgs = [data_img, data_img, data_img, data_img, data_img, data_img] + imgs = [data_img] * 12 # labels - y = [0, 1, 0, 1, 0, 1] + y = [0, 1] * 6 # run searchlight on list of 3D images sl = searchlight.SearchLight(mask_img) diff --git a/nilearn/decoding/tests/test_space_net.py b/nilearn/decoding/tests/test_space_net.py index f75b23ebd0..bd18818bb9 100644 --- a/nilearn/decoding/tests/test_space_net.py +++ b/nilearn/decoding/tests/test_space_net.py @@ -4,8 +4,8 @@ from nose.tools import (assert_equal, assert_true, assert_false, assert_raises) import numpy as np +from scipy import linalg from sklearn.datasets import load_iris -from sklearn.utils import extmath from sklearn.linear_model import Lasso from sklearn.utils import check_random_state from sklearn.linear_model import LogisticRegression @@ -241,7 +241,7 @@ def test_lasso_vs_graph_net(): penalty="graph-net", max_iter=100) lasso.fit(X_, y) graph_net.fit(X, y) - lasso_perf = 0.5 / y.size * extmath.norm(np.dot( + lasso_perf = 0.5 / y.size * linalg.norm(np.dot( X_, lasso.coef_) - y) ** 2 + np.sum(np.abs(lasso.coef_)) graph_net_perf = 0.5 * ((graph_net.predict(X) - y) ** 2).mean() np.testing.assert_almost_equal(graph_net_perf, lasso_perf, decimal=3) diff --git a/nilearn/decomposition/base.py b/nilearn/decomposition/base.py index aef70a7219..d479dd2078 100644 --- a/nilearn/decomposition/base.py +++ b/nilearn/decomposition/base.py @@ -14,7 +14,7 @@ import sklearn import nilearn from sklearn.base import BaseEstimator, TransformerMixin -from sklearn.externals.joblib import Memory, Parallel, delayed +from nilearn._utils.compat import Memory, Parallel, delayed from sklearn.linear_model import LinearRegression from sklearn.utils import check_random_state from sklearn.utils.extmath import randomized_svd, svd_flip @@ -84,7 +84,6 @@ def fast_svd(X, n_components, random_state=None): return U, S, V - def mask_and_reduce(masker, imgs, confounds=None, reduction_ratio='auto', @@ -218,10 +217,10 @@ def _mask_and_reduce_single(masker, n_samples = int(ceil(data_n_samples * reduction_ratio)) U, S, V = cache(fast_svd, memory, - memory_level=memory_level, - func_memory_level=3)(this_data.T, - n_samples, - random_state=random_state) + memory_level=memory_level, + func_memory_level=3)(this_data.T, + n_samples, + random_state=random_state) U = U.T.copy() U = U * S[:, np.newaxis] return U diff --git a/nilearn/decomposition/canica.py b/nilearn/decomposition/canica.py index ef6d77d63a..2a4ad45aac 100644 --- a/nilearn/decomposition/canica.py +++ b/nilearn/decomposition/canica.py @@ -10,7 +10,7 
@@ import numpy as np from scipy.stats import scoreatpercentile from sklearn.decomposition import fastica -from sklearn.externals.joblib import Memory, delayed, Parallel +from nilearn._utils.compat import Memory, delayed, Parallel from sklearn.utils import check_random_state from .multi_pca import MultiPCA diff --git a/nilearn/decomposition/dict_learning.py b/nilearn/decomposition/dict_learning.py index e31b2e734f..25c79a435f 100644 --- a/nilearn/decomposition/dict_learning.py +++ b/nilearn/decomposition/dict_learning.py @@ -15,7 +15,7 @@ import numpy as np import sklearn from sklearn.decomposition import dict_learning_online -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from sklearn.linear_model import Ridge from .base import BaseDecomposition diff --git a/nilearn/decomposition/multi_pca.py b/nilearn/decomposition/multi_pca.py index 1cc60f6229..84b6daeb45 100644 --- a/nilearn/decomposition/multi_pca.py +++ b/nilearn/decomposition/multi_pca.py @@ -3,7 +3,7 @@ This is a good initialization method for ICA. """ import numpy as np -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from sklearn.utils.extmath import randomized_svd from .base import BaseDecomposition @@ -32,7 +32,8 @@ class MultiPCA(BaseDecomposition): If smoothing_fwhm is not None, it gives the size in millimeters of the spatial smoothing to apply to the signal. - mask: Niimg-like object, instance of NiftiMasker or MultiNiftiMasker, optional + mask: Niimg-like object, instance of NiftiMasker or MultiNiftiMasker, + optional Mask to be used on data. If an instance of masker is passed, then its mask will be used. If no mask is given, it will be computed automatically by a MultiNiftiMasker with default diff --git a/nilearn/externals/README.md b/nilearn/externals/README.md new file mode 100644 index 0000000000..0e6519bbed --- /dev/null +++ b/nilearn/externals/README.md @@ -0,0 +1,5 @@ +This directory contains bundled external dependencies. + +Note for distribution packagers: if you want to remove the duplicated +code and depend on a packaged version, we suggest that you simply do a +symbolic link in this directory. diff --git a/nilearn/externals/__init__.py b/nilearn/externals/__init__.py new file mode 100644 index 0000000000..c861213890 --- /dev/null +++ b/nilearn/externals/__init__.py @@ -0,0 +1,7 @@ +""" +External, bundled dependencies for Nilearn. + +To ignore linting on these files, at the top define: + +# flake8: noqa +""" diff --git a/nilearn/externals/conftest.py b/nilearn/externals/conftest.py new file mode 100644 index 0000000000..f3bb9d9e9a --- /dev/null +++ b/nilearn/externals/conftest.py @@ -0,0 +1,8 @@ +# Do not collect any tests in externals. 
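+# (pytest invokes pytest_ignore_collect for every candidate path; returning +# True prunes the whole externals directory from collection.)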
This is more robust than using +# --ignore because --ignore needs a path and it is not convenient to pass in +# the externals path (very long install-dependent path in site-packages) when +# using --pyargs + + +def pytest_ignore_collect(path, config): + return True diff --git a/nilearn/externals/install_tempita.sh b/nilearn/externals/install_tempita.sh new file mode 100644 index 0000000000..9e6c1d4425 --- /dev/null +++ b/nilearn/externals/install_tempita.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# Script to do a local install of tempita +set +x +export LC_ALL=C +INSTALL_FOLDER=tmp/tempita_install +rm -rf tempita $INSTALL_FOLDER +if [ -z "$1" ] +then + TEMPITA=tempita +else + TEMPITA=$1 +fi + +pip install --no-cache $TEMPITA --target $INSTALL_FOLDER +cp -r $INSTALL_FOLDER/tempita tempita +rm -rf $INSTALL_FOLDER + +# Needed to rewrite the doctests +# Note: BSD sed -i needs an argument under OSX, +# so we first rename to .bak and then delete the backup files +find tempita -name "*.py" | xargs sed -i.bak "s/from tempita/from nilearn.externals.tempita/" +find tempita -name "*.bak" | xargs rm diff --git a/nilearn/externals/tempita/__init__.py b/nilearn/externals/tempita/__init__.py new file mode 100644 index 0000000000..91f4091672 --- /dev/null +++ b/nilearn/externals/tempita/__init__.py @@ -0,0 +1,1311 @@ +# flake8: noqa +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). + +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised.
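+ +A minimal usage sketch (purely illustrative):: + + >>> tmpl = Template('Hello {{name}}!') + >>> tmpl.substitute(name='world') + 'Hello world!'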
+""" +from __future__ import absolute_import, division, print_function + +import re +import sys +try: + from urllib.parse import quote as url_quote + from io import StringIO + from html import escape as html_escape +except ImportError: + from urllib import quote as url_quote + from cStringIO import StringIO + from cgi import escape as html_escape +import os +import tokenize +from ._looper import looper +from .compat3 import ( + PY3, bytes, basestring_, next, is_unicode, coerce_text, iteritems) + +__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate', + 'sub_html', 'html', 'bunch'] + +in_re = re.compile(r'\s+in\s+') +var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) + + +class TemplateError(Exception): + """Exception raised while parsing a template + """ + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = ' '.join(self.args) + if self.position: + msg = '%s at line %s column %s' % ( + msg, self.position[0], self.position[1]) + if self.name: + msg += ' in %s' % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, + get_template=from_template.get_template) + + +class Template(object): + + default_namespace = { + 'start_braces': '{{', + 'end_braces': '}}', + 'looper': looper, + } + + default_encoding = 'utf8' + default_inherit = None + + def __init__(self, content, name=None, namespace=None, stacklevel=None, + get_template=None, default_inherit=None, line_offset=0, + delimeters=None): + self.content = content + + # set delimeters + if delimeters is None: + delimeters = (self.default_namespace['start_braces'], + self.default_namespace['end_braces']) + else: + assert len(delimeters) == 2 and all( + [isinstance(delimeter, basestring_) + for delimeter in delimeters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace['start_braces'] = delimeters[0] + self.default_namespace['end_braces'] = delimeters[1] + self.delimeters = delimeters + + self._unicode = is_unicode(content) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__file__' in globals: + name = globals['__file__'] + if name.endswith('.pyc') or name.endswith('.pyo'): + name = name[:-1] + elif '__name__' in globals: + name = globals['__name__'] + else: + name = '' + if lineno: + name += ':%s' % lineno + self.name = name + self._parsed = parse( + content, name=name, line_offset=line_offset, + delimeters=self.delimeters) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename(cls, filename, namespace=None, encoding=None, + default_inherit=None, get_template=get_file_template): + f = open(filename, 'rb') + c = f.read() + f.close() + if encoding: + c = c.decode(encoding) + elif PY3: + c = c.decode('latin-1') + return cls(content=c, name=filename, namespace=namespace, + default_inherit=default_inherit, get_template=get_template) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return '<%s %s name=%r>' % ( + 
self.__class__.__name__, + hex(id(self))[2:], self.name) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError( + "You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError( + "You can only give one positional argument") + if not hasattr(args[0], 'items'): + raise TypeError( + "If you pass in a single argument, you must pass in a " + "dict-like object (with a .items() method); you gave %r" + % (args[0],)) + kw = args[0] + ns = kw + ns['__template_name__'] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + # __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if '__inherit__' in defs: + inherit = defs.pop('__inherit__') + else: + inherit = None + return ''.join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + # __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + 'You cannot use inheritance without passing in get_template', + position=None, name=self.name) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in iteritems(defs): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns['self'] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + # __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + # __traceback_hide__ = True + name, pos = code[0], code[1] + if name == 'py': + self._exec(code[2], ns, pos) + elif name == 'continue': + raise _TemplateContinue() + elif name == 'break': + raise _TemplateBreak() + elif name == 'for': + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == 'cond': + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == 'expr': + parts = code[2].split('|') + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == 'default': + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == 'inherit': + expr = code[2] + value = self._eval(expr, ns, pos) + defs['__inherit__'] = value + elif name == 'def': + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos) + elif name == 'comment': + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + # __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + 'Need %i items to unpack (got %i items)' + % (len(vars), len(item))) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + # __traceback_hide__ = True + # @@: if/else/else gets through
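+ # Each part is an ('if'|'elif'|'else', pos, expr, body) tuple produced by + # parse_cond; the first branch whose condition evaluates truthy (a bare + # 'else' always matches) has its body interpreted, then the loop breaks.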
for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == 'else': + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + # __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError( + 'invalid syntax in expression: %s' % code) + return value + except: + exc_info = sys.exc_info() + e = exc_info[1] + if getattr(e, 'args', None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + if PY3: + raise(e) + else: + raise (exc_info[1], e, exc_info[2]) + + def _exec(self, code, ns, pos): + # __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except: + exc_info = sys.exc_info() + e = exc_info[1] + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + if PY3: + raise(e) + else: + raise (exc_info[1], e, exc_info[2]) + + def _repr(self, value, pos): + # __traceback_hide__ = True + try: + if value is None: + return '' + if self._unicode: + value = str(value) + if not is_unicode(value): + value = value.decode('utf-8') + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if (is_unicode(value) and self.default_encoding): + value = value.encode(self.default_encoding) + except: + exc_info = sys.exc_info() + e = exc_info[1] + e.args = (self._add_line_info(e.args[0], pos),) + if PY3: + raise(e) + else: + raise (exc_info[1], e, exc_info[2]) + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + 'Cannot decode bytes value %r into unicode ' + '(no default_encoding provided)' % value) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + ' in string %r' % value) + elif not self._unicode and is_unicode(value): + if not self.default_encoding: + raise UnicodeEncodeError( + 'Cannot encode unicode value %r into bytes ' + '(no default_encoding provided)' % value) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % ( + msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimeters=None, **kw): + name = kw.get('__name') + tmpl = Template(content, name=name, delimeters=delimeters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + + def __init__(self, **kw): + for name, value in iteritems(kw): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if 'default' in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return dict.__getitem__(self, 'default') + else: + return dict.__getitem__(self, key) + + def __repr__(self): + items = [ + (k, v) for k, v in iteritems(self)] + items.sort() + return '<%s %s>' % ( + self.__class__.__name__, + ' '.join(['%s=%r' % (k, v) for k, v in items])) + +############################################################ +# HTML Templating 
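+# HTMLTemplate below substitutes like Template but HTML-escapes interpolated +# values via html_quote unless a value exposes an __html__ method.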
+############################################################ + + +class html(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return self.value + + def __html__(self): + return self.value + + def __repr__(self): + return '<%s %r>' % ( + self.__class__.__name__, self.value) + + +def html_quote(value, force=True): + if not force and hasattr(value, '__html__'): + return value.__html__() + if value is None: + return '' + if not isinstance(value, basestring_): + value = coerce_text(value) + if sys.version >= "3" and isinstance(value, bytes): + value = html_escape(value.decode('latin1'), 1) + value = value.encode('latin1') + else: + value = html_escape(value, 1) + if sys.version < "3": + if is_unicode(value): + value = value.encode('ascii', 'xmlcharrefreplace') + return value + + +def url(v): + v = coerce_text(v) + if is_unicode(v): + v = v.encode('utf8') + return url_quote(v) + + +def attr(**kw): + kw = list(iteritems(kw)) + kw.sort() + parts = [] + for name, value in kw: + if value is None: + continue + if name.endswith('_'): + name = name[:-1] + parts.append('%s="%s"' % (html_quote(name), html_quote(value))) + return html(' '.join(parts)) + + +class HTMLTemplate(Template): + + default_namespace = Template.default_namespace.copy() + default_namespace.update(dict( + html=html, + attr=attr, + url=url, + html_quote=html_quote)) + + def _repr(self, value, pos): + if hasattr(value, '__html__'): + value = value.__html__() + quote = False + else: + quote = True + plain = Template._repr(self, value, pos) + if quote: + return html_quote(plain) + else: + return plain + + +def sub_html(content, **kw): + name = kw.get('__name') + tmpl = HTMLTemplate(content, name=name) + return tmpl.substitute(kw) + + +class TemplateDef(object): + def __init__(self, template, func_name, func_signature, + body, ns, pos, bound_self=None): + self._template = template + self._func_name = func_name + self._func_signature = func_signature + self._body = body + self._ns = ns + self._pos = pos + self._bound_self = bound_self + + def __repr__(self): + return '<tempita function %s(%s) at %s:%s>' % ( + self._func_name, self._func_signature, + self._template.name, self._pos) + + def __str__(self): + return self() + + def __call__(self, *args, **kw): + values = self._parse_signature(args, kw) + ns = self._ns.copy() + ns.update(values) + if self._bound_self is not None: + ns['self'] = self._bound_self + out = [] + subdefs = {} + self._template._interpret_codes(self._body, ns, out, subdefs) + return ''.join(out) + + def __get__(self, obj, type=None): + if obj is None: + return self + return self.__class__( + self._template, self._func_name, self._func_signature, + self._body, self._ns, self._pos, bound_self=obj) + + def _parse_signature(self, args, kw): + values = {} + sig_args, var_args, var_kw, defaults = self._func_signature + extra_kw = {} + for name, value in iteritems(kw): + if not var_kw and name not in sig_args: + raise TypeError( + 'Unexpected argument %s' % name) + if name in sig_args: + values[name] = value + else: + extra_kw[name] = value + args = list(args) + sig_args = list(sig_args) + while args: + while sig_args and sig_args[0] in values: + sig_args.pop(0) + if sig_args: + name = sig_args.pop(0) + values[name] = args.pop(0) + elif var_args: + values[var_args] = tuple(args) + break + else: + raise TypeError( + 'Extra positional arguments: %s' + % ', '.join(repr(v) for v in args)) + for name, value_expr in iteritems(defaults): + if name not in values: + values[name] = self._template._eval( + value_expr, self._ns,
self._pos) + for name in sig_args: + if name not in values: + raise TypeError( + 'Missing argument: %s' % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject(object): + + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter(object): + + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return '<%s around %r>' % ( + self.__class__.__name__, self.__template_obj) + + +class _Empty(object): + def __call__(self, *args, **kw): + return self + + def __str__(self): + return '' + + def __repr__(self): + return 'Empty' + + def __unicode__(self): + return '' if PY3 else u'' + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + + if sys.version < "3": + __nonzero__ = __bool__ + +Empty = _Empty() +del _Empty + +############################################################ +# Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None): + if delimeters is None: + delimeters = (Template.default_namespace['start_braces'], + Template.default_namespace['end_braces']) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]), + re.escape(delimeters[1]))) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimeters[0] and in_expr: + raise TemplateError('%s inside expression' % delimeters[0], + position=pos, + name=name) + elif expr == delimeters[1] and not in_expr: + raise TemplateError('%s outside expression' % delimeters[1], + position=pos, + name=name) + if expr == delimeters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last:match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError('No %s to finish last expression' % delimeters[1], + name=name, position=last_pos) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + +lex.__doc__ = """ +Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + tempita.TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + tempita.TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... + tempita.TemplateError: {{ inside expression at line 1 column 10 + +""" if PY3 else """ +Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... 
+ TemplateError: {{ inside expression at line 1 column 10 + +""" + +statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') +single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] +trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') +lead_whitespace_re = re.compile(r'^[\t ]*\n') + + +def trim_lex(tokens): + last_trim = None + for i in range(len(tokens)): + current = tokens[i] + if isinstance(tokens[i], basestring_): + # we don't trim this + continue + item = current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = '' + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = '' + else: + next_chunk = tokens[i + 1] + if (not + isinstance(next_chunk, basestring_) or + not isinstance(prev, basestring_)): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = 'last' + if (prev_ok and (not next_chunk or lead_whitespace_re.search( + next_chunk) or ( + i == len(tokens) - 2 and not next_chunk.strip()))): + if prev: + if ((i == 1 and not prev.strip()) or prev_ok == 'last'): + tokens[i - 1] = '' + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[:m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = '' + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + +trim_lex.__doc__ = r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ if PY3 else r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + + +def find_position(string, index, last_index, last_pos): + """ + Given a string and index, return (line, column) + """ + lines = string.count('\n', last_index, index) + if lines > 0: + column = index - string.rfind('\n', last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimeters=None): + + if delimeters is None: + delimeters = (Template.default_namespace['start_braces'], + Template.default_namespace['end_braces']) + tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + +parse.__doc__ = r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse( + ... 'series->{{for x in y}}x={{x}}{{endfor}}' + ... 
) #doctest: +NORMALIZE_WHITESPACE + ['series->', + ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse( + ... '{{if x}}a{{elif y}}b{{else}}c{{endif}}' + ... ) #doctest: +NORMALIZE_WHITESPACE + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), + ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + tempita.TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + tempita.TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + tempita.TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + tempita.TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + tempita.TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + tempita.TemplateError: Multi-line py blocks must start + with a newline at line 1 column 3 + """ if PY3 else r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse( + ... 'series->{{for x in y}}x={{x}}{{endfor}}' + ... ) #doctest: +NORMALIZE_WHITESPACE + ['series->', + ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse( + ... '{{if x}}a{{elif y}}b{{else}}c{{endif}}' + ... ) #doctest: +NORMALIZE_WHITESPACE + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), + ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start + with a newline at line 1 column 3 + """ + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith('py:'): + expr = expr[3:].lstrip(' \t') + if expr.startswith('\n') or expr.startswith('\r'): + expr = expr.lstrip('\r\n') + if '\r' in expr: + expr = expr.replace('\r\n', '\n') + expr = expr.replace('\r', '') + expr += '\n' + else: + if '\n' in expr: + raise TemplateError( + 'Multi-line py blocks must start with a newline', + position=pos, name=name) + return ('py', pos, expr), tokens[1:] + elif expr in ('continue', 'break'): + if 'for' not in context: + raise TemplateError( + 'continue outside of for loop', + position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith('if '): + return parse_cond(tokens, name, context) + elif (expr.startswith('elif ') or expr == 'else'): + raise TemplateError( + '%s outside of an if block' % expr.split()[0], + position=pos, name=name) + elif expr in ('if', 'elif', 'for'): + raise TemplateError( + '%s with no expression' % expr, + position=pos, name=name) + elif expr in ('endif', 'endfor', 'enddef'): + raise TemplateError( + 'Unexpected %s' % expr, + position=pos, name=name) + elif expr.startswith('for '): + return parse_for(tokens, name, context) + elif expr.startswith('default '): + return parse_default(tokens, name, context) + elif expr.startswith('inherit '): + return parse_inherit(tokens, name, context) + elif expr.startswith('def '): + return parse_def(tokens, name, context) + elif expr.startswith('#'): + return ('comment', pos, tokens[0][0]), tokens[1:] + return ('expr', pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ('if',) + while 1: + if not tokens: + raise TemplateError( + 'Missing {{endif}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endif'): + return ('cond', start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(':'): + first = first[:-1] + if first.startswith('if '): + part = ('if', pos, first[3:].lstrip(), content) + elif first.startswith('elif '): + part = ('elif', pos, first[5:].lstrip(), content) + elif first == 'else': + part = ('else', pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError( + 'No {{endif}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) and ( + tokens[0][0] == 'endif' or tokens[0][0].startswith( + 'elif ') or tokens[0][0] == 'else')): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ('for',) + context + content = [] + assert first.startswith('for ') + if first.endswith(':'): + first = first[:-1] + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError( + 'Bad for (no "in") in %r' % first, + position=pos, name=name) + vars = first[:match.start()] + if '(' in vars: + raise TemplateError( + 'You cannot have () in the variable section of a for loop (%r)' + % vars, position=pos, name=name) + vars = tuple([ + v.strip() for 
v in first[:match.start()].split(',') + if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise TemplateError( + 'No {{endfor}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) and tokens[0][0] == 'endfor'): + return ('for', pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('default ') + first = first.split(None, 1)[1] + parts = first.split('=', 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % + first, position=pos, name=name) + var = parts[0].strip() + if ',' in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", + position=pos, name=name) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" + % var, position=pos, name=name) + expr = parts[1].strip() + return ('default', pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('inherit ') + expr = first.split(None, 1)[1] + return ('inherit', pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith('def ') + first = first.split(None, 1)[1] + if first.endswith(':'): + first = first[:-1] + if '(' not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(')'): + raise TemplateError("Function definition doesn't end with ): %s" % + first, position=start, name=name) + else: + first = first[:-1] + func_name, sig_text = first.split('(', 1) + sig = parse_signature(sig_text, name, start) + context = context + ('def',) + content = [] + while 1: + if not tokens: + raise TemplateError( + 'Missing {{enddef}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) and tokens[0][0] == 'enddef'): + return ('def', start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next( + tokens) + except StopIteration: + return tokenize.ENDMARKER, '' + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and ( + tok_string == '*' or tok_string == '**'): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == ','): + if var_arg_type == '*': + var_arg = var_name + elif var_arg_type == '**': + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if tok_type == tokenize.OP and tok_string == '=': + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = 
end_pos = None
+            parts = []
+            while 1:
+                tok_type, tok_string, s, e = get_token(True)
+                if start_pos is None:
+                    start_pos = s
+                end_pos = e
+                if tok_type == tokenize.ENDMARKER and nest_count:
+                    raise TemplateError('Invalid signature: (%s)' % sig_text,
+                                        position=pos, name=name)
+                if (not nest_count and
+                        (tok_type == tokenize.ENDMARKER or
+                         (tok_type == tokenize.OP and tok_string == ','))):
+                    default_expr = isolate_expression(
+                        sig_text, start_pos, end_pos)
+                    defaults[var_name] = default_expr
+                    sig_args.append(var_name)
+                    break
+                parts.append((tok_type, tok_string))
+                if nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == nest_type:
+                    nest_count += 1
+                elif nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string == unnest_type:
+                    nest_count -= 1
+                    if not nest_count:
+                        nest_type = unnest_type = None
+                elif not nest_count \
+                        and tok_type == tokenize.OP \
+                        and tok_string in ('(', '[', '{'):
+                    nest_type = tok_string
+                    nest_count = 1
+                    unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
+    return sig_args, var_arg, var_kw, defaults
+
+
+def isolate_expression(string, start_pos, end_pos):
+    srow, scol = start_pos
+    srow -= 1
+    erow, ecol = end_pos
+    erow -= 1
+    lines = string.splitlines(True)
+    if srow == erow:
+        return lines[srow][scol:ecol]
+    parts = [lines[srow][scol:]]
+    parts.extend(lines[srow + 1:erow])
+    if erow < len(lines):
+        # It'll sometimes give (end_row_past_finish, 0)
+        parts.append(lines[erow][:ecol])
+    return ''.join(parts)
+
+_fill_command_usage = """\
+%prog [OPTIONS] TEMPLATE arg=value
+
+Use py:arg=value to set a Python value; otherwise all values are
+strings.
+"""
+
+
+def fill_command(args=None):
+    import sys
+    import optparse
+    import pkg_resources
+    import os
+    if args is None:
+        args = sys.argv[1:]
+    dist = pkg_resources.get_distribution('Paste')
+    parser = optparse.OptionParser(
+        version=coerce_text(dist),
+        usage=_fill_command_usage)
+    parser.add_option(
+        '-o', '--output',
+        dest='output',
+        metavar="FILENAME",
+        help="File to write output to (default stdout)")
+    parser.add_option(
+        '--html',
+        dest='use_html',
+        action='store_true',
+        help="Use HTML style filling (including automatic HTML quoting)")
+    parser.add_option(
+        '--env',
+        dest='use_env',
+        action='store_true',
+        help="Put the environment in as top-level variables")
+    options, args = parser.parse_args(args)
+    if len(args) < 1:
+        print('You must give a template filename')
+        sys.exit(2)
+    template_name = args[0]
+    args = args[1:]
+    vars = {}
+    if options.use_env:
+        vars.update(os.environ)
+    for value in args:
+        if '=' not in value:
+            print('Bad argument: %r' % value)
+            sys.exit(2)
+        name, value = value.split('=', 1)
+        if name.startswith('py:'):
+            name = name[3:]
+            value = eval(value)
+        vars[name] = value
+    if template_name == '-':
+        template_content = sys.stdin.read()
+        template_name = '<stdin>'
+    else:
+        f = open(template_name, 'r', encoding="latin-1")
+        template_content = f.read()
+        f.close()
+    if options.use_html:
+        TemplateClass = HTMLTemplate
+    else:
+        TemplateClass = Template
+    template = TemplateClass(template_content, name=template_name)
+    result = template.substitute(vars)
+    if options.output:
+        f = open(options.output, 'w')
+        f.write(result)
+        f.close()
+    else:
+        sys.stdout.write(result)
+
+if __name__ == '__main__':
+    fill_command()
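For orientation, a minimal sketch of how the parser above is typically exercised through the Template class this package exports (the template text and values are illustrative only, and the import path assumes the vendored package is importable as nilearn.externals.tempita):

    # Illustrative only: exercises the 'py', 'for' and expression branches
    # handled by parse_expr()/parse_for() above.
    from nilearn.externals.tempita import Template

    tmpl = Template(
        "{{py:total = sum(values)}}"          # single-line {{py:...}} block
        "{{for v in values}}{{v}} {{endfor}}"
        "-> total={{total}}")
    print(tmpl.substitute(values=[1, 2, 3]))  # prints: 1 2 3 -> total=6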
diff --git a/nilearn/externals/tempita/_looper.py b/nilearn/externals/tempita/_looper.py
new file mode 100644
index 0000000000..a2a67800d8
--- /dev/null
+++ b/nilearn/externals/tempita/_looper.py
@@ -0,0 +1,163 @@
+"""
+Helper for looping over sequences, particularly in templates.
+
+Often in a loop in a template it's handy to know what's next up,
+previously up, if this is the first or last item in the sequence, etc.
+These can be awkward to manage in a normal Python loop, but using the
+looper you can get a better sense of the context.  Use like::
+
+    >>> for loop, item in looper(['a', 'b', 'c']):
+    ...     print(loop.number, item)
+    ...     if not loop.last:
+    ...         print('---')
+    1 a
+    ---
+    2 b
+    ---
+    3 c
+
+"""
+from __future__ import absolute_import, division, print_function
+
+import sys
+from .compat3 import basestring_
+
+__all__ = ['looper']
+
+
+class looper(object):
+    """
+    Helper for looping (particularly in templates)
+
+    Use this like::
+
+        for loop, item in looper(seq):
+            if loop.first:
+                ...
+    """
+
+    def __init__(self, seq):
+        self.seq = seq
+
+    def __iter__(self):
+        return looper_iter(self.seq)
+
+    def __repr__(self):
+        return '<%s for %r>' % (
+            self.__class__.__name__, self.seq)
+
+
+class looper_iter(object):
+
+    def __init__(self, seq):
+        self.seq = list(seq)
+        self.pos = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.pos >= len(self.seq):
+            raise StopIteration
+        result = loop_pos(self.seq, self.pos), self.seq[self.pos]
+        self.pos += 1
+        return result
+
+    if sys.version < "3":
+        next = __next__
+
+
+class loop_pos(object):
+
+    def __init__(self, seq, pos):
+        self.seq = seq
+        self.pos = pos
+
+    def __repr__(self):
+        return '<loop pos=%r at %r>' % (
+            self.seq[self.pos], self.pos)
+
+    def index(self):
+        return self.pos
+    index = property(index)
+
+    def number(self):
+        return self.pos + 1
+    number = property(number)
+
+    def item(self):
+        return self.seq[self.pos]
+    item = property(item)
+
+    def __next__(self):
+        try:
+            return self.seq[self.pos + 1]
+        except IndexError:
+            return None
+    __next__ = property(__next__)
+
+    if sys.version < "3":
+        next = __next__
+
+    def previous(self):
+        if self.pos == 0:
+            return None
+        return self.seq[self.pos - 1]
+    previous = property(previous)
+
+    def odd(self):
+        return not self.pos % 2
+    odd = property(odd)
+
+    def even(self):
+        return self.pos % 2
+    even = property(even)
+
+    def first(self):
+        return self.pos == 0
+    first = property(first)
+
+    def last(self):
+        return self.pos == len(self.seq) - 1
+    last = property(last)
+
+    def length(self):
+        return len(self.seq)
+    length = property(length)
+
+    def first_group(self, getter=None):
+        """
+        Returns true if this item is the start of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.first:
+            return True
+        return self._compare_group(self.item, self.previous, getter)
+
+    def last_group(self, getter=None):
+        """
+        Returns true if this item is the end of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.last:
+            return True
+        return self._compare_group(self.item, self.__next__, getter)
+
+    def _compare_group(self, item, other, getter):
+        if getter is None:
+            return item != other
+        elif (isinstance(getter, basestring_) and getter.startswith('.')):
+            getter = getter[1:]
+            if getter.endswith('()'):
+                getter = getter[:-2]
+                return getattr(item, getter)() != getattr(other, getter)()
+            else:
+                return getattr(item, getter) != getattr(other, getter)
+        elif hasattr(getter, '__call__'):
+            return getter(item) != getter(other)
+        else:
+            return item[getter] != other[getter]
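The module docstring above demonstrates loop.number and loop.last; a slightly fuller sketch of the grouping helpers (the list contents are made up):

    from nilearn.externals.tempita._looper import looper

    # first_group() accepts a callable getter (or an '.attr' name, or a
    # dict key / list index); it returns True on the first item and then
    # whenever the derived value changes between consecutive items.
    for loop, word in looper(['apple', 'avocado', 'banana']):
        if loop.first_group(lambda w: w[0]):
            print('--- words starting with %s ---' % word[0])
        print(loop.number, word)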
+ """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/nilearn/externals/tempita/compat3.py b/nilearn/externals/tempita/compat3.py new file mode 100644 index 0000000000..861f5aaf1a --- /dev/null +++ b/nilearn/externals/tempita/compat3.py @@ -0,0 +1,56 @@ +# flake8: noqa +from __future__ import absolute_import, division, print_function + +import sys + +__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode', + 'iteritems'] + +PY3 = True if sys.version_info[0] == 3 else False + +if sys.version_info[0] < 3: + + def next(obj): + return obj.next() + + def iteritems(d, **kw): + return d.iteritems(**kw) + + b = bytes = str + basestring_ = basestring + +else: + + def b(s): + if isinstance(s, str): + return s.encode('latin1') + return bytes(s) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + next = next + basestring_ = (bytes, str) + bytes = bytes + +text = str + + +def is_unicode(obj): + if sys.version_info[0] < 3: + return isinstance(obj, unicode) + else: + return isinstance(obj, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if sys.version_info[0] < 3: + attr = '__unicode__' + else: + attr = '__str__' + if hasattr(v, attr): + return unicode(v) + else: + return bytes(v) + return v diff --git a/nilearn/image/image.py b/nilearn/image/image.py index e51a29bc06..61623200df 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -14,7 +14,7 @@ from scipy.stats import scoreatpercentile import copy import nibabel -from sklearn.externals.joblib import Parallel, delayed +from nilearn._utils.compat import Parallel, delayed from .. import signal from .._utils import (check_niimg_4d, check_niimg_3d, check_niimg, as_ndarray, @@ -172,8 +172,8 @@ def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True): filtering. copy: bool - if True, input array is not modified. False by default: the filtering - is performed in-place. + if True, input array is not modified. True by default: the filtering + is not performed in-place. Returns ------- @@ -327,7 +327,7 @@ def _crop_img_to(img, slices, copy=True): return new_img_like(img, cropped_data, new_affine) -def crop_img(img, rtol=1e-8, copy=True): +def crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False): """Crops img as much as possible Will crop img, removing as many zero entries as possible @@ -349,10 +349,22 @@ def crop_img(img, rtol=1e-8, copy=True): copy: boolean Specifies whether cropped data is copied or not. + pad: boolean + Toggles adding 1-voxel of 0s around the border. Recommended. + + return_offset: boolean + Specifies whether to return a tuple of the removed padding. 
diff --git a/nilearn/image/image.py b/nilearn/image/image.py
index e51a29bc06..61623200df 100644
--- a/nilearn/image/image.py
+++ b/nilearn/image/image.py
@@ -14,7 +14,7 @@
 from scipy.stats import scoreatpercentile
 import copy
 import nibabel
-from sklearn.externals.joblib import Parallel, delayed
+from nilearn._utils.compat import Parallel, delayed
 
 from .. import signal
 from .._utils import (check_niimg_4d, check_niimg_3d, check_niimg,
                       as_ndarray,
@@ -172,8 +172,8 @@ def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):
         filtering.
 
     copy: bool
-        if True, input array is not modified. False by default: the filtering
-        is performed in-place.
+        if True, input array is not modified. True by default: the filtering
+        is not performed in-place.
 
     Returns
     -------
@@ -327,7 +327,7 @@ def _crop_img_to(img, slices, copy=True):
     return new_img_like(img, cropped_data, new_affine)
 
 
-def crop_img(img, rtol=1e-8, copy=True):
+def crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):
     """Crops img as much as possible
 
     Will crop img, removing as many zero entries as possible
@@ -349,10 +349,22 @@ def crop_img(img, rtol=1e-8, copy=True):
     copy: boolean
         Specifies whether cropped data is copied or not.
 
+    pad: boolean
+        Toggles adding a 1-voxel border of zeros around the cropped image.
+
+    return_offset: boolean
+        Specifies whether to also return the slices used to crop the image.
+
     Returns
     -------
     cropped_img: image
         Cropped version of the input image
+
+    offset: tuple of slices (optional)
+        Slices used to crop each spatial axis; indexing the original image
+        with them yields the cropped volume. Only returned when
+        ``return_offset`` is True.
+
     """
 
     img = check_niimg(img)
@@ -364,16 +376,53 @@ def crop_img(img, rtol=1e-8, copy=True):
 
     if data.ndim == 4:
         passes_threshold = np.any(passes_threshold, axis=-1)
     coords = np.array(np.where(passes_threshold))
-    start = coords.min(axis=1)
-    end = coords.max(axis=1) + 1
+
+    # Set the full range if no voxels pass the threshold
+    if coords.shape[1] == 0:
+        start, end = [0, 0, 0], list(data.shape)
+    else:
+        start = coords.min(axis=1)
+        end = coords.max(axis=1) + 1
 
     # pad with one voxel to avoid resampling problems
-    start = np.maximum(start - 1, 0)
-    end = np.minimum(end + 1, data.shape[:3])
+    if pad:
+        start = np.maximum(start - 1, 0)
+        end = np.minimum(end + 1, data.shape[:3])
+
+    slices = [slice(s, e) for s, e in zip(start, end)][:3]
+    cropped_im = _crop_img_to(img, slices, copy=copy)
+    return cropped_im if not return_offset else (cropped_im, tuple(slices))
+
 
-    slices = [slice(s, e) for s, e in zip(start, end)]
+def _pad_array(array, pad_sizes):
+    """Pad an ndarray with zeros. ``pad_sizes`` gives the number of zeros
+    to add at each end of each axis, as [x1minpad, x1maxpad, x2minpad,
+    x2maxpad, x3minpad, ...]
+    """
+
+    if len(pad_sizes) % 2 != 0:
+        raise ValueError("Please specify as many max paddings as min"
+                         " paddings. You have specified %d arguments" %
+                         len(pad_sizes))
+
+    all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)
+    all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)
+
+    lower_paddings, upper_paddings = all_paddings.T
+    new_shape = np.array(array.shape) + upper_paddings + lower_paddings
 
-    return _crop_img_to(img, slices, copy=copy)
+    padded = np.zeros(new_shape, dtype=array.dtype)
+    source_slices = [slice(max(-lp, 0), min(s + up, s))
+                     for lp, up, s in zip(lower_paddings,
+                                          upper_paddings,
+                                          array.shape)]
+    target_slices = [slice(max(lp, 0), min(s - up, s))
+                     for lp, up, s in zip(lower_paddings,
+                                          upper_paddings,
+                                          new_shape)]
+
+    padded[tuple(target_slices)] = array[source_slices].copy()
+    return padded
 
 
 def _compute_mean(imgs, target_affine=None,
@@ -656,7 +705,7 @@ def new_img_like(ref_niimg, data, affine=None, copy_header=False):
     return klass(data, affine, header=header)
 
 
-def threshold_img(img, threshold, mask_img=None):
+def threshold_img(img, threshold, mask_img=None, copy=True):
     """ Threshold the given input image, mostly statistical or atlas images.
 
     Thresholding can be done based on direct image intensities or selection
@@ -683,6 +732,10 @@ def threshold_img(img, threshold, mask_img=None):
         Mask image applied to mask the input data.
         If None, no masking will be applied.
 
+    copy: bool
+        If True (default), the input image is not modified: thresholding
+        is applied to a copy of the data.
+
     Returns
     -------
     threshold_img: Nifti1Image
@@ -693,6 +746,8 @@ def threshold_img(img, threshold, mask_img=None):
 
     img = check_niimg(img)
     img_data = _safe_get_data(img, ensure_finite=True)
+    if copy:
+        img_data = img_data.copy()
     affine = img.affine
 
     if mask_img is not None:
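crop_img and _pad_array above are designed to round-trip, and threshold_img now defaults to copying; a short sketch under arbitrary shapes and values:

    import numpy as np
    from nibabel import Nifti1Image
    from nilearn.image.image import _pad_array, crop_img, threshold_img

    data = np.zeros((10, 10, 10))
    data[3:7, 4:8, 2:5] = 1
    img = Nifti1Image(data, np.eye(4))

    # pad=False crops exactly to the non-zero bounding box; the returned
    # slices record what was removed along each axis.
    cropped, offset = crop_img(img, pad=False, return_offset=True)
    assert cropped.shape == (4, 4, 3)
    assert data[offset].shape == cropped.shape

    # _pad_array undoes the crop: [x1min, x1max, x2min, x2max, x3min, x3max]
    repadded = _pad_array(cropped.get_data(), [3, 3, 4, 2, 2, 5])
    np.testing.assert_array_equal(repadded, data)

    # copy=True (the default) leaves the input of threshold_img untouched
    thresholded = threshold_img(img, threshold=2)
    assert img.get_data().max() == 1
    assert thresholded.get_data().max() == 0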
diff --git a/nilearn/image/resampling.py b/nilearn/image/resampling.py
index 27dede7712..a699351341 100644
--- a/nilearn/image/resampling.py
+++ b/nilearn/image/resampling.py
@@ -13,6 +13,7 @@
 import scipy
 from scipy import ndimage, linalg
 
+from .image import crop_img
 from .. import _utils
 from .._utils.compat import _basestring
 
@@ -289,7 +290,7 @@ def _resample_one_img(data, A, b, target_shape,
 
 def resample_img(img, target_affine=None, target_shape=None,
                  interpolation='continuous', copy=True, order="F",
-                 clip=True, fill_value=0):
+                 clip=True, fill_value=0, force_resample=False):
     """Resample a Niimg-like object
 
     Parameters
@@ -331,6 +332,9 @@ def resample_img(img, target_affine=None, target_shape=None,
     fill_value: float, optional
         Use a fill value for points outside of input volume (default 0).
 
+    force_resample: bool, optional
+        Intended for testing, this prevents the use of a padding optimization.
+
     Returns
     -------
     resampled: nibabel.Nifti1Image
@@ -418,18 +422,19 @@ def resample_img(img, target_affine=None, target_shape=None,
         input_img_is_string = False
 
     img = _utils.check_niimg(img)
+    shape = img.shape
+    affine = img.affine
 
     # noop cases
     if target_affine is None and target_shape is None:
         if copy and not input_img_is_string:
             img = _utils.copy_img(img)
         return img
+    if target_affine is affine and target_shape is shape:
+        return img
 
     if target_affine is not None:
         target_affine = np.asarray(target_affine)
 
-    shape = img.shape
-    affine = img.affine
-
     if (np.all(np.array(target_shape) == shape[:3]) and
             np.allclose(target_affine, affine)):
         if copy and not input_img_is_string:
@@ -486,14 +491,6 @@ def resample_img(img, target_affine=None, target_shape=None,
     else:
         transform_affine = np.dot(linalg.inv(affine), target_affine)
     A, b = to_matrix_vector(transform_affine)
-    # If A is diagonal, ndimage.affine_transform is clever enough to use a
-    # better algorithm.
-    if np.all(np.diag(np.diag(A)) == A):
-        if LooseVersion(scipy.__version__) < LooseVersion('0.18'):
-            # Before scipy 0.18, ndimage.affine_transform was applying a
-            # different logic to the offset for diagonal affine
-            b = np.dot(linalg.inv(A), b)
-        A = np.diag(A)
 
     data_shape = list(data.shape)
     # Make sure that we have a list here
@@ -501,6 +498,11 @@ def resample_img(img, target_affine=None, target_shape=None,
         target_shape = target_shape.tolist()
     target_shape = tuple(target_shape)
 
+    if LooseVersion(scipy.__version__) < LooseVersion('0.20'):
+        # Before scipy 0.20, force native data types due to endian issues
+        # that caused instability.
+        data = data.astype(data.dtype.newbyteorder('N'))
+
     if interpolation == 'continuous' and data.dtype.kind == 'i':
         # cast unsupported data types to closest support dtype
         aux = data.dtype.name.replace('int', 'float')
@@ -527,20 +529,58 @@ def resample_img(img, target_affine=None, target_shape=None,
     # Code is generic enough to work for both 3D and 4D images
     other_shape = data_shape[3:]
-    resampled_data = np.empty(list(target_shape) + other_shape,
+    resampled_data = np.zeros(list(target_shape) + other_shape,
                               order=order, dtype=resampled_data_dtype)
 
     all_img = (slice(None), ) * 3
 
-    # Iterate over a set of 3D volumes, as the interpolation problem is
-    # separable in the extra dimensions. This reduces the
-    # computational cost
-    for ind in np.ndindex(*other_shape):
-        _resample_one_img(data[all_img + ind], A, b, target_shape,
-                          interpolation_order,
-                          out=resampled_data[all_img + ind],
-                          copy=not input_img_is_string,
-                          fill_value=fill_value)
+    # if (A == I OR some combination of permutation(I) and sign-flipped(I)) AND
+    # all(b == integers):
+    if (np.all(np.eye(3) == A) and all(bt == np.round(bt) for bt in b) and
+            not force_resample):
+        # TODO: also check for sign flips
+        # TODO: also check for permutations of I
+
+        # ... special case: can be solved with padding alone
+        # crop source image and keep N voxels offset before/after volume
+        cropped_img, offsets = crop_img(img, pad=False, return_offset=True)
+
+        # TODO: flip axes that are flipped
+        # TODO: un-shuffle permuted dimensions
+
+        # offset the original un-cropped image indices by the relative
+        # translation, b.
+        indices = [(int(off.start - dim_b), int(off.stop - dim_b))
+                   for off, dim_b in zip(offsets[:3], b[:3])]
+
+        # If the images do not fully overlap, place only the overlapping
+        # portion of the image.
+        slices = []
+        for dimsize, index in zip(resampled_data.shape, indices):
+            slices.append(slice(np.max((0, index[0])),
+                                np.min((dimsize, index[1]))))
+        slices = tuple(slices)
+
+        # ensure the source image being placed isn't larger than the dest
+        subset_indices = tuple(slice(0, s.stop - s.start) for s in slices)
+        resampled_data[slices] = cropped_img.get_data()[subset_indices]
+    else:
+        # If A is diagonal, ndimage.affine_transform is clever enough to use
+        # a better algorithm.
+        if np.all(np.diag(np.diag(A)) == A):
+            if LooseVersion(scipy.__version__) < LooseVersion('0.18'):
+                # Before scipy 0.18, ndimage.affine_transform was applying a
+                # different logic to the offset for diagonal affine
+                b = np.dot(linalg.inv(A), b)
+            A = np.diag(A)
+        # Iterate over a set of 3D volumes, as the interpolation problem is
+        # separable in the extra dimensions. This reduces the
+        # computational cost
+        for ind in np.ndindex(*other_shape):
+            _resample_one_img(data[all_img + ind], A, b, target_shape,
+                              interpolation_order,
+                              out=resampled_data[all_img + ind],
+                              copy=not input_img_is_string,
+                              fill_value=fill_value)
 
     if clip:
         # force resampled data to have a range contained in the original data
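The hunk above replaces interpolation with pure cropping/padding when the voxel-to-voxel transform is an integer translation; force_resample exists so callers can compare both paths. A sketch of the equivalence this is meant to guarantee (shapes are illustrative; nearest-neighbour interpolation keeps both paths exact):

    import numpy as np
    from nibabel import Nifti1Image
    from nilearn.image import resample_img

    img = Nifti1Image(np.random.RandomState(0).rand(6, 6, 6), np.eye(4))
    target_affine = np.eye(4)
    target_affine[:3, 3] = -2  # A == I and b is integral -> padding path

    fast = resample_img(img, target_affine=target_affine,
                        target_shape=(6, 6, 6), interpolation='nearest')
    slow = resample_img(img, target_affine=target_affine,
                        target_shape=(6, 6, 6), interpolation='nearest',
                        force_resample=True)
    np.testing.assert_array_equal(fast.get_data(), slow.get_data())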
@@ -556,7 +596,7 @@ def resample_img(img, target_affine=None, target_shape=None,
 
 def resample_to_img(source_img, target_img,
                     interpolation='continuous', copy=True, order='F',
-                    clip=False, fill_value=0):
+                    clip=False, fill_value=0, force_resample=False):
     """Resample a Niimg-like source image on a target Niimg-like image
     (no registration is performed: the image should already be aligned).
@@ -593,6 +633,9 @@ def resample_to_img(source_img, target_img,
     fill_value: float, optional
         Use a fill value for points outside of input volume (default 0).
 
+    force_resample: bool, optional
+        Intended for testing, this prevents the use of a padding optimization.
+
     Returns
     -------
     resampled: nibabel.Nifti1Image
@@ -616,7 +659,8 @@ def resample_to_img(source_img, target_img,
                         target_affine=target.affine,
                         target_shape=target_shape,
                         interpolation=interpolation, copy=copy, order=order,
-                        clip=clip, fill_value=fill_value)
+                        clip=clip, fill_value=fill_value,
+                        force_resample=force_resample)
 
 
 def reorder_img(img, resample=None):
diff --git a/nilearn/image/tests/test_image.py b/nilearn/image/tests/test_image.py
index 92232c75e2..fe78003965 100644
--- a/nilearn/image/tests/test_image.py
+++ b/nilearn/image/tests/test_image.py
@@ -499,6 +499,27 @@ def test_threshold_img():
     # when threshold is a percentile
     thr_maps_percent2 = threshold_img(img, threshold='2%')
 
+
+def test_threshold_img_copy():
+
+    img_zeros = Nifti1Image(np.zeros((10, 10, 10, 10)), np.eye(4))
+    img_ones = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4))
+
+    # Check that the default copy does not mutate: a modified copy is returned.
+    thresholded = threshold_img(img_ones, 2)  # threshold 2 > 1
+
+    # The original img_ones should still be all ones.
+    assert_array_equal(img_ones.get_data(), np.ones((10, 10, 10, 10)))
+    # Thresholded should have all zeros.
+ assert_array_equal(thresholded.get_data(), np.zeros((10, 10, 10, 10))) + + # Check that not copying does mutate. + img_to_mutate = Nifti1Image(np.ones((10, 10, 10, 10)), np.eye(4)) + thresholded = threshold_img(img_to_mutate, 2, copy=False) + # Check that original mutates + assert_array_equal(img_to_mutate.get_data(), np.zeros((10, 10, 10, 10))) + # And that returned value is also thresholded. + assert_array_equal(img_to_mutate.get_data(), thresholded.get_data()) + def test_isnan_threshold_img_data(): shape = (10, 10, 10) diff --git a/nilearn/image/tests/test_resampling.py b/nilearn/image/tests/test_resampling.py index a0b66d551d..9cff7ed923 100644 --- a/nilearn/image/tests/test_resampling.py +++ b/nilearn/image/tests/test_resampling.py @@ -18,6 +18,7 @@ from nilearn.image.resampling import from_matrix_vector, coord_transform from nilearn.image.resampling import get_bounds from nilearn.image.resampling import BoundingBoxError +from nilearn.image.image import _pad_array, crop_img from nilearn._utils import testing @@ -37,37 +38,6 @@ def rotation(theta, phi): return np.dot(a1, a2) -def pad(array, *args): - """Pad an ndarray with zeros of quantity specified - in args as follows args = (x1minpad, x1maxpad, x2minpad, - x2maxpad, x3minpad, ...) - """ - - if len(args) % 2 != 0: - raise ValueError("Please specify as many max paddings as min" - " paddings. You have specified %d arguments" % - len(args)) - - all_paddings = np.zeros([array.ndim, 2], dtype=np.int64) - all_paddings[:len(args) // 2] = np.array(args).reshape(-1, 2) - - lower_paddings, upper_paddings = all_paddings.T - new_shape = np.array(array.shape) + upper_paddings + lower_paddings - - padded = np.zeros(new_shape, dtype=array.dtype) - source_slices = [slice(max(-lp, 0), min(s + up, s)) - for lp, up, s in zip(lower_paddings, - upper_paddings, - array.shape)] - target_slices = [slice(max(lp, 0), min(s - up, s)) - for lp, up, s in zip(lower_paddings, - upper_paddings, - new_shape)] - - padded[target_slices] = array[source_slices].copy() - return padded - - ############################################################################### # Tests def test_identity_resample(): @@ -121,6 +91,11 @@ def test_downsample(): np.testing.assert_almost_equal(downsampled, rot_img.get_data()[:x, :y, :z, ...]) + rot_img_2 = resample_img(Nifti1Image(data, affine), + target_affine=2 * affine, interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(rot_img_2.get_data(), + rot_img.get_data()) # Test with non native endian data # Test to check that if giving non native endian data as input should @@ -471,8 +446,8 @@ def test_resampling_result_axis_permutation(): offset_cropping = np.vstack([-offset[ap][np.newaxis, :], np.zeros([1, 3])] ).T.ravel().astype(int) - what_resampled_data_should_be = pad(full_data.transpose(ap), - *list(offset_cropping)) + what_resampled_data_should_be = _pad_array(full_data.transpose(ap), + list(offset_cropping)) assert_array_almost_equal(resampled_data, what_resampled_data_should_be) @@ -558,6 +533,56 @@ def test_resample_to_img(): np.testing.assert_almost_equal(downsampled, result_img.get_data()[:x, :y, :z, ...]) +def test_crop(): + # Testing that padding of arrays and cropping of images work symmetrically + shape = (4, 6, 2) + data = np.ones(shape) + padded = _pad_array(data, [3, 2, 4, 4, 5, 7]) + padd_nii = Nifti1Image(padded, np.eye(4)) + + cropped = crop_img(padd_nii, pad=False) + np.testing.assert_equal(cropped.get_data(), data) + + +def test_resample_identify_affine_int_translation(): + # Testing 
resample to img function + rand_gen = np.random.RandomState(0) + + source_shape = (6, 4, 6) + source_affine = np.eye(4) + source_affine[:, 3] = np.append(np.random.randint(0, 4, 3), 1) + source_data = rand_gen.random_sample(source_shape) + source_img = Nifti1Image(source_data, source_affine) + + target_shape = (11, 10, 9) + target_data = np.zeros(target_shape) + target_affine = source_affine + target_affine[:3, 3] -= 3 # add an offset of 3 in x, y, z + target_data[3:9, 3:7, 3:9] = source_data # put the data at the offset location + target_img = Nifti1Image(target_data, target_affine) + + result_img = resample_to_img(source_img, target_img, + interpolation='nearest') + np.testing.assert_almost_equal(target_img.get_data(), + result_img.get_data()) + + result_img_2 = resample_to_img(result_img, source_img, + interpolation='nearest') + np.testing.assert_almost_equal(source_img.get_data(), + result_img_2.get_data()) + + result_img_3 = resample_to_img(result_img, source_img, + interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(result_img_2.get_data(), + result_img_3.get_data()) + + result_img_4 = resample_to_img(source_img, target_img, + interpolation='nearest', + force_resample=True) + np.testing.assert_almost_equal(target_img.get_data(), + result_img_4.get_data()) + def test_resample_clip(): # Resample and image and get larger and smaller # value than in the original. Use clip to get rid of these images diff --git a/nilearn/input_data/base_masker.py b/nilearn/input_data/base_masker.py index f90b54785f..af604b581e 100644 --- a/nilearn/input_data/base_masker.py +++ b/nilearn/input_data/base_masker.py @@ -10,7 +10,7 @@ import numpy as np from sklearn.base import BaseEstimator, TransformerMixin -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from .. import masking from .. import image diff --git a/nilearn/input_data/multi_nifti_masker.py b/nilearn/input_data/multi_nifti_masker.py index f0f5737684..b70f44297e 100644 --- a/nilearn/input_data/multi_nifti_masker.py +++ b/nilearn/input_data/multi_nifti_masker.py @@ -8,7 +8,7 @@ import itertools import warnings -from sklearn.externals.joblib import Memory, Parallel, delayed +from nilearn._utils.compat import Memory, Parallel, delayed from .. import _utils from .. import image @@ -36,12 +36,18 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): fine tune the mask extraction. smoothing_fwhm: float, optional - If smoothing_fwhm is not None, it gives the size in millimeters of the - spatial smoothing to apply to the signal. - - standardize: boolean, optional - If standardize is True, the time-series are centered and normed: - their mean is put to 0 and their variance to 1 in the time dimension. + If smoothing_fwhm is not None, it gives the size in millimeters of + the spatial smoothing to apply to the signal. + + standardize: {'zscore', 'psc', True, False}, default is 'zscore' + Strategy to standardize the signal. + 'zscore': the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + 'psc': Timeseries are shifted to zero mean value and scaled + to percent signal change (as compared to original mean signal). + True : the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + False : Do not standardize the data. detrend: boolean, optional This parameter is passed to signal.clean. 
Please see the related @@ -121,13 +127,11 @@ class MultiNiftiMasker(NiftiMasker, CacheMixin): """ def __init__(self, mask_img=None, smoothing_fwhm=None, - standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, - target_affine=None, target_shape=None, - mask_strategy='background', mask_args=None, dtype=None, - memory=Memory(cachedir=None), memory_level=0, - n_jobs=1, verbose=0 - ): + standardize=False, detrend=False, low_pass=None, + high_pass=None, t_r=None, target_affine=None, + target_shape=None, mask_strategy='background', + mask_args=None, dtype=None, memory=Memory(cachedir=None), + memory_level=0, n_jobs=1, verbose=0): # Mask is provided or computed self.mask_img = mask_img diff --git a/nilearn/input_data/nifti_labels_masker.py b/nilearn/input_data/nifti_labels_masker.py index f4e6dca9ea..22223e6b52 100644 --- a/nilearn/input_data/nifti_labels_masker.py +++ b/nilearn/input_data/nifti_labels_masker.py @@ -4,7 +4,7 @@ import numpy as np -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from .. import _utils from .._utils import logger, CacheMixin, _compose_err_msg @@ -56,9 +56,15 @@ class NiftiLabelsMasker(BaseMasker, CacheMixin): If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. - standardize: boolean, optional - If standardize is True, the time-series are centered and normed: - their mean is put to 0 and their variance to 1 in the time dimension. + standardize: {'zscore', 'psc', True, False}, default is 'zscore' + Strategy to standardize the signal. + 'zscore': the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + 'psc': Timeseries are shifted to zero mean value and scaled + to percent signal change (as compared to original mean signal). + True : the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + False : Do not standardize the data. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related diff --git a/nilearn/input_data/nifti_maps_masker.py b/nilearn/input_data/nifti_maps_masker.py index 0cb57567f9..c9ff8525ab 100644 --- a/nilearn/input_data/nifti_maps_masker.py +++ b/nilearn/input_data/nifti_maps_masker.py @@ -3,7 +3,7 @@ """ import numpy as np -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from .. import _utils from .._utils import logger, CacheMixin @@ -22,11 +22,11 @@ def __init__(self, _resampled_maps_img_, _resampled_mask_img_): self._resampled_mask_img_ = _resampled_mask_img_ def __call__(self, imgs): - from ..regions import signal_extraction + from ..regions import signal_extraction - return signal_extraction.img_to_signals_maps( - imgs, self._resampled_maps_img_, - mask_img=self._resampled_mask_img_) + return signal_extraction.img_to_signals_maps( + imgs, self._resampled_maps_img_, + mask_img=self._resampled_mask_img_) class NiftiMapsMasker(BaseMasker, CacheMixin): @@ -58,9 +58,15 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. - standardize: boolean, optional - If standardize is True, the time-series are centered and normed: - their mean is put to 0 and their variance to 1 in the time dimension. + standardize: {'zscore', 'psc', True, False}, default is 'zscore' + Strategy to standardize the signal. + 'zscore': the signal is z-scored. 
Timeseries are shifted + to zero mean and scaled to unit variance. + 'psc': Timeseries are shifted to zero mean value and scaled + to percent signal change (as compared to original mean signal). + True : the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + False : Do not standardize the data. detrend: boolean, optional This parameter is passed to signal.clean. Please see the related @@ -116,10 +122,9 @@ class NiftiMapsMasker(BaseMasker, CacheMixin): # memory and memory_level are used by CacheMixin. def __init__(self, maps_img, mask_img=None, - allow_overlap=True, - smoothing_fwhm=None, standardize=False, detrend=False, - low_pass=None, high_pass=None, t_r=None, dtype=None, - resampling_target="data", + allow_overlap=True, smoothing_fwhm=None, standardize=False, + detrend=False, low_pass=None, high_pass=None, t_r=None, + dtype=None, resampling_target="data", memory=Memory(cachedir=None, verbose=0), memory_level=0, verbose=0): self.maps_img = maps_img diff --git a/nilearn/input_data/nifti_masker.py b/nilearn/input_data/nifti_masker.py index 9a752b7c89..cd1a29fa36 100644 --- a/nilearn/input_data/nifti_masker.py +++ b/nilearn/input_data/nifti_masker.py @@ -4,14 +4,16 @@ # Author: Gael Varoquaux, Alexandre Abraham # License: simplified BSD +import warnings from copy import copy as copy_object -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from .base_masker import BaseMasker, filter_and_extract from .. import _utils from .. import image from .. import masking +from nilearn.reporting import ReportMixin from .._utils import CacheMixin from .._utils.class_inspect import get_params from .._utils.niimg import img_data_dtype @@ -63,7 +65,7 @@ def filter_and_mask(imgs, mask_img_, parameters, return data -class NiftiMasker(BaseMasker, CacheMixin): +class NiftiMasker(BaseMasker, CacheMixin, ReportMixin): """Applying a mask to extract time-series from Niimg-like objects. NiftiMasker is useful when preprocessing (detrending, standardization, @@ -86,25 +88,31 @@ class NiftiMasker(BaseMasker, CacheMixin): If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. - standardize : boolean, optional - If standardize is True, the time-series are centered and normed: - their mean is put to 0 and their variance to 1 in the time dimension. + standardize: {'zscore', 'psc', True, False}, default is 'zscore' + Strategy to standardize the signal. + 'zscore': the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + 'psc': Timeseries are shifted to zero mean value and scaled + to percent signal change (as compared to original mean signal). + True : the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + False : Do not standardize the data. detrend : boolean, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details: :func:`nilearn.signal.clean`. low_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details: :func:`nilearn.signal.clean`. high_pass: None or float, optional This parameter is passed to signal.clean. Please see the related - documentation for details + documentation for details: :func:`nilearn.signal.clean`. t_r : float, optional This parameter is passed to signal.clean. 
Please see the related - documentation for details + documentation for details: :func:`nilearn.signal.clean`. target_affine : 3x3 or 4x4 matrix, optional This parameter is passed to image.resample_img. Please see the @@ -178,7 +186,7 @@ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, mask_strategy='background', mask_args=None, sample_mask=None, dtype=None, memory_level=1, memory=Memory(cachedir=None), - verbose=0 + verbose=0, reports=True, ): # Mask is provided or computed self.mask_img = mask_img @@ -200,9 +208,78 @@ def __init__(self, mask_img=None, sessions=None, smoothing_fwhm=None, self.memory = memory self.memory_level = memory_level self.verbose = verbose + self.reports = reports + self._report_description = ('This report shows the input Nifti ' + 'image overlaid with the outlines of the ' + 'mask (in green). We recommend to inspect ' + 'the report for the overlap between the ' + 'mask and its input image. ') + self._overlay_text = ('\n To see the input Nifti image before ' + 'resampling, hover over the displayed image.') self._shelving = False + def _reporting(self): + """ + Returns + ------- + displays : list + A list of all displays to be rendered. + """ + try: + from nilearn import plotting + except ImportError: + with warnings.catch_warnings(): + mpl_unavail_msg = ('Matplotlib is not imported! ' + 'No reports will be generated.') + warnings.filterwarnings('always', message=mpl_unavail_msg) + warnings.warn(category=ImportWarning, + message=mpl_unavail_msg) + return [None] + + img = self._reporting_data['images'] + mask = self._reporting_data['mask'] + if img is not None: + dim = image.load_img(img).shape + if len(dim) == 4: + # compute middle image from 4D series for plotting + img = image.index_img(img, dim[-1] // 2) + else: # images were not provided to fit + img = mask + + # create display of retained input mask, image + # for visual comparison + init_display = plotting.plot_img(img, + black_bg=False, + cmap='CMRmap_r') + init_display.add_contours(mask, levels=[.5], colors='g', + linewidths=2.5) + + if 'transform' not in self._reporting_data: + return [init_display] + + else: # if resampling was performed + self._report_description = (self._report_description + + self._overlay_text) + + # create display of resampled NiftiImage and mask + # assuming that resampl_img has same dim as img + resampl_img, resampl_mask = self._reporting_data['transform'] + if resampl_img is not None: + if len(dim) == 4: + # compute middle image from 4D series for plotting + resampl_img = image.index_img(resampl_img, dim[-1] // 2) + else: # images were not provided to fit + resampl_img = resampl_mask + + final_display = plotting.plot_img(resampl_img, + black_bg=False, + cmap='CMRmap_r') + final_display.add_contours(resampl_mask, levels=[.5], + colors='g', linewidths=2.5) + + return [init_display, final_display] + def _check_fitted(self): if not hasattr(self, 'mask_img_'): raise ValueError('It seems that %s has not been fitted. 
' @@ -248,6 +325,11 @@ def fit(self, imgs=None, y=None): else: self.mask_img_ = _utils.check_niimg_3d(self.mask_img) + if self.reports: # save inputs for reporting + self._reporting_data = {'images': imgs, 'mask': self.mask_img_} + else: + self._reporting_data = None + # If resampling is requested, resample also the mask # Resampling: allows the user to change the affine, the shape or both if self.verbose > 0: @@ -257,14 +339,25 @@ def fit(self, imgs=None, y=None): target_affine=self.target_affine, target_shape=self.target_shape, copy=False, interpolation='nearest') - if self.target_affine is not None: + if self.target_affine is not None: # resample image to target affine self.affine_ = self.target_affine - else: + else: # resample image to mask affine self.affine_ = self.mask_img_.affine # Load data in memory self.mask_img_.get_data() if self.verbose > 10: print("[%s.fit] Finished fit" % self.__class__.__name__) + + if (self.target_shape is not None) or (self.target_affine is not None): + if self.reports: + if imgs is not None: + resampl_imgs = self._cache(image.resample_img)( + imgs, target_affine=self.affine_, + copy=False, interpolation='nearest') + else: # imgs not provided to fit + resampl_imgs = None + self._reporting_data['transform'] = [resampl_imgs, self.mask_img_] + return self def transform_single_imgs(self, imgs, confounds=None, copy=True): @@ -279,7 +372,7 @@ def transform_single_imgs(self, imgs, confounds=None, copy=True): confounds: CSV file or array-like, optional This parameter is passed to signal.clean. Please see the related - documentation for details. + documentation for details: :func:`nilearn.signal.clean`. shape: (number of scans, number of confounds) Returns diff --git a/nilearn/input_data/nifti_spheres_masker.py b/nilearn/input_data/nifti_spheres_masker.py index fc7e829d58..9eb8aef701 100644 --- a/nilearn/input_data/nifti_spheres_masker.py +++ b/nilearn/input_data/nifti_spheres_masker.py @@ -7,7 +7,7 @@ import numpy as np import warnings from sklearn import neighbors -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from ..image.resampling import coord_transform from .._utils.niimg_conversions import _safe_get_data @@ -167,9 +167,15 @@ class NiftiSpheresMasker(BaseMasker, CacheMixin): If smoothing_fwhm is not None, it gives the full-width half maximum in millimeters of the spatial smoothing to apply to the signal. - standardize: boolean, optional - If standardize is True, the time-series are centered and normed: - their mean is set to 0 and their variance to 1 in the time dimension. + standardize: {'zscore', 'psc', True, False}, default is 'zscore' + Strategy to standardize the signal. + 'zscore': the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + 'psc': Timeseries are shifted to zero mean value and scaled + to percent signal change (as compared to original mean signal). + True : the signal is z-scored. Timeseries are shifted + to zero mean and scaled to unit variance. + False : Do not standardize the data. detrend: boolean, optional This parameter is passed to signal.clean. 
Please see the related diff --git a/nilearn/input_data/tests/test_base_masker.py b/nilearn/input_data/tests/test_base_masker.py index 3903e9935b..967b6d03c2 100644 --- a/nilearn/input_data/tests/test_base_masker.py +++ b/nilearn/input_data/tests/test_base_masker.py @@ -37,7 +37,7 @@ def test_cropping_code_paths(): "low_pass": None, "t_r": None, "detrend": None, - "standardize": None + "standardize": 'zscore', } # Now do the two maskings diff --git a/nilearn/input_data/tests/test_masker_validation.py b/nilearn/input_data/tests/test_masker_validation.py index 7972769919..4522d7edb3 100644 --- a/nilearn/input_data/tests/test_masker_validation.py +++ b/nilearn/input_data/tests/test_masker_validation.py @@ -3,7 +3,7 @@ import numpy as np from sklearn.base import BaseEstimator -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from nilearn._utils.testing import assert_warns from nilearn.input_data.masker_validation import check_embedded_nifti_masker @@ -54,10 +54,10 @@ def test_check_embedded_nifti_masker(): if param_key not in ['memory', 'memory_level', 'n_jobs', 'verbose']: assert_equal(getattr(masker, param_key), - getattr(mask, param_key)) + getattr(mask, param_key)) else: assert_equal(getattr(masker, param_key), - getattr(owner, param_key)) + getattr(owner, param_key)) # Check use of mask as mask_img shape = (6, 8, 10, 5) diff --git a/nilearn/input_data/tests/test_multi_nifti_masker.py b/nilearn/input_data/tests/test_multi_nifti_masker.py index 620864d10c..ef46fe0814 100644 --- a/nilearn/input_data/tests/test_multi_nifti_masker.py +++ b/nilearn/input_data/tests/test_multi_nifti_masker.py @@ -14,7 +14,7 @@ from nose import SkipTest from nose.tools import assert_true, assert_false, assert_raises, assert_equal from numpy.testing import assert_array_equal -from sklearn.externals.joblib import Memory +from nilearn._utils.compat import Memory from nilearn._utils.exceptions import DimensionError from nilearn._utils.testing import assert_raises_regex, write_tmp_imgs @@ -116,7 +116,7 @@ def test_3d_images(): def test_joblib_cache(): - from sklearn.externals.joblib import hash + from nilearn._utils.compat import hash # Dummy mask mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 @@ -196,3 +196,37 @@ def test_dtype(): masked_img = masker.transform([[img]]) assert(masked_img[0].dtype == np.float32) + + +def test_standardization(): + data_shape = (9, 9, 5) + n_samples = 500 + + signals = np.random.randn(2, np.prod(data_shape), n_samples) + means = np.random.randn(2, np.prod(data_shape), 1) * 50 + 1000 + signals += means + + img1 = Nifti1Image(signals[0].reshape(data_shape + (n_samples,)), + np.eye(4)) + img2 = Nifti1Image(signals[1].reshape(data_shape + (n_samples,)), + np.eye(4)) + + mask = Nifti1Image(np.ones(data_shape), np.eye(4)) + + # z-score + masker = MultiNiftiMasker(mask, standardize='zscore') + trans_signals = masker.fit_transform([img1, img2]) + + for ts in trans_signals: + np.testing.assert_almost_equal(ts.mean(0), 0) + np.testing.assert_almost_equal(ts.std(0), 1) + + # psc + masker = MultiNiftiMasker(mask, standardize='psc') + trans_signals = masker.fit_transform([img1, img2]) + + for ts, s in zip(trans_signals, signals): + np.testing.assert_almost_equal(ts.mean(0), 0) + np.testing.assert_almost_equal(ts, + (s / s.mean(1)[:, np.newaxis] * + 100 - 100).T) diff --git a/nilearn/input_data/tests/test_nifti_labels_masker.py b/nilearn/input_data/tests/test_nifti_labels_masker.py index 18e7148e3b..fe936028a6 100644 --- 
a/nilearn/input_data/tests/test_nifti_labels_masker.py +++ b/nilearn/input_data/tests/test_nifti_labels_masker.py @@ -266,3 +266,38 @@ def test_nifti_labels_masker_resampling(): compressed_img2 = masker.inverse_transform(transformed2) np.testing.assert_array_equal(compressed_img.get_data(), compressed_img2.get_data()) + + +def test_standardization(): + data_shape = (9, 9, 5) + n_samples = 500 + + signals = np.random.randn(np.prod(data_shape), n_samples) + means = np.random.randn(np.prod(data_shape), 1) * 50 + 1000 + signals += means + img = nibabel.Nifti1Image( + signals.reshape(data_shape + (n_samples,)), np.eye(4) + ) + + labels = data_gen.generate_labeled_regions((9, 9, 5), 10) + + # Unstandarized + masker = NiftiLabelsMasker(labels, standardize=False) + unstandarized_label_signals = masker.fit_transform(img) + + # z-score + masker = NiftiLabelsMasker(labels, standardize='zscore') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal(trans_signals.std(0), 1) + + # psc + masker = NiftiLabelsMasker(labels, standardize='psc') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal(trans_signals, + (unstandarized_label_signals / + unstandarized_label_signals.mean(0) * + 100 - 100)) diff --git a/nilearn/input_data/tests/test_nifti_maps_masker.py b/nilearn/input_data/tests/test_nifti_maps_masker.py index 82c09b9b76..e931220999 100644 --- a/nilearn/input_data/tests/test_nifti_maps_masker.py +++ b/nilearn/input_data/tests/test_nifti_maps_masker.py @@ -298,3 +298,39 @@ def test_nifti_maps_masker_overlap(): allow_overlap=False) assert_raises_regex(ValueError, 'Overlap detected', non_overlapping_masker.fit_transform, fmri_img) + + +def test_standardization(): + data_shape = (9, 9, 5) + n_samples = 500 + + signals = np.random.randn(np.prod(data_shape), n_samples) + means = np.random.randn(np.prod(data_shape), 1) * 50 + 1000 + signals += means + img = nibabel.Nifti1Image(signals.reshape(data_shape + (n_samples,)), + np.eye(4)) + + maps, _ = data_gen.generate_maps((9, 9, 5), 10) + + # Unstandarized + masker = NiftiMapsMasker(maps, standardize=False) + unstandarized_label_signals = masker.fit_transform(img) + + # z-score + masker = NiftiMapsMasker(maps, + standardize='zscore') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal(trans_signals.std(0), 1) + + # psc + masker = NiftiMapsMasker(maps, standardize='psc') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal( + trans_signals, + unstandarized_label_signals / + unstandarized_label_signals.mean(0) * 100 - 100, + ) diff --git a/nilearn/input_data/tests/test_nifti_masker.py b/nilearn/input_data/tests/test_nifti_masker.py index 4c857ff820..94eb09aa54 100644 --- a/nilearn/input_data/tests/test_nifti_masker.py +++ b/nilearn/input_data/tests/test_nifti_masker.py @@ -233,7 +233,7 @@ def test_sessions(): def test_joblib_cache(): - from sklearn.externals.joblib import hash, Memory + from nilearn._utils.compat import hash, Memory mask = np.zeros((40, 40, 40)) mask[20, 20, 20] = 1 mask_img = Nifti1Image(mask, np.eye(4)) @@ -390,3 +390,31 @@ def test_dtype(): masker_2 = NiftiMasker(dtype='float64') assert(masker_2.fit_transform(img_32).dtype == np.float64) assert(masker_2.fit_transform(img_64).dtype == np.float64) + + +def 
test_standardization(): + data_shape = (9, 9, 5) + n_samples = 500 + + signals = np.random.randn(np.prod(data_shape), n_samples) + means = np.random.randn(np.prod(data_shape), 1) * 50 + 1000 + signals += means + img = Nifti1Image(signals.reshape(data_shape + (n_samples,)), np.eye(4)) + + mask = Nifti1Image(np.ones(data_shape), np.eye(4)) + + # z-score + masker = NiftiMasker(mask, standardize='zscore') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal(trans_signals.std(0), 1) + + # psc + masker = NiftiMasker(mask, standardize='psc') + trans_signals = masker.fit_transform(img) + + np.testing.assert_almost_equal(trans_signals.mean(0), 0) + np.testing.assert_almost_equal(trans_signals, + (signals / signals.mean(1)[:, np.newaxis] * + 100 - 100).T) diff --git a/nilearn/input_data/tests/test_nifti_spheres_masker.py b/nilearn/input_data/tests/test_nifti_spheres_masker.py index 21d1c2cef7..0df537ec1c 100644 --- a/nilearn/input_data/tests/test_nifti_spheres_masker.py +++ b/nilearn/input_data/tests/test_nifti_spheres_masker.py @@ -5,6 +5,7 @@ from nilearn._utils.testing import assert_raises_regex from nose.tools import assert_false + def test_seed_extraction(): data = np.random.random((3, 3, 3, 5)) img = nibabel.Nifti1Image(data, np.eye(4)) @@ -62,6 +63,7 @@ def test_anisotropic_sphere_extraction(): affine_2[0, 0] = 4 mask_img = nibabel.Nifti1Image(mask_img, affine=affine_2) masker = NiftiSpheresMasker([(2, 1, 2)], radius=1, mask_img=mask_img) + masker.fit() s = masker.transform(img) assert_array_equal(s[:, 0], data[1, 0, 1]) @@ -151,3 +153,26 @@ def test_is_nifti_spheres_masker_give_nans(): # When mask_img is provided, the seed interacts within the brain, so no nan masker = NiftiSpheresMasker(seeds=seed, radius=2., mask_img=mask_img) assert_false(np.isnan(np.sum(masker.fit_transform(img)))) + + +def test_standardization(): + data = np.random.random((3, 3, 3, 5)) + img = nibabel.Nifti1Image(data, np.eye(4)) + + # test zscore + masker = NiftiSpheresMasker([(1, 1, 1)], standardize='zscore') + # Test the fit + s = masker.fit_transform(img) + + np.testing.assert_almost_equal(s.mean(), 0) + np.testing.assert_almost_equal(s.std(), 1) + + # test psc + masker = NiftiSpheresMasker([(1, 1, 1)], standardize='psc') + # Test the fit + s = masker.fit_transform(img) + + np.testing.assert_almost_equal(s.mean(), 0) + np.testing.assert_almost_equal(s.ravel(), data[1, 1, 1] / + data[1, 1, 1].mean() * 100 - 100, + ) diff --git a/nilearn/masking.py b/nilearn/masking.py index 711d005f36..6708151537 100644 --- a/nilearn/masking.py +++ b/nilearn/masking.py @@ -8,7 +8,7 @@ import numpy as np from scipy import ndimage -from sklearn.externals.joblib import Parallel, delayed +from nilearn._utils.compat import Parallel, delayed from . import _utils from .image import new_img_like @@ -52,7 +52,7 @@ def _load_mask_img(mask_img, allow_empty=False): 'The mask is invalid as it is empty: it masks all data.') elif len(values) == 2: # If there are 2 different values, one of them must be 0 (background) - if not 0 in values: + if 0 not in values: raise ValueError('Background of the mask must be represented with' '0. Given mask contains: %s.' 
% values) elif len(values) != 2: @@ -91,8 +91,8 @@ def _extrapolate_out_mask(data, mask, iterations=1): extrapolation.append(masked_data[this_x, this_y, this_z]) extrapolation = np.array(extrapolation) - extrapolation = (np.nansum(extrapolation, axis=0) - / np.sum(np.isfinite(extrapolation), axis=0)) + extrapolation = (np.nansum(extrapolation, axis=0) / + np.sum(np.isfinite(extrapolation), axis=0)) extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0 new_data = np.zeros_like(masked_data) new_data[outer_shell] = extrapolation @@ -162,7 +162,8 @@ def intersect_masks(mask_imgs, threshold=0.5, connected=True): if np.any(grp_mask > 0) and connected: grp_mask = largest_connected_component(grp_mask) grp_mask = _utils.as_ndarray(grp_mask, dtype=np.int8) - return new_img_like(_utils.check_niimg_3d(mask_imgs[0]), grp_mask, ref_affine) + return new_img_like(_utils.check_niimg_3d(mask_imgs[0]), grp_mask, + ref_affine) def _post_process_mask(mask, affine, opening=2, connected=True, @@ -258,10 +259,10 @@ def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85, # Delayed import to avoid circular imports from .image.image import _compute_mean - mean_epi, affine = cache(_compute_mean, memory)(epi_img, - target_affine=target_affine, - target_shape=target_shape, - smooth=(1 if opening else False)) + mean_epi, affine = \ + cache(_compute_mean, memory)(epi_img, target_affine=target_affine, + target_shape=target_shape, + smooth=(1 if opening else False)) if ensure_finite: # Get rid of memmapping @@ -278,13 +279,14 @@ def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85, delta = sorted_input[lower_cutoff + 1:upper_cutoff + 1] \ - sorted_input[lower_cutoff:upper_cutoff] ia = delta.argmax() - threshold = 0.5 * (sorted_input[ia + lower_cutoff] - + sorted_input[ia + lower_cutoff + 1]) + threshold = 0.5 * (sorted_input[ia + lower_cutoff] + + sorted_input[ia + lower_cutoff + 1]) mask = mean_epi >= threshold mask, affine = _post_process_mask(mask, affine, opening=opening, - connected=connected, warning_msg="Are you sure that input " + connected=connected, + warning_msg="Are you sure that input " "data are EPI images not detrended. ") return new_img_like(epi_img, mask, affine) @@ -426,7 +428,8 @@ def compute_background_mask(data_imgs, border_size=2, # Delayed import to avoid circular imports from .image.image import _compute_mean data, affine = cache(_compute_mean, memory)(data_imgs, - target_affine=target_affine, target_shape=target_shape, + target_affine=target_affine, + target_shape=target_shape, smooth=False) background = np.median(get_border_data(data, border_size)) @@ -438,7 +441,8 @@ def compute_background_mask(data_imgs, border_size=2, mask = data != background mask, affine = _post_process_mask(mask, affine, opening=opening, - connected=connected, warning_msg="Are you sure that input " + connected=connected, + warning_msg="Are you sure that input " "images have a homogeneous background.") return new_img_like(data_imgs, mask, affine) @@ -866,9 +870,9 @@ def unmask(X, mask_img, order="F"): def _unmask_from_to_3d_array(w, mask): - """Unmask an image into whole brain, with off-mask voxels set to 0. Used as - a stand-alone function in low-level decoding (SpaceNet) and clustering (ReNA) - functions. + """Unmask an image into whole brain, with off-mask voxels set to 0. + Used as a stand-alone function in low-level decoding (SpaceNet) and + clustering (ReNA) functions. 
Parameters ---------- diff --git a/nilearn/mass_univariate/permuted_least_squares.py b/nilearn/mass_univariate/permuted_least_squares.py index bf4e616bf6..91d3dda2bf 100644 --- a/nilearn/mass_univariate/permuted_least_squares.py +++ b/nilearn/mass_univariate/permuted_least_squares.py @@ -8,7 +8,7 @@ import numpy as np from scipy import linalg from sklearn.utils import check_random_state -import sklearn.externals.joblib as joblib +from nilearn._utils.compat import joblib def normalize_matrix_on_axis(m, axis=0): @@ -34,11 +34,11 @@ def normalize_matrix_on_axis(m, axis=0): ... normalize_matrix_on_axis) >>> X = np.array([[0, 4], [1, 0]]) >>> normalize_matrix_on_axis(X) - array([[ 0., 1.], - [ 1., 0.]]) + array([[0., 1.], + [1., 0.]]) >>> normalize_matrix_on_axis(X, axis=1) - array([[ 0., 1.], - [ 1., 0.]]) + array([[0., 1.], + [1., 0.]]) """ if m.ndim > 2: diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py index 9db06858cd..9fab7632d0 100644 --- a/nilearn/plotting/__init__.py +++ b/nilearn/plotting/__init__.py @@ -53,5 +53,5 @@ def _set_mpl_backend(): 'show', 'plot_matrix', 'view_surf', 'view_img_on_surf', 'view_img', 'view_connectome', 'view_markers', 'find_parcellation_cut_coords', 'find_probabilistic_atlas_cut_coords', - 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi', + 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi' ] diff --git a/nilearn/plotting/cm.py b/nilearn/plotting/cm.py index 1c989f4caa..9ee553e664 100644 --- a/nilearn/plotting/cm.py +++ b/nilearn/plotting/cm.py @@ -38,16 +38,16 @@ def _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')): orig_cdict = cmap._segmentdata.copy() cdict = dict() - cdict['green'] = [(0.5*(1-p), c1, c2) - for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])] - cdict['blue'] = [(0.5*(1-p), c1, c2) - for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])] - cdict['red'] = [(0.5*(1-p), c1, c2) - for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])] + cdict['green'] = [(0.5 * (1 - p), c1, c2) + for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])] + cdict['blue'] = [(0.5 * (1 - p), c1, c2) + for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])] + cdict['red'] = [(0.5 * (1 - p), c1, c2) + for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])] for color in ('red', 'green', 'blue'): - cdict[color].extend([(0.5*(1+p), c1, c2) - for (p, c1, c2) in orig_cdict[color]]) + cdict[color].extend([(0.5 * (1 + p), c1, c2) + for (p, c1, c2) in orig_cdict[color]]) return cdict @@ -149,19 +149,28 @@ def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.): _cmaps_data['bwr'] = _colors.LinearSegmentedColormap.from_list( 'bwr', _bwr_data)._segmentdata.copy() + ################################################################################ # Build colormaps and their reverse. 
+ +# backported and adapted from matplotlib since it's deprecated in 3.2 +def _revcmap(data): + data_r = {} + for key, val in data.items(): + data_r[key] = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)] + return data_r + + _cmap_d = dict() for _cmapname in list(_cmaps_data.keys()): # needed as dict changes within loop _cmapname_r = _cmapname + '_r' _cmapspec = _cmaps_data[_cmapname] - _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec) + _cmaps_data[_cmapname_r] = _revcmap(_cmapspec) _cmap_d[_cmapname] = _colors.LinearSegmentedColormap( - _cmapname, _cmapspec, _cm.LUTSIZE) + _cmapname, _cmapspec, _cm.LUTSIZE) _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap( - _cmapname_r, _cmaps_data[_cmapname_r], - _cm.LUTSIZE) + _cmapname_r, _cmaps_data[_cmapname_r], _cm.LUTSIZE) ################################################################################ # A few transparent colormaps @@ -212,13 +221,15 @@ def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.): def dim_cmap(cmap, factor=.3, to_white=True): """ Dim a colormap to white, or to black. """ - assert factor >= 0 and factor <=1, ValueError( - 'Dimming factor must be larger than 0 and smaller than 1, %s was passed.' - % factor) + assert 0 <= factor <= 1, ValueError( + 'Dimming factor must be larger than 0 and smaller than 1, %s was ' + 'passed.' % factor) if to_white: - dimmer = lambda c: 1 - factor*(1-c) + def dimmer(c): + return 1 - factor * (1 - c) else: - dimmer = lambda c: factor*c + def dimmer(c): + return factor * c cdict = cmap._segmentdata.copy() for c_index, color in enumerate(('red', 'green', 'blue')): color_lst = list() @@ -227,19 +238,17 @@ def dim_cmap(cmap, factor=.3, to_white=True): cdict[color] = color_lst return _colors.LinearSegmentedColormap( - '%s_dimmed' % cmap.name, - cdict, - _cm.LUTSIZE) + '%s_dimmed' % cmap.name, cdict, _cm.LUTSIZE) def replace_inside(outer_cmap, inner_cmap, vmin, vmax): """ Replace a colormap by another inside a pair of values. """ assert vmin < vmax, ValueError('vmin must be smaller than vmax') - assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' - % vmin) - assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' - % vmax) + assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' + % vmin) + assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' 
+ % vmax) outer_cdict = outer_cmap._segmentdata.copy() inner_cdict = inner_cmap._segmentdata.copy() @@ -257,7 +266,6 @@ def replace_inside(outer_cmap, inner_cmap, vmin, vmax): this_cdict['green'].append((p, g, g)) this_cdict['blue'].append((p, b, b)) - for c_index, color in enumerate(('red', 'green', 'blue')): color_lst = list() @@ -266,8 +274,9 @@ def replace_inside(outer_cmap, inner_cmap, vmin, vmax): break color_lst.append((value, c1, c2)) - color_lst.append((vmin, outer_cmap(vmin)[c_index], - inner_cmap(vmin)[c_index])) + color_lst.append((vmin, + outer_cmap(vmin)[c_index], + inner_cmap(vmin)[c_index])) for value, c1, c2 in inner_cdict[color]: if value <= vmin: @@ -276,8 +285,9 @@ def replace_inside(outer_cmap, inner_cmap, vmin, vmax): break color_lst.append((value, c1, c2)) - color_lst.append((vmax, inner_cmap(vmax)[c_index], - outer_cmap(vmax)[c_index])) + color_lst.append((vmax, + inner_cmap(vmax)[c_index], + outer_cmap(vmax)[c_index])) for value, c1, c2 in outer_cdict[color]: if value <= vmax: @@ -287,8 +297,5 @@ def replace_inside(outer_cmap, inner_cmap, vmin, vmax): cdict[color] = color_lst return _colors.LinearSegmentedColormap( - '%s_inside_%s' % (inner_cmap.name, outer_cmap.name), - cdict, - _cm.LUTSIZE) - - + '%s_inside_%s' % (inner_cmap.name, outer_cmap.name), + cdict, _cm.LUTSIZE) diff --git a/nilearn/plotting/data/html/connectome_plot_template.html b/nilearn/plotting/data/html/connectome_plot_template.html index 92a72a5ddc..11e592a173 100644 --- a/nilearn/plotting/data/html/connectome_plot_template.html +++ b/nilearn/plotting/data/html/connectome_plot_template.html @@ -3,7 +3,8 @@ connectome plot - $INSERT_JS_LIBRARIES_HERE + + $INSERT_JS_LIBRARIES_HERE - diff --git a/nilearn/plotting/data/html/surface_plot_template.html b/nilearn/plotting/data/html/surface_plot_template.html index ef3d1487f4..ba56e986fe 100644 --- a/nilearn/plotting/data/html/surface_plot_template.html +++ b/nilearn/plotting/data/html/surface_plot_template.html @@ -9,6 +9,7 @@