diff --git a/.gitignore b/.gitignore index 5b5b00407..8a1784a8a 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ x86_64 i386 i686 +_build # Python files build diff --git a/.travis.yml b/.travis.yml index e0e827268..b8a0fdcce 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,8 +18,8 @@ after_success: - coveralls cache: directories: - - $HOME/nest-master + - $HOME/nest-2.12.0 - $HOME/nrn-7.4 - - $HOME/build/nest-master + - $HOME/build/nest-2.12.0 - $HOME/build/nrn-7.4 - $HOME/.cache/pip diff --git a/MANIFEST.in b/MANIFEST.in index 472921169..2ead9c992 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,8 @@ include pyNN/neuron/nmodl/*.mod +include pyNN/nest/extensions/*.h +include pyNN/nest/extensions/*.cpp +include pyNN/nest/extensions/CMakeLists.txt +include pyNN/nest/extensions/sli/* include pyNN/descriptions/templates/*/*.txt include test/parameters/* include test/system/*.py @@ -10,3 +14,4 @@ include LICENSE include AUTHORS include README.rst include changelog +include requirements.txt diff --git a/ci/install.sh b/ci/install.sh index 87a4c3865..c48da8e64 100644 --- a/ci/install.sh +++ b/ci/install.sh @@ -3,9 +3,9 @@ set -e # stop execution in case of errors sudo apt-get install -qq python-numpy python3-numpy python-scipy python3-scipy libgsl0-dev openmpi-bin libopenmpi-dev - pip install -r requirements.txt pip install coverage coveralls +pip install nose-testconfig source ci/install_brian.sh source ci/install_nest.sh source ci/install_neuron.sh diff --git a/ci/install_nest.sh b/ci/install_nest.sh index 78cc79e65..ca8a186e6 100644 --- a/ci/install_nest.sh +++ b/ci/install_nest.sh @@ -4,12 +4,19 @@ set -e # stop execution in case of errors if [ "$TRAVIS_PYTHON_VERSION" == "2.7" ] || [ "$TRAVIS_PYTHON_VERSION" == "3.5" ]; then echo -e "\n========== Installing NEST ==========\n" - #export NEST_VERSION="master" - export NEST_VERSION="2.12.0" + # Specify which version of NEST to install + export NEST_VERSION="master" + #export NEST_VERSION="2.12.0" + export NEST="nest-simulator-$NEST_VERSION" pip install cython==0.23.4 - #wget https://github.com/nest/nest-simulator/archive/$NEST_VERSION.tar.gz -O $HOME/$NEST.tar.gz; - wget https://github.com/nest/nest-simulator/releases/download/v$NEST_VERSION/nest-$NEST_VERSION.tar.gz -O $HOME/$NEST.tar.gz + + if [ "$NEST_VERSION" = "master" ]; then + wget https://github.com/nest/nest-simulator/archive/$NEST_VERSION.tar.gz -O $HOME/$NEST.tar.gz; + else + wget https://github.com/nest/nest-simulator/releases/download/v$NEST_VERSION/nest-$NEST_VERSION.tar.gz -O $HOME/$NEST.tar.gz + fi + pushd $HOME; tar xzf $NEST.tar.gz; ls; @@ -18,7 +25,7 @@ if [ "$TRAVIS_PYTHON_VERSION" == "2.7" ] || [ "$TRAVIS_PYTHON_VERSION" == "3.5" mkdir -p $HOME/build/$NEST pushd $HOME/build/$NEST export VENV=`python -c "import sys; print(sys.prefix)"`; - ln -s /opt/python/2.7.12/lib/libpython2.7.so $VENV/lib/libpython2.7.so; + ln -s /opt/python/2.7.13/lib/libpython2.7.so $VENV/lib/libpython2.7.so; ln -s /opt/python/3.5.2/lib/libpython3.5m.so $VENV/lib/libpython3.5.so; export PYTHON_INCLUDE_DIR=$VENV/include/python${TRAVIS_PYTHON_VERSION} if [ "$TRAVIS_PYTHON_VERSION" == "3.5" ]; then diff --git a/doc/build_examples.py b/doc/build_examples.py index e50f335b2..6a9457d55 100644 --- a/doc/build_examples.py +++ b/doc/build_examples.py @@ -35,6 +35,8 @@ "synaptic_input.py", "tsodyksmarkram.py", "varying_poisson.py", + "stochastic_synapses.py", + "stochastic_deterministic_comparison.py" ) # todo: add line numbering to code examples @@ -62,8 +64,9 @@ tmp_dir = tempfile.mkdtemp() 
results_dir = os.path.join(tmp_dir, "Results") -if not os.path.exists(image_dir): - os.makedirs(image_dir) +for dir_name in (image_dir, results_dir): + if not os.path.exists(dir_name): + os.makedirs(dir_name) def run(python_script, simulator, *extra_args): @@ -92,8 +95,9 @@ def list_files(filter): for filename in x[2] if filter in filename]) +print("Running examples in {}".format(tmp_dir)) for example in examples: - new_files = run(example, simulators.next()) + new_files = run(example, next(simulators)) if len(new_files) > 1: raise Exception("Multiple image files") img_path, = new_files diff --git a/doc/conf.py b/doc/conf.py index bead0019b..e4c04cd64 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -78,9 +78,9 @@ class MockNESTModule(mock.Mock): # built documents. # # The short X.Y version. -version = '0.8' +version = '0.9' # The full version, including alpha/beta/rc tags. -release = '0.8.3' +release = '0.9.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/data_handling.txt b/doc/data_handling.txt index d41fd87d6..6cad6740d 100644 --- a/doc/data_handling.txt +++ b/doc/data_handling.txt @@ -22,8 +22,8 @@ top-level data container, which contains one or more :class:`Segment`\s. Each :class:`Segment` is a container for data sharing a common time basis - a new :class:`Segment` is added every time the :func:`reset` function is called. -A :class:`Segment` can contain lists of :class:`AnalogSignal`, -:class:`AnalogSignalArray` and :class:`SpikeTrain` objects. These data objects +A :class:`Segment` can contain lists of :class:`AnalogSignal` and :class:`SpikeTrain` objects. +These data objects inherit from NumPy's array class, and so can be treated in further processing (analysis, visualization, etc.) in exactly the same way as NumPy arrays, but in addition they carry metadata about units, sampling interval, etc. diff --git a/doc/developers/contributing.txt b/doc/developers/contributing.txt index 6f4c4ee5f..9ee8381a8 100644 --- a/doc/developers/contributing.txt +++ b/doc/developers/contributing.txt @@ -114,6 +114,23 @@ The suggested way to do this is to write test functions, in a separate file, that take a simulator module as an argument, and then call these functions from ``test_neuron.py``, ``test_nest.py``, etc. +System tests defined in the scenarios directory are treated as a single test +(test_scenarios()) while running nosetests. To run only the tests within a file +named 'test_electrodes' located inside system/scenarios, use:: + + $ nosetests -s --tc=testFile:test_electrodes test_nest.py + +To run a single specific test named 'test_changing_electrode' located within +some file (and added to registry) inside system/scenarios, use:: + + $ nosetests -s --tc=testName:test_changing_electrode test_nest.py + +Note that this would also run the tests specified within the simulator specific +files such as test_brian.py, test_nest.py and test_neuron.py. To avoid +this, specify the 'test_scenarios function' on the command line:: + + $ nosetests -s --tc=testName:test_changing_electrode test_nest.py:test_scenarios + The ``test/unsorted`` directory contains a number of old tests that are either no longer useful or have not yet been adapted to the nose framework. These are not part of the test suite, but we are gradually adapting those tests that are @@ -249,4 +266,4 @@ If this is a final release, there are a few more steps: .. _`issue tracker`: https://github.com/NeuralEnsemble/PyNN/issues/ .. 
_fork: https://github.com/NeuralEnsemble/PyNN/fork .. _`PyNN repository`: https://github.com/NeuralEnsemble/PyNN/ -.. _contents: http://software.incf.org/software/pynn/pynn/folder_contents \ No newline at end of file +.. _contents: http://software.incf.org/software/pynn/pynn/folder_contents diff --git a/doc/examples.txt b/doc/examples.txt index ba7d2842e..7290cc8e8 100644 --- a/doc/examples.txt +++ b/doc/examples.txt @@ -15,4 +15,6 @@ Examples examples/synaptic_input examples/tsodyksmarkram examples/varying_poisson + examples/stochastic_synapses + examples/stochastic_deterministic_comparison examples/VAbenchmarks diff --git a/doc/examples/Izhikevich.txt b/doc/examples/Izhikevich.txt index 110371818..5b902c2c2 100644 --- a/doc/examples/Izhikevich.txt +++ b/doc/examples/Izhikevich.txt @@ -1,7 +1,7 @@ A selection of Izhikevich neurons ================================= -.. image:: ../images/examples/Izhikevich_nest_np1_20151005-081145.png +.. image:: ../images/examples/Izhikevich_nest_np1_20170505-150315.png .. literalinclude:: ../../examples/Izhikevich.py diff --git a/doc/examples/VAbenchmarks.txt b/doc/examples/VAbenchmarks.txt index eaf7bb241..e6e134aad 100644 --- a/doc/examples/VAbenchmarks.txt +++ b/doc/examples/VAbenchmarks.txt @@ -1,7 +1,7 @@ Balanced network of excitatory and inhibitory neurons ===================================================== -.. image:: ../images/examples/VAbenchmarks_CUBA_20151005-081325.png +.. image:: ../images/examples/VAbenchmarks_CUBA_20170505-150538.png .. literalinclude:: ../../examples/VAbenchmarks.py diff --git a/doc/examples/cell_type_demonstration.txt b/doc/examples/cell_type_demonstration.txt index 41fac3f5e..a90a61fb2 100644 --- a/doc/examples/cell_type_demonstration.txt +++ b/doc/examples/cell_type_demonstration.txt @@ -1,7 +1,7 @@ A demonstration of the responses of different standard neuron models to current injection ========================================================================================= -.. image:: ../images/examples/cell_type_demonstration_nest_20151005-081150.png +.. image:: ../images/examples/cell_type_demonstration_nest_20170505-150320.png .. literalinclude:: ../../examples/cell_type_demonstration.py diff --git a/doc/examples/current_injection.txt b/doc/examples/current_injection.txt index e22385500..7c1558fb5 100644 --- a/doc/examples/current_injection.txt +++ b/doc/examples/current_injection.txt @@ -1,7 +1,7 @@ Injecting time-varying current into a cell ========================================== -.. image:: ../images/examples/current_injection_neuron_20151005-081148.png +.. image:: ../images/examples/current_injection_neuron_20170505-150317.png .. literalinclude:: ../../examples/current_injection.py diff --git a/doc/examples/random_numbers.txt b/doc/examples/random_numbers.txt index 2c1fbaf42..59a4cee6a 100644 --- a/doc/examples/random_numbers.txt +++ b/doc/examples/random_numbers.txt @@ -1,7 +1,7 @@ An example to illustrate random number handling in PyNN ======================================================= -.. image:: ../images/examples/random_numbers_neuron_20151005-081153.png +.. image:: ../images/examples/random_numbers_neuron_20170505-150323.png .. literalinclude:: ../../examples/random_numbers.py diff --git a/doc/examples/simple_STDP.txt b/doc/examples/simple_STDP.txt index 6247d06d1..74d0f711f 100644 --- a/doc/examples/simple_STDP.txt +++ b/doc/examples/simple_STDP.txt @@ -1,7 +1,7 @@ A very simple example of using STDP =================================== -.. 
image:: ../images/examples/simple_stdp_neuron_20151005-081202.png +.. image:: ../images/examples/simple_stdp_neuron_20170505-150331.png .. literalinclude:: ../../examples/simple_STDP.py diff --git a/doc/examples/small_network.txt b/doc/examples/small_network.txt index e255091fc..505170da7 100644 --- a/doc/examples/small_network.txt +++ b/doc/examples/small_network.txt @@ -1,7 +1,7 @@ Small network created with the Population and Projection classes ================================================================ -.. image:: ../images/examples/small_network_nest_np1_20151005-081205.png +.. image:: ../images/examples/small_network_nest_np1_20170505-150334.png .. literalinclude:: ../../examples/small_network.py diff --git a/doc/examples/stochastic_deterministic_comparison.txt b/doc/examples/stochastic_deterministic_comparison.txt new file mode 100644 index 000000000..46fc9c9fc --- /dev/null +++ b/doc/examples/stochastic_deterministic_comparison.txt @@ -0,0 +1,7 @@ +Example of facilitating and depressing synapses in deterministic and stochastic versions +======================================================================================== + +.. image:: ../images/examples/stochastic_comparison_neuron_20170505-150418.png + +.. literalinclude:: ../../examples/stochastic_deterministic_comparison.py + diff --git a/doc/examples/stochastic_synapses.txt b/doc/examples/stochastic_synapses.txt new file mode 100644 index 000000000..ec6f2b29c --- /dev/null +++ b/doc/examples/stochastic_synapses.txt @@ -0,0 +1,7 @@ +Example of simple stochastic synapses +===================================== + +.. image:: ../images/examples/stochastic_synapses__nest_20170505-150345.png + +.. literalinclude:: ../../examples/stochastic_synapses.py + diff --git a/doc/examples/synaptic_input.txt b/doc/examples/synaptic_input.txt index 1fd2baa1a..2630eb5b2 100644 --- a/doc/examples/synaptic_input.txt +++ b/doc/examples/synaptic_input.txt @@ -1,7 +1,7 @@ A demonstration of the responses of different standard neuron models to synaptic input ====================================================================================== -.. image:: ../images/examples/synaptic_input_neuron_20151005-081208.png +.. image:: ../images/examples/synaptic_input_neuron_20170505-150337.png .. literalinclude:: ../../examples/synaptic_input.py diff --git a/doc/examples/tsodyksmarkram.txt b/doc/examples/tsodyksmarkram.txt index f0cb934b2..d62317c6c 100644 --- a/doc/examples/tsodyksmarkram.txt +++ b/doc/examples/tsodyksmarkram.txt @@ -1,7 +1,7 @@ Example of depressing and facilitating synapses =============================================== -.. image:: ../images/examples/tsodyksmarkram_nest_20151005-081211.png +.. image:: ../images/examples/tsodyksmarkram_nest_20170505-150340.png .. literalinclude:: ../../examples/tsodyksmarkram.py diff --git a/doc/examples/varying_poisson.txt b/doc/examples/varying_poisson.txt index 11e753bdc..0863b21f7 100644 --- a/doc/examples/varying_poisson.txt +++ b/doc/examples/varying_poisson.txt @@ -1,7 +1,7 @@ A demonstration of the use of callbacks to vary the rate of a SpikeSourcePoisson ================================================================================ -.. image:: ../images/examples/varying_poisson_neuron_20151005-081216.png +.. image:: ../images/examples/varying_poisson_neuron_20170505-150343.png .. 
literalinclude:: ../../examples/varying_poisson.py diff --git a/doc/images/examples/Izhikevich_nest_np1_20151005-081145.png b/doc/images/examples/Izhikevich_nest_np1_20151005-081145.png deleted file mode 100644 index 93c228a75..000000000 Binary files a/doc/images/examples/Izhikevich_nest_np1_20151005-081145.png and /dev/null differ diff --git a/doc/images/examples/Izhikevich_nest_np1_20170505-150315.png b/doc/images/examples/Izhikevich_nest_np1_20170505-150315.png new file mode 100644 index 000000000..30b76d01f Binary files /dev/null and b/doc/images/examples/Izhikevich_nest_np1_20170505-150315.png differ diff --git a/doc/images/examples/VAbenchmarks_CUBA_20151005-081325.png b/doc/images/examples/VAbenchmarks_CUBA_20151005-081325.png deleted file mode 100644 index 02157614b..000000000 Binary files a/doc/images/examples/VAbenchmarks_CUBA_20151005-081325.png and /dev/null differ diff --git a/doc/images/examples/VAbenchmarks_CUBA_20170505-150538.png b/doc/images/examples/VAbenchmarks_CUBA_20170505-150538.png new file mode 100644 index 000000000..eb551dd27 Binary files /dev/null and b/doc/images/examples/VAbenchmarks_CUBA_20170505-150538.png differ diff --git a/doc/images/examples/cell_type_demonstration_nest_20151005-081150.png b/doc/images/examples/cell_type_demonstration_nest_20151005-081150.png deleted file mode 100644 index cada3724e..000000000 Binary files a/doc/images/examples/cell_type_demonstration_nest_20151005-081150.png and /dev/null differ diff --git a/doc/images/examples/cell_type_demonstration_nest_20170505-150320.png b/doc/images/examples/cell_type_demonstration_nest_20170505-150320.png new file mode 100644 index 000000000..d4b364f1b Binary files /dev/null and b/doc/images/examples/cell_type_demonstration_nest_20170505-150320.png differ diff --git a/doc/images/examples/current_injection_neuron_20151005-081148.png b/doc/images/examples/current_injection_neuron_20151005-081148.png deleted file mode 100644 index e2ba64485..000000000 Binary files a/doc/images/examples/current_injection_neuron_20151005-081148.png and /dev/null differ diff --git a/doc/images/examples/current_injection_neuron_20170505-150317.png b/doc/images/examples/current_injection_neuron_20170505-150317.png new file mode 100644 index 000000000..4fa6f6892 Binary files /dev/null and b/doc/images/examples/current_injection_neuron_20170505-150317.png differ diff --git a/doc/images/examples/random_distributions.png b/doc/images/examples/random_distributions.png index eb8164d06..15bb9e078 100644 Binary files a/doc/images/examples/random_distributions.png and b/doc/images/examples/random_distributions.png differ diff --git a/doc/images/examples/random_numbers_neuron_20151005-081153.png b/doc/images/examples/random_numbers_neuron_20151005-081153.png deleted file mode 100644 index 401bed609..000000000 Binary files a/doc/images/examples/random_numbers_neuron_20151005-081153.png and /dev/null differ diff --git a/doc/images/examples/random_numbers_neuron_20170505-150323.png b/doc/images/examples/random_numbers_neuron_20170505-150323.png new file mode 100644 index 000000000..db9a65c27 Binary files /dev/null and b/doc/images/examples/random_numbers_neuron_20170505-150323.png differ diff --git a/doc/images/examples/simple_stdp_neuron_20151005-081202.png b/doc/images/examples/simple_stdp_neuron_20151005-081202.png deleted file mode 100644 index 96e066293..000000000 Binary files a/doc/images/examples/simple_stdp_neuron_20151005-081202.png and /dev/null differ diff --git a/doc/images/examples/simple_stdp_neuron_20170505-150331.png 
b/doc/images/examples/simple_stdp_neuron_20170505-150331.png new file mode 100644 index 000000000..f1ef949fd Binary files /dev/null and b/doc/images/examples/simple_stdp_neuron_20170505-150331.png differ diff --git a/doc/images/examples/small_network_nest_np1_20151005-081205.png b/doc/images/examples/small_network_nest_np1_20151005-081205.png deleted file mode 100644 index f15fbef25..000000000 Binary files a/doc/images/examples/small_network_nest_np1_20151005-081205.png and /dev/null differ diff --git a/doc/images/examples/small_network_nest_np1_20170505-150334.png b/doc/images/examples/small_network_nest_np1_20170505-150334.png new file mode 100644 index 000000000..3b14eca1e Binary files /dev/null and b/doc/images/examples/small_network_nest_np1_20170505-150334.png differ diff --git a/doc/images/examples/stochastic_comparison_neuron_20170505-150418.png b/doc/images/examples/stochastic_comparison_neuron_20170505-150418.png new file mode 100644 index 000000000..78bdb494b Binary files /dev/null and b/doc/images/examples/stochastic_comparison_neuron_20170505-150418.png differ diff --git a/doc/images/examples/stochastic_synapses__nest_20170505-150345.png b/doc/images/examples/stochastic_synapses__nest_20170505-150345.png new file mode 100644 index 000000000..55401785b Binary files /dev/null and b/doc/images/examples/stochastic_synapses__nest_20170505-150345.png differ diff --git a/doc/images/examples/synaptic_input_neuron_20151005-081208.png b/doc/images/examples/synaptic_input_neuron_20151005-081208.png deleted file mode 100644 index 49757fab8..000000000 Binary files a/doc/images/examples/synaptic_input_neuron_20151005-081208.png and /dev/null differ diff --git a/doc/images/examples/synaptic_input_neuron_20170505-150337.png b/doc/images/examples/synaptic_input_neuron_20170505-150337.png new file mode 100644 index 000000000..b9da1b501 Binary files /dev/null and b/doc/images/examples/synaptic_input_neuron_20170505-150337.png differ diff --git a/doc/images/examples/tsodyksmarkram_nest_20151005-081211.png b/doc/images/examples/tsodyksmarkram_nest_20151005-081211.png deleted file mode 100644 index 7449d254f..000000000 Binary files a/doc/images/examples/tsodyksmarkram_nest_20151005-081211.png and /dev/null differ diff --git a/doc/images/examples/tsodyksmarkram_nest_20170505-150340.png b/doc/images/examples/tsodyksmarkram_nest_20170505-150340.png new file mode 100644 index 000000000..8d1c7d35b Binary files /dev/null and b/doc/images/examples/tsodyksmarkram_nest_20170505-150340.png differ diff --git a/doc/images/examples/varying_poisson_neuron_20151005-081216.png b/doc/images/examples/varying_poisson_neuron_20151005-081216.png deleted file mode 100644 index c4524d766..000000000 Binary files a/doc/images/examples/varying_poisson_neuron_20151005-081216.png and /dev/null differ diff --git a/doc/images/examples/varying_poisson_neuron_20170505-150343.png b/doc/images/examples/varying_poisson_neuron_20170505-150343.png new file mode 100644 index 000000000..d90ac64e3 Binary files /dev/null and b/doc/images/examples/varying_poisson_neuron_20170505-150343.png differ diff --git a/doc/index.txt b/doc/index.txt index 34683c94a..0e39147cf 100644 --- a/doc/index.txt +++ b/doc/index.txt @@ -2,9 +2,6 @@ PyNN: documentation =================== -This is the documentation for version |release|. For versions 0.7 and earlier, -see http://neuralensemble.org/trac/PyNN - .. toctree:: :maxdepth: 1 @@ -25,6 +22,13 @@ see http://neuralensemble.org/trac/PyNN contributors release_notes +.. 
note:: + + This is the documentation for version |release|. Earlier versions: + + - `version 0.8`_ + - `version 0.7 and earlier`_ + .. add 'logging' after 'units' once configure_logging() implemented. .. add 'descriptions' after logging @@ -62,3 +66,6 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` + +.. _`version 0.8`: http://neuralensemble.org/docs/PyNN/0.8/ +.. _`version 0.7 and earlier`: http://neuralensemble.org/docs/PyNN/0.7/ \ No newline at end of file diff --git a/doc/installation.txt b/doc/installation.txt index b6ba8bda2..f4c800fac 100644 --- a/doc/installation.txt +++ b/doc/installation.txt @@ -10,7 +10,7 @@ Installing PyNN requires: * Python (version 2.6, 2.7, 3.3-3.6) * a recent version of the NumPy_ package * the lazyarray_ package - * the Neo_ package + * the Neo_ package (>= 0.5.0) * at least one of the supported simulators: e.g. NEURON, NEST, or Brian. Optional dependencies are: @@ -22,7 +22,7 @@ Optional dependencies are: Installing PyNN =============== -.. note:: if using NEURON, it is easiest if you install NEURON *before* you install PyNN (see below). +.. note:: if using NEURON or NEST, it is easiest if you install NEURON/NEST *before* you install PyNN (see below). The easiest way to get PyNN is to use pip_:: @@ -32,8 +32,8 @@ If you are running Debian or Ubuntu, there are :doc:`binary packages ` available. If you would prefer to install manually, :doc:`download the latest source distribution `, then run the setup script, e.g.:: - $ tar xzf PyNN-0.8.3.tar.gz - $ cd PyNN-0.8.3 + $ tar xzf PyNN-0.9.1.tar.gz + $ cd PyNN-0.9.1 $ python setup.py install This will install it to your Python :file:`site-packages` directory, and may @@ -49,6 +49,8 @@ Test it using something like the following:: >>> sim.end() (This assumes you have NEST installed). +If you get a warning "Unable to install NEST extensions. Certain models may not be available" then ensure the +program :command:`nest-config` is on your system PATH. With NEURON as the simulator, make sure you install NEURON *before* you install PyNN. The PyNN installation will then compile PyNN-specific membrane mechanisms, which are loaded when importing the :mod:`neuron` module:: @@ -116,20 +118,6 @@ Earlier versions of NEST will not work with this version of PyNN. The full installation instructions are available in the file INSTALL, which you can find in the NEST source package, or at ``_. -.. note:: NumPy must be installed *before* installing NEST. - -.. note:: Make sure you have the GNU Scientific Library (GSL) installed, - otherwise some PyNN standard models (e.g. :class:`IF_cond_exp`) will not be available. - -On Linux, most Unix variants and Mac OS X, installation is usually as simple as:: - - $ ./configure --with-mpi - $ make - $ make install - -This will install PyNEST to your Python :file:`site-packages` directory. -If you wish to install it elsewhere, see the full installation instructions. - Now try it out:: $ cd ~ diff --git a/doc/neurons.txt b/doc/neurons.txt index 95adcece5..f1cfcf202 100644 --- a/doc/neurons.txt +++ b/doc/neurons.txt @@ -378,8 +378,8 @@ or written to file using :meth:`write_data()`: Neo see the documentation at http://packages.python.org/neo. Here, it will suffice to note that a :class:`Block` is the top-level container, and contains one or more :class:`Segments`. Each :class:`Segment` is a container for data -that share a common time basis, and can contain lists of :class:`AnalogSignal`, -:class:`AnalogSignalArray` and :class:`SpikeTrain` objects. 
These data objects +that share a common time basis, and can contain lists of :class:`AnalogSignal` +and :class:`SpikeTrain` objects. These data objects inherit from NumPy array, and so can be treated in further processing (analysis, visualization, etc.) in exactly the same way as plain arrays, but in addition they carry metadata about units, sampling interval, etc. diff --git a/doc/pyplots/ac_source.py b/doc/pyplots/ac_source.py index 20db11323..d91d438f3 100644 --- a/doc/pyplots/ac_source.py +++ b/doc/pyplots/ac_source.py @@ -18,7 +18,7 @@ sim.run(500.0) t, i_inj = sine._get_data() -v = population.get_data().segments[0].analogsignalarrays[0] +v = population.get_data().segments[0].analogsignals[0] plot_current_source(t, i_inj, v, v_range=(-66, -49), diff --git a/doc/pyplots/continuous_time_spiking.py b/doc/pyplots/continuous_time_spiking.py index fbba8d0e9..ac2f378d6 100644 --- a/doc/pyplots/continuous_time_spiking.py +++ b/doc/pyplots/continuous_time_spiking.py @@ -17,7 +17,7 @@ def test_sim(on_or_off_grid, sim_time): prj = Projection(src, nrn, OneToOneConnector(), StaticSynapse(weight=weight)) nrn.record('v') run(sim_time) - return nrn.get_data().segments[0].analogsignalarrays[0] + return nrn.get_data().segments[0].analogsignals[0] sim_time = 10.0 off = test_sim('off_grid', sim_time) diff --git a/doc/pyplots/dc_source.py b/doc/pyplots/dc_source.py index 5d98f8d68..dbaf7d23e 100644 --- a/doc/pyplots/dc_source.py +++ b/doc/pyplots/dc_source.py @@ -17,7 +17,7 @@ sim.run(100.0) t, i_inj = pulse._get_data() -v = population.get_data().segments[0].analogsignalarrays[0] +v = population.get_data().segments[0].analogsignals[0] plot_current_source(t, i_inj, v, v_range=(-65.5, -59.5), diff --git a/doc/pyplots/neo_example.py b/doc/pyplots/neo_example.py index bd74b9e41..2a543eb9b 100644 --- a/doc/pyplots/neo_example.py +++ b/doc/pyplots/neo_example.py @@ -45,13 +45,13 @@ def plot_signal(signal, index, colour='b'): plt.setp(plt.gca().get_xticklabels(), visible=False) plt.legend() -n_panels = sum(a.shape[1] for a in data_out.segments[0].analogsignalarrays) + 2 +n_panels = sum(a.shape[1] for a in data_out.segments[0].analogsignals) + 2 plt.subplot(n_panels, 1, 1) plot_spiketrains(spikes_in.segments[0]) plt.subplot(n_panels, 1, 2) plot_spiketrains(data_out.segments[0]) panel = 3 -for array in data_out.segments[0].analogsignalarrays: +for array in data_out.segments[0].analogsignals: for i in range(array.shape[1]): plt.subplot(n_panels, 1, panel) plot_signal(array, i, colour='bg'[panel % 2]) diff --git a/doc/pyplots/noise_source.py b/doc/pyplots/noise_source.py index ffe0ea254..16297ea52 100644 --- a/doc/pyplots/noise_source.py +++ b/doc/pyplots/noise_source.py @@ -18,7 +18,7 @@ sim.run(500.0) t, i_inj = noise._get_data() -v = population.get_data().segments[0].analogsignalarrays[0] +v = population.get_data().segments[0].analogsignals[0] plot_current_source(t, i_inj, v, v_range=(-66, -48), diff --git a/doc/pyplots/reset_example.py b/doc/pyplots/reset_example.py index 684506f44..9be414a7c 100644 --- a/doc/pyplots/reset_example.py +++ b/doc/pyplots/reset_example.py @@ -20,7 +20,7 @@ sim.end() for segment in data.segments: - vm = segment.analogsignalarrays[0] + vm = segment.analogsignals[0] plt.plot(vm.times, vm, label=str(segment.annotations["amplitude"])) plt.legend(loc="upper left") diff --git a/doc/pyplots/step_source.py b/doc/pyplots/step_source.py index 6aa865ce5..4c6428041 100644 --- a/doc/pyplots/step_source.py +++ b/doc/pyplots/step_source.py @@ -18,7 +18,7 @@ sim.run(250.0) t, i_inj = 
steps._get_data() -v = population.get_data().segments[0].analogsignalarrays[0] +v = population.get_data().segments[0].analogsignals[0] plot_current_source(t, i_inj, v, #v_range=(-66, -49), diff --git a/doc/release_notes.txt b/doc/release_notes.txt index f0f35b8e1..5ca5ac06d 100644 --- a/doc/release_notes.txt +++ b/doc/release_notes.txt @@ -6,6 +6,8 @@ Release notes .. toctree:: :maxdepth: 1 + releases/0.9.1.txt + releases/0.9.0.txt releases/0.8.3.txt releases/0.8.2.txt releases/0.8.1.txt diff --git a/doc/releases/0.8.3.txt b/doc/releases/0.8.3.txt index a45ce7528..c91621244 100644 --- a/doc/releases/0.8.3.txt +++ b/doc/releases/0.8.3.txt @@ -10,7 +10,7 @@ Welcome to PyNN 0.8.3! NeuroML 2 --------- -The `neuroml` module has been completely rewritten, and updated from NeuroML v1 to v2. +The :mod:`neuroml` module has been completely rewritten, and updated from NeuroML v1 to v2. This module works like other PyNN "backends", i.e. ``import pyNN.neuroml as sim``... but instead of running a simulation, it exports the network to an XML file in NeuroML format. @@ -28,4 +28,5 @@ Other changes .. _Brian: http://briansimulator.org +.. _NEST: http://nest-simulator.org .. _`A couple of bug fixes`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aclosed+milestone%3A0.8.3 \ No newline at end of file diff --git a/doc/releases/0.9.0.txt b/doc/releases/0.9.0.txt new file mode 100644 index 000000000..a31f93a96 --- /dev/null +++ b/doc/releases/0.9.0.txt @@ -0,0 +1,21 @@ +======================== +PyNN 0.9.0 release notes +======================== + +April 12th 2017 + +Welcome to PyNN 0.9.0! + +This version of PyNN adopts the new, simplified Neo_ object model, first released as Neo 0.5.0, +for the data structures returned by :class:`Population.get_data()`. +For more information on the new Neo API, see the `release notes`_. + +The main difference for a PyNN user is that the :class:`AnalogSignalArray` class has been renamed +to :class:`AnalogSignal`, and similarly the :attr:`Segment.analogsignalarrays` attribute is now +called :attr:`Segment.analogsignals`. + +In addition, a `number of bugs`_ with current injection for the :mod:`pyNN.brian` module have been fixed. + +.. _Neo: http://neuralensemble.org/neo +.. _`release notes`: http://neo.readthedocs.io/en/0.5.0/releases/0.5.0.html +.. _`number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aissue+milestone%3A0.9.0+is%3Aclosed diff --git a/doc/releases/0.9.1.txt b/doc/releases/0.9.1.txt new file mode 100644 index 000000000..3cc5bc6fc --- /dev/null +++ b/doc/releases/0.9.1.txt @@ -0,0 +1,32 @@ +======================== +PyNN 0.9.1 release notes +======================== + +May 4th 2017 + +Welcome to PyNN 0.9.1! + + +Stochastic synapses +------------------- + +This release adds three new standard synapse models, available for the NEST and NEURON simulators. They are: + +* :class:`SimpleStochasticSynapse` - each spike is transmitted with a probability `p`. +* :class:`StochasticTsodyksMarkramSynapse` - synapse exhibiting facilitation and depression, implemented using the model + of Tsodyks, Markram et al., in its stochastic version. +* :class:`MultiQuantalSynapse` - synapse exhibiting facilitation and depression with multiple quantal release sites. + +There are some new example scripts which demonstrate use of the synapse models - see :doc:`../examples/stochastic_synapses` +and :doc:`../examples/stochastic_deterministic_comparison`. 
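As a quick illustration of the new synapse types described above (a minimal sketch, not part of the patch itself, assuming a NEST backend with the PyNN extension module built; parameter values follow the bundled example scripts, spike times are illustrative)::

    import pyNN.nest as sim

    sim.setup()
    stim = sim.Population(1, sim.SpikeSourceArray(spike_times=[10.0, 20.0, 30.0, 40.0]))
    cell = sim.Population(1, sim.IF_cond_exp())
    cell.record('gsyn_inh')
    prj = sim.Projection(stim, cell, sim.AllToAllConnector(),
                         synapse_type=sim.SimpleStochasticSynapse(p=0.5, weight=0.02, delay=0.5),
                         receptor_type='inhibitory')
    sim.run(100.0)
    data = cell.get_data().segments[0]  # inhibitory conductance; each presynaptic spike is transmitted with probability p
    sim.end()
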
+ +Note that the new models require building a NEST extension; +this is done automatically during installation (when running :command:`pip install` or :command:`setup.py install`). + + +Bug fixes +--------- + +A `number of bugs`_ have been fixed. + +.. _`number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aissue+milestone%3A0.9.1+is%3Aclosed diff --git a/examples/HH_cond_exp2.py b/examples/HH_cond_exp2.py index 0b03f2870..4c4a60591 100644 --- a/examples/HH_cond_exp2.py +++ b/examples/HH_cond_exp2.py @@ -60,8 +60,8 @@ plt.ion() data = hhcell.get_data() - signal_names = [s.name for s in data.segments[0].analogsignalarrays] - vm = data.segments[0].analogsignalarrays[signal_names.index('v')] + signal_names = [s.name for s in data.segments[0].analogsignals] + vm = data.segments[0].analogsignals[signal_names.index('v')] plt.plot(vm.times, vm) plt.xlabel("time (ms)") plt.ylabel("Vm (mV)") @@ -69,7 +69,7 @@ if simulator_name in var_names: plt.figure(2) for var_name, native_name in var_names[simulator_name].items(): - signal = data.segments[0].analogsignalarrays[signal_names.index(native_name)] + signal = data.segments[0].analogsignals[signal_names.index(native_name)] plt.plot(signal.times, signal, label=var_name) plt.xlabel("time (ms)") plt.legend() diff --git a/examples/multiquantal_synapses.py b/examples/multiquantal_synapses.py new file mode 100644 index 000000000..16e066815 --- /dev/null +++ b/examples/multiquantal_synapses.py @@ -0,0 +1,111 @@ +# encoding: utf-8 +""" +... + +""" + +import matplotlib +matplotlib.use('Agg') +import numpy as np +import neo +from pyNN.utility import get_simulator, init_logging, normalized_filename + + +# === Configure the simulator ================================================ + +sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file.", {"action": "store_true"}), + ("--debug", "Print debugging information")) + +if options.debug: + init_logging(None, debug=True) + +sim.setup(quit_on_end=False) + + +# === Build and instrument the network ======================================= + +spike_times = np.hstack((np.arange(10, 100, 10), np.arange(250, 350, 10))) +spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times)) + +connector = sim.AllToAllConnector() + +depressing = dict(U=0.8, tau_rec=100.0, tau_facil=0.0, weight=0.01, delay=0.5) +facilitating = dict(U=0.04, tau_rec=50.0, tau_facil=200.0, weight=0.01, delay=0.5) + +synapse_types = { + 'depressing, n=1': sim.MultiQuantalSynapse(n=1, **depressing), + 'depressing, n=5': sim.MultiQuantalSynapse(n=5, **depressing), + 'facilitating, n=1': sim.MultiQuantalSynapse(n=1, **facilitating), + 'facilitating, n=5': sim.MultiQuantalSynapse(n=5, **facilitating), +} + +populations = {} +projections = {} +for label in synapse_types: + populations[label] = sim.Population(1000, sim.IF_cond_exp(e_rev_I=-75, tau_syn_I=4.3), label=label) + populations[label].record('gsyn_inh') + projections[label] = sim.Projection(spike_source, populations[label], connector, + receptor_type='inhibitory', + synapse_type=synapse_types[label]) + projections[label].initialize(a=synapse_types[label].parameter_space['n'], u=synapse_types[label].parameter_space['U']) + +spike_source.record('spikes') + +if "nest" in sim.__name__: + print(sim.nest.GetStatus([projections['depressing, n=5'].nest_connections[0]])) + +# === Run the simulation ===================================================== + +sim.run(400.0) + + +# === Save the results, optionally plot a figure ============================= 
+ +for label, p in populations.items(): + filename = normalized_filename("Results", "multiquantal_synapses_%s" % label, + "pkl", options.simulator) + p.write_data(filename, annotations={'script_name': __file__}) + + +if options.plot_figure: + from pyNN.utility.plotting import Figure, Panel + #figure_filename = normalized_filename("Results", "multiquantal_synapses", + # "png", options.simulator) + figure_filename = "Results/multiquantal_synapses_{}.png".format(options.simulator) + + data = {} + for label in synapse_types: + data[label] = populations[label].get_data().segments[0] + gsyn = data[label].filter(name='gsyn_inh')[0] + gsyn_mean = neo.AnalogSignal(gsyn.mean(axis=1).reshape(-1, 1), + sampling_rate=gsyn.sampling_rate, + channel_index=np.array([0])) + gsyn_mean.name = 'gsyn_inh_mean' + data[label].analogsignals.append(gsyn_mean) + #import pdb; pdb.set_trace() + + def make_panel(population, label): + return Panel(population.get_data().segments[0].filter(name='gsyn_inh')[0], + data_labels=[label], yticks=True) + labels = sorted(synapse_types) + panels = [ + Panel(data[label].filter(name='gsyn_inh_mean')[0], + data_labels=[label], yticks=True) + for label in labels + ] + # add ylabel to top panel in each group + panels[0].options.update(ylabel=u'Synaptic conductance (µS)') + ##panels[len(synapse_types)].options.update(ylabel='Membrane potential (mV)') + # add xticks and xlabel to final panel + panels[-1].options.update(xticks=True, xlabel="Time (ms)") + + Figure(*panels, + title="Example of facilitating and depressing multi-quantal release synapses", + annotations="Simulated with %s" % options.simulator.upper() + ).save(figure_filename) + print(figure_filename) + + +# === Clean up and quit ======================================================== + +sim.end() diff --git a/examples/nineml_brunel.py b/examples/nineml_brunel.py index 634d42fa9..30e8e92c8 100644 --- a/examples/nineml_brunel.py +++ b/examples/nineml_brunel.py @@ -88,12 +88,12 @@ def instantaneous_firing_rate(segment, begin, end): if args.plot: Figure( Panel(stim_data.spiketrains, markersize=0.2, xlim=args.limits), - Panel(exc_data.analogsignalarrays[0], yticks=True, xlim=args.limits), - Panel(exc_data.analogsignalarrays[1], yticks=True, xlim=args.limits), + Panel(exc_data.analogsignals[0], yticks=True, xlim=args.limits), + Panel(exc_data.analogsignals[1], yticks=True, xlim=args.limits), Panel(exc_data.spiketrains[:100], markersize=0.5, xlim=args.limits), Panel(instantaneous_firing_rate(exc_data, *args.limits), yticks=True), - Panel(inh_data.analogsignalarrays[0], yticks=True, xlim=args.limits), - Panel(inh_data.analogsignalarrays[1], yticks=True, xlim=args.limits), + Panel(inh_data.analogsignals[0], yticks=True, xlim=args.limits), + Panel(inh_data.analogsignals[1], yticks=True, xlim=args.limits), Panel(inh_data.spiketrains[:100], markersize=0.5, xlim=args.limits), Panel(instantaneous_firing_rate(inh_data, *args.limits), xticks=True, xlabel="Time (ms)", yticks=True), diff --git a/examples/simple_STDP.py b/examples/simple_STDP.py index 5e37b33d6..9a1d48677 100644 --- a/examples/simple_STDP.py +++ b/examples/simple_STDP.py @@ -127,9 +127,10 @@ def __call__(self, t): return t + self.interval def get_weights(self): - return neo.AnalogSignalArray(self._weights, units='nA', sampling_period=self.interval * ms, - channel_index=numpy.arange(len(self._weights[0])), - name="weight") + signal = neo.AnalogSignal(self._weights, units='nA', sampling_period=self.interval * ms, + name="weight") + signal.channel_index = 
neo.ChannelIndex(numpy.arange(len(self._weights[0]))) + return signal weight_recorder = WeightRecorder(sampling_interval=1.0, projection=connections) diff --git a/examples/stochastic_deterministic_comparison.py b/examples/stochastic_deterministic_comparison.py new file mode 100644 index 000000000..d1dc20a09 --- /dev/null +++ b/examples/stochastic_deterministic_comparison.py @@ -0,0 +1,118 @@ +# encoding: utf-8 +""" +Example of facilitating and depressing synapses in deterministic and stochastic versions + +""" + +import matplotlib +matplotlib.use('Agg') +import numpy as np +import neo +from pyNN.utility import get_simulator, init_logging, normalized_filename + + +# === Configure the simulator ================================================ + +sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file.", {"action": "store_true"}), + ("--debug", "Print debugging information")) + +if options.debug: + init_logging(None, debug=True) + +sim.setup(quit_on_end=False) + + +# === Build and instrument the network ======================================= + +spike_times = np.hstack((np.arange(10, 100, 10), np.arange(250, 350, 10))) +spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times)) + +connector = sim.AllToAllConnector() + +depressing = dict(U=0.8, tau_rec=100.0, tau_facil=0.0, weight=0.01, delay=0.5) +facilitating = dict(U=0.04, tau_rec=50.0, tau_facil=200.0, weight=0.01, delay=0.5) + +synapse_types = { + 'depressing, deterministic': sim.TsodyksMarkramSynapse(**depressing), + 'depressing, stochastic': sim.StochasticTsodyksMarkramSynapse(**depressing), + 'facilitating, deterministic': sim.TsodyksMarkramSynapse(**facilitating), + 'facilitating, stochastic': sim.StochasticTsodyksMarkramSynapse(**facilitating), +} + +populations = {} +projections = {} +for label in synapse_types: + populations[label] = sim.Population(1000, sim.IF_cond_exp(e_rev_I=-75, tau_syn_I=4.3), label=label) + populations[label].record('gsyn_inh') + projections[label] = sim.Projection(spike_source, populations[label], connector, + receptor_type='inhibitory', + synapse_type=synapse_types[label]) + +spike_source.record('spikes') + + +# === Run the simulation ===================================================== + +sim.run(400.0) + + +# === Save the results, optionally plot a figure ============================= + +for label, p in populations.items(): + filename = normalized_filename("Results", "stochastic_comparison_%s" % label, + "pkl", options.simulator) + p.write_data(filename, annotations={'script_name': __file__}) + + +if options.plot_figure: + from pyNN.utility.plotting import Figure, Panel + #figure_filename = normalized_filename("Results", "stochastic_comparison", + # "png", options.simulator) + figure_filename = "Results/stochastic_comparison_{}.png".format(options.simulator) + + data = {} + for label in synapse_types: + data[label] = populations[label].get_data().segments[0] + if 'stochastic' in label: + gsyn = data[label].filter(name='gsyn_inh')[0] + gsyn_mean = neo.AnalogSignal(gsyn.mean(axis=1).reshape(-1, 1), + sampling_rate=gsyn.sampling_rate) + gsyn_mean.channel_index = neo.ChannelIndex(np.array([0])) + gsyn_mean.name = 'gsyn_inh_mean' + data[label].analogsignals.append(gsyn_mean) + #import pdb; pdb.set_trace() + + def make_panel(population, label): + return Panel(population.get_data().segments[0].filter(name='gsyn_inh')[0], + data_labels=[label], yticks=True) + panels = [ + Panel(data['depressing, deterministic'].filter(name='gsyn_inh')[0][:, 0], + 
data_labels=['depressing, deterministic'], yticks=True, + ylim=[0, 0.008]), + Panel(data['depressing, stochastic'].filter(name='gsyn_inh_mean')[0], + data_labels=['depressing, stochastic mean'], yticks=True, + ylim=[0, 0.008]), + Panel(data['facilitating, deterministic'].filter(name='gsyn_inh')[0][:, 0], + data_labels=['facilitating, deterministic'], yticks=True, + ylim=[0, 0.002]), + Panel(data['facilitating, stochastic'].filter(name='gsyn_inh_mean')[0], + data_labels=['facilitating, stochastic mean'], yticks=True, + ylim=[0, 0.002]), + + ] + # add ylabel to top panel in each group + panels[0].options.update(ylabel=u'Synaptic conductance (µS)') + ##panels[len(synapse_types)].options.update(ylabel='Membrane potential (mV)') + # add xticks and xlabel to final panel + panels[-1].options.update(xticks=True, xlabel="Time (ms)") + + Figure(*panels, + title="Example of facilitating and depressing synapses in deterministic and stochastic versions", + annotations="Simulated with %s" % options.simulator.upper() + ).save(figure_filename) + print(figure_filename) + + +# === Clean up and quit ======================================================== + +sim.end() diff --git a/examples/stochastic_synapses.py b/examples/stochastic_synapses.py new file mode 100644 index 000000000..6182e8f08 --- /dev/null +++ b/examples/stochastic_synapses.py @@ -0,0 +1,86 @@ +# encoding: utf-8 +""" +Example of simple stochastic synapses + +""" + +import matplotlib +matplotlib.use('Agg') +import numpy +from pyNN.utility import get_simulator, init_logging, normalized_filename + + +# === Configure the simulator ================================================ + +sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file.", {"action": "store_true"}), + ("--debug", "Print debugging information")) + +if options.debug: + init_logging(None, debug=True) + +sim.setup(quit_on_end=False) + + +# === Build and instrument the network ======================================= + +spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=numpy.arange(10, 100, 10))) + +connector = sim.AllToAllConnector() + +synapse_types = { + 'static': sim.StaticSynapse(weight=0.01, delay=0.5), + 'stochastic': sim.SimpleStochasticSynapse(p=0.5, weight=0.02, delay=0.5) +} + +populations = {} +projections = {} +for label in 'static', 'stochastic': + populations[label] = sim.Population(1, sim.IF_cond_exp(), label=label) + populations[label].record(['v', 'gsyn_inh']) + projections[label] = sim.Projection(spike_source, populations[label], connector, + receptor_type='inhibitory', + synapse_type=synapse_types[label]) + +spike_source.record('spikes') + + +# === Run the simulation ===================================================== + +sim.run(200.0) + + +# === Save the results, optionally plot a figure ============================= + +for label, p in populations.items(): + filename = normalized_filename("Results", "stochastic_synapses_%s" % label, + "pkl", options.simulator) + p.write_data(filename, annotations={'script_name': __file__}) + + +if options.plot_figure: + from pyNN.utility.plotting import Figure, Panel + figure_filename = normalized_filename("Results", "stochastic_synapses_", + "png", options.simulator) + panels = [] + for variable in ('gsyn_inh', 'v'): + for population in populations.values(): + panels.append( + Panel(population.get_data().segments[0].filter(name=variable)[0], + data_labels=[population.label], yticks=True), + ) + # add ylabel to top panel in each group + panels[0].options.update(ylabel=u'Synaptic 
conductance (µS)') + panels[3].options.update(ylabel='Membrane potential (mV)') + # add xticks and xlabel to final panel + panels[-1].options.update(xticks=True, xlabel="Time (ms)") + + Figure(*panels, + title="Example of simple stochastic synapses", + annotations="Simulated with %s" % options.simulator.upper() + ).save(figure_filename) + print(figure_filename) + + +# === Clean up and quit ======================================================== + +sim.end() diff --git a/examples/stochastic_tsodyksmarkram.py b/examples/stochastic_tsodyksmarkram.py new file mode 100644 index 000000000..6748a2296 --- /dev/null +++ b/examples/stochastic_tsodyksmarkram.py @@ -0,0 +1,103 @@ +# encoding: utf-8 +""" +Example of depressing and facilitating synapses + +Usage: stochastic_tsodyksmarkram.py [-h] [--plot-figure] [--debug DEBUG] simulator + +positional arguments: + simulator neuron, nest, brian or another backend simulator + +optional arguments: + -h, --help show this help message and exit + --plot-figure Plot the simulation results to a file. + --debug DEBUG Print debugging information + +""" + +import matplotlib +matplotlib.use('Agg') +import numpy as np +from pyNN.utility import get_simulator, init_logging, normalized_filename + + +# === Configure the simulator ================================================ + +sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file.", {"action": "store_true"}), + ("--debug", "Print debugging information")) + +if options.debug: + init_logging(None, debug=True) + +sim.setup(quit_on_end=False) + + +# === Build and instrument the network ======================================= + +spike_times = np.hstack((np.arange(10, 100, 10), np.arange(250, 350, 10))) +spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times)) + +connector = sim.AllToAllConnector() + +depressing = dict(U=0.8, tau_rec=100.0, tau_facil=0.0, weight=0.01, delay=0.5) +facilitating = dict(U=0.04, tau_rec=50.0, tau_facil=200.0, weight=0.01, delay=0.5) + +synapse_types = { + 'depressing, deterministic': sim.TsodyksMarkramSynapse(**depressing), + 'depressing, stochastic': sim.StochasticTsodyksMarkramSynapse(**depressing), + 'facilitating, deterministic': sim.TsodyksMarkramSynapse(**facilitating), + 'facilitating, stochastic': sim.StochasticTsodyksMarkramSynapse(**facilitating), +} + +populations = {} +projections = {} +for label in synapse_types: + populations[label] = sim.Population(3, sim.IF_cond_exp(e_rev_I=-75, tau_syn_I=[1.2, 6.7, 4.3]), label=label) + populations[label].record(['v', 'gsyn_inh']) + projections[label] = sim.Projection(spike_source, populations[label], connector, + receptor_type='inhibitory', + synapse_type=synapse_types[label]) + +spike_source.record('spikes') + + +# === Run the simulation ===================================================== + +sim.run(400.0) + + +# === Save the results, optionally plot a figure ============================= + +for label, p in populations.items(): + filename = normalized_filename("Results", "stochastic_tsodyksmarkram_%s" % label, + "pkl", options.simulator) + p.write_data(filename, annotations={'script_name': __file__}) + + +if options.plot_figure: + from pyNN.utility.plotting import Figure, Panel + #figure_filename = normalized_filename("Results", "stochastic_tsodyksmarkram", + # "png", options.simulator) + figure_filename = "Results/stochastic_tsodyksmarkram_{}.png".format(options.simulator) + panels = [] + for variable in ('gsyn_inh',): # 'v'): + for population in sorted(populations.values(), 
key=lambda p: p.label): + panels.append( + Panel(population.get_data().segments[0].filter(name=variable)[0], + data_labels=[population.label], yticks=True), + ) + # add ylabel to top panel in each group + panels[0].options.update(ylabel=u'Synaptic conductance (µS)') + ##panels[len(synapse_types)].options.update(ylabel='Membrane potential (mV)') + # add xticks and xlabel to final panel + panels[-1].options.update(xticks=True, xlabel="Time (ms)") + + Figure(*panels, + title="Example of facilitating and depressing synapses in deterministic and stochastic versions", + annotations="Simulated with %s" % options.simulator.upper() + ).save(figure_filename) + print(figure_filename) + + +# === Clean up and quit ======================================================== + +sim.end() diff --git a/examples/tools/VAbenchmark_graphs.py b/examples/tools/VAbenchmark_graphs.py index 41351979b..7e9af1530 100644 --- a/examples/tools/VAbenchmark_graphs.py +++ b/examples/tools/VAbenchmark_graphs.py @@ -28,7 +28,7 @@ def plot_signal(panel, signal, index, colour='b', linewidth='1', label='', fake_aps=False, hide_axis_labels=False): - label = "%s (Neuron %d)" % (label, signal.channel_index[index]) + label = "%s (Neuron %d)" % (label, signal.channel_index.index[index]) if fake_aps: # add fake APs for plotting v_thresh = fake_aps spike_indices = signal >= v_thresh - 0.05 * mV @@ -54,10 +54,10 @@ def plot_hist(panel, hist, bins, width, xlabel=None, ylabel=None, def plot_vm_traces(panel, segment, label, hide_axis_labels=False): - for array in segment.analogsignalarrays: - sorted_channels = sorted(array.channel_index) + for array in segment.analogsignals: + sorted_channels = sorted(array.channel_index.index) for j in range(2): - i = array.channel_index.tolist().index(j) + i = array.channel_index.index.tolist().index(j) print("plotting '%s' for %s" % (array.name, label)) col = 'rbgmck'[j % 6] plot_signal(panel, array, i, colour=col, linewidth=1, label=label, diff --git a/examples/tools/comparison_plot.py b/examples/tools/comparison_plot.py index bd0460a81..93cf48922 100644 --- a/examples/tools/comparison_plot.py +++ b/examples/tools/comparison_plot.py @@ -19,7 +19,7 @@ def variable_names(segment): - return set(signal.name for signal in segment.analogsignalarrays) + return set(signal.name for signal in segment.analogsignals) def plot_signal(panel, signal, index, colour='b', linewidth='1', label=''): diff --git a/pyNN/__init__.py b/pyNN/__init__.py index 0eee94dd1..9886acdbc 100644 --- a/pyNN/__init__.py +++ b/pyNN/__init__.py @@ -69,7 +69,7 @@ :license: CeCILL, see LICENSE for details. """ -__version__ = '0.8.3' +__version__ = '0.9.1' __all__ = ["common", "random", "nest", "neuron", "brian", "recording", "errors", "space", "descriptions", "standardmodels", "parameters", "core"] diff --git a/pyNN/brian/__init__.py b/pyNN/brian/__init__.py index b50ce6428..8d8d35804 100644 --- a/pyNN/brian/__init__.py +++ b/pyNN/brian/__init__.py @@ -28,13 +28,15 @@ def list_standard_models(): def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, - max_delay=DEFAULT_MAX_DELAY, **extra_params): + **extra_params): """ Should be called at the very beginning of a script. extra_params contains any keyword arguments that are required by a given simulator but not by others. 
""" - common.setup(timestep, min_delay, max_delay, **extra_params) + + max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) + common.setup(timestep, min_delay, **extra_params) simulator.state.clear() brian.set_global_preferences(**extra_params) simulator.state.dt = timestep # move to common.setup? diff --git a/pyNN/brian/standardmodels/electrodes.py b/pyNN/brian/standardmodels/electrodes.py index ca938a6ee..cec376165 100644 --- a/pyNN/brian/standardmodels/electrodes.py +++ b/pyNN/brian/standardmodels/electrodes.py @@ -14,6 +14,7 @@ import logging import numpy +import brian from brian import ms, nA, Hz, network_operation, amp as ampere from pyNN.standardmodels import electrodes, build_translations, StandardCurrentSource from pyNN.parameters import ParameterSpace, Sequence @@ -56,8 +57,10 @@ def set_native_parameters(self, parameters): self._reset() def _reset(self): - self.i = 0 - self.running = True + # self.i reset to 0 only at the start of a new run; not for continuation of existing runs + if not hasattr(self, 'running') or self.running == False: + self.i = 0 + self.running = True if self._is_computed: self._generate() @@ -71,7 +74,8 @@ def inject_into(self, cell_list): self.indices.extend([cell.parent.id_to_index(cell)]) def _update_current(self): - if self.running and simulator.state.t >= self.times[self.i] * 1e3: + # check if current timestamp is within dt/2 of target time; Brian uses seconds as unit of time + if self.running and abs(simulator.state.t - self.times[self.i] * 1e3) < (simulator.state.dt/2.0): for cell, idx in zip(self.cell_list, self.indices): if not self._is_playable: cell.parent.brian_group.i_inj[idx] = self.amplitudes[self.i] * ampere @@ -80,6 +84,25 @@ def _update_current(self): self.i += 1 if self.i >= len(self.times): self.running = False + if self._is_playable: + # ensure that currents are set to 0 after t_stop + for cell, idx in zip(self.cell_list, self.indices): + cell.parent.brian_group.i_inj[idx] = 0 + + def record(self): + self.i_state_monitor = brian.StateMonitor(self.cell_list[0].parent.brian_group[self.indices[0]], 'i_inj', record=0, when='start') + simulator.state.network.add(self.i_state_monitor) + + def get_data(self): + # code based on brian/recording.py:_get_all_signals() + # because we use `when='start'`, we need to add the + # value at the end of the final time step. + device = self.i_state_monitor + current_t_value = device.P.state_('t')[device.record] + current_i_value = device.P.state_(device.varname)[device.record] + t_all_values = numpy.append(device.times, current_t_value) + i_all_values = numpy.append(device._values, current_i_value) + return (t_all_values / ms, i_all_values / nA) class StepCurrentSource(BrianCurrentSource, electrodes.StepCurrentSource): @@ -112,10 +135,13 @@ def __init__(self, **parameters): self._generate() def _generate(self): - self.times = numpy.arange(self.start, self.stop, simulator.state.dt) + # Note: Brian uses seconds as unit of time + temp_num_t = len(numpy.arange(self.start, self.stop + simulator.state.dt * 1e-3, simulator.state.dt * 1e-3)) + self.times = numpy.array([self.start+(i*simulator.state.dt*1e-3) for i in range(temp_num_t)]) def _compute(self, time): - return self.amplitude * numpy.sin(time * 2 * numpy.pi * self.frequency / 1000. 
+ 2 * numpy.pi * self.phase / 360) + # Note: Brian uses seconds as unit of time; frequency is specified in Hz; thus no conversion required + return self.offset + self.amplitude * numpy.sin((time-self.start) * 2 * numpy.pi * self.frequency + 2 * numpy.pi * self.phase / 360) class DCSource(BrianCurrentSource, electrodes.DCSource): @@ -140,6 +166,12 @@ def _generate(self): else: self.times = [0.0, self.start, self.stop] self.amplitudes = [0.0, self.amplitude, 0.0] + # ensures proper handling of changes in parameters on the fly + if self.start < simulator.state.t*1e-3 < self.stop: + self.times.insert(-1, simulator.state.t*1e-3) + self.amplitudes.insert(-1, self.amplitude) + if (self.start==0 and self.i==2) or (self.start!=0 and self.i==3): + self.i -= 1 class NoisyCurrentSource(BrianCurrentSource, electrodes.NoisyCurrentSource): @@ -160,7 +192,9 @@ def __init__(self, **parameters): self._generate() def _generate(self): - self.times = numpy.arange(self.start, self.stop, simulator.state.dt) + temp_num_t = len(numpy.arange(self.start, self.stop, max(self.dt, simulator.state.dt * 1e-3))) + self.times = numpy.array([self.start+(i*max(self.dt, simulator.state.dt * 1e-3)) for i in range(temp_num_t)]) + self.times = numpy.append(self.times, self.stop) def _compute(self, time): - return self.mean + (self.stdev * self.dt) * numpy.random.randn() + return self.mean + self.stdev * numpy.random.randn() diff --git a/pyNN/common/control.py b/pyNN/common/control.py index ec87f5ae5..cafe6f525 100644 --- a/pyNN/common/control.py +++ b/pyNN/common/control.py @@ -31,7 +31,7 @@ def __init__(self): def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, - max_delay=DEFAULT_MAX_DELAY, **extra_params): + **extra_params): """ Initialises/reinitialises the simulator. Any existing network structure is destroyed. @@ -41,6 +41,7 @@ def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, `extra_params` contains any keyword arguments that are required by a given simulator but not by others. """ + max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) invalid_extra_params = ('mindelay', 'maxdelay', 'dt', 'time_step') for param in invalid_extra_params: if param in extra_params: diff --git a/pyNN/common/populations.py b/pyNN/common/populations.py index 93a087237..e93881c33 100644 --- a/pyNN/common/populations.py +++ b/pyNN/common/populations.py @@ -405,8 +405,15 @@ def initialize(self, **initial_values): self.initial_values[variable] = initial_value def find_units(self, variable): + """ + Returns units of the specified variable or parameter, as a string. + Works for all the recordable variables and neuron parameters of all standard models. + """ return self.celltype.units[variable] + def annotate(self, **annotations): + self.annotations.update(annotations) + def can_record(self, variable): """Determine whether `variable` can be recorded from this population.""" return self.celltype.can_record(variable) @@ -420,7 +427,7 @@ def record(self, variables, to_file=None, sampling_interval=None): names. For a given celltype class, `celltype.recordable` contains a list of variables that can be recorded for that celltype. - If specified, `to_file` should be a Neo IO instance and `write_data()` + If specified, `to_file` should be either a filename or a Neo IO instance and `write_data()` will be automatically called when `end()` is called. 
`sampling_interval` should be a value in milliseconds, and an integer @@ -438,6 +445,7 @@ def record(self, variables, to_file=None, sampling_interval=None): self.recorder.record(variables, self._record_filter, sampling_interval) if isinstance(to_file, basestring): self.recorder.file = to_file + self._simulator.state.write_on_end.append((self, variables, self.recorder.file)) @deprecated("record('v')") def record_v(self, to_file=True): @@ -735,9 +743,6 @@ def _set_positions(self, pos_array): giving the x,y,z coordinates of all the neurons (soma, in the case of non-point models).""") - def annotate(self, **annotations): - self.annotations.update(annotations) - def describe(self, template='population_default.txt', engine='default'): """ Returns a human-readable description of the population. @@ -828,6 +833,7 @@ def __init__(self, parent, selector, label=None): self.local_cells = self.all_cells[self._mask_local] self.first_id = numpy.min(self.all_cells) # only works if we assume all_cells is sorted, otherwise could use min() self.last_id = numpy.max(self.all_cells) + self.annotations = {} self.recorder = self.parent.recorder self._record_filter = self.all_cells @@ -923,6 +929,7 @@ def describe(self, template='populationview_default.txt', engine='default'): "parent": self.parent.label, "mask": self.mask, "size": self.size} + context.update(self.annotations) return descriptions.render(engine, template, context) @@ -1031,6 +1038,10 @@ def receptor_types(self): return rts def find_units(self, variable): + """ + Returns units of the specified variable or parameter, as a string. + Works for all the recordable variables and neuron parameters of all standard models. + """ units = set(p.find_units(variable) for p in self.populations) if len(units) > 1: raise ValueError("Inconsistent units") @@ -1240,7 +1251,7 @@ def record(self, variables, to_file=None, sampling_interval=None): names. For a given celltype class, `celltype.recordable` contains a list of variables that can be recorded for that celltype. - If specified, `to_file` should be a Neo IO instance and `write_data()` + If specified, `to_file` should be either a filename or a Neo IO instance and `write_data()` will be automatically called when `end()` is called. 
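A minimal usage sketch of the behaviour described in the updated docstring (the backend, cell type and filename here are arbitrary; `to_file` may be a filename or a Neo IO instance, and `annotate()` is now provided by the base population class)::

    import pyNN.nest as sim

    sim.setup(timestep=0.1)
    pop = sim.Population(10, sim.IF_cond_exp())
    pop.annotate(experiment="demo")            # free-form metadata attached to the population
    pop.record('v', to_file="vm_traces.pkl")   # filename or Neo IO instance
    sim.run(100.0)
    sim.end()                                  # write_data() is called here for file-backed recording
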
""" for p in self.populations: @@ -1305,19 +1316,18 @@ def get_data(self, variables='all', gather=True, clear=False, annotations=None): name = self.label description = self.describe() blocks = [p.get_data(variables, gather, clear) for p in self.populations] + # adjust channel_ids to match assembly channel indices offset = 0 for block, p in zip(blocks, self.populations): for segment in block.segments: - #segment.name = name - #segment.description = description - for signal_array in segment.analogsignalarrays: - signal_array.channel_index = numpy.array(signal_array.channel_index) + offset # hack + for signal_array in segment.analogsignals: + signal_array.channel_index.channel_ids += offset offset += p.size for i, block in enumerate(blocks): logger.debug("%d: %s", i, block.name) for j, segment in enumerate(block.segments): logger.debug(" %d: %s", j, segment.name) - for arr in segment.analogsignalarrays: + for arr in segment.analogsignals: logger.debug(" %s %s", arr.shape, arr.name) merged_block = blocks[0] for block in blocks[1:]: diff --git a/pyNN/common/procedural_api.py b/pyNN/common/procedural_api.py index 32ed44850..22aa406d9 100644 --- a/pyNN/common/procedural_api.py +++ b/pyNN/common/procedural_api.py @@ -74,11 +74,11 @@ def record(variables, source, filename, sampling_interval=None, annotations=None # would actually like to be able to record to an array and choose later # whether to write to a file. if not isinstance(source, (BasePopulation, Assembly)): - source = source.parent + if isinstance(source, (IDMixin)): + source = source.as_view() source.record(variables, to_file=filename, sampling_interval=sampling_interval) if annotations: source.annotate(**annotations) - simulator.state.write_on_end.append((source, variables, filename)) return record diff --git a/pyNN/common/projections.py b/pyNN/common/projections.py index 6333dcd31..d3f578882 100644 --- a/pyNN/common/projections.py +++ b/pyNN/common/projections.py @@ -104,6 +104,7 @@ def __init__(self, presynaptic_neurons, postsynaptic_neurons, connector, if label is None: if self.pre.label and self.post.label: self.label = u"%s→%s" % (self.pre.label, self.post.label) + self.initial_values = {} Projection._nProj += 1 def __len__(self): @@ -177,6 +178,31 @@ def set(self, **attributes): parameter_space = self.synapse_type.translate(parameter_space) self._set_attributes(parameter_space) + def initialize(self, **initial_values): + """ + Set initial values of state variables of synaptic plasticity models. + + Values passed to initialize() may be: + (1) single numeric values (all neurons set to the same value) + (2) RandomDistribution objects + (3) a 2D array with the same dimensions as the connectivity matrix + (as returned by `get(format='array')` + (4) a mapping function, which accepts a single float argument (the + distance between pre- and post-synaptic cells) and returns a single value. + + Values should be expressed in the standard PyNN units (i.e. millivolts, + nanoamps, milliseconds, microsiemens, nanofarads, event per second). 
+ + Example:: + + prj.initialize(u=-70.0) + """ + for variable, value in initial_values.items(): + logger.debug("In Projection '%s', initialising %s to %s" % (self.label, variable, value)) + initial_value = LazyArray(value, shape=(self.size,), dtype=float) + self._set_initial_value_array(variable, initial_value) + self.initial_values[variable] = initial_value + def _value_list_to_array(self, attributes): """Convert a list of connection parameters/attributes to a 2D array.""" connection_mask = ~numpy.isnan(self.get('weight', format='array', gather='all')) diff --git a/pyNN/connectors.py b/pyNN/connectors.py index 4882936c7..bdaa4090e 100644 --- a/pyNN/connectors.py +++ b/pyNN/connectors.py @@ -1025,7 +1025,7 @@ def connect(self, projection): source_index = self.rng.next(1, 'uniform_int', {"low": 0, "high": projection.pre.size}, mask_local=False)[0] - target_index = self.rng.choice(possible_targets, size=1) + target_index = self.rng.choice(possible_targets, size=1)[0] connections[target_index].append(source_index) def build_source_masks(mask=None): diff --git a/pyNN/core.py b/pyNN/core.py index 326ab1bd5..87eeb76f0 100644 --- a/pyNN/core.py +++ b/pyNN/core.py @@ -90,3 +90,4 @@ def projection(self, projection): def __call__(self, i, j): raise NotImplementedError + diff --git a/pyNN/mock/__init__.py b/pyNN/mock/__init__.py index 755f55170..5d54dc7af 100644 --- a/pyNN/mock/__init__.py +++ b/pyNN/mock/__init__.py @@ -29,8 +29,10 @@ def list_standard_models(): def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, - max_delay=DEFAULT_MAX_DELAY, **extra_params): - common.setup(timestep, min_delay, max_delay, **extra_params) + **extra_params): + + max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) + common.setup(timestep, min_delay, **extra_params) simulator.state.clear() simulator.state.dt = timestep # move to common.setup? simulator.state.min_delay = min_delay diff --git a/pyNN/moose/__init__.py b/pyNN/moose/__init__.py index 25c59b401..ca28ff405 100644 --- a/pyNN/moose/__init__.py +++ b/pyNN/moose/__init__.py @@ -30,13 +30,14 @@ # ============================================================================== -def setup(timestep=0.1, min_delay=0.1, max_delay=10.0, **extra_params): +def setup(timestep=0.1, min_delay=0.1, **extra_params): """ Should be called at the very beginning of a script. extra_params contains any keyword arguments that are required by a given simulator but not by others. """ - common.setup(timestep, min_delay, max_delay, **extra_params) + max_delay = extra_params.get('max_delay', 10.0) + common.setup(timestep, min_delay, **extra_params) simulator.state.dt = timestep if not os.path.exists(temporary_directory): os.mkdir(temporary_directory) diff --git a/pyNN/nemo/__init__.py b/pyNN/nemo/__init__.py index 5373bfd37..2ce40849f 100644 --- a/pyNN/nemo/__init__.py +++ b/pyNN/nemo/__init__.py @@ -41,7 +41,7 @@ def list_standard_models(): # ============================================================================== -def setup(timestep=1, min_delay=1, max_delay=10.0, **extra_params): +def setup(timestep=1, min_delay=1, **extra_params): """ Should be called at the very beginning of a script. 
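As these hunks show, `max_delay` is no longer a positional argument of `setup()`; it is read from `extra_params` with a backend-specific default. A minimal sketch of the resulting calling convention (backend and values are arbitrary)::

    import pyNN.nest as sim
    # omitting max_delay falls back to the backend's default
    sim.setup(timestep=0.1, min_delay=0.1, max_delay=10.0)
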
extra_params contains any keyword arguments that are required by a given @@ -52,7 +52,8 @@ def setup(timestep=1, min_delay=1, max_delay=10.0, **extra_params): if (min_delay < 1): raise Exception("It is not currently possible to have a min_delay less than 1ms with this simulator") - common.setup(timestep, min_delay, max_delay, **extra_params) + max_delay = extra_params.get('max_delay', 10.0) + common.setup(timestep, min_delay, **extra_params) simulator.state = simulator._State(timestep, min_delay, max_delay) simulator.spikes_array_list = [] simulator.recorder_list = [] diff --git a/pyNN/nest/__init__.py b/pyNN/nest/__init__.py index cbffd1e7c..fa8f37ffb 100644 --- a/pyNN/nest/__init__.py +++ b/pyNN/nest/__init__.py @@ -7,6 +7,7 @@ """ +import warnings import numpy try: import tables # due to freeze when importing nest before tables @@ -47,6 +48,11 @@ if logger.level == logging.NOTSET: logger.setLevel(logging.ERROR) +try: + nest.Install('pynn_extensions') +except nest.NESTError as err: + warnings.warn("Unable to install NEST extensions. Certain models may not be available.\nFurther details: {}".format(err)) + # ============================================================================== # Utility functions @@ -86,7 +92,7 @@ def _discrepancy_due_to_rounding(parameters, output_values): def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, - max_delay=DEFAULT_MAX_DELAY, **extra_params): + **extra_params): """ Should be called at the very beginning of a script. @@ -110,7 +116,8 @@ def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, `rng_seeds_seed`: a single seed that will be used to generate random values for `rng_seeds` """ - common.setup(timestep, min_delay, max_delay, **extra_params) + max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) + common.setup(timestep, min_delay, **extra_params) simulator.state.clear() for key in ("verbosity", "spike_precision", "recording_precision", "threads"): @@ -128,7 +135,7 @@ def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, simulator.state.rng_seeds = rng.next(n, 'uniform_int', {'low': 0, 'high': 100000}).tolist() # set resolution simulator.state.dt = timestep - # Set min_delay and max_delay for all synapse models + # Set min_delay and max_delay simulator.state.set_delays(min_delay, max_delay) nest.SetDefaults('spike_generator', {'precise_times': True}) return rank() diff --git a/pyNN/nest/extensions/CMakeLists.txt b/pyNN/nest/extensions/CMakeLists.txt new file mode 100644 index 000000000..df8c09cb5 --- /dev/null +++ b/pyNN/nest/extensions/CMakeLists.txt @@ -0,0 +1,282 @@ +# :copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. +# :license: CeCILL, see LICENSE for details. + +cmake_minimum_required( VERSION 2.8.12 ) + +# This CMakeLists.txt is configured to build your external module for NEST. +# +# The configuration requires a compiled and installed NEST; if `nest-config` is +# not in the PATH, please specify the absolute path with `-Dwith-nest=...`. +# +# For more informations on how to extend and use your module see: +# https://nest.github.io/nest-simulator/extension_modules + +# 1) Name your module here, i.e. 
add later with -Dexternal-modules=my: +set( SHORT_NAME pynn ) + +# the complete module name is here: +set( MODULE_NAME pynn_extensions ) + +# 2) Add all your sources here +set( MODULE_SOURCES + pynn_extensions.h pynn_extensions.cpp + simple_stochastic_connection.h + stochastic_stp_connection.h + stochastic_stp_connection_impl.h + ) + +# 3) We require a header name like this: +set( MODULE_HEADER ${MODULE_NAME}.h ) +# containing the class description of the class extending the SLIModule + +# 4) Specify your module version +set( MODULE_VERSION_MAJOR 1 ) +set( MODULE_VERSION_MINOR 0 ) +set( MODULE_VERSION "${MODULE_VERSION_MAJOR}.${MODULE_VERSION_MINOR}" ) + +# 5) Leave the rest as is. All files in `sli` will be installed to +# `share/nest/sli/`, so that NEST will find the during initialization. + +# Leave the call to "project(...)" for after the compiler is determined. + +# Set the `nest-config` executable to use during configuration. +set( with-nest OFF CACHE STRING "Specify the `nest-config` executable." ) + +# If it is not set, look for a `nest-config` in the PATH. +if ( NOT with-nest ) + # try find the program ourselves + find_program( NEST_CONFIG + NAMES nest-config + ) + if ( NEST_CONFIG STREQUAL "NEST_CONFIG-NOTFOUND" ) + message( FATAL_ERROR "Cannot find the program `nest-config`. Specify via -Dwith-nest=... ." ) + endif () +else () + set( NEST_CONFIG ${with-nest} ) +endif () + +# Use `nest-config` to get the compile and installation options used with the +# NEST installation. + +# Get the compiler that was used for NEST. +execute_process( + COMMAND ${NEST_CONFIG} --compiler + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_COMPILER + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# One check on first execution, if `nest-config` is working. +if ( NOT RES_VAR EQUAL 0 ) + message( FATAL_ERROR "Cannot run `${NEST_CONFIG}`. Please specify correct `nest-config` via -Dwith-nest=... " ) +endif () + +# Setting the compiler has to happen before the call to "project(...)" function. +set( CMAKE_CXX_COMPILER "${NEST_COMPILER}" ) + +project( ${MODULE_NAME} CXX ) + +# Get the install prefix. +execute_process( + COMMAND ${NEST_CONFIG} --prefix + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_PREFIX + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Use the `NEST_PREFIX` as `CMAKE_INSTALL_PREFIX`. +set( CMAKE_INSTALL_PREFIX "${NEST_PREFIX}" CACHE STRING "Install path prefix, prepended onto install directories." FORCE ) + +# Get the CXXFLAGS. +execute_process( + COMMAND ${NEST_CONFIG} --cflags + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_CXXFLAGS + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Get the Includes. +execute_process( + COMMAND ${NEST_CONFIG} --includes + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_INCLUDES + OUTPUT_STRIP_TRAILING_WHITESPACE +) +if ( NEST_INCLUDES ) + # make a cmake list + string( REPLACE " " ";" NEST_INCLUDES_LIST "${NEST_INCLUDES}" ) + foreach ( inc_complete ${NEST_INCLUDES_LIST} ) + # if it is actually a -Iincludedir + if ( "${inc_complete}" MATCHES "^-I.*" ) + # get the directory + string( REGEX REPLACE "^-I(.*)" "\\1" inc "${inc_complete}" ) + # and check whether it is a directory + if ( IS_DIRECTORY "${inc}" ) + include_directories( "${inc}" ) + endif () + endif () + endforeach () +endif () + +# Get, if NEST is build as a (mostly) static application. If yes, also only build +# static library. 
+execute_process( + COMMAND ${NEST_CONFIG} --static-libraries + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_STATIC_LIB + OUTPUT_STRIP_TRAILING_WHITESPACE +) +if ( NEST_STATIC_LIB ) + set( BUILD_SHARED_LIBS OFF ) +else () + set( BUILD_SHARED_LIBS ON ) +endif () + +# Get all linked libraries. +execute_process( + COMMAND ${NEST_CONFIG} --libs + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_LIBS + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Get the data install dir. +execute_process( + COMMAND ${NEST_CONFIG} --datadir + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_DATADIR + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Get the documentation install dir. +execute_process( + COMMAND ${NEST_CONFIG} --docdir + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_DOCDIR + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Get the library install dir. +execute_process( + COMMAND ${NEST_CONFIG} --libdir + RESULT_VARIABLE RES_VAR + OUTPUT_VARIABLE NEST_LIBDIR + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# on OS X +set( CMAKE_MACOSX_RPATH ON ) + +# Install all stuff to NEST's install directories. +set( CMAKE_INSTALL_LIBDIR ${NEST_LIBDIR}/nest CACHE STRING "object code libraries (lib/nest or lib64/nest or lib//nest on Debian)" FORCE ) +set( CMAKE_INSTALL_DOCDIR ${NEST_DOCDIR} CACHE STRING "documentation root (DATAROOTDIR/doc/nest)" FORCE ) +set( CMAKE_INSTALL_DATADIR ${NEST_DATADIR} CACHE STRING "read-only architecture-independent data (DATAROOTDIR/nest)" FORCE ) + +include( GNUInstallDirs ) + +# CPack stuff. Required for target `dist`. +set( CPACK_GENERATOR TGZ ) +set( CPACK_SOURCE_GENERATOR TGZ ) + +set( CPACK_PACKAGE_DESCRIPTION_SUMMARY "NEST Module ${MODULE_NAME}" ) +set( CPACK_PACKAGE_VENDOR "NEST Initiative (http://www.nest-initiative.org/)" ) + +set( CPACK_PACKAGE_VERSION_MAJOR ${MODULE_VERSION_MAJOR} ) +set( CPACK_PACKAGE_VERSION_MINOR ${MODULE_VERSION_MINOR} ) +set( CPACK_PACKAGE_VERSION ${MODULE_VERSION} ) + +set( CPACK_SOURCE_IGNORE_FILES + "\\\\.gitignore" + "\\\\.git/" + "\\\\.travis\\\\.yml" + + # if we have in source builds + "/build/" + "/_CPack_Packages/" + "CMakeFiles/" + "cmake_install\\\\.cmake" + "Makefile.*" + "CMakeCache\\\\.txt" + "CPackConfig\\\\.cmake" + "CPackSourceConfig\\\\.cmake" + ) +set( CPACK_SOURCE_PACKAGE_FILE_NAME ${MODULE_NAME} ) + +set( CPACK_PACKAGE_INSTALL_DIRECTORY "${MODULE_NAME} ${MODULE_VERSION}" ) +include( CPack ) + +# add make dist target +add_custom_target( dist + COMMAND ${CMAKE_MAKE_PROGRAM} package_source + # not sure about this... seems, that it will be removed before dist... + # DEPENDS doc + COMMENT "Creating a source distribution from ${MODULE_NAME}..." + ) + + +if ( BUILD_SHARED_LIBS ) + # When building shared libraries, also create a module for loading at runtime + # with the `Install` command. + add_library( ${MODULE_NAME}_module MODULE ${MODULE_SOURCES} ) + set_target_properties( ${MODULE_NAME}_module + PROPERTIES + COMPILE_FLAGS "${NEST_CXXFLAGS} -DLTX_MODULE" + LINK_FLAGS "${NEST_LIBS}" + PREFIX "" + OUTPUT_NAME ${MODULE_NAME} ) + install( TARGETS ${MODULE_NAME}_module + DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) +endif () + +# Build dynamic/static library for standard linking from NEST. +add_library( ${MODULE_NAME}_lib ${MODULE_SOURCES} ) +if ( BUILD_SHARED_LIBS ) + # Dynamic libraries are initiated by a `global` variable of the `SLIModule`, + # which is included, when the flag `LINKED_MODULE` is set. 
+ target_compile_definitions( ${MODULE_NAME}_lib PRIVATE -DLINKED_MODULE ) +endif () +set_target_properties( ${MODULE_NAME}_lib + PROPERTIES + COMPILE_FLAGS "${NEST_CXXFLAGS}" + LINK_FLAGS "${NEST_LIBS}" + OUTPUT_NAME ${MODULE_NAME} ) + +# Install library, header and sli init files. +install( TARGETS ${MODULE_NAME}_lib DESTINATION ${CMAKE_INSTALL_LIBDIR} ) +install( FILES ${MODULE_HEADER} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} ) +install( DIRECTORY sli DESTINATION ${CMAKE_INSTALL_DATADIR} ) + +# Install help. +set( HELPDIRS "${PROJECT_SOURCE_DIR}:${PROJECT_SOURCE_DIR}/sli" ) +install( CODE + "execute_process(COMMAND ${CMAKE_COMMAND} + -DDOC_DIR='${CMAKE_INSTALL_FULL_DOCDIR}' + -DDATA_DIR='${CMAKE_INSTALL_FULL_DATADIR}' + -DHELPDIRS='${HELPDIRS}' + -DINSTALL_DIR='${CMAKE_INSTALL_PREFIX}' + -P ${CMAKE_INSTALL_FULL_DOCDIR}/generate_help.cmake + WORKING_DIRECTORY \"${PROJECT_BINARY_DIR}\" + )" + ) + +message( "" ) +message( "-------------------------------------------------------" ) +message( "${MODULE_NAME} Configuration Summary" ) +message( "-------------------------------------------------------" ) +message( "" ) +message( "C++ compiler : ${CMAKE_CXX_COMPILER}" ) +message( "Build static libs : ${NEST_STATIC_LIB}" ) +message( "C++ compiler flags : ${CMAKE_CXX_FLAGS}" ) +message( "NEST compiler flags : ${NEST_CXXFLAGS}" ) +message( "NEST include dirs : ${NEST_INCLUDES}" ) +message( "NEST libraries flags : ${NEST_LIBS}" ) +message( "" ) +message( "-------------------------------------------------------" ) +message( "" ) +message( "You can build and install ${MODULE_NAME} now, using" ) +message( " make" ) +message( " make install" ) +message( "" ) +message( "${MODULE_NAME} will be installed to: ${CMAKE_INSTALL_FULL_LIBDIR}" ) +message( "" ) diff --git a/pyNN/nest/extensions/pynn_extensions.cpp b/pyNN/nest/extensions/pynn_extensions.cpp new file mode 100644 index 000000000..44a3c73d3 --- /dev/null +++ b/pyNN/nest/extensions/pynn_extensions.cpp @@ -0,0 +1,120 @@ +/* + +:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. +:license: CeCILL, see LICENSE for details. + +*/ + +#include "pynn_extensions.h" + +// Generated includes: +#include "config.h" + +// include headers with your own stuff +#include "simple_stochastic_connection.h" +#include "stochastic_stp_connection.h" +#include "stochastic_stp_connection_impl.h" + +// Includes from nestkernel: +#include "connection_manager_impl.h" +#include "connector_model_impl.h" +#include "dynamicloader.h" +#include "exceptions.h" +#include "genericmodel.h" +#include "kernel_manager.h" +#include "model.h" +#include "model_manager_impl.h" +#include "nestmodule.h" +#include "target_identifier.h" + +// Includes from sli: +#include "booldatum.h" +#include "integerdatum.h" +#include "sliexceptions.h" +#include "tokenarray.h" + +// -- Interface to dynamic module loader --------------------------------------- + +/* + * There are three scenarios, in which PyNNExtensions can be loaded by NEST: + * + * 1) When loading your module with `Install`, the dynamic module loader must + * be able to find your module. You make the module known to the loader by + * defining an instance of your module class in global scope. (LTX_MODULE is + * defined) This instance must have the name + * + * _LTX_mod + * + * The dynamicloader can then load modulename and search for symbol "mod" in it. + * + * 2) When you link the library dynamically with NEST during compilation, a new + * object has to be created. 
In the constructor the DynamicLoaderModule will + * register your module. (LINKED_MODULE is defined) + * + * 3) When you link the library statically with NEST during compilation, the + * registration will take place in the file `static_modules.h`, which is + * generated by cmake. + */ +#if defined( LTX_MODULE ) | defined( LINKED_MODULE ) +pynn::PyNNExtensions pynn_extensions_LTX_mod; +#endif +// -- DynModule functions ------------------------------------------------------ + +pynn::PyNNExtensions::PyNNExtensions() +{ +#ifdef LINKED_MODULE + // register this module at the dynamic loader + // this is needed to allow for linking in this module at compile time + // all registered modules will be initialized by the main app's dynamic loader + nest::DynamicLoaderModule::registerLinkedModule( this ); +#endif +} + +pynn::PyNNExtensions::~PyNNExtensions() +{ +} + +const std::string +pynn::PyNNExtensions::name( void ) const +{ + return std::string( "PyNN extensions for NEST" ); // Return name of the module +} + +const std::string +pynn::PyNNExtensions::commandstring( void ) const +{ + // Instruct the interpreter to load pynn_extensions-init.sli + return std::string( "(pynn_extensions-init) run" ); +} + +//------------------------------------------------------------------------------------- + +void +pynn::PyNNExtensions::init( SLIInterpreter* i ) +{ + /* Register a neuron or device model. + Give node type as template argument and the name as second argument. + */ + /* nest::kernel().model_manager.register_node_model< pif_psc_alpha >( + "pif_psc_alpha" ); + */ + + /* Register a synapse type. + Give synapse type as template argument and the name as second argument. + + There are two choices for the template argument: + - nest::TargetIdentifierPtrRport + - nest::TargetIdentifierIndex + The first is the standard and you should usually stick to it. + nest::TargetIdentifierIndex reduces the memory requirement of synapses + even further, but limits the number of available rports. Please see + Kunkel et al, Front Neurofinfom 8:78 (2014), Sec 3.3.2, for details. + */ + nest::kernel() + .model_manager.register_connection_model< SimpleStochasticConnection< nest:: + TargetIdentifierPtrRport > >( "simple_stochastic_synapse" ); + nest::kernel() + .model_manager.register_connection_model< StochasticStpConnection< nest:: + TargetIdentifierPtrRport > >( "stochastic_stp_synapse" ); + +} // PyNNExtensions::init() diff --git a/pyNN/nest/extensions/pynn_extensions.h b/pyNN/nest/extensions/pynn_extensions.h new file mode 100644 index 000000000..d95667188 --- /dev/null +++ b/pyNN/nest/extensions/pynn_extensions.h @@ -0,0 +1,59 @@ +/* + +:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. +:license: CeCILL, see LICENSE for details. + +*/ + +#ifndef PYNNEXTENSIONS_H +#define PYNNEXTENSIONS_H + +// Includes from sli: +#include "slifunction.h" +#include "slimodule.h" + +// Put your stuff into your own namespace. +namespace pynn +{ + +/** + * Class defining your model. + * @note For each model, you must define one such class, with a unique name. + */ +class PyNNExtensions : public SLIModule +{ +public: + // Interface functions ------------------------------------------ + + /** + * @note The constructor registers the module with the dynamic loader. + * Initialization proper is performed by the init() method. + */ + PyNNExtensions(); + + /** + * @note The destructor does not do much in modules. + */ + ~PyNNExtensions(); + + /** + * Initialize module. 
+ * @param SLIInterpreter* SLI interpreter + */ + void init( SLIInterpreter* ); + + /** + * Return the name of your model. + */ + const std::string name( void ) const; + + /** + * Return the name of a sli file to execute when the module is loaded. + * This mechanism can be used to define SLI commands associated with your + * module, in particular, set up type tries for functions you have defined. + */ + const std::string commandstring( void ) const; +}; +} // namespace pynn + +#endif diff --git a/pyNN/nest/extensions/simple_stochastic_connection.h b/pyNN/nest/extensions/simple_stochastic_connection.h new file mode 100644 index 000000000..9d416eeb5 --- /dev/null +++ b/pyNN/nest/extensions/simple_stochastic_connection.h @@ -0,0 +1,201 @@ +/* + * :copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. + * :license: CeCILL, see LICENSE for details. + * + */ + +#ifndef SIMPLE_STOCHASTIC_CONNECTION_H +#define SIMPLE_STOCHASTIC_CONNECTION_H + +// Includes from nestkernel: +#include "connection.h" + + +/* BeginDocumentation + Name: simple_stochastic_synapse - Synapse dropping spikes stochastically. + + Description: + This synapse will deliver spikes with probability p. + + Parameters: + p double - probability that a spike is transmitted, default = 1.0 (i.e. spike is always transmitted) + + Transmits: SpikeEvent + + SeeAlso: static_synapse, synapsedict +*/ + +namespace pynn +{ + +template < typename targetidentifierT > +class SimpleStochasticConnection : public nest::Connection< targetidentifierT > +{ +private: + double weight_; //!< Synaptic weight + double p_; //!< Probability of spike transmission + +public: + //! Type to use for representing common synapse properties + typedef nest::CommonSynapseProperties CommonPropertiesType; + + //! Shortcut for base class + typedef nest::Connection< targetidentifierT > ConnectionBase; + + /** + * Default Constructor. + * Sets default values for all parameters. Needed by GenericConnectorModel. + */ + SimpleStochasticConnection() + : ConnectionBase() + , weight_( 1.0 ) + , p_( 1.0 ) + { + } + + //! Default Destructor. + ~SimpleStochasticConnection() + { + } + + /** + * Helper class defining which types of events can be transmitted. + * + * These methods are only used to test whether a certain type of connection + * can be created. + * + * `handles_test_event()` should be added for all event types that the + * synapse can transmit. The methods shall return `invalid_port_`; the + * return value will be ignored. + * + * Since this is a synapse model dropping spikes, it is only for spikes, + * therefore we only implement `handles_test_event()` only for spike + * events. + * + * See Kunkel et al (2014), Sec 3.3.1, for background information. + */ + class ConnTestDummyNode : public nest::ConnTestDummyNodeBase + { + public: + using nest::ConnTestDummyNodeBase::handles_test_event; + nest::port + handles_test_event( nest::SpikeEvent&, nest::rport ) + { + return nest::invalid_port_; + } + + nest::port + handles_test_event( nest::DSSpikeEvent&, nest::rport ) + { + return nest::invalid_port_; + } + }; + + /** + * Check that requested connection can be created. + * + * This function is a boilerplate function that should be included unchanged + * in all synapse models. It is called before a connection is added to check + * that the connection is legal. It is a wrapper that allows us to call + * the "real" `check_connection_()` method with the `ConnTestDummyNode + * dummy_target;` class for this connection type. 
This avoids a virtual + * function call for better performance. + * + * @param s Source node for connection + * @param t Target node for connection + * @param receptor_type Receptor type for connection + * @param lastspike Time of most recent spike of presynaptic (sender) neuron, + * not used here + */ + void + check_connection( nest::Node& s, + nest::Node& t, + nest::rport receptor_type, + double, + const CommonPropertiesType& ) + { + ConnTestDummyNode dummy_target; + ConnectionBase::check_connection_( dummy_target, s, t, receptor_type ); + } + + /** + * Send an event to the receiver of this connection. + * @param e The event to send + * @param t Thread + * @param t_lastspike Point in time of last spike sent. + * @param cp Common properties to all synapses. + */ + void send( nest::Event& e, + nest::thread t, + double t_lastspike, + const CommonPropertiesType& cp ); + + // The following methods contain mostly fixed code to forward the + // corresponding tasks to corresponding methods in the base class and the w_ + // data member holding the weight. + + //! Store connection status information in dictionary + void get_status( DictionaryDatum& d ) const; + + /** + * Set connection status. + * + * @param d Dictionary with new parameter values + * @param cm ConnectorModel is passed along to validate new delay values + */ + void set_status( const DictionaryDatum& d, nest::ConnectorModel& cm ); + + //! Allows efficient initialization on construction + void + set_weight( double w ) + { + weight_ = w; + } +}; + + +template < typename targetidentifierT > +inline void +SimpleStochasticConnection< targetidentifierT >::send( nest::Event& e, + nest::thread t, + double t_lastspike, + const CommonPropertiesType& props ) +{ + const int vp = ConnectionBase::get_target( t )->get_vp(); + if ( nest::kernel().rng_manager.get_rng( vp )->drand() < (1 - p_) ) // drop spike + return; + + // Even time stamp, we send the spike using the normal sending mechanism + // send the spike to the target + e.set_weight( weight_ ); + e.set_delay( ConnectionBase::get_delay_steps() ); + e.set_receiver( *ConnectionBase::get_target( t ) ); + e.set_rport( ConnectionBase::get_rport() ); + e(); // this sends the event +} + +template < typename targetidentifierT > +void +SimpleStochasticConnection< targetidentifierT >::get_status( + DictionaryDatum& d ) const +{ + ConnectionBase::get_status( d ); + def< double >( d, nest::names::weight, weight_ ); + def< double >( d, nest::names::p, p_ ); + def< long >( d, nest::names::size_of, sizeof( *this ) ); +} + +template < typename targetidentifierT > +void +SimpleStochasticConnection< targetidentifierT >::set_status( + const DictionaryDatum& d, + nest::ConnectorModel& cm ) +{ + ConnectionBase::set_status( d, cm ); + updateValue< double >( d, nest::names::weight, weight_ ); + updateValue< double >( d, nest::names::p, p_ ); +} + +} // namespace + +#endif // simple_stochastic_connection.h diff --git a/pyNN/nest/extensions/sli/pynn_extensions-init.sli b/pyNN/nest/extensions/sli/pynn_extensions-init.sli new file mode 100644 index 000000000..07871fc63 --- /dev/null +++ b/pyNN/nest/extensions/sli/pynn_extensions-init.sli @@ -0,0 +1,13 @@ +/* + +:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. +:license: CeCILL, see LICENSE for details. + +*/ + +/* + * Initialization file for PyNNExtensions module. + * Run automatically when PyNNExtensions module is loaded. + */ + +M_DEBUG (pynn_extensions.sli) (Initializing SLI support for PyNNExtensions.) 
message diff --git a/pyNN/nest/extensions/stochastic_stp_connection.h b/pyNN/nest/extensions/stochastic_stp_connection.h new file mode 100644 index 000000000..05ac0aa56 --- /dev/null +++ b/pyNN/nest/extensions/stochastic_stp_connection.h @@ -0,0 +1,205 @@ +/* + * stochastic_stp_connection.h + * + * :copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. + * :license: CeCILL, see LICENSE for details. + * + */ + +#ifndef STOCHASTIC_STP_CONNECTION_H +#define STOCHASTIC_STP_CONNECTION_H + +// Includes from librandom: +#include "binomial_randomdev.h" + +// Includes from nestkernel: +#include "connection.h" + +/* BeginDocumentation + Name: stochastic_stp_synapse - Probabilistic synapse model with short term + plasticity. + + Description: + + This synapse model implements synaptic short-term depression and + short-term facilitation according to an algorithm developed by + the Blue Brain Project. + + Parameters: + The following parameters can be set in the status dictionary: + U double - Maximal fraction of available resources [0,1], + default=0.5 + u double - release probability, default=0.5 + p double - probability that a vesicle is available, default = 1.0 + R double - recovered state {0=unrecovered, 1=recovered}, default=1 + tau_rec double - time constant for depression in ms, default=800 ms + tau_fac double - time constant for facilitation in ms, default=0 (off) + t_surv double - time since last evaluation of survival in ms, default=0 + + Transmits: SpikeEvent + + FirstVersion: December 2016 + Author: Andrew Davison, based on quantal_stp_synapse + and the NMODL file ProbGABAAB_EMS.mod + from the Blue Brain Project + SeeAlso: tsodyks2_synapse, synapsedict, quantal_stp_synapse, static_synapse +*/ + +namespace pynn +{ + +template < typename targetidentifierT > +class StochasticStpConnection : public nest::Connection< targetidentifierT > +{ +public: + typedef nest::CommonSynapseProperties CommonPropertiesType; + typedef nest::Connection< targetidentifierT > ConnectionBase; + + /** + * Default Constructor. + * Sets default values for all parameters. Needed by GenericConnectorModel. + */ + StochasticStpConnection(); + /** + * Copy constructor to propagate common properties. + */ + StochasticStpConnection( const StochasticStpConnection& ); + + // Explicitly declare all methods inherited from the dependent base + // ConnectionBase. This avoids explicit name prefixes in all places these + // functions are used. Since ConnectionBase depends on the template parameter, + // they are not automatically found in the base class. + using ConnectionBase::get_delay_steps; + using ConnectionBase::get_delay; + using ConnectionBase::get_rport; + using ConnectionBase::get_target; + + /** + * Get all properties of this connection and put them into a dictionary. + */ + void get_status( DictionaryDatum& d ) const; + + /** + * Set default properties of this connection from the values given in + * dictionary. + */ + void set_status( const DictionaryDatum& d, nest::ConnectorModel& cm ); + + /** + * Send an event to the receiver of this connection. + * \param e The event to send + * \param t_lastspike Point in time of last spike sent. + * \param cp Common properties to all synapses (empty). + */ + void send( nest::Event& e, + nest::thread t, + double t_lastspike, + const CommonPropertiesType& cp ); + + class ConnTestDummyNode : public nest::ConnTestDummyNodeBase + { + public: + // Ensure proper overriding of overloaded virtual functions. + // Return values from functions are ignored. 
+ using nest::ConnTestDummyNodeBase::handles_test_event; + nest::port + handles_test_event( nest::SpikeEvent&, nest::rport ) + { + return nest::invalid_port_; + } + }; + + void + check_connection( nest::Node& s, + nest::Node& t, + nest::rport receptor_type, + double, + const CommonPropertiesType& ) + { + ConnTestDummyNode dummy_target; + ConnectionBase::check_connection_( dummy_target, s, t, receptor_type ); + } + + void + set_weight( double w ) + { + weight_ = w; + } + +private: + double weight_; //!< synaptic weight + double U_; //!< unit increment of a facilitating synapse (U) + double u_; //!< dynamic value of probability of release + double tau_rec_; //!< [ms] time constant for recovery from depression (D) + double tau_fac_; //!< [ms] time constant for facilitation (F) + double R_; //!< recovered state {0=unrecovered, 1=recovered} + double t_surv_; //!< time since last evaluation of survival +}; + + +/** + * Send an event to the receiver of this connection. + * \param e The event to send + * \param t The thread on which this connection is stored. + * \param t_lastspike Time point of last spike emitted + * \param cp Common properties object, containing the stochastic_stp parameters. + */ +template < typename targetidentifierT > +inline void +StochasticStpConnection< targetidentifierT >::send( nest::Event& e, + nest::thread thr, + double t_lastspike, + const CommonPropertiesType& ) +{ + const int vp = get_target( thr )->get_vp(); + + double t_spike = e.get_stamp().get_ms(); + + // calculation of u + if ( tau_fac_ > 1.0e-10 ) { + u_ *= std::exp( -(t_spike - t_lastspike) / tau_fac_ ); + u_ += U_ * ( 1 - u_ ); + } else { + u_ = U_; + } + + // check for recovery + + bool release = false; + double p_surv = 0.0; // survival probability of unrecovered state + + if ( R_ == 0 ) { + release = false; + // probability of survival of unrecovered state based on Poisson recovery with rate 1/tau_rec + p_surv = std::exp( -(t_spike - t_surv_) / tau_rec_ ); + if ( nest::kernel().rng_manager.get_rng( vp )->drand() > p_surv ) { + R_ = 1; // recovered + } else { + t_surv_ = t_spike; // failed to recover + } + } + + // check for release + if ( R_ == 1 ) { + if ( nest::kernel().rng_manager.get_rng( vp )->drand() < u_ ) { // release + release = true; + R_ = 0; + t_surv_ = t_spike; + } else { + release = false; + } + } + + if ( release ) + { + e.set_receiver( *get_target( thr ) ); + e.set_weight( weight_ ); + e.set_delay( get_delay_steps() ); + e.set_rport( get_rport() ); + e(); + } +} + +} // namespace + +#endif // STOCHASTIC_STP_CONNECTION_H diff --git a/pyNN/nest/extensions/stochastic_stp_connection_impl.h b/pyNN/nest/extensions/stochastic_stp_connection_impl.h new file mode 100644 index 000000000..1e1c41c30 --- /dev/null +++ b/pyNN/nest/extensions/stochastic_stp_connection_impl.h @@ -0,0 +1,84 @@ +/* + * stochastic_stp_connection_impl.h + * + * :copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS. + * :license: CeCILL, see LICENSE for details. 
+ * + */ + +#ifndef STOCHASTIC_STP_CONNECTION_IMPL_H +#define STOCHASTIC_STP_CONNECTION_IMPL_H + +#include "stochastic_stp_connection.h" + +// Includes from nestkernel: +#include "connection.h" +#include "connector_model.h" +#include "nest_names.h" + +// Includes from sli: +#include "dictutils.h" + +namespace pynn +{ + +template < typename targetidentifierT > +StochasticStpConnection< targetidentifierT >::StochasticStpConnection() + : ConnectionBase() + , weight_( 1.0 ) + , U_( 0.5 ) + , u_( 0.0 ) + , tau_rec_( 800.0 ) + , tau_fac_( 10.0 ) + , R_( 1.0 ) + , t_surv_( 0.0 ) +{ +} + +template < typename targetidentifierT > +StochasticStpConnection< targetidentifierT >::StochasticStpConnection( + const StochasticStpConnection& rhs ) + : ConnectionBase( rhs ) + , weight_( rhs.weight_ ) + , U_( rhs.U_ ) + , u_( rhs.u_ ) + , tau_rec_( rhs.tau_rec_ ) + , tau_fac_( rhs.tau_fac_ ) + , R_( rhs.R_ ) + , t_surv_( rhs.t_surv_ ) +{ +} + + +template < typename targetidentifierT > +void +StochasticStpConnection< targetidentifierT >::get_status( + DictionaryDatum& d ) const +{ + ConnectionBase::get_status( d ); + def< double >( d, nest::names::weight, weight_ ); + def< double >( d, nest::names::dU, U_ ); + def< double >( d, nest::names::u, u_ ); + def< double >( d, nest::names::tau_rec, tau_rec_ ); + def< double >( d, nest::names::tau_fac, tau_fac_ ); +} + + +template < typename targetidentifierT > +void +StochasticStpConnection< targetidentifierT >::set_status( + const DictionaryDatum& d, + nest::ConnectorModel& cm ) +{ + ConnectionBase::set_status( d, cm ); + updateValue< double >( d, nest::names::weight, weight_ ); + + updateValue< double >( d, nest::names::dU, U_ ); + updateValue< double >( d, nest::names::u, u_ ); + updateValue< double >( d, nest::names::tau_rec, tau_rec_ ); + updateValue< double >( d, nest::names::tau_fac, tau_fac_ ); +} + +} // of namespace pynn + +#endif // #ifndef STOCHASTIC_STP_CONNECTION_IMPL_H diff --git a/pyNN/nest/projections.py b/pyNN/nest/projections.py index 8ba114b98..ef3efd502 100644 --- a/pyNN/nest/projections.py +++ b/pyNN/nest/projections.py @@ -362,3 +362,7 @@ def _get_attributes_as_arrays(self, names, multiple_synapses='sum'): value_arr *= -1 # NEST uses negative values for inhibitory weights, even if these are conductances all_values.append(value_arr) return all_values + + def _set_initial_value_array(self, variable, value): + local_value = value.evaluate(simplify=True) + nest.SetStatus(self.nest_connections, variable, local_value) diff --git a/pyNN/nest/recording.py b/pyNN/nest/recording.py index 60c172204..66bef64af 100644 --- a/pyNN/nest/recording.py +++ b/pyNN/nest/recording.py @@ -88,7 +88,11 @@ def __init__(self, to_memory=True): def connect_to_cells(self): assert not self._connected - nest.Connect(list(self._all_ids), list(self.device), {'rule': 'all_to_all'}, {'model': 'static_synapse'}) + nest.Connect(list(self._all_ids), + list(self.device), + {'rule': 'all_to_all'}, + {'model': 'static_synapse', + 'delay': simulator.state.min_delay}) self._connected = True def get_spiketimes(self, desired_ids): @@ -122,7 +126,11 @@ def __init__(self, to_memory=True): def connect_to_cells(self): assert not self._connected - nest.Connect(list(self.device), list(self._all_ids), {'rule': 'all_to_all'}, {'model': 'static_synapse'}) + nest.Connect(list(self.device), + list(self._all_ids), + {'rule': 'all_to_all'}, + {'model': 'static_synapse', + 'delay': simulator.state.min_delay}) self._connected = True @property diff --git a/pyNN/nest/standardmodels/electrodes.py 
b/pyNN/nest/standardmodels/electrodes.py index 473bba15a..7c1fd063d 100644 --- a/pyNN/nest/standardmodels/electrodes.py +++ b/pyNN/nest/standardmodels/electrodes.py @@ -82,6 +82,24 @@ def get_native_parameters(self): return ParameterSpace(dict((k, v) for k, v in all_params.items() if k in self.get_native_names())) + def record(self): + self.i_multimeter = nest.Create('multimeter', params={'record_from': ['I'], 'interval' :state.dt}) + nest.Connect(self.i_multimeter, self._device) + + def get_data(self): + events = nest.GetStatus(self.i_multimeter)[0]['events'] + # Similar to recording.py: NEST does not record values at + # the zeroth time step, so we add them here. + t_arr = numpy.insert(numpy.array(events['times']), 0, 0.0) + i_arr = numpy.insert(numpy.array(events['I']/1000.0), 0, 0.0) + # NEST and pyNN have different concepts of current initiation times + # To keep this consistent across simulators, we will have current + # initiating at the electrode at t_start and effect on cell at next dt + # This requires padding min_delay equivalent period with 0's + pad_length = int(state.min_delay/state.dt) + i_arr = numpy.insert(i_arr[:-pad_length], 0, [0]*pad_length) + return t_arr, i_arr + class DCSource(NestCurrentSource, electrodes.DCSource): __doc__ = electrodes.DCSource.__doc__ diff --git a/pyNN/nest/standardmodels/synapses.py b/pyNN/nest/standardmodels/synapses.py index fa7ffe497..d3e50e2c5 100644 --- a/pyNN/nest/standardmodels/synapses.py +++ b/pyNN/nest/standardmodels/synapses.py @@ -89,6 +89,41 @@ class TsodyksMarkramSynapse(synapses.TsodyksMarkramSynapse, NESTSynapseMixin): nest_name = 'tsodyks_synapse' +class SimpleStochasticSynapse(synapses.SimpleStochasticSynapse, NESTSynapseMixin): + + translations = build_translations( + ('weight', 'weight', 1000.0), + ('delay', 'delay'), + ('p', 'p'), + ) + nest_name = 'simple_stochastic_synapse' + + +class StochasticTsodyksMarkramSynapse(synapses.StochasticTsodyksMarkramSynapse, NESTSynapseMixin): + + translations = build_translations( + ('weight', 'weight', 1000.0), + ('delay', 'delay'), + ('U', 'U'), + ('tau_rec', 'tau_rec'), + ('tau_facil', 'tau_fac') + ) + nest_name = 'stochastic_stp_synapse' + + +class MultiQuantalSynapse(synapses.MultiQuantalSynapse, NESTSynapseMixin): + + translations = build_translations( + ('weight', 'weight', 1000.0), + ('delay', 'delay'), + ('U', 'U'), + ('n', 'n'), + ('tau_rec', 'tau_rec'), + ('tau_facil', 'tau_fac') + ) + nest_name = 'quantal_stp_synapse' + + class AdditiveWeightDependence(synapses.AdditiveWeightDependence): __doc__ = synapses.AdditiveWeightDependence.__doc__ diff --git a/pyNN/nest/synapses.py b/pyNN/nest/synapses.py index 97786d77b..3909fc78b 100644 --- a/pyNN/nest/synapses.py +++ b/pyNN/nest/synapses.py @@ -18,7 +18,9 @@ def get_synapse_defaults(model_name): defaults = nest.GetDefaults(model_name) ignore = ['max_delay', 'min_delay', 'num_connections', 'num_connectors', 'receptor_type', 'synapsemodel', - 'property_object', 'element_type', 'type', 'sizeof'] + 'property_object', 'element_type', 'type', 'sizeof', + 'has_delay', 'synapse_model', 'requires_symmetric', + 'weight_recorder'] default_params = {} for name, value in defaults.items(): if name not in ignore: diff --git a/pyNN/neuron/__init__.py b/pyNN/neuron/__init__.py index 3bbf33b16..a170886a3 100644 --- a/pyNN/neuron/__init__.py +++ b/pyNN/neuron/__init__.py @@ -51,8 +51,7 @@ def list_standard_models(): # ============================================================================== -def setup(timestep=DEFAULT_TIMESTEP, 
min_delay=DEFAULT_MIN_DELAY, - max_delay=DEFAULT_MAX_DELAY, **extra_params): +def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, **extra_params): """ Should be called at the very beginning of a script. @@ -72,12 +71,12 @@ def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, returns: MPI rank """ - common.setup(timestep, min_delay, max_delay, **extra_params) + common.setup(timestep, min_delay, **extra_params) simulator.initializer.clear() simulator.state.clear() simulator.state.dt = timestep simulator.state.min_delay = min_delay - simulator.state.max_delay = max_delay + simulator.state.max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) if 'use_cvode' in extra_params: simulator.state.cvode.active(int(extra_params['use_cvode'])) if 'rtol' in extra_params: diff --git a/pyNN/neuron/nmodl/quantal_stp.mod b/pyNN/neuron/nmodl/quantal_stp.mod new file mode 100644 index 000000000..8ff8bb4d6 --- /dev/null +++ b/pyNN/neuron/nmodl/quantal_stp.mod @@ -0,0 +1,118 @@ +COMMENT +Implementation of the NEST quantal_stp_connection model for NEURON + +Original NEST version by Marc-Oliver Gewaltig +Adapted to NMODL by Andrew Davison, UNIC, CNRS, 2016. +ENDCOMMENT + +NEURON { + POINT_PROCESS QuantalSTPWA + RANGE tau_rec, tau_fac, U, u0, n + POINTER wsyn, rng +} + +PARAMETER { + tau_rec = 800 (ms) : time constant for depression + tau_fac = 0 (ms) : time constant for facilitation + U = 0.5 (1) <0, 1> : maximal fraction of available resource + u0 = 0.5 (1) <0, 1> : initial available fraction of resources + n = 1 : total number of release sites +} + +ASSIGNED { + u (1) : available fraction of resources + wsyn : transmitted synaptic weight + rng +} + +INITIAL { + u = u0 +} + +NET_RECEIVE(w, available, t_last (ms)) { + : available - number of available release sites + : t_last - time of the last spike + + LOCAL depleted, rv, p_decay, u_decay, n_release, i + + INITIAL{ + available = n + t_last = -1e99 + } + + : Compute the decay factors, based on the time since the last spike. + p_decay = exp(-(t - t_last)/tau_rec) + if (tau_fac < 1e-10) { + u_decay = 0.0 + } else { + u_decay = exp( -(t - t_last)/tau_fac) + } + + : Compute release probability + u = U + u*(1 - U)*u_decay + + : Compute number of sites that recovered during the interval. + depleted = n - available + while (depleted > 0) { + rv = urand() + if (rv < (1 - p_decay)) { + available = available + 1 + } + depleted = depleted - 1 + } + + : Compute number of released sites + n_release = 0 + i = available + while (i > 0) { + rv = urand() + if (rv < u) { + n_release = n_release + 1 + } + i = i - 1 + } + + if (n_release > 0) { + wsyn = n_release/n * w + available = available - n_release + } else { + wsyn = 0 + } + t_last = t +} + +PROCEDURE setRNG() { + : This function takes a NEURON Random object declared in hoc and makes it usable by this mod file + : The Random must be in uniform(1) mode +VERBATIM + { + void** pv = (void**)(&_p_rng); + if( ifarg(1)) { + *pv = nrn_random_arg(1); + } else { + *pv = (void*)0; + } + } +ENDVERBATIM +} + +FUNCTION urand() { +VERBATIM + double value; + if (_p_rng) { + /* + :Supports separate independent but reproducible streams for + : each instance. 
However, the corresponding hoc Random + : distribution MUST be set to Random.negexp(1) + */ + value = nrn_random_pick(_p_rng); + //printf("random stream for this simulation = %lf\n",value); + return value; + } else { +ENDVERBATIM + value = scop_random(1) +VERBATIM + } +ENDVERBATIM + urand = value +} diff --git a/pyNN/neuron/nmodl/stochastic_synapse.mod b/pyNN/neuron/nmodl/stochastic_synapse.mod new file mode 100644 index 000000000..87da7bada --- /dev/null +++ b/pyNN/neuron/nmodl/stochastic_synapse.mod @@ -0,0 +1,76 @@ +COMMENT +Implementation of a simple stochastic synapse (constant release probability) +as a "weight adjuster" (i.e. it sets the weight of the synapse to zero if +transmission fails). + +Andrew Davison, UNIC, CNRS, 2016 +ENDCOMMENT + +NEURON { + POINT_PROCESS SimpleStochasticWA + RANGE p + POINTER rng, wsyn +} + +PARAMETER { + p = 0.5 : probability that transmission succeeds +} + +VERBATIM +#include +#include +#include + +double nrn_random_pick(void* r); +void* nrn_random_arg(int argpos); + +ENDVERBATIM + +ASSIGNED { + wsyn + rng +} + +NET_RECEIVE(w) { + if (urand() < p) { + wsyn = w + } else { + wsyn = 0.0 + } +} + +PROCEDURE setRNG() { + : This function takes a NEURON Random object declared in hoc and makes it usable by this mod file + : The Random must be in uniform(1) mode +VERBATIM + { + void** pv = (void**)(&_p_rng); + if( ifarg(1)) { + *pv = nrn_random_arg(1); + } else { + *pv = (void*)0; + } + } +ENDVERBATIM +} + +FUNCTION urand() { +VERBATIM + double value; + if (_p_rng) { + /* + :Supports separate independent but reproducible streams for + : each instance. However, the corresponding hoc Random + : distribution MUST be set to Random.negexp(1) + */ + value = nrn_random_pick(_p_rng); + //printf("random stream for this simulation = %lf\n",value); + return value; + } else { +ENDVERBATIM + value = scop_random(1) +VERBATIM + } +ENDVERBATIM + urand = value +} diff --git a/pyNN/neuron/nmodl/stochastic_tsodyksmarkram.mod b/pyNN/neuron/nmodl/stochastic_tsodyksmarkram.mod new file mode 100644 index 000000000..32dec5063 --- /dev/null +++ b/pyNN/neuron/nmodl/stochastic_tsodyksmarkram.mod @@ -0,0 +1,121 @@ +COMMENT +Implementation of the stochastic Tsodyks-Markram mechanism for synaptic depression and +facilitation as a "weight adjuster" + +cf Fuhrmann et al. 2002 +The algorithm is as in ProbGABAAB_EMS.mod from the Blue Brain Project. + +Andrew Davison, UNIC, CNRS, 2016. 
+ENDCOMMENT + +NEURON { + POINT_PROCESS StochasticTsodyksMarkramWA + RANGE tau_rec, tau_facil, U, u0 + POINTER wsyn, rng +} + +PARAMETER { + tau_rec = 100 (ms) <1e-9, 1e9> + tau_facil = 1000 (ms) <0, 1e9> + U = 0.04 (1) <0, 1> + u0 = 0 (1) <0, 1> +} + +ASSIGNED { + u (1) : release probability + t_last (ms) : time of the last spike + wsyn : transmitted synaptic weight + R (1) : recovered state {0=unrecovered, 1=recovered} + rng +} + +INITIAL { + u = u0 + t_last = -1e99 + R = 1 +} + +NET_RECEIVE(w, p_surv, t_surv) { + : p_surv - survival probability of unrecovered state + : t_surv - time since last evaluation of survival + LOCAL result + INITIAL{ + t_last = t + } + + if (w > 0) { + :printf("START tau_facil=%-4g tau_rec=%-4g U=%-4.2g time=%g p_surv=%-5.3g t_surv=%4.1f t_last=%4.1f u=%-5.3g R=%g wsyn=%g\n", tau_facil, tau_rec, U, t, p_surv, t_surv, t_last, u, R, wsyn) + : calculation of u + if (tau_facil > 0) { + u = u*exp(-(t - t_last)/tau_facil) + u = u + U*(1-u) + } else { + u = U + } + t_last = t + + : check for recovery + if (R == 0) { + wsyn = 0 + : probability of survival of unrecovered state based on Poisson recovery with rate 1/tau_rec + p_surv = exp(-(t - t_surv)/tau_rec) + result = urand() + if (result > p_surv) { + R = 1 : recovered + :printf("recovered\n") + } else { + t_surv = t : failed to recover + } + } + + : check for release + if (R == 1) { + result = urand() + if (result < u) { : release + wsyn = w + R = 0 + t_surv = t + :printf("release\n") + } else { + wsyn = 0 + } + } + :printf("END tau_facil=%-4g tau_rec=%-4g U=%-4.2g time=%g p_surv=%-5.3g t_surv=%4.1f t_last=%4.1f u=%-5.3g R=%g wsyn=%g\n\n", tau_facil, tau_rec, U, t, p_surv, t_surv, t_last, u, R, wsyn) + } +} + +PROCEDURE setRNG() { + : This function takes a NEURON Random object declared in hoc and makes it usable by this mod file + : The Random must be in uniform(1) mode +VERBATIM + { + void** pv = (void**)(&_p_rng); + if( ifarg(1)) { + *pv = nrn_random_arg(1); + } else { + *pv = (void*)0; + } + } +ENDVERBATIM +} + +FUNCTION urand() { +VERBATIM + double value; + if (_p_rng) { + /* + :Supports separate independent but reproducible streams for + : each instance. 
However, the corresponding hoc Random + : distribution MUST be set to Random.negexp(1) + */ + value = nrn_random_pick(_p_rng); + //printf("random stream for this simulation = %lf\n",value); + return value; + } else { +ENDVERBATIM + value = scop_random(1) +VERBATIM + } +ENDVERBATIM + urand = value +} diff --git a/pyNN/neuron/projections.py b/pyNN/neuron/projections.py index d4e58d32c..7200cf235 100644 --- a/pyNN/neuron/projections.py +++ b/pyNN/neuron/projections.py @@ -137,3 +137,6 @@ def _set_attributes(self, parameter_space): for index in connection_group: for connection in connection_group[index]: setattr(connection, name, value[index]) + + def _set_initial_value_array(self, variable, value): + raise NotImplemented diff --git a/pyNN/neuron/recording.py b/pyNN/neuron/recording.py index 73a5ba064..ac4fc9da9 100644 --- a/pyNN/neuron/recording.py +++ b/pyNN/neuron/recording.py @@ -101,7 +101,7 @@ def _get_all_signals(self, variable, ids, clear=False): # assuming not using cvode, otherwise need to get times as well and use IrregularlySampledAnalogSignal if len(ids) > 0: signals = numpy.vstack((id._cell.traces[variable] for id in ids)).T - expected_length = int(simulator.state.tstop / self.sampling_interval) + 1 + expected_length = numpy.rint(simulator.state.tstop / self.sampling_interval) + 1 if signals.shape[0] != expected_length: # generally due to floating point/rounding issues signals = numpy.vstack((signals, signals[-1, :])) else: diff --git a/pyNN/neuron/simulator.py b/pyNN/neuron/simulator.py index e20d69a5d..29f86b862 100644 --- a/pyNN/neuron/simulator.py +++ b/pyNN/neuron/simulator.py @@ -247,17 +247,17 @@ def _pre_run(self): assert local_minimum_delay >= self.min_delay, \ "There are connections with delays (%g) shorter than the minimum delay (%g)" % (local_minimum_delay, self.min_delay) - def _update_current_sources(self): + def _update_current_sources(self, tstop): for source in self.current_sources: for iclamp in source._devices: - source._update_iclamp(iclamp) + source._update_iclamp(iclamp, tstop) def run(self, simtime): """Advance the simulation for a certain time.""" self.run_until(self.tstop + simtime) def run_until(self, tstop): - self._update_current_sources() + self._update_current_sources(tstop) self._pre_run() self.tstop = tstop #logger.info("Running the simulation until %g ms" % tstop) @@ -415,6 +415,12 @@ def _setup_plasticity(self, synapse_type, parameters): setattr(self.weight_adjuster, name, value) if mechanism == 'TsodyksMarkramWA': # or could assume that any weight_adjuster parameter called "tau_syn" should be set like this self.weight_adjuster.tau_syn = self.nc.syn().tau + elif 'Stochastic' in mechanism: + pass + # todo: (optionally?) set per-stream RNG, i.e. 
+ #self.rng = h.Random(seed) + #self.rng.uniform() + #self.weight_adjuster.setRNG(self.rng) # setpointer i = len(h.plastic_connections) h.plastic_connections.append(self) diff --git a/pyNN/neuron/standardmodels/electrodes.py b/pyNN/neuron/standardmodels/electrodes.py index aaf872adc..80bd4ea30 100644 --- a/pyNN/neuron/standardmodels/electrodes.py +++ b/pyNN/neuron/standardmodels/electrodes.py @@ -60,11 +60,11 @@ def _reset(self): self._times = None self._generate() for iclamp in self._h_iclamps.values(): - self._update_iclamp(iclamp) + self._update_iclamp(iclamp, 0.0) # send tstop = 0.0 on _reset() - def _update_iclamp(self, iclamp): + def _update_iclamp(self, iclamp, tstop): if not self._is_playable: - iclamp.delay = max(0, self.start - simulator.state.t) + iclamp.delay = self.start iclamp.dur = self.stop - self.start iclamp.amp = self.amplitude @@ -72,6 +72,20 @@ def _update_iclamp(self, iclamp): iclamp.delay = 0.0 iclamp.dur = 1e12 iclamp.amp = 0.0 + + # check exists only for StepCurrentSource (_is_playable = True, _is_computed = False) + # t_stop should be part of the time sequence to handle repeated runs + if not self._is_computed and tstop not in self._h_times.to_python(): + ind = self._h_times.indwhere(">=", tstop) + if ind == -1: # tstop beyond last specified time instant + ind = self._h_times.size() + if ind == 0.0: # tstop before first specified time instant + amp_val = 0.0 + else: + amp_val = self._h_amplitudes.x[int(ind)-1] + self._h_times.insrt(ind, tstop) + self._h_amplitudes.insrt(ind, amp_val) + self._h_amplitudes.play(iclamp._ref_amp, self._h_times) def set_native_parameters(self, parameters): @@ -96,14 +110,23 @@ def inject_into(self, cells): self._h_iclamps[id] = h.IClamp(0.5, sec=id._cell.source_section) self._devices.append(self._h_iclamps[id]) - def _record(self): + def record(self): self.itrace = h.Vector() self.itrace.record(self._devices[0]._ref_i) self.record_times = h.Vector() self.record_times.record(h._ref_t) - def _get_data(self): - return numpy.array((self.record_times, self.itrace)) + def get_data(self): + # NEURON and pyNN have different concepts of current initiation times + # To keep this consistent across simulators, pyNN will have current + # initiating at the electrode at t_start and effect on cell at next dt. + # This requires removing the first element from the current Vector + # as NEURON computes the currents one time step later. The vector length + # is compensated by repeating the last recorded value of current. + t_arr = numpy.array(self.record_times) + i_arr = numpy.array(self.itrace)[1:] + i_arr = numpy.append(i_arr, i_arr[-1]) + return (t_arr, i_arr) class DCSource(NeuronCurrentSource, electrodes.DCSource): @@ -187,7 +210,7 @@ def __init__(self, **parameters): def _generate(self): ## Not efficient at all... Is there a way to have those vectors computed on the fly ? 
## Otherwise should have a buffer mechanism - self.times = numpy.arange(self.start, self.stop + simulator.state.dt, simulator.state.dt) - tmp = numpy.arange(0, self.stop - self.start, simulator.state.dt) - self.amplitudes = self.mean + (self.stdev * self.dt) * numpy.random.randn(len(tmp)) + self.times = numpy.arange(self.start, self.stop, max(self.dt, simulator.state.dt)) + self.times = numpy.append(self.times, self.stop) + self.amplitudes = self.mean + self.stdev * numpy.random.randn(len(self.times)) self.amplitudes[-1] = 0.0 diff --git a/pyNN/neuron/standardmodels/synapses.py b/pyNN/neuron/standardmodels/synapses.py index d07a2781f..4d8fd4860 100644 --- a/pyNN/neuron/standardmodels/synapses.py +++ b/pyNN/neuron/standardmodels/synapses.py @@ -91,8 +91,54 @@ class TsodyksMarkramSynapse(BaseSynapse, synapses.TsodyksMarkramSynapse): def _get_minimum_delay(self): return state.min_delay - - + + +class SimpleStochasticSynapse(BaseSynapse, synapses.SimpleStochasticSynapse): + translations = build_translations( + ('weight', 'weight'), + ('delay', 'delay'), + ('p', 'p'), + ) + model = 'SimpleStochasticWA' + postsynaptic_variable = None + + def _get_minimum_delay(self): + return state.min_delay + + +class StochasticTsodyksMarkramSynapse(BaseSynapse, synapses.StochasticTsodyksMarkramSynapse): + + translations = build_translations( + ('weight', 'weight'), + ('delay', 'delay'), + ('U', 'U'), + ('tau_rec', 'tau_rec'), + ('tau_facil', 'tau_facil'), + ) + model = 'StochasticTsodyksMarkramWA' + postsynaptic_variable = None + + def _get_minimum_delay(self): + return state.min_delay + + +class MultiQuantalSynapse(BaseSynapse, synapses.MultiQuantalSynapse): + + translations = build_translations( + ('weight', 'weight'), + ('delay', 'delay'), + ('U', 'U'), + ('n', 'n'), + ('tau_rec', 'tau_rec'), + ('tau_facil', 'tau_fac') + ) + model = 'QuantalSTPWA' + postsynaptic_variable = None + + def _get_minimum_delay(self): + return state.min_delay + + class AdditiveWeightDependence(BaseSynapse, synapses.AdditiveWeightDependence): __doc__ = synapses.AdditiveWeightDependence.__doc__ diff --git a/pyNN/nineml/__init__.py b/pyNN/nineml/__init__.py index e6bbf8aee..9c8a6056c 100644 --- a/pyNN/nineml/__init__.py +++ b/pyNN/nineml/__init__.py @@ -24,12 +24,12 @@ def list_standard_models(): return [obj.__name__ for obj in globals().values() if isinstance(obj, type) and issubclass(obj, std.StandardCellType)] -def setup(timestep=0.1, min_delay=0.1, max_delay=10.0, **extra_params): - common.setup(timestep, min_delay, max_delay, **extra_params) +def setup(timestep=0.1, min_delay=0.1, **extra_params): + common.setup(timestep, min_delay, **extra_params) simulator.state.clear() simulator.state.dt = timestep # move to common.setup? 
simulator.state.min_delay = min_delay - simulator.state.max_delay = max_delay + simulator.state.max_delay = extra_params.get('max_delay', 10.0) simulator.state.mpi_rank = extra_params.get('rank', 0) simulator.state.num_processes = extra_params.get('num_processes', 1) simulator.state.output_filename = extra_params.get("filename", "PyNN29ML.xml") diff --git a/pyNN/recording/__init__.py b/pyNN/recording/__init__.py index 6cad253c6..c52f65ba1 100644 --- a/pyNN/recording/__init__.py +++ b/pyNN/recording/__init__.py @@ -270,22 +270,22 @@ def _get_current_segment(self, filter_ids=None, variables='all', clear=False): current_time = self._simulator.state.t * pq.ms mpi_node = self._simulator.state.mpi_rank # for debugging if signal_array.size > 0: # may be empty if none of the recorded cells are on this MPI node - channel_indices = numpy.array([self.population.id_to_index(id) for id in ids]) units = self.population.find_units(variable) source_ids = numpy.fromiter(ids, dtype=int) - segment.analogsignalarrays.append( - neo.AnalogSignalArray( - signal_array, - units=units, - t_start=t_start, - sampling_period=sampling_period, - name=variable, - source_population=self.population.label, - channel_index=channel_indices, - source_ids=source_ids) - ) - logger.debug("%d **** ids=%s, channels=%s", mpi_node, source_ids, channel_indices) - assert segment.analogsignalarrays[0].t_stop - current_time - 2 * sampling_period < 1e-10 + signal = neo.AnalogSignal( + signal_array, + units=units, + t_start=t_start, + sampling_period=sampling_period, + name=variable, + source_population=self.population.label, + source_ids=source_ids) + signal.channel_index = neo.ChannelIndex( + index=numpy.arange(source_ids.size), + channel_ids=numpy.array([self.population.id_to_index(id) for id in ids])) + segment.analogsignals.append(signal) + logger.debug("%d **** ids=%s, channels=%s", mpi_node, source_ids, signal.channel_index) + assert segment.analogsignals[0].t_stop - current_time - 2 * sampling_period < 1e-10 # need to add `Unit` and `RecordingChannelGroup` objects return segment @@ -298,6 +298,10 @@ def get(self, variables, gather=False, filter_ids=None, clear=False, for segment in self.cache] if self._simulator.state.running: # reset() has not been called, so current segment is not in cache data.segments.append(self._get_current_segment(filter_ids=filter_ids, variables=variables, clear=clear)) + # collect channel indexes + for segment in data.segments: + for signal in segment.analogsignals: + data.channel_indexes.append(signal.channel_index) data.name = self.population.label data.description = self.population.describe() data.rec_datetime = data.segments[0].rec_datetime diff --git a/pyNN/recording/files.py b/pyNN/recording/files.py index db97399bd..56bd6ab16 100644 --- a/pyNN/recording/files.py +++ b/pyNN/recording/files.py @@ -255,14 +255,22 @@ def __init__(self, filename, mode='r', title="PyNN data file"): """ self.name = filename self.mode = mode - self.fileobj = tables.openFile(filename, mode=mode, title=title) + try: + self.fileobj = tables.open_file(filename, mode=mode, title=title) + self._new_pytables = True + except AttributeError: + self.fileobj = tables.openFile(filename, mode=mode, title=title) + self._new_pytables = False # may not work with old versions of PyTables < 1.3, since they only support numarray, not numpy def write(self, data, metadata): __doc__ = BaseFile.write.__doc__ if len(data) > 0: try: - node = self.fileobj.createArray(self.fileobj.root, "data", data) + if self._new_pytables: + node = 
self.fileobj.create_array(self.fileobj.root, "data", data) + else: + node = self.fileobj.createArray(self.fileobj.root, "data", data) except tables.HDF5ExtError as e: raise tables.HDF5ExtError("%s. data.shape=%s, metadata=%s" % (e, data.shape, metadata)) for name, value in metadata.items(): diff --git a/pyNN/space.py b/pyNN/space.py index 9bf1b25a8..571943007 100644 --- a/pyNN/space.py +++ b/pyNN/space.py @@ -244,7 +244,8 @@ def calculate_size(self, n): nx = math.sqrt(n * self.aspect_ratio) if n % nx != 0: raise Exception("Invalid size: n=%g, nx=%d" % (n, nx)) - ny = n / nx + nx = int(round(nx)) + ny = n // nx return nx, ny def generate_positions(self, n): diff --git a/pyNN/standardmodels/cells.py b/pyNN/standardmodels/cells.py index b4f7de2b5..a8c7d1104 100644 --- a/pyNN/standardmodels/cells.py +++ b/pyNN/standardmodels/cells.py @@ -60,6 +60,15 @@ class IF_curr_alpha(StandardCellType): 'v': 'mV', 'isyn_exc': 'nA', 'isyn_inh': 'nA', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'i_offset': 'nA', + 'v_reset': 'mV', + 'v_thresh': 'mV', } @@ -92,6 +101,15 @@ class IF_curr_exp(StandardCellType): 'v': 'mV', 'isyn_exc': 'nA', 'isyn_inh': 'nA', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'i_offset': 'nA', + 'v_reset': 'mV', + 'v_thresh': 'mV', } @@ -124,6 +142,17 @@ class IF_cond_alpha(StandardCellType): 'v': 'mV', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'e_rev_E': 'mV', + 'e_rev_I': 'mV', + 'v_thresh': 'mV', + 'v_reset': 'mV', + 'i_offset': 'nA', } @@ -156,6 +185,17 @@ class IF_cond_exp(StandardCellType): 'v': 'mV', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'e_rev_E': 'mV', + 'e_rev_I': 'mV', + 'v_thresh': 'mV', + 'v_reset': 'mV', + 'i_offset': 'nA', } @@ -205,6 +245,23 @@ class IF_cond_exp_gsfa_grr(StandardCellType): 'g_s': 'nS', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'e_rev_E': 'mV', + 'e_rev_I': 'mV', + 'v_thresh': 'mV', + 'v_reset': 'mV', + 'i_offset': 'nA', + 'tau_sfa': 'ms', + 'e_rev_sfa': 'mV', + 'q_sfa': 'nS', + 'tau_rr': 'ms', + 'e_rev_rr': 'mV', + 'q_rr': 'nS', } @@ -238,6 +295,13 @@ class IF_facets_hardware1(StandardCellType): 'v': 'mV', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'g_leak': 'nS', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'v_reset': 'mV', + 'e_rev_I': 'mV', + 'v_rest': 'mV', + 'v_thresh': 'mV', } @@ -273,6 +337,19 @@ class HH_cond_exp(StandardCellType): 'v': 'mV', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'gbar_Na': 'uS', + 'gbar_K': 'uS', + 'g_leak': 'uS', + 'cm': 'nF', + 'v_offset': 'mV', + 'e_rev_Na': 'mV', + 'e_rev_K': 'mV', + 'e_rev_leak': 'mV', + 'e_rev_E': 'mV', + 'e_rev_I': 'mV', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'i_offset': 'nA', } @@ -317,6 +394,22 @@ class EIF_cond_alpha_isfa_ista(StandardCellType): 'w': 'nA', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'cm': 'nF', + 'tau_refrac': 'ms', + 'v_spike': 'mV', + 'v_reset': 'mV', + 'v_rest': 'mV', + 'tau_m': 'ms', + 'i_offset': 'nA', + 'a': 'nS', + 'b': 'nA', + 'delta_T': 'mV', + 'tau_w': 'ms', + 'v_thresh': 'mV', + 'e_rev_E': 'mV', + 'tau_syn_E': 'ms', + 'e_rev_I': 'mV', + 'tau_syn_I': 'ms', } @@ -361,6 +454,22 @@ class 
EIF_cond_exp_isfa_ista(StandardCellType): 'w': 'nA', 'gsyn_exc': 'uS', 'gsyn_inh': 'uS', + 'cm': 'nF', + 'tau_refrac': 'ms', + 'v_spike': 'mV', + 'v_reset': 'mV', + 'v_rest': 'mV', + 'tau_m': 'ms', + 'i_offset': 'nA', + 'a': 'nS', + 'b': 'nA', + 'delta_T': 'mV', + 'tau_w': 'ms', + 'v_thresh': 'mV', + 'e_rev_E': 'mV', + 'tau_syn_E': 'ms', + 'e_rev_I': 'mV', + 'tau_syn_I': 'ms', } @@ -374,7 +483,7 @@ class Izhikevich(StandardCellType): du/dt = a*(b*v - u) Synapses are modeled as Dirac delta currents (voltage step), as in the original model - + NOTE: name should probably be changed to match standard nomenclature, e.g. QIF_cond_delta_etc_etc, although keeping "Izhikevich" as an alias would be good @@ -397,6 +506,11 @@ class Izhikevich(StandardCellType): units = { 'v': 'mV', 'u': 'mV/ms', + 'a': '/ms', + 'b': '/ms', + 'c': 'mV', + 'd': 'mV/ms', + 'i_offset': 'nA', } @@ -457,6 +571,31 @@ class GIF_cond_exp(StandardCellType): 'gsyn_inh': 'uS', 'i_eta': 'nA', 'v_t': 'mV', + 'v_rest': 'mV', + 'cm': 'nF', + 'tau_m': 'ms', + 'tau_refrac': 'ms', + 'tau_syn_E': 'ms', + 'tau_syn_I': 'ms', + 'e_rev_E': 'mV', + 'e_rev_I': 'mV', + 'v_reset': 'mV', + 'i_offset': 'nA', + 'delta_v': 'mV', + 'v_t_star': 'mV', + 'lambda0': 'Hz', + 'tau_eta1': 'ms', + 'tau_eta2': 'ms', + 'tau_eta3': 'ms', + 'tau_gamma1': 'ms', + 'tau_gamma2': 'ms', + 'tau_gamma3': 'ms', + 'a_eta1': 'nA', + 'a_eta2': 'nA', + 'a_eta3': 'nA', + 'a_gamma1': 'mV', + 'a_gamma2': 'mV', + 'a_gamma3': 'mV', } @@ -471,6 +610,11 @@ class SpikeSourcePoisson(StandardCellType): recordable = ['spikes'] injectable = False receptor_types = () + units = { + 'rate': 'Hz', + 'start': 'ms', + 'duration': 'ms', + } class SpikeSourcePoissonRefractory(StandardCellType): @@ -485,6 +629,12 @@ class SpikeSourcePoissonRefractory(StandardCellType): recordable = ['spikes'] injectable = False receptor_types = () + units = { + 'rate': 'Hz', + 'tau_refrac': 'ms', + 'start': 'ms', + 'duration': 'ms', + } class SpikeSourceGamma(StandardCellType): @@ -502,6 +652,12 @@ class SpikeSourceGamma(StandardCellType): recordable = ['spikes'] injectable = False receptor_types = () + units = { + 'alpha': 'dimensionless', + 'beta': 'Hz', + 'start': 'ms', + 'duration': 'ms', + } class SpikeSourceInhGamma(StandardCellType): @@ -523,6 +679,13 @@ class SpikeSourceInhGamma(StandardCellType): recordable = ['spikes'] injectable = False receptor_types = () + units = { + 'a': 'dimensionless', + 'b': 's', + 'tbins': 'ms', + 'start': 'ms', + 'duration': 'ms', + } class SpikeSourceArray(StandardCellType): @@ -532,3 +695,6 @@ class SpikeSourceArray(StandardCellType): recordable = ['spikes'] injectable = False receptor_types = () + units = { + 'spike_times': 'ms', + } diff --git a/pyNN/standardmodels/synapses.py b/pyNN/standardmodels/synapses.py index 351133f94..199235b35 100644 --- a/pyNN/standardmodels/synapses.py +++ b/pyNN/standardmodels/synapses.py @@ -74,6 +74,63 @@ class TsodyksMarkramSynapse(StandardSynapseType): 'tau_rec': 100.0, # depression time constant (ms) 'tau_facil': 0.0, # facilitation time constant (ms) } + default_initial_values = { + 'u': 0.0 + } + + +class SimpleStochasticSynapse(StandardSynapseType): + """ + Each spike is transmitted with a fixed probability `p`. 
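+
+    A minimal usage sketch (illustrative only; assumes two existing Populations
+    `pre` and `post` and a simulator backend imported as `sim`)::
+
+        stochastic = sim.SimpleStochasticSynapse(weight=0.5, p=0.5)
+        prj = sim.Projection(pre, post, sim.AllToAllConnector(),
+                             synapse_type=stochastic)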
+ """ + default_parameters = { + 'weight': 0.0, + 'delay': None, + 'p': 0.5, + } + + +class StochasticTsodyksMarkramSynapse(StandardSynapseType): + """ + Synapse exhibiting facilitation and depression, implemented using the model + of Tsodyks, Markram et al.: + + `Tsodyks, Uziel and Markram (2000)`_ Synchrony Generation in Recurrent Networks + with Frequency-Dependent Synapses. Journal of Neuroscience 20:RC50 + + in its stochastic version (cf Fuhrmann et al. 2002) + + Arguments: + `U`: + use parameter. + `tau_rec`: + depression time constant (ms). + `tau_facil`: + facilitation time constant (ms). + + .. _`Tsodyks, Uziel and Markram (2000)`: http://www.jneurosci.org/content/20/1/RC50.long + """ + default_parameters = { + 'weight': 0.0, + 'delay': None, + 'U': 0.5, # use parameter + 'tau_rec': 100.0, # depression time constant (ms) + 'tau_facil': 0.0, # facilitation time constant (ms) + } + + +class MultiQuantalSynapse(StandardSynapseType): + """ + docstring needed + """ + default_parameters = { + 'weight': 0.0, + 'delay': None, + 'U': 0.5, # maximal fraction of available resources + 'n': 1, # total number of release sites + 'tau_rec': 800.0, # depression time constant (ms) + 'tau_facil': 0.0, # facilitation time constant (ms) + } class STDPMechanism(StandardSynapseType): diff --git a/pyNN/utility/plotting.py b/pyNN/utility/plotting.py index 4508054f2..fbe7a5766 100644 --- a/pyNN/utility/plotting.py +++ b/pyNN/utility/plotting.py @@ -19,7 +19,7 @@ import matplotlib.gridspec as gridspec import numpy as np from quantities import ms -from neo import AnalogSignalArray, AnalogSignal, SpikeTrain +from neo import AnalogSignal, SpikeTrain try: from sys import maxint except ImportError: # Py3 @@ -53,7 +53,7 @@ def handle_options(ax, options): def plot_signal(ax, signal, index=None, label='', **options): """ - Plot an AnalogSignal or one signal from an AnalogSignalArray. + Plot a single channel from an AnalogSignal. """ if "ylabel" in options: if options["ylabel"] == "auto": @@ -61,7 +61,7 @@ def plot_signal(ax, signal, index=None, label='', **options): signal.units._dimensionality.string) handle_options(ax, options) if index is None: - label = "%s (Neuron %d)" % (label, signal.channel_index) + label = "%s (Neuron %d)" % (label, signal.channel_index or 0) else: label = "%s (Neuron %d)" % (label, signal.channel_index[index]) signal = signal[:, index] @@ -71,7 +71,7 @@ def plot_signal(ax, signal, index=None, label='', **options): def plot_signals(ax, signal_array, label_prefix='', **options): """ - Plot all signals in an AnalogSignalArray in a single panel. + Plot all channels in an AnalogSignal in a single panel. """ if "ylabel" in options: if options["ylabel"] == "auto": @@ -80,8 +80,8 @@ def plot_signals(ax, signal_array, label_prefix='', **options): handle_options(ax, options) offset = options.pop("y_offset", None) show_legend = options.pop("legend", True) - for i in signal_array.channel_index.argsort(): - channel = signal_array.channel_index[i] + for i in signal_array.channel_index.index.argsort(): + channel = signal_array.channel_index.index[i] signal = signal_array[:, i] if label_prefix: label = "%s (Neuron %d)" % (label_prefix, channel) @@ -155,10 +155,10 @@ def plot_hist(ax, histogram, label='', **options): def variable_names(segment): """ - List the names of all the AnalogSignalArrays (used for the variable name by + List the names of all the AnalogSignals (used for the variable name by PyNN) in the given segment. 
""" - return set(signal.name for signal in segment.analogsignalarrays) + return set(signal.name for signal in segment.analogsignals) class Figure(object): @@ -221,7 +221,7 @@ class Panel(object): Represents a single panel in a multi-panel figure. A panel is a Matplotlib Axes or Subplot instance. A data item may be an - AnalogSignal, AnalogSignalArray, or a list of SpikeTrains. The Panel will + AnalogSignal, AnalogSignal, or a list of SpikeTrains. The Panel will automatically choose an appropriate representation. Multiple data items may be plotted in the same panel. @@ -253,8 +253,6 @@ def plot(self, axes): elif isinstance(datum, Histogram): plot_hist(axes, datum, label=label, **properties) elif isinstance(datum, AnalogSignal): - plot_signal(axes, datum, label=label, **properties) - elif isinstance(datum, AnalogSignalArray): plot_signals(axes, datum, label_prefix=label, **properties) elif isinstance(datum, list) and len(datum) > 0 and isinstance(datum[0], SpikeTrain): plot_spiketrains(axes, datum, label=label, **properties) @@ -286,7 +284,7 @@ def comparison_plot(segments, labels, title='', annotations=None, lw = 2 * (n_seg - k) - 1 col = 'rbgmck'[k % 6] line_properties.append({"linewidth": lw, "color": col}) - for array in segment.analogsignalarrays: + for array in segment.analogsignals: for i in array.channel_index.argsort(): channel = array.channel_index[i] signal = array[:, i] diff --git a/requirements.txt b/requirements.txt index 8a2f82287..e5b6c8eba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ Jinja2>=2.6 docutils>=0.10 mock>1.0 numpy>=1.5 -quantities>=0.10 -lazyarray>=0.2.9 -neo>=0.3 +#quantities>=0.10 +git+https://github.com/python-quantities/python-quantities.git +lazyarray>=0.2.10 +neo>=0.5 diff --git a/setup.py b/setup.py index 2ba713867..59a3dff0a 100644 --- a/setup.py +++ b/setup.py @@ -3,22 +3,30 @@ from distutils.core import setup from distutils.command.build import build as _build import os +import subprocess + + +def run_command(path, working_directory): + p = subprocess.Popen(path, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + universal_newlines=True, + close_fds=True, cwd=working_directory) + result = p.wait() + stdout = p.stdout.readlines() + return result, stdout class build(_build): - """Add nrnivmodl to the end of the build process.""" + """At the end of the build process, try to compile NEURON and NEST extensions.""" def run(self): _build.run(self) - nrnivmodl = self.find_nrnivmodl() + # try to compile NEURON extensions + nrnivmodl = self.find("nrnivmodl") if nrnivmodl: print("nrnivmodl found at", nrnivmodl) - import subprocess - p = subprocess.Popen(nrnivmodl, shell=True, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - close_fds=True, cwd=os.path.join(os.getcwd(), self.build_lib, 'pyNN/neuron/nmodl')) - stdout = p.stdout.readlines() - result = p.wait() + result, stdout = run_command(nrnivmodl, + os.path.join(os.getcwd(), self.build_lib, 'pyNN/neuron/nmodl')) # test if nrnivmodl was successful if result != 0: print("Unable to compile NEURON extensions. Output was:") @@ -27,28 +35,57 @@ def run(self): print("Successfully compiled NEURON extensions.") else: print("Unable to find nrnivmodl. 
It will not be possible to use the pyNN.neuron module.") - - def find_nrnivmodl(self): - """Try to find the nrnivmodl executable.""" + # try to compile NEST extensions + nest_config = self.find("nest-config") + if nest_config: + print("nest-config found at", nest_config) + nest_build_dir = os.path.join(os.getcwd(), self.build_lib, 'pyNN/nest/_build') + if not os.path.exists(nest_build_dir): + os.mkdir(nest_build_dir) + result, stdout = run_command("cmake -Dwith-nest={} ../extensions".format(nest_config), + nest_build_dir) + if result != 0: + print("Problem running cmake. Output was:") + print(' '.join([''] + stdout)) + else: + result, stdout = run_command("make", nest_build_dir) + if result != 0: + print("Unable to compile NEST extensions. Output was:") + print(' '.join([''] + stdout)) + else: + result, stdout = run_command("make install", nest_build_dir) + if result != 0: + print("Unable to install NEST extensions. Output was:") + print(' '.join([''] + stdout)) + else: + print("Successfully compiled NEST extensions.") + + def find(self, command): + """Try to find an executable file.""" path = os.environ.get("PATH", "").split(os.pathsep) - nrnivmodl = '' + cmd = '' for dir_name in path: - abs_name = os.path.abspath(os.path.normpath(os.path.join(dir_name, "nrnivmodl"))) + abs_name = os.path.abspath(os.path.normpath(os.path.join(dir_name, command))) if os.path.isfile(abs_name): - nrnivmodl = abs_name + cmd = abs_name break - return nrnivmodl + return cmd setup( name="PyNN", - version="0.8.3", + version="0.9.1", packages=['pyNN', 'pyNN.nest', 'pyNN.neuron', - 'pyNN.brian', 'pyNN.common', 'pyNN.mock', 'pyNN.neuroml', - 'pyNN.recording', 'pyNN.standardmodels', 'pyNN.descriptions', - 'pyNN.nest.standardmodels', 'pyNN.neuroml.standardmodels', - 'pyNN.neuron.standardmodels', 'pyNN.brian.standardmodels', - 'pyNN.utility', 'pyNN.nineml'], - package_data={'pyNN': ['neuron/nmodl/*.mod', "descriptions/templates/*/*"]}, + 'pyNN.brian', 'pyNN.common', 'pyNN.mock', 'pyNN.neuroml', + 'pyNN.recording', 'pyNN.standardmodels', 'pyNN.descriptions', + 'pyNN.nest.standardmodels', 'pyNN.neuroml.standardmodels', + 'pyNN.neuron.standardmodels', 'pyNN.brian.standardmodels', + 'pyNN.utility', 'pyNN.nineml'], + package_data={'pyNN': ['neuron/nmodl/*.mod', + 'nest/extensions/*.h', + 'nest/extensions/*.cpp', + 'nest/extensions/CMakeLists.txt', + 'nest/extensions/sli/*.sli', + "descriptions/templates/*/*"]}, author="The PyNN team", author_email="andrew.davison@unic.cnrs-gif.fr", description="A Python package for simulator-independent specification of neuronal network models", diff --git a/test/hardware/unittests/test_population.py b/test/hardware/unittests/test_population.py index 369c8eec6..117714537 100644 --- a/test/hardware/unittests/test_population.py +++ b/test/hardware/unittests/test_population.py @@ -364,21 +364,21 @@ def test_record_with_single_variable(self): p.record('v') sim.run(12.3) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(12.3 / sim.get_time_step())) + 1 - self.assertEqual(data.analogsignalarrays[0].name, 'v') - self.assertEqual(data.analogsignalarrays[0].shape, (n_values, p.size)) + self.assertEqual(data.analogsignals[0].name, 'v') + self.assertEqual(data.analogsignals[0].shape, (n_values, p.size)) def test_record_with_multiple_variables(self): p = sim.Population(2, EIF_cond_exp_isfa_ista()) p.record(('v', 'spikes')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - 
self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v'))) - for arr in data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, p.size)) def test_record_v(self): diff --git a/test/hardware/unittests/test_populationview.py b/test/hardware/unittests/test_populationview.py index 5b795d783..1bc6e50b0 100644 --- a/test/hardware/unittests/test_populationview.py +++ b/test/hardware/unittests/test_populationview.py @@ -343,10 +343,10 @@ def test_record_with_single_variable(self): pv.record('v') sim.run(12.3) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(12.3 / sim.get_time_step())) + 1 - self.assertEqual(data.analogsignalarrays[0].name, 'v') - self.assertEqual(data.analogsignalarrays[0].shape, (n_values, pv.size)) + self.assertEqual(data.analogsignals[0].name, 'v') + self.assertEqual(data.analogsignals[0].shape, (n_values, pv.size)) def test_record_with_multiple_variables(self): p = sim.Population(4, sim.EIF_cond_exp_isfa_ista()) @@ -354,11 +354,11 @@ def test_record_with_multiple_variables(self): pv.record(('v', 'spikes')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v'))) - for arr in data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, pv.size)) def test_record_v(self): diff --git a/test/system/scenarios/__init__.py b/test/system/scenarios/__init__.py index 7eb87dc3b..cd972deef 100644 --- a/test/system/scenarios/__init__.py +++ b/test/system/scenarios/__init__.py @@ -1,17 +1,26 @@ # encoding: utf-8 +from testconfig import config -from . import (scenario1, - scenario2, - scenario3, - ticket166, - test_simulation_control, - test_recording, - test_cell_types, - test_electrodes, - scenario4, - test_parameter_handling, - test_procedural_api, - issue274, - test_connectors, - issue231, - test_connection_handling) +#import pdb +#pdb.set_trace() + +if 'testFile' in config: + file_name = config['testFile'] + exec("from . import ( %s )" % file_name) +else: + from . 
import ( scenario1, + scenario2, + scenario3, + ticket166, + test_simulation_control, + test_recording, + test_cell_types, + test_electrodes, + scenario4, + test_parameter_handling, + test_procedural_api, + issue274, + test_connectors, + issue231, + test_connection_handling, + test_synapse_types) diff --git a/test/system/scenarios/registry.py b/test/system/scenarios/registry.py index 9924bb71b..7c905b535 100644 --- a/test/system/scenarios/registry.py +++ b/test/system/scenarios/registry.py @@ -1,10 +1,11 @@ +from testconfig import config registry = [] def register(exclude=[]): def inner_register(scenario): - if scenario not in registry: + if scenario not in registry and not ('testName' in config and not scenario.__name__ == config['testName']): scenario.exclude = exclude registry.append(scenario) return scenario diff --git a/test/system/scenarios/test_cell_types.py b/test/system/scenarios/test_cell_types.py index 3112f87e2..86ce3bb18 100644 --- a/test/system/scenarios/test_cell_types.py +++ b/test/system/scenarios/test_cell_types.py @@ -8,7 +8,7 @@ except ImportError: have_scipy = False import quantities as pq -from nose.tools import assert_less +from nose.tools import assert_greater, assert_less from .registry import register @@ -22,14 +22,14 @@ def test_EIF_cond_alpha_isfa_ista(sim, plot_figure=False): ifcell.initialize(v=-65, w=0) sim.run(200.0) data = ifcell.get_data().segments[0] - expected_spike_times = numpy.array([10.02, 25.52, 43.18, 63.42, 86.67, 113.13, 142.69, 174.79]) * pq.ms + expected_spike_times = numpy.array([10.02, 25.52, 43.18, 63.42, 86.67, 113.13, 142.69, 174.79]) if plot_figure: import matplotlib.pyplot as plt - vm = data.analogsignalarrays[0] + vm = data.analogsignals[0] plt.plot(vm.times, vm) plt.plot(expected_spike_times, -40 * numpy.ones_like(expected_spike_times), "ro") plt.savefig("test_EIF_cond_alpha_isfa_ista_%s.png" % sim.__name__) - diff = (data.spiketrains[0] - expected_spike_times) / expected_spike_times + diff = (data.spiketrains[0].rescale(pq.ms).magnitude - expected_spike_times) / expected_spike_times assert abs(diff).max() < 0.01, abs(diff).max() sim.end() return data @@ -80,7 +80,7 @@ def issue367(sim, plot_figure=False): # we take the average membrane potential 0.1 ms before the spike and # compare it to the spike threshold spike_times = data.spiketrains[0] - vm = data.analogsignalarrays[0] + vm = data.analogsignals[0] spike_bins = ((spike_times - 0.1 * pq.ms) / vm.sampling_period).magnitude.astype(int) vm_before_spike = vm.magnitude[spike_bins] if plot_figure: diff --git a/test/system/scenarios/test_electrodes.py b/test/system/scenarios/test_electrodes.py index 2e8408554..a256acbf8 100644 --- a/test/system/scenarios/test_electrodes.py +++ b/test/system/scenarios/test_electrodes.py @@ -1,10 +1,18 @@ -from nose.tools import assert_equal +from nose.tools import assert_equal, assert_true, assert_false from numpy.testing import assert_array_equal import quantities as pq +import numpy from .registry import register +try: + import scipy + have_scipy = True +except ImportError: + have_scipy = False +from nose.plugins.skip import SkipTest + @register(exclude=["nemo"]) def test_changing_electrode(sim): @@ -24,7 +32,7 @@ def test_changing_electrode(sim): sim.run(simtime) c.amplitude += 0.1 - data = p.get_data().segments[0].analogsignalarrays[0] + data = p.get_data().segments[0].analogsignals[0] sim.end() @@ -51,10 +59,10 @@ def ticket226(sim): sim.run(30.0) v = cell.get_data().segments[0].filter(name='v')[0][:, 0] sim.end() - v_10p0 = v[abs(v.times - 10.0 * 
pq.ms) < 0.01 * pq.ms][0] - assert abs(v_10p0 - -60.0 * pq.mV) < 1e-10 - v_10p1 = v[abs(v.times - 10.1 * pq.ms) < 0.01 * pq.ms][0] - assert v_10p1 > -59.99 * pq.mV, v_10p1 + v_10p0 = v.magnitude[abs(v.times - 10.0 * pq.ms) < 0.01 * pq.ms, 0][0] + assert abs(v_10p0 - -60.0) < 1e-10 + v_10p1 = v.magnitude[abs(v.times - 10.1 * pq.ms) < 0.01 * pq.ms, 0][0] + assert v_10p1 > -59.99, v_10p1 @register() @@ -95,6 +103,356 @@ def issue321(sim): assert abs((v[-3:, 1] - v[-3:, 0]).max()) < 0.2 +@register() +def issue437(sim): + """ + Checks whether NoisyCurrentSource works properly, by verifying that: + 1) no change in vm before start time + 2) change in vm at dt after start time + 3) monotonic decay of vm after stop time + 4) noise.dt is properly implemented + Note: On rare occasions this test might fail as the signal is stochastic. + Test implementation makes use of certain approximations for thresholding. + If fails, run the test again to confirm. Passes 9/10 times on first attempt. + """ + if not have_scipy: + raise SkipTest + + v_rest = -60.0 # for this test keep v_rest < v_reset + sim.setup(timestep=0.1, min_delay=0.1) + cells = sim.Population(2, sim.IF_curr_alpha(tau_m=20.0, cm=1.0, v_rest=v_rest, + v_reset=-55.0, tau_refrac=5.0)) + cells.initialize(v=-60.0) + + #We test two cases: dt = simulator.state.dt and dt != simulator.state.dt + t_start = 25.0 + t_stop = 150.0 + dt_0 = 0.1 + dt_1 = 1.0 + noise_0 = sim.NoisyCurrentSource(mean=0.5, stdev=0.25, start=t_start, stop=t_stop, dt=dt_0) + noise_1 = sim.NoisyCurrentSource(mean=0.5, stdev=0.25, start=t_start, stop=t_stop, dt=dt_1) + cells[0].inject(noise_0) + cells[1].inject(noise_1) + + cells.record('v') + sim.run(200.0) + v = cells.get_data().segments[0].filter(name="v")[0] + v0 = v[:, 0] + v1 = v[:, 1] + t = v.times + sim.end() + + t_start_ind = int(numpy.argmax(t >= t_start)) + t_stop_ind = int(numpy.argmax(t >= t_stop)) + + # test for no change in vm before start time + # note: exact matches not appropriate owing to floating point rounding errors + assert_true (all(abs(val0 - v_rest*pq.mV) < 1e-9 and abs(val1 - v_rest*pq.mV) < 1e-9 for val0, val1 in zip(v0[:t_start_ind+1], v1[:t_start_ind+1]))) + + # test for change in vm at dt after start time + assert_true (abs(v0[t_start_ind+1] - v_rest*pq.mV) >= 1e-9 and abs(v1[t_start_ind+1] - v_rest*pq.mV) >= 1e-9) + + # test for monotonic decay of vm after stop time + assert_true (all(val0 >= val0_next and val1 >= val1_next for val0, val0_next, val1, val1_next in zip(v0[t_stop_ind:], v0[t_stop_ind+1:], v1[t_stop_ind:], v1[t_stop_ind+1:]))) + + # test for ensuring noise.dt is properly implemented; checking first instance for each + # recording current profiles not implemented currently, thus using double derivative of vm + # necessary to upsample signal with noise of dt; else fails in certain scenarios + # Test implementation makes use of certain approximations for thresholding. + # Note: there can be a much simpler check for this once recording current profiles enabled (for all simulators). 
+ # Test implementation makes use of certain approximations for thresholding; hence taking mode of initial values + t_up = numpy.arange(float(min(t)), float(max(t))+dt_0/10.0, dt_0/10.0) + v0_up = numpy.interp(t_up, t, v0.magnitude.flat) + v1_up = numpy.interp(t_up, t, v1.magnitude.flat) + d2_v0_up = numpy.diff(v0_up, n=2) + d2_v1_up = numpy.diff(v1_up, n=2) + dt_0_list = [ j for (i,j) in zip(d2_v0_up, t_up) if abs(i) >= 0.00005 ] + dt_1_list = [ j for (i,j) in zip(d2_v1_up, t_up) if abs(i) >= 0.00005 ] + dt_0_list_diff = numpy.diff(dt_0_list, n=1) + dt_1_list_diff = numpy.diff(dt_1_list, n=1) + dt_0_mode = scipy.stats.mode(dt_0_list_diff[0:10])[0][0] + dt_1_mode = scipy.stats.mode(dt_1_list_diff[0:10])[0][0] + assert_true (abs(dt_0_mode - dt_0) < 1e-9 or abs(dt_1_mode - dt_1) < 1e-9) + + +@register() +def issue442(sim): + """ + Checks whether ACSource works properly, by verifying that: + 1) no change in vm before start time + 2) change in vm at dt after start time + 3) monotonic decay of vm after stop time + 4) accurate frequency of output signal + 5) offset included in output signal + """ + v_rest = -60.0 + sim.setup(timestep=0.1, min_delay=0.1) + cells = sim.Population(1, sim.IF_curr_alpha(tau_m=20.0, cm=1.0, v_rest=v_rest, + v_reset=-65.0, tau_refrac=5.0)) + cells.initialize(v=v_rest) + + # set t_start, t_stop and freq such that + # "freq*1e-3*(t_stop-t_start)" is an integral value + t_start = 22.5 + t_stop = 122.5 + freq = 100.0 + acsource = sim.ACSource(start=t_start, stop=t_stop, amplitude=0.5, offset=0.1, frequency=freq, phase=0.0) + cells[0].inject(acsource) + + cells.record('v') + sim.run(150.0) + v = cells.get_data().segments[0].filter(name="v")[0] + v0 = v[:, 0] + t = v.times + sim.end() + + t_start_ind = int(numpy.argmax(t >= t_start)) + t_stop_ind = int(numpy.argmax(t >= t_stop)) + + # test for no change in vm before start time + # note: exact matches not appropriate owing to floating point rounding errors + assert_true(all(abs(val0 - v_rest*pq.mV) < 1e-9 for val0 in v0[:t_start_ind+1])) + + # test for change in vm at dt after start time + assert_true(abs(v0[t_start_ind+1] - v0[t_start_ind]) >= 1e-9) + + # test for monotonic decay of vm after stop time + assert_true(all(val0 >= val0_next for val0, val0_next in zip(v0[t_stop_ind:], v0[t_stop_ind+1:]))) + + # test for accurate frequency; simply counts peaks + peak_ctr = 0 + peak_ind = [] + for i in range(t_stop_ind-t_start_ind): + if v0[t_start_ind+i-1] < v0[t_start_ind+i] and v0[t_start_ind+i] >= v0[t_start_ind+i+1]: + peak_ctr+=1 + peak_ind.append(t_start_ind+i) + assert_equal(peak_ctr, freq*1e-3*(t_stop-t_start)) + # also test for offset; peaks initially increase in magnitude + assert_true(v0[peak_ind[0]] < v0[peak_ind[1]] and v0[peak_ind[1]] < v0[peak_ind[2]]) + + +@register() +def issue445(sim): + """ + This test basically checks if a new value of current is calculated at every + time step, and that the total number of time steps is as expected theoretically + Note: NEST excluded as recording of electrode currents still to be implemented + """ + sim_dt = 0.1 + simtime = 200.0 + sim.setup(timestep=sim_dt, min_delay=1.0) + cells = sim.Population(1, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0)) + t_start=50.0 + t_stop=125.0 + acsource = sim.ACSource(start=t_start, stop=t_stop, amplitude=0.5, offset=0.0, frequency=100.0, phase=0.0) + cells[0].inject(acsource) + acsource.record() + + sim.run(simtime) + sim.end() + + i_t_ac, i_amp_ac = acsource.get_data() + t_start_ind = numpy.argmax(i_t_ac >= t_start) + t_stop_ind = 
numpy.argmax(i_t_ac >= t_stop) + assert_true (all(val != val_next for val, val_next in zip(i_t_ac[t_start_ind:t_stop_ind-1], i_t_ac[t_start_ind+1:t_stop_ind]))) + # note: exact matches not appropriate owing to floating point rounding errors + assert_true (( len(i_t_ac) - ((max(i_t_ac)-min(i_t_ac))/sim_dt + 1) )< 1e-9) + + +@register() +def issue451(sim): + """ + Modification of test: test_changing_electrode + Difference: incorporates a start and stop time for stimulus + Check that changing the values of the electrodes on the fly is taken into account + """ + repeats = 2 + dt = 0.1 + simtime = 100 + sim.setup(timestep=dt, min_delay=dt) + v_rest = -60.0 + p = sim.Population(1, sim.IF_curr_exp(v_rest=v_rest)) + p.initialize(v=v_rest) + c = sim.DCSource(amplitude=0.0, start=25.0, stop=50.0) + c.inject_into(p) + p.record('v') + + for i in range(repeats): + sim.run(simtime) + c.amplitude += 0.1 + + v = p.get_data().segments[0].filter(name="v")[0] + sim.end() + # check that the value of v is equal to v_rest throughout the simulation + # note: exact matches not appropriate owing to floating point rounding errors + assert_true (all( (val.item()-v_rest)<1e-9 for val in v[:, 0])) + + +@register() +def issue483(sim): + """ + Test to ensure that length of recorded voltage vector is as expected + (checks for the specific scenario that failed earlier) + """ + dt = 0.1 + sim.setup(timestep=dt, min_delay=dt) + p = sim.Population(1, sim.IF_curr_exp()) + c = sim.DCSource(amplitude=0.5) + c.inject_into(p) + p.record('v') + + simtime = 200.0 + sim.run(100.0) + sim.run(100.0) + + v = p.get_data().segments[0].filter(name="v")[0] + + # check that the length of vm vector is as expected theoretically + assert (len(v) == (int(simtime/dt) + 1)) + + +@register() +def issue487(sim): + """ + Test to ensure that DCSource and StepCurrentSource work properly + for repeated runs. Problem existed under pyNN.neuron. 
+ Following sub-tests performed: + 1) DCSource active across two runs + 2) StepCurrentSource active across two runs + 3) DCSource active only during second run (earlier resulted in no current input) + 4) StepCurrentSource active only during second run (earlier resulted in current initiation at end of first run) + """ + dt = 0.1 + sim.setup(timestep=dt, min_delay=dt) + + v_rest = -60.0 + cells = sim.Population(4, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0, v_rest=v_rest)) + cells.initialize(v=v_rest) + cells.record('v') + + dcsource = sim.DCSource(amplitude=0.15, start=25.0, stop=115.0) + cells[0].inject(dcsource) + + step = sim.StepCurrentSource(times=[25.0, 75.0, 115.0], amplitudes=[0.05, 0.10, 0.20]) + cells[1].inject(step) + + dcsource_2 = sim.DCSource(amplitude=0.15, start=115.0, stop=145.0) + cells[2].inject(dcsource_2) + + step_2 = sim.StepCurrentSource(times=[125.0, 175.0, 215.0], amplitudes=[0.05, 0.10, 0.20]) + cells[3].inject(step_2) + + simtime = 100.0 + sim.run(simtime) + sim.run(simtime) + + v = cells.get_data().segments[0].filter(name="v")[0] + sim.end() + v_dc = v[:, 0] + v_step = v[:, 1] + v_dc_2 = v[:, 2] + v_step_2 = v[:, 3] + + # check that membrane potential does not fall after end of first run + # Test 1 + assert_true (v_dc[int(simtime/dt)] < v_dc[int(simtime/dt)+1]) + # Test 2 + assert_true (v_step[int(simtime/dt)] < v_step[int(simtime/dt)+1]) + # check that membrane potential of cell undergoes a change + # Test 3 + v_dc_2_arr = numpy.squeeze(numpy.array(v_dc_2)) + assert_false (numpy.isclose(v_dc_2_arr, v_rest).all()) + # check that membrane potential of cell undergoes no change till start of current injection + # Test 4 + v_step_2_arr = numpy.squeeze(numpy.array(v_step_2)) + assert_true (numpy.isclose(v_step_2_arr[0:int(step_2.times[0]/dt)], v_rest).all()) + + +@register() +def issue_465_474(sim): + """ + Checks the current traces recorded for each of the four types of + electrodes in pyNN, and verifies that: + 1) Length of the current traces are as expected + 2) Values at t = t_start and t = t_stop present + 3) Changes in current value occur at the expected time instant + 4) Change in Vm begins at the immediate next time instant following current injection + """ + sim_dt = 0.1 + sim.setup(min_delay=1.0, timestep = sim_dt) + + v_rest = -60.0 + cells = sim.Population(4, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0, v_rest=v_rest)) + cells.initialize(v=v_rest) + + amp=0.5 + offset = 0.1 + start=50.0 + stop=125.0 + + acsource = sim.ACSource(start=start, stop=stop, amplitude=amp, offset=offset, frequency=100.0, phase=0.0) + cells[0].inject(acsource) + acsource.record() + + dcsource = sim.DCSource(amplitude=amp, start=start, stop=stop) + cells[1].inject(dcsource) + dcsource.record() + + noise = sim.NoisyCurrentSource(mean=amp, stdev=0.05, start=start, stop=stop, dt=sim_dt) + cells[2].inject(noise) + noise.record() + + step = sim.StepCurrentSource(times=[start, (start+stop)/2, stop], amplitudes=[0.4, 0.6, 0.2]) + cells[3].inject(step) + step.record() + + cells.record('v') + runtime = 100.0 + simtime = 0 + # testing for repeated runs + sim.run(runtime) + simtime += runtime + sim.run(runtime) + simtime += runtime + + vm = cells.get_data().segments[0].filter(name="v")[0] + sim.end() + + v_ac = vm[:, 0] + v_dc = vm[:, 1] + v_noise = vm[:, 2] + v_step = vm[:, 3] + + i_t_ac, i_amp_ac = acsource.get_data() + i_t_dc, i_amp_dc = dcsource.get_data() + i_t_noise, i_amp_noise = noise.get_data() + i_t_step, i_amp_step = step.get_data() + + # test for length of recorded current 
traces + assert_true (len(i_t_ac) == len(i_amp_ac) == (int(simtime/sim_dt)+1) == len(v_ac)) + assert_true (len(i_t_dc) == len(i_amp_dc) == int(simtime/sim_dt)+1 == len(v_dc)) + assert_true (len(i_t_noise) == len(i_amp_noise) == int(simtime/sim_dt)+1 == len(v_noise)) + assert_true (len(i_t_step) == len(i_amp_step) == int(simtime/sim_dt)+1 == len(v_step)) + + # test to check values exist at start and end of simulation + assert_true (i_t_ac[0]==0.0 and numpy.isclose(i_t_ac[-1],simtime)) + assert_true (i_t_dc[0]==0.0 and numpy.isclose(i_t_dc[-1],simtime)) + assert_true (i_t_noise[0]==0.0 and numpy.isclose(i_t_noise[-1],simtime)) + assert_true (i_t_step[0]==0.0 and numpy.isclose(i_t_step[-1],simtime)) + + # test to check current changes at the expected time instant + assert_true (i_amp_ac[(int(start/sim_dt))-1]==0 and i_amp_ac[int(start/sim_dt)]!=0) + assert_true (i_amp_dc[int(start/sim_dt)-1]==0 and i_amp_dc[int(start/sim_dt)]!=0) + assert_true (i_amp_noise[int(start/sim_dt)-1]==0 and i_amp_noise[int(start/sim_dt)]!=0) + assert_true (i_amp_step[int(start/sim_dt)-1]==0 and i_amp_step[int(start/sim_dt)]!=0) + + # test to check vm changes at the time step following current initiation + assert_true (numpy.isclose(v_ac[int(start/sim_dt)].item(),v_rest) and v_ac[int(start/sim_dt)+1]!=v_rest) + assert_true (numpy.isclose(v_dc[int(start/sim_dt)].item(),v_rest) and v_dc[int(start/sim_dt)+1]!=v_rest) + assert_true (numpy.isclose(v_noise[int(start/sim_dt)].item(),v_rest) and v_noise[int(start/sim_dt)+1]!=v_rest) + assert_true (numpy.isclose(v_step[int(start/sim_dt)].item(),v_rest) and v_step[int(start/sim_dt)+1]!=v_rest) + + if __name__ == '__main__': from pyNN.utility import get_simulator sim, args = get_simulator() @@ -102,3 +460,10 @@ def issue321(sim): ticket226(sim) issue165(sim) issue321(sim) + issue437(sim) + issue442(sim) + issue445(sim) + issue451(sim) + issue483(sim) + issue487(sim) + issue_465_474(sim) diff --git a/test/system/scenarios/test_recording.py b/test/system/scenarios/test_recording.py index 53c0a0333..b274019b6 100644 --- a/test/system/scenarios/test_recording.py +++ b/test/system/scenarios/test_recording.py @@ -2,10 +2,12 @@ import os import numpy import quantities as pq -from nose.tools import assert_equal +from nose.tools import assert_equal, assert_true +from numpy.testing import assert_array_equal, assert_array_almost_equal from neo.io import get_io -from pyNN.utility import assert_arrays_equal, assert_arrays_almost_equal, init_logging +from pyNN.utility import assert_arrays_equal, assert_arrays_almost_equal, init_logging, normalized_filename from .registry import register +import pickle @register(exclude=['nemo']) @@ -30,12 +32,11 @@ def test_reset_recording(sim): sim.run(10.0) data = p.get_data() sim.end() - ti = lambda i: data.segments[i].analogsignalarrays[0].times + ti = lambda i: data.segments[i].analogsignals[0].times assert_arrays_equal(ti(0), ti(1)) - idx = lambda i: data.segments[i].analogsignalarrays[0].channel_index - assert idx(0) == [3] - assert idx(1) == [4] - vi = lambda i: data.segments[i].analogsignalarrays[0] + assert_array_equal(data.segments[0].analogsignals[0].channel_index.channel_ids, numpy.array([3])) + assert_array_equal(data.segments[1].analogsignals[0].channel_index.channel_ids, numpy.array([4])) + vi = lambda i: data.segments[i].analogsignals[0] assert vi(0).shape == vi(1).shape == (101, 1) assert vi(0)[0, 0] == vi(1)[0, 0] == p.initial_values['v'].evaluate(simplify=True) * pq.mV # the first value should be the same assert not (vi(0)[1:, 0] == 
vi(1)[1:, 0]).any() # none of the others should be, because of different i_offset @@ -79,14 +80,20 @@ def test_record_vm_and_gsyn_from_assembly(sim): assert_equal(gsyn_p1.shape, (n_points, 4)) assert_equal(gsyn_all.shape, (n_points, 7)) - assert_arrays_equal(vm_p1[:, 3], vm_all[:, 8]) + assert_array_equal(vm_p1[:, 3], vm_all[:, 8]) - assert_arrays_equal(vm_p0.channel_index, numpy.arange(5)) - assert_arrays_equal(vm_p1.channel_index, numpy.arange(6)) - assert_arrays_equal(vm_all.channel_index, numpy.arange(11)) - assert_arrays_equal(gsyn_p0.channel_index, numpy.array([2, 3, 4])) - assert_arrays_equal(gsyn_p1.channel_index, numpy.arange(4)) - assert_arrays_equal(gsyn_all.channel_index, numpy.arange(2, 9)) + assert_array_equal(vm_p0.channel_index.index, numpy.arange(5)) + assert_array_equal(vm_p1.channel_index.index, numpy.arange(6)) + assert_array_equal(vm_all.channel_index.index, numpy.arange(11)) + assert_array_equal(vm_p0.channel_index.channel_ids, numpy.arange(5)) + assert_array_equal(vm_p1.channel_index.channel_ids, numpy.arange(6)) + assert_array_equal(vm_all.channel_index.channel_ids, numpy.arange(11)) + assert_array_equal(gsyn_p0.channel_index.index, numpy.arange(3)) + assert_array_equal(gsyn_p1.channel_index.index, numpy.arange(4)) + assert_array_equal(gsyn_all.channel_index.index, numpy.arange(7)) + assert_array_equal(gsyn_p0.channel_index.channel_ids, numpy.array([2, 3, 4])) + assert_array_equal(gsyn_p1.channel_index.channel_ids, numpy.arange(4)) + assert_array_equal(gsyn_all.channel_index.channel_ids, numpy.arange(2, 9)) sim.end() test_record_vm_and_gsyn_from_assembly.__test__ = False @@ -111,8 +118,8 @@ def issue259(sim): print(spiketrains2[0]) sim.end() - assert_arrays_almost_equal(spiketrains0[0], numpy.array([0.075]) * pq.ms, 1e-17) - assert_arrays_almost_equal(spiketrains1[0], numpy.array([10.025, 12.34]) * pq.ms, 1e-14) + assert_arrays_almost_equal(spiketrains0[0].rescale(pq.ms).magnitude, numpy.array([0.075]), 1e-17) + assert_arrays_almost_equal(spiketrains1[0].rescale(pq.ms).magnitude, numpy.array([10.025, 12.34]), 1e-14) assert_equal(spiketrains2[0].size, 0) @@ -127,8 +134,8 @@ def test_sampling_interval(sim): p1.record('v', sampling_interval=1.0) p2.record('v', sampling_interval=0.5) sim.run(10.0) - d1 = p1.get_data().segments[0].analogsignalarrays[0] - d2 = p2.get_data().segments[0].analogsignalarrays[0] + d1 = p1.get_data().segments[0].analogsignals[0] + d2 = p2.get_data().segments[0].analogsignals[0] assert_equal(d1.sampling_period, 1.0 * pq.ms) assert_equal(d1.shape, (11, 3)) assert_equal(d2.sampling_period, 0.5 * pq.ms) @@ -151,14 +158,209 @@ def test_mix_procedural_and_oo(sim): data_proc = get_io(fn_proc).read()[0] data_oo = get_io(fn_oo).read()[0] - assert_arrays_equal(data_proc.segments[0].analogsignalarrays[0], - data_oo.segments[0].analogsignalarrays[0]) + assert_array_equal(data_proc.segments[0].analogsignals[0], + data_oo.segments[0].analogsignals[0]) os.remove(fn_proc) os.remove(fn_oo) test_mix_procedural_and_oo.__test__ = False +@register() +def issue_449_490_491(sim): + """ + Test to ensure that Simulator and Population recording work properly + The following 12 scenarios are explored: + Note: var1 = "spikes", var2 = "v" + 1) sim.record() + i) cell[0] + a) 2 parameters (2vars) (scenario 1) + b) parameter1 (var1) (scenario 2) + c) parameter2 (var2) (scenario 3) + ii) cell[1] + a) 2 parameters (2vars) (scenario 4) + b) parameter1 (var1) (scenario 5) + c) parameter2 (var2) (scenario 6) + iii) population + a) 2 parameters (2vars) (scenario 7) + b) parameter1 
(var1) (scenario 8) + c) parameter2 (var2) (scenario 9) + 2) pop.record() - always records for a population; not a single cell + a) 2 parameters (2vars) (scenario 10) + b) parameter1 (var1) (scenario 11) + c) parameter2 (var2) (scenario 12) + """ + # START ***** defining methods needed for test ***** + + def get_file_data(filename): + # method to access pickled file and retrieve data + data = [] + with (open(filename, "rb")) as openfile: + while True: + try: + data.append(pickle.load(openfile)) + except EOFError: + break + return data + + def eval_num_cells(data): + # scan data object to evaluate number of cells; returns 4 values + # nCells : # of cells in analogsignals (if "v" recorded) + # nspikes1: # of spikes in first recorded cell + # nspikes2: # of spikes in second recorded cell (if exists) + # -- if any parameter absent, return -1 as its value + # annot_bool # true if specified annotation exists; false otherwise + + try: + nCells = data[0].segments[0].analogsignals[0].shape[1] + except: + nCells = -1 + + try: + nspikes1 = data[0].segments[0].spiketrains[0].shape[0] + except: + nspikes1 = -1 + + try: + nspikes2 = data[0].segments[0].spiketrains[1].shape[0] + except: + nspikes2 = -1 + + if 'script_name' in data[0].annotations.keys(): + annot_bool = True + else: + annot_bool = False + + return (nCells, nspikes1, nspikes2, annot_bool) + + # END ***** defining methods needed for test ***** + + sim_dt = 0.1 + sim.setup(min_delay=1.0, timestep = sim_dt) + + # creating a population of two cells; only cell[0] gets stimulus + # hence only cell[0] will have entries for spiketrains + cells = sim.Population(2, sim.IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0)) + steady = sim.DCSource(amplitude=2.5, start=25.0, stop=75.0) + cells[0].inject(steady) + + # specify appropriate filenames for output files + filename_sim_cell1_2vars = normalized_filename("Results", "sim_cell1_2vars", "pkl", sim) + filename_sim_cell1_var1 = normalized_filename("Results", "sim_cell1_var1", "pkl", sim) + filename_sim_cell1_var2 = normalized_filename("Results", "sim_cell1_var2", "pkl", sim) + filename_sim_cell2_2vars = normalized_filename("Results", "sim_cell2_2vars", "pkl", sim) + filename_sim_cell2_var1 = normalized_filename("Results", "sim_cell2_var1", "pkl", sim) + filename_sim_cell2_var2 = normalized_filename("Results", "sim_cell2_var2", "pkl", sim) + filename_sim_popl_2vars = normalized_filename("Results", "sim_popl_2vars", "pkl", sim) + filename_sim_popl_var1 = normalized_filename("Results", "sim_popl_var1", "pkl", sim) + filename_sim_popl_var2 = normalized_filename("Results", "sim_popl_var2", "pkl", sim) + filename_rec_2vars = normalized_filename("Results", "rec_2vars", "pkl", sim) + filename_rec_var1 = normalized_filename("Results", "rec_var1", "pkl", sim) + filename_rec_var2 = normalized_filename("Results", "rec_var2", "pkl", sim) + + # instruct pynn to record as per above scenarios + sim.record(["spikes", "v"], cells[0], filename_sim_cell1_2vars, annotations={'script_name': __file__}) + sim.record(["spikes"], cells[0], filename_sim_cell1_var1, annotations={'script_name': __file__}) + sim.record(["v"], cells[0], filename_sim_cell1_var2, annotations={'script_name': __file__}) + sim.record(["spikes", "v"], cells[1], filename_sim_cell2_2vars, annotations={'script_name': __file__}) + sim.record(["spikes"], cells[1], filename_sim_cell2_var1, annotations={'script_name': __file__}) + sim.record(["v"], cells[1], filename_sim_cell2_var2, annotations={'script_name': __file__}) + sim.record(["spikes", "v"], cells, 
filename_sim_popl_2vars, annotations={'script_name': __file__}) + sim.record(["spikes"], cells, filename_sim_popl_var1, annotations={'script_name': __file__}) + sim.record(["v"], cells, filename_sim_popl_var2, annotations={'script_name': __file__}) + cells.record(["spikes", "v"], to_file=filename_rec_2vars) + cells.record(["spikes"], to_file=filename_rec_var1) + cells.record(["v"], to_file=filename_rec_var2) + + sim.run(100.0) + sim.end() + + # retrieve data from the created files, and perform appropriate checks + # scenario 1 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell1_2vars)) + assert_true (nCells == 1) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 2 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell1_var1)) + assert_true (nCells == -1) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 3 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell1_var2)) + assert_true (nCells == 1) + assert_true (nspikes1 == -1) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 4 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell2_2vars)) + assert_true (nCells == 1) + assert_true (nspikes1 == 0) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 5 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell2_var1)) + assert_true (nCells == -1) + assert_true (nspikes1 == 0) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 6 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_cell2_var2)) + assert_true (nCells == 1) + assert_true (nspikes1 == -1) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 7 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_popl_2vars)) + assert_true (nCells == 2) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == 0) + assert_true (annot_bool) + + # scenario 8 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_popl_var1)) + assert_true (nCells == -1) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == 0) + assert_true (annot_bool) + + # scenario 9 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_sim_popl_var2)) + assert_true (nCells == 2) + assert_true (nspikes1 == -1) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + # scenario 10 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_rec_2vars)) + assert_true (nCells == 2) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == 0) + assert_true (annot_bool) + + # scenario 11 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_rec_var1)) + assert_true (nCells == -1) + assert_true (nspikes1 > 0) + assert_true (nspikes2 == 0) + assert_true (annot_bool) + + # scenario 12 + nCells, nspikes1, nspikes2, annot_bool = eval_num_cells(get_file_data(filename_rec_var2)) + assert_true (nCells == 2) + assert_true (nspikes1 == -1) + assert_true (nspikes2 == -1) + assert_true (annot_bool) + + if __name__ == '__main__': from pyNN.utility import get_simulator sim, args = get_simulator() @@ -167,3 +369,4 @@ def test_mix_procedural_and_oo(sim): issue259(sim) test_sampling_interval(sim) test_mix_procedural_and_oo(sim) + issue_449_490_491(sim) diff --git 
a/test/system/scenarios/test_simulation_control.py b/test/system/scenarios/test_simulation_control.py index 3b5c3846b..3a0f839f2 100644 --- a/test/system/scenarios/test_simulation_control.py +++ b/test/system/scenarios/test_simulation_control.py @@ -1,5 +1,6 @@ from nose.tools import assert_almost_equal, assert_raises +from numpy.testing import assert_array_equal, assert_array_almost_equal from pyNN.utility import assert_arrays_equal, assert_arrays_almost_equal from .registry import register @@ -24,8 +25,8 @@ def test_reset(sim): assert len(data.segments) == repeats for segment in data.segments[1:]: - assert_arrays_almost_equal(segment.analogsignalarrays[0], - data.segments[0].analogsignalarrays[0], 1e-11) + assert_array_almost_equal(segment.analogsignals[0], + data.segments[0].analogsignals[0], 10) test_reset.__test__ = False @@ -51,8 +52,8 @@ def test_reset_with_clear(sim): for rec in data: assert len(rec.segments) == 1 - assert_arrays_almost_equal(rec.segments[0].analogsignalarrays[0], - data[0].segments[0].analogsignalarrays[0], 1e-11) + assert_arrays_almost_equal(rec.segments[0].analogsignals[0], + data[0].segments[0].analogsignals[0], 1e-11) test_reset_with_clear.__test__ = False @@ -77,9 +78,9 @@ def test_setup(sim): assert len(data) == n for block in data: assert len(block.segments) == 1 - signals = block.segments[0].analogsignalarrays + signals = block.segments[0].analogsignals assert len(signals) == 1 - assert_arrays_equal(signals[0], data[0].segments[0].analogsignalarrays[0]) + assert_array_equal(signals[0], data[0].segments[0].analogsignals[0]) test_setup.__test__ = False diff --git a/test/system/scenarios/test_synapse_types.py b/test/system/scenarios/test_synapse_types.py new file mode 100644 index 000000000..0bdc9a3b7 --- /dev/null +++ b/test/system/scenarios/test_synapse_types.py @@ -0,0 +1,61 @@ +from __future__ import division + +import numpy as np +try: + import scipy + have_scipy = True +except ImportError: + have_scipy = False +from nose.tools import assert_equal, assert_less, assert_greater, assert_not_equal + +from .registry import register + + +@register(exclude=['moose', 'nemo', 'brian']) +def test_simple_stochastic_synapse(sim, plot_figure=False): + # in this test we connect + sim.setup(min_delay=0.5) + t_stop = 1000.0 + spike_times = np.arange(2.5, t_stop, 5.0) + source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times)) + neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0)) + synapse_type = sim.SimpleStochasticSynapse(weight=0.5, + p=np.array([[0.0, 0.5, 0.5, 1.0]])) + connections = sim.Projection(source, neurons, sim.AllToAllConnector(), + synapse_type=synapse_type) + source.record('spikes') + neurons.record('gsyn_exc') + sim.run(t_stop) + + data = neurons.get_data().segments[0] + gsyn = data.analogsignals[0].rescale('uS') + if plot_figure: + import matplotlib.pyplot as plt + for i in range(neurons.size): + plt.subplot(neurons.size, 1, i+1) + plt.plot(gsyn.times, gsyn[:, i]) + plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__) + print(data.analogsignals[0].units) + crossings = [] + for i in range(neurons.size): + crossings.append( + gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])]) + assert_equal(crossings[0].size, 0) + assert_less(crossings[1].size, 0.6*spike_times.size) + assert_greater(crossings[1].size, 0.4*spike_times.size) + assert_equal(crossings[3].size, spike_times.size) + assert_not_equal(crossings[1], crossings[2]) + print(crossings[1].size / spike_times.size) + return 
data + + +test_simple_stochastic_synapse.__test__ = False + + + +if __name__ == '__main__': + from pyNN.utility import get_simulator + sim, args = get_simulator(("--plot-figure", + {"help": "generate a figure", + "action": "store_true"})) + test_simple_stochastic_synapse(sim, plot_figure=args.plot_figure) diff --git a/test/system/scenarios/ticket166.py b/test/system/scenarios/ticket166.py index cc745e824..1f895a3bf 100644 --- a/test/system/scenarios/ticket166.py +++ b/test/system/scenarios/ticket166.py @@ -33,7 +33,7 @@ def ticket166(sim, plot_figure=False): # note we add no new spikes to the second source t = sim.run(t_step) # first neuron gets depolarized again - vm = cells.get_data().segments[0].analogsignalarrays[0] + vm = cells.get_data().segments[0].analogsignals[0] final_v_0 = vm[-1, 0] final_v_1 = vm[-1, 1] diff --git a/test/system/test_nest.py b/test/system/test_nest.py index 9808d9b13..071a23faa 100644 --- a/test/system/test_nest.py +++ b/test/system/test_nest.py @@ -60,7 +60,7 @@ def test_record_native_model(): tstop = 250.0 nest.run(tstop) - vm = p1.get_data().segments[0].analogsignalarrays[0] + vm = p1.get_data().segments[0].analogsignals[0] n_points = int(tstop / nest.get_time_step()) + 1 assert_equal(vm.shape, (n_points, n_cells)) assert vm.max() > 0.0 # should have some spikes diff --git a/test/system/test_neuron.py b/test/system/test_neuron.py index c009dadb9..7e4c47f40 100644 --- a/test/system/test_neuron.py +++ b/test/system/test_neuron.py @@ -140,8 +140,8 @@ def test_electrical_synapse(): p1.record('v') p2.record('v') pyNN.neuron.run(200) - p1_trace = p1.get_data(('v',)).segments[0].analogsignalarrays[0] - p2_trace = p2.get_data(('v',)).segments[0].analogsignalarrays[0] + p1_trace = p1.get_data(('v',)).segments[0].analogsignals[0] + p2_trace = p2.get_data(('v',)).segments[0].analogsignals[0] # Check the local forward connection assert p2_trace[:, 0].max() - p2_trace[:, 0].min() > 50 # Check the remote forward connection @@ -181,7 +181,7 @@ def test_record_native_model(): nrn.run(250.0) - data = p1.get_data().segments[0].analogsignalarrays + data = p1.get_data().segments[0].analogsignals assert_equal(len(data), 2) # one array per variable assert_equal(data[0].name, 'apical(1.0).v') assert_equal(data[1].name, 'soma(0.5).ina') diff --git a/test/unittests/test_assembly.py b/test/unittests/test_assembly.py index c985a3432..d17af98ec 100644 --- a/test/unittests/test_assembly.py +++ b/test/unittests/test_assembly.py @@ -392,7 +392,7 @@ def test_get_data_with_gather(self, sim=sim): a.record('v') sim.run(t1) # what if we call p.record between two run statements? 
- # would be nice to get an AnalogSignalArray with a non-zero t_start + # would be nice to get an AnalogSignal with a non-zero t_start # but then need to make sure we get the right initial value sim.run(t2) sim.reset() @@ -402,7 +402,7 @@ def test_get_data_with_gather(self, sim=sim): data = a.get_data(gather=True) self.assertEqual(len(data.segments), 2) seg0 = data.segments[0] - self.assertEqual(len(seg0.analogsignalarrays), 1) + self.assertEqual(len(seg0.analogsignals), 1) v = seg0.filter(name='v')[0] self.assertEqual(v.name, 'v') num_points = int(round((t1 + t2) / sim.get_time_step())) + 1 @@ -413,7 +413,7 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(seg0.spiketrains), 0) seg1 = data.segments[1] - self.assertEqual(len(seg1.analogsignalarrays), 2) + self.assertEqual(len(seg1.analogsignals), 2) w = seg1.filter(name='w')[0] self.assertEqual(w.name, 'w') num_points = int(round(t3 / sim.get_time_step())) + 1 diff --git a/test/unittests/test_lowlevelapi.py b/test/unittests/test_lowlevelapi.py index 8f831e1da..20a336142 100644 --- a/test/unittests/test_lowlevelapi.py +++ b/test/unittests/test_lowlevelapi.py @@ -55,7 +55,8 @@ def test_build_record(): source.record = Mock() record_function(('v', 'spikes'), source, "filename") source.record.assert_called_with(('v', 'spikes'), to_file="filename", sampling_interval=None) - assert_equal(simulator.state.write_on_end, [(source, ('v', 'spikes'), "filename")]) + # below check needs to be re-implemented with pyNN.mock + # assert_equal(simulator.state.write_on_end, [(source, ('v', 'spikes'), "filename")]) def test_build_record_with_assembly(): @@ -71,4 +72,5 @@ def test_build_record_with_assembly(): source.record = Mock() record_function('foo', source, "filename") source.record.assert_called_with('foo', to_file="filename", sampling_interval=None) - assert_equal(simulator.state.write_on_end, [(source, 'foo', "filename")]) # not sure this is what we want - won't file get over-written? + # below check needs to be re-implemented with pyNN.mock + # assert_equal(simulator.state.write_on_end, [(source, 'foo', "filename")]) # not sure this is what we want - won't file get over-written?
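Most of the edits across these test files track Neo 0.5's renaming of AnalogSignalArray to AnalogSignal, under which the segment attribute analogsignalarrays becomes analogsignals. A minimal sketch of the updated access pattern in user code (the backend, population size and recorded variable here are illustrative, not taken from the tests):

    import pyNN.nest as sim  # any PyNN backend with the same API would do

    sim.setup(timestep=0.1)
    p = sim.Population(5, sim.IF_cond_exp())
    p.record('v')
    sim.run(100.0)

    segment = p.get_data().segments[0]
    # with Neo 0.5, the list of analog traces is segment.analogsignals
    # (formerly segment.analogsignalarrays); each entry is an AnalogSignal
    vm = segment.analogsignals[0]  # equivalently: segment.filter(name='v')[0]
    print(vm.name, vm.shape, vm.sampling_rate)
    sim.end()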
diff --git a/test/unittests/test_neuron.py b/test/unittests/test_neuron.py index b8913c6f1..403bcb5e2 100644 --- a/test/unittests/test_neuron.py +++ b/test/unittests/test_neuron.py @@ -390,8 +390,8 @@ def test__record(self): # self.cells[0]._cell.record_times = self.cells[1]._cell.record_times = numpy.arange(0.0, 1.0, 0.1) # simulator.state.t = simulator.state.dt * len(self.cells[0]._cell.vtrace) # vdata = self.rv._get_current_segment(variables=['v'], filter_ids=None) - # self.assertEqual(len(vdata.analogsignalarrays), 1) - # assert_array_equal(numpy.array(vdata.analogsignalarrays[0]), + # self.assertEqual(len(vdata.analogsignals), 1) + # assert_array_equal(numpy.array(vdata.analogsignals[0]), # numpy.vstack((self.cells[0]._cell.vtrace, self.cells[1]._cell.vtrace)).T) def test__get_spikes(self): @@ -415,10 +415,10 @@ def test__get_spikes(self): # cell._cell.record_times = self.cells[1]._cell.record_times = numpy.arange(0.0, 1.0, 0.1) # simulator.state.t = simulator.state.dt * len(cell._cell.gsyn_trace['excitatory']) # gdata = self.rg._get_current_segment(variables=['gsyn_exc', 'gsyn_inh'], filter_ids=None) - # self.assertEqual(len(gdata.analogsignalarrays), 2) - # assert_array_equal(numpy.array(gdata.analogsignalarrays[0][:,0]), + # self.assertEqual(len(gdata.analogsignals), 2) + # assert_array_equal(numpy.array(gdata.analogsignals[0][:,0]), # cell._cell.gsyn_trace['excitatory']) - # assert_array_equal(numpy.array(gdata.analogsignalarrays[1][:,0]), + # assert_array_equal(numpy.array(gdata.analogsignals[1][:,0]), # cell._cell.gsyn_trace['inhibitory']) # def test__local_count(self): diff --git a/test/unittests/test_population.py b/test/unittests/test_population.py index 59c0155c3..fa76ee9de 100644 --- a/test/unittests/test_population.py +++ b/test/unittests/test_population.py @@ -429,10 +429,10 @@ def test_record_with_single_variable(self, sim=sim): p.record('v') sim.run(12.3) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(12.3 / sim.get_time_step())) + 1 - self.assertEqual(data.analogsignalarrays[0].name, 'v') - self.assertEqual(data.analogsignalarrays[0].shape, (n_values, p.size)) + self.assertEqual(data.analogsignals[0].name, 'v') + self.assertEqual(data.analogsignals[0].shape, (n_values, p.size)) @register(exclude=['hardware.brainscales']) def test_record_with_multiple_variables(self, sim=sim): @@ -440,11 +440,11 @@ def test_record_with_multiple_variables(self, sim=sim): p.record(('v', 'w', 'gsyn_exc')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 3) + self.assertEqual(len(data.analogsignals), 3) n_values = int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v', 'w', 'gsyn_exc'))) - for arr in data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, p.size)) @register() @@ -453,11 +453,11 @@ def test_record_with_v_and_spikes(self, sim=sim): p.record(('v', 'spikes')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v'))) - for arr in 
data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, p.size)) @register() @@ -493,7 +493,7 @@ def test_get_data_with_gather(self, sim=sim): p.record('v') sim.run(t1) # what if we call p.record between two run statements? - # would be nice to get an AnalogSignalArray with a non-zero t_start + # would be nice to get an AnalogSignal with a non-zero t_start # but then need to make sure we get the right initial value sim.run(t2) sim.reset() @@ -504,8 +504,8 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(data.segments), 2) seg0 = data.segments[0] - self.assertEqual(len(seg0.analogsignalarrays), 1) - v = seg0.analogsignalarrays[0] + self.assertEqual(len(seg0.analogsignals), 1) + v = seg0.analogsignals[0] self.assertEqual(v.name, 'v') num_points = int(round((t1 + t2) / sim.get_time_step())) + 1 self.assertEqual(v.shape, (num_points, p.size)) @@ -515,7 +515,7 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(seg0.spiketrains), 0) seg1 = data.segments[1] - self.assertEqual(len(seg1.analogsignalarrays), 2) + self.assertEqual(len(seg1.analogsignals), 2) w = seg1.filter(name='w')[0] self.assertEqual(w.name, 'w') num_points = int(round(t3 / sim.get_time_step())) + 1 @@ -540,11 +540,11 @@ def test_get_spikes_with_gather(self, sim=sim): self.assertEqual(len(data.segments), 2) seg0 = data.segments[0] - self.assertEqual(len(seg0.analogsignalarrays), 1) + self.assertEqual(len(seg0.analogsignals), 1) self.assertEqual(len(seg0.spiketrains), 0) seg1 = data.segments[1] - self.assertEqual(len(seg1.analogsignalarrays), 2) + self.assertEqual(len(seg1.analogsignals), 2) self.assertEqual(len(seg1.spiketrains), p.size) assert_array_equal(seg1.spiketrains[7], numpy.array([p.first_id + 7, p.first_id + 7 + 5]) % t3) diff --git a/test/unittests/test_populationview.py b/test/unittests/test_populationview.py index 3d1e81cc5..640d1a88e 100644 --- a/test/unittests/test_populationview.py +++ b/test/unittests/test_populationview.py @@ -415,10 +415,10 @@ def test_record_with_single_variable(self, sim=sim): pv.record('v') sim.run(12.3) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = int(round(12.3 / sim.get_time_step())) + 1 - self.assertEqual(data.analogsignalarrays[0].name, 'v') - self.assertEqual(data.analogsignalarrays[0].shape, (n_values, pv.size)) + self.assertEqual(data.analogsignals[0].name, 'v') + self.assertEqual(data.analogsignals[0].shape, (n_values, pv.size)) @register(exclude=['hardware.brainscales']) def test_record_with_multiple_variables(self, sim=sim): @@ -427,11 +427,11 @@ def test_record_with_multiple_variables(self, sim=sim): pv.record(('v', 'w', 'gsyn_exc')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 3) + self.assertEqual(len(data.analogsignals), 3) n_values = int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v', 'w', 'gsyn_exc'))) - for arr in data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, pv.size)) @register() @@ -441,11 +441,11 @@ def test_record_with_v_spikes(self, sim=sim): pv.record(('v', 'spikes')) sim.run(10.0) data = p.get_data(gather=True).segments[0] - self.assertEqual(len(data.analogsignalarrays), 1) + self.assertEqual(len(data.analogsignals), 1) n_values = 
int(round(10.0 / sim.get_time_step())) + 1 - names = set(arr.name for arr in data.analogsignalarrays) + names = set(arr.name for arr in data.analogsignals) self.assertEqual(names, set(('v'))) - for arr in data.analogsignalarrays: + for arr in data.analogsignals: self.assertEqual(arr.shape, (n_values, pv.size)) @register() @@ -482,7 +482,7 @@ def test_get_data_with_gather(self, sim=sim): pv.record('v') sim.run(t1) # what if we call p.record between two run statements? - # would be nice to get an AnalogSignalArray with a non-zero t_start + # would be nice to get an AnalogSignal with a non-zero t_start # but then need to make sure we get the right initial value sim.run(t2) sim.reset() @@ -493,8 +493,8 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(data.segments), 2) seg0 = data.segments[0] - self.assertEqual(len(seg0.analogsignalarrays), 1) - v = seg0.analogsignalarrays[0] + self.assertEqual(len(seg0.analogsignals), 1) + v = seg0.analogsignals[0] self.assertEqual(v.name, 'v') num_points = int(round((t1 + t2) / sim.get_time_step())) + 1 self.assertEqual(v.shape, (num_points, pv.size)) @@ -504,7 +504,7 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(seg0.spiketrains), 0) seg1 = data.segments[1] - self.assertEqual(len(seg1.analogsignalarrays), 2) + self.assertEqual(len(seg1.analogsignals), 2) w = seg1.filter(name='w')[0] self.assertEqual(w.name, 'w') num_points = int(round(t3 / sim.get_time_step())) + 1 @@ -522,7 +522,7 @@ def test_get_data_with_gather(self, sim=sim): pv.record('v') sim.run(t1) # what if we call p.record between two run statements? - # would be nice to get an AnalogSignalArray with a non-zero t_start + # would be nice to get an AnalogSignal with a non-zero t_start # but then need to make sure we get the right initial value sim.run(t2) sim.reset() @@ -533,11 +533,11 @@ def test_get_data_with_gather(self, sim=sim): self.assertEqual(len(data.segments), 2) seg0 = data.segments[0] - self.assertEqual(len(seg0.analogsignalarrays), 1) + self.assertEqual(len(seg0.analogsignals), 1) self.assertEqual(len(seg0.spiketrains), 0) seg1 = data.segments[1] - self.assertEqual(len(seg1.analogsignalarrays), 2) + self.assertEqual(len(seg1.analogsignals), 2) self.assertEqual(len(seg1.spiketrains), pv.size) assert_array_equal(seg1.spiketrains[2], numpy.array([p.first_id + 6, p.first_id + 6 + 5]) % t3) diff --git a/test/unittests/test_recording.py b/test/unittests/test_recording.py index 728be2809..0edf6f735 100644 --- a/test/unittests/test_recording.py +++ b/test/unittests/test_recording.py @@ -60,6 +60,11 @@ class MockNeoIO(object): class MockRecorder(recording.Recorder): _simulator = MockSimulator(mpi_rank=0) + def _get_current_segment(self, filter_ids=None, variables='all', clear=False): + segment = Mock() + segment.analogsignals = [Mock(), Mock()] + return segment + class MockPopulation(object): size = 11 @@ -159,7 +164,6 @@ def test_filter_recorded(): def test_get(): p = MockPopulation() r = MockRecorder(p) - r._get_current_segment = Mock() data = r.get('spikes') assert_equal(data.name, p.label) assert_equal(data.description, p.describe())
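For reference, the new test_simple_stochastic_synapse scenario added above reduces to the following usage pattern for SimpleStochasticSynapse. This is a condensed sketch of that test, not an additional test; the backend choice is illustrative, while the parameter values are those used in the test:

    import numpy as np
    import pyNN.nest as sim  # illustrative backend choice

    sim.setup(min_delay=0.5)
    source = sim.Population(1, sim.SpikeSourceArray(spike_times=np.arange(2.5, 1000.0, 5.0)))
    neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0))
    # each connection transmits an incoming spike with its own probability p
    stochastic = sim.SimpleStochasticSynapse(weight=0.5,
                                             p=np.array([[0.0, 0.5, 0.5, 1.0]]))
    prj = sim.Projection(source, neurons, sim.AllToAllConnector(),
                         synapse_type=stochastic)
    neurons.record('gsyn_exc')
    sim.run(1000.0)
    # excitatory conductance traces: stochastic transmission shows up as
    # missing conductance transients for p < 1
    gsyn = neurons.get_data().segments[0].analogsignals[0]
    sim.end()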