Skip to content

Commit

Permalink
Merge pull request #981 from brian-team/float32_support
Browse files Browse the repository at this point in the history
Float32 support
  • Loading branch information
mstimberg committed Aug 29, 2018
2 parents e32a9e6 + 9732bd1 commit b6a3a6c
Show file tree
Hide file tree
Showing 41 changed files with 598 additions and 399 deletions.
24 changes: 15 additions & 9 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,18 +46,24 @@ matrix:
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="27" DO_CONDA_BUILD='no' SPLIT_RUN=2 ARCHITECTURE="x86_64"
os: osx
# - python: "2.7"
# env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="27" DO_CONDA_BUILD='no' SPLIT_RUN=1 ARCHITECTURE="x86"
# os: linux
# - python: "2.7"
# env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="27" DO_CONDA_BUILD='yes' SPLIT_RUN=2 ARCHITECTURE="x86"
# os: linux
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="27" DO_CONDA_BUILD='yes' SPLIT_RUN=1 ARCHITECTURE="x86_64" FLOAT_DTYPE_32=yes
os: osx
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="27" DO_CONDA_BUILD='no' SPLIT_RUN=2 ARCHITECTURE="x86_64" FLOAT_DTYPE_32=yes
os: osx
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=yes REPORT_COVERAGE=no SPLIT_RUN=1 ARCHITECTURE="x86_64"
os: linux
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=yes REPORT_COVERAGE=no SPLIT_RUN=2 ARCHITECTURE="x86_64"
os: linux
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=yes REPORT_COVERAGE=no SPLIT_RUN=1 ARCHITECTURE="x86_64" FLOAT_DTYPE_32=yes
os: linux
- python: "2.7"
env: PYTHON="2.7" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=yes REPORT_COVERAGE=no SPLIT_RUN=2 ARCHITECTURE="x86_64" FLOAT_DTYPE_32=yes
os: linux
- python: "3.5"
env: PYTHON="3.5" STANDALONE=no CYTHON=yes MINIMAL_VERSIONS=no REPORT_COVERAGE=no CONDA_PY="35" DO_CONDA_BUILD='yes' ARCHITECTURE="x86_64"
os: linux
Expand All @@ -77,9 +83,9 @@ matrix:
- python: "2.7"
env: PYTHON="2.7" STANDALONE=yes CYTHON=no MINIMAL_VERSIONS=no REPORT_COVERAGE=yes DO_CONDA_BUILD='no' ARCHITECTURE="x86_64"
os: linux
# - python: "2.7"
# env: PYTHON="2.7" STANDALONE=yes CYTHON=no MINIMAL_VERSIONS=no REPORT_COVERAGE=no DO_CONDA_BUILD='no' ARCHITECTURE="x86"
# os: linux
- python: "2.7"
env: PYTHON="2.7" STANDALONE=yes CYTHON=no MINIMAL_VERSIONS=no REPORT_COVERAGE=yes DO_CONDA_BUILD='no' ARCHITECTURE="x86_64" FLOAT_DTYPE_32=yes
os: linux
- python: "3.6"
env: PYTHON="3.6" STANDALONE=yes CYTHON=no MINIMAL_VERSIONS=no REPORT_COVERAGE=no DO_CONDA_BUILD='no' ARCHITECTURE="x86_64"
os: linux
Expand Down
24 changes: 24 additions & 0 deletions appveyor.yml
Original file line number Diff line number Diff line change
Expand Up @@ -84,11 +84,35 @@ environment:
DO_CONDA_BUILD: "FALSE"
SPLIT_RUN: "2"

- PYTHON: "C:\\Miniconda-x64"
PYTHON_VERSION: "2.7"
PYTHON_ARCH: "64"
platform: x64
STANDALONE: "FALSE"
CONDA_PY: "27"
DO_CONDA_BUILD: "FALSE"
FLOAT_DTYPE_32: "TRUE"
SPLIT_RUN: "1"

- PYTHON: "C:\\Miniconda-x64"
PYTHON_VERSION: "2.7"
PYTHON_ARCH: "64"
platform: x64
STANDALONE: "FALSE"
DO_CONDA_BUILD: "FALSE"
FLOAT_DTYPE_32: "TRUE"
SPLIT_RUN: "2"
- PYTHON: "C:\\Miniconda-x64"
PYTHON_VERSION: "2.7"
PYTHON_ARCH: "64"
platform: x64
STANDALONE: "TRUE"
- PYTHON: "C:\\Miniconda-x64"
PYTHON_VERSION: "2.7"
PYTHON_ARCH: "64"
platform: x64
STANDALONE: "TRUE"
FLOAT_DTYPE_32: "TRUE"

install:
# Add the paths
Expand Down
22 changes: 13 additions & 9 deletions brian2/codegen/generators/GSL_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -764,7 +764,8 @@ def add_meta_variables(self, options):
try:
N = int(self.variables['N'].get_value())
self.owner.variables.add_array('_last_timestep', size=N,
values=np.ones(N)*options['dt_start'])
values=np.ones(N)*options['dt_start'],
dtype=np.float64)
except KeyError:
# has already been run
pass
Expand Down Expand Up @@ -855,7 +856,7 @@ def translate(self, code, dtype): # TODO: it's not so nice we have to copy the c
self.function_names = self.find_function_names()

scalar_code, vector_code, kwds = self.generator.translate_statement_sequence(scalar_statements,
vector_statements)
vector_statements)

############ translate code for GSL

Expand All @@ -864,7 +865,7 @@ def translate(self, code, dtype): # TODO: it's not so nice we have to copy the c
for code in code_list:
m = re.search('\[(\w+)\]', code)
if m is not None:
if m.group(1)!='0' and m.group(1)!='_idx':
if m.group(1) != '0' and m.group(1) != '_idx':
from brian2.stateupdaters.base import UnsupportedEquationsException
raise UnsupportedEquationsException(("Equations result in state "
"updater code with indexing "
Expand All @@ -880,7 +881,7 @@ def translate(self, code, dtype): # TODO: it's not so nice we have to copy the c

# analyze all needed variables; if not in self.variables: put in separate dic.
# also keep track of variables needed for scalar statements and vector statements
other_variables = self.find_undefined_variables(scalar_statements[None]+\
other_variables = self.find_undefined_variables(scalar_statements[None] +
vector_statements[None])
variables_in_scalar = self.find_used_variables(scalar_statements[None],
other_variables)
Expand Down Expand Up @@ -1006,7 +1007,7 @@ def __getattr__(self, item):
'end_struct': '\n};',
'open_cast': '(',
'close_cast': ')',
'diff_var_declaration': 'const double '}
'diff_var_declaration': 'const scalar '}

def c_data_type(self, dtype):
    """Return the C type name for *dtype*.

    Pure delegation: the wrapped code generator (``self.generator`` —
    presumably the target-language generator this GSL wrapper decorates;
    confirm against the enclosing class) performs the actual mapping.
    """
    translated = self.generator.c_data_type(dtype)
    return translated
Expand All @@ -1016,16 +1017,19 @@ def initialize_array(self, varname, values):
return 'double const %s[] = {%s};' % (varname, value_list)

def var_replace_diff_var_lhs(self, var, ind):
scalar_dtype = self.c_data_type(prefs.core.default_float_dtype)
f = 'f[{ind}]'.format(ind=ind)
try:
if 'unless refractory' in self.variable_flags[var]:
return {'_gsl_{var}_f{ind}'.format(var=var,ind=ind) : f,
'double _gsl_{var}_f{ind};'.format(var=var,ind=ind): '',
'double {f};'.format(f=f): ''} # in case the replacement
# of _gsl_var_find to f[ind] happens first
'{scalar_dtype} _gsl_{var}_f{ind};'.format(var=var, ind=ind,
scalar_dtype=scalar_dtype): '',
'{scalar_dtype} {f};'.format(f=f, scalar_dtype=scalar_dtype): ''} # in case the replacement
# of _gsl_var_find to f[ind] happens first
except KeyError:
pass
return {'const double _gsl_{var}_f{ind}'.format(var=var, ind=ind) : f}
return {'const {scalar_dtype} _gsl_{var}_f{ind}'.format(scalar_dtype=scalar_dtype,
var=var, ind=ind) : f}

def var_init_lhs(self, var, type):
    """Build the left-hand side of a variable initialization.

    Simply prefixes the variable name with its type string; the caller is
    responsible for any separating whitespace inside *type*. (The parameter
    name ``type`` shadows the builtin but is kept for interface
    compatibility.)
    """
    return '{}{}'.format(type, var)
Expand Down
6 changes: 2 additions & 4 deletions brian2/core/core_preferences.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
Definitions, documentation, default values and validation functions for core
Brian preferences.
'''
from numpy import float64, int32
from numpy import float32, float64, int32

from brian2.core.preferences import BrianPreference, prefs

Expand All @@ -14,16 +14,14 @@ def dtype_repr(dtype):


def default_float_dtype_validator(dtype):
return dtype is float64
return dtype in [float32, float64]


prefs.register_preferences('core', 'Core Brian preferences',
default_float_dtype=BrianPreference(
default=float64,
docs='''
Default dtype for all arrays of scalars (state variables, weights, etc.).
Currently, this is not supported (only float64 can be used).
''',
representor=dtype_repr,
validator=default_float_dtype_validator,
Expand Down
7 changes: 4 additions & 3 deletions brian2/core/variables.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,7 +468,8 @@ def get_len(self):
return self.size

def get_addressable_value(self, name, group):
return VariableView(name=name, variable=self, group=group, dimensions=None)
return VariableView(name=name, variable=self, group=group,
dimensions=None)

def get_addressable_value_with_unit(self, name, group):
return VariableView(name=name, variable=self, group=group,
Expand Down Expand Up @@ -648,7 +649,7 @@ def __init__(self, name, owner, expr, device, dimensions=DIMENSIONLESS,

def get_addressable_value(self, name, group):
return VariableView(name=name, variable=self, group=group,
dimensions=None)
dimensions=DIMENSIONLESS)

def get_addressable_value_with_unit(self, name, group):
return VariableView(name=name, variable=self, group=group,
Expand Down Expand Up @@ -828,7 +829,7 @@ def get_item(self, item, level=0, namespace=None):
else:
values = self.get_with_index_array(item)

if self.dim is None:
if self.dim is DIMENSIONLESS:
return values
else:
return Quantity(values, self.dim)
Expand Down
5 changes: 2 additions & 3 deletions brian2/devices/cpp_standalone/templates/objects.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,7 @@ const int _num_{{name}} = {{N}};
{% for S in synapses | sort(attribute='name') %}
// {{S.name}}
{% for path in S._pathways | sort(attribute='name') %}
SynapticPathway<double> {{path.name}}(
{{dynamic_array_specs[path.variables['delay']]}},
SynapticPathway {{path.name}}(
{{dynamic_array_specs[path.synapse_sources]}},
{{path.source.start}}, {{path.source.stop}});
{% endfor %}
Expand Down Expand Up @@ -305,7 +304,7 @@ extern const int _num_{{name}};
{% for S in synapses | sort(attribute='name') %}
// {{S.name}}
{% for path in S._pathways | sort(attribute='name') %}
extern SynapticPathway<double> {{path.name}};
extern SynapticPathway {{path.name}};
{% endfor %}
{% endfor %}

Expand Down
17 changes: 6 additions & 11 deletions brian2/devices/cpp_standalone/templates/synapses_classes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,25 +12,20 @@

#include "brianlib/spikequeue.h"

template<class scalar> class SynapticPathway;

template <class scalar>
class SynapticPathway
{
public:
int Nsource, Ntarget, _nb_threads;
std::vector<scalar> &delay;
std::vector<int> &sources;
std::vector<int> all_peek;
std::vector< CSpikeQueue<scalar> * > queue;
SynapticPathway(std::vector<scalar>& _delay, std::vector<int> &_sources,
int _spikes_start, int _spikes_stop)
: delay(_delay), sources(_sources)
std::vector< CSpikeQueue * > queue;
SynapticPathway(std::vector<int> &_sources, int _spikes_start, int _spikes_stop)
: sources(_sources)
{
_nb_threads = {{ openmp_pragma('get_num_threads') }};

for (int _idx=0; _idx < _nb_threads; _idx++)
queue.push_back(new CSpikeQueue<scalar>(_spikes_start, _spikes_stop));
queue.push_back(new CSpikeQueue(_spikes_start, _spikes_stop));
};

~SynapticPathway()
Expand Down Expand Up @@ -65,7 +60,7 @@ class SynapticPathway
return &all_peek;
}

void prepare(int n_source, int n_target, scalar *real_delays, int n_delays,
template <typename scalar> void prepare(int n_source, int n_target, scalar *real_delays, int n_delays,
int *sources, int n_synapses, double _dt)
{
Nsource = n_source;
Expand All @@ -86,7 +81,7 @@ class SynapticPathway
else if (n_delays == 1)
queue[{{ openmp_pragma('get_thread_num') }}]->prepare(&real_delays[0], 1, &sources[padding], length, _dt);
else // no synapses
queue[{{ openmp_pragma('get_thread_num') }}]->prepare(NULL, 0, &sources[padding], length, _dt);
queue[{{ openmp_pragma('get_thread_num') }}]->prepare((scalar *)NULL, 0, &sources[padding], length, _dt);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,16 @@
{% set pathobj = owner.name %}
void _run_{{codeobj_name}}() {
using namespace brian;
{{pointers_lines|autoindent}}

double* real_delays = {{pathobj}}.delay.empty() ? 0 : &({{pathobj}}.delay[0]);
{{pointers_lines|autoindent}}
{% set scalar = c_data_type(owner.variables['delay'].dtype) %}
std::vector<{{scalar}}> &real_delays = {{get_array_name(owner.variables['delay'], access_data=False)}};
{{scalar}}* real_delays_data = real_delays.empty() ? 0 : &(real_delays[0]);
int32_t* sources = {{pathobj}}.sources.empty() ? 0 : &({{pathobj}}.sources[0]);
const unsigned int n_delays = {{pathobj}}.delay.size();
const unsigned int n_delays = real_delays.size();
const unsigned int n_synapses = {{pathobj}}.sources.size();
{{pathobj}}.prepare({{constant_or_scalar('_n_sources', variables['_n_sources'])}},
{{constant_or_scalar('_n_targets', variables['_n_targets'])}},
real_delays, n_delays, sources,
real_delays_data, n_delays, sources,
n_synapses,
{{_source_dt}});
}
Expand All @@ -22,6 +23,8 @@ void _run_{{codeobj_name}}() {
#ifndef _INCLUDED_{{codeobj_name}}
#define _INCLUDED_{{codeobj_name}}

#include "objects.h"

void _run_{{codeobj_name}}();

#endif
Expand Down
5 changes: 5 additions & 0 deletions brian2/groups/neurongroup.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
'''
This model defines the `NeuronGroup`, the core of most simulations.
'''
import collections
import string

import numpy as np
Expand Down Expand Up @@ -399,6 +400,10 @@ def __init__(self, N, model,
codeobj_class=None):
Group.__init__(self, dt=dt, clock=clock, when='start', order=order,
name=name)
if dtype is None:
dtype = {}
if isinstance(dtype, collections.MutableMapping):
dtype['lastspike'] = self._clock.variables['t'].dtype

self.codeobj_class = codeobj_class

Expand Down
6 changes: 4 additions & 2 deletions brian2/input/spikegeneratorgroup.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,8 @@ def __init__(self, N, indices, times, dt=None, clock=None,
# standard variables
self.variables.add_constant('N', value=N)
self.variables.add_array('period', dimensions=second.dim, size=1,
constant=True, read_only=True, scalar=True)
constant=True, read_only=True, scalar=True,
dtype=self._clock.variables['t'].dtype)
self.variables.add_arange('i', N)
self.variables.add_dynamic_array('spike_number',
values=np.arange(len(indices)),
Expand All @@ -113,7 +114,8 @@ def __init__(self, N, indices, times, dt=None, clock=None,
read_only=True, constant=True)
self.variables.add_dynamic_array('spike_time', values=times, size=len(times),
dimensions=second.dim, index='spike_number',
read_only=True, constant=True)
read_only=True, constant=True,
dtype=self._clock.variables['t'].dtype)
self.variables.add_array('_spikespace', size=N+1, dtype=np.int32)
self.variables.add_array('_lastindex', size=1, values=0, dtype=np.int32,
read_only=True, scalar=True)
Expand Down
13 changes: 8 additions & 5 deletions brian2/monitors/ratemonitor.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,9 @@ class PopulationRateMonitor(Group, CodeRunner):
``source.name+'_ratemonitor_0'``, etc.
codeobj_class : class, optional
The `CodeObject` class to run code with.
dtype : dtype, optional
The dtype to use to store the ``rate`` variable. Defaults to
`~numpy.float64`, i.e. double precision.
Notes
-----
Currently, this monitor can only monitor the instantaneous firing rates at
Expand All @@ -35,8 +37,8 @@ class PopulationRateMonitor(Group, CodeRunner):
'''
invalidates_magic_network = False
add_to_magic_network = True
def __init__(self, source, name='ratemonitor*',
codeobj_class=None):
def __init__(self, source, name='ratemonitor*', codeobj_class=None,
dtype=np.float64):

#: The group we are recording from
self.source = source
Expand All @@ -55,9 +57,10 @@ def __init__(self, source, name='ratemonitor*',
self.variables.add_constant('_source_stop', stop)
self.variables.add_reference('_spikespace', source)
self.variables.add_dynamic_array('rate', size=0, dimensions=hertz.dim,
read_only=True)
read_only=True, dtype=dtype)
self.variables.add_dynamic_array('t', size=0, dimensions=second.dim,
read_only=True)
read_only=True,
dtype=self._clock.variables['t'].dtype)
self.variables.add_reference('_num_source_neurons', source, 'N')
self.variables.add_array('N', dtype=np.int32, size=1,
scalar=True, read_only=True)
Expand Down
Loading

0 comments on commit b6a3a6c

Please sign in to comment.